content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def test_wint_exceptions():
    """Verify wint raises TypeError on complex-valued dependent vectors."""
    vec = np.array([0.99, 1 + 3j, 0.5])
    wobj = std_wobj(dep_name="wobj_a", dep_vector=vec)
    AE(peng.wint, TypeError, "Cannot convert complex to integer", wobj)
def set_task_state(success: bool, task_id: str):
    """Update the state of the Airflow task.

    :param success: whether the task was successful or not.
    :param task_id: the task id.
    :return: None.
    :raises AirflowException: when the task did not succeed.
    """
    if not success:
        failure_message = f"{task_id} failed"
        logging.error(failure_message)
        raise AirflowException(failure_message)
    logging.info(f"{task_id} success")
def has_columns(df, columns):
    """Check whether a DataFrame contains all required columns.

    Args:
        df (pd.DataFrame): DataFrame to inspect.
        columns (list of str): column names that must be present.

    Returns:
        bool: True if every listed column is present in the DataFrame.
    """
    missing = [column for column in columns if column not in df.columns]
    for column in missing:
        # Report each absent column so the caller can see what is wrong.
        print("Missing column: {} in DataFrame".format(column))
    return not missing
def render(template, **kwargs):
    """Render the named Jinja template with the standard default context."""
    tmpl = JINJA_ENV.get_template(template)
    # Defaults are supplied positionally here; callers may add extra keys
    # via **kwargs (duplicating a default key is a TypeError, as before).
    return tmpl.render(
        autograde=autograde,
        css=CSS,
        favicon=FAVICON,
        timestamp=timestamp_utc_iso(),
        **kwargs
    )
def _cytof_analysis_derivation(context: DeriveFilesContext) -> DeriveFilesResult:
    """Generate a combined CSV for CyTOF analysis data.

    For each cell-count kind (assignment / compartment / profiling), fetches
    every per-sample CSV referenced in the trial metadata, transposes it into
    one row keyed by CIMAC id, and concatenates the rows into one combined
    CSV artifact per kind.

    :param context: derivation context providing trial_metadata and
        fetch_artifact (project type -- exact contract assumed from usage).
    :return: DeriveFilesResult with one artifact per cell-count kind and the
        unmodified trial metadata.
    :raises Exception: if a referenced object URL cannot be fetched.
    """
    # Flatten nested assay records to one row per CyTOF record, carrying the
    # protocol id along as metadata for each row.
    cell_counts_analysis_csvs = pd.json_normalize(
        data=context.trial_metadata,
        record_path=["assays", "cytof", "records"],
        meta=[prism.PROTOCOL_ID_FIELD_NAME],
    )
    artifacts = []
    for combined_f_kind in [
        "cell_counts_assignment",
        "cell_counts_compartment",
        "cell_counts_profiling",
    ]:
        res_df = pd.DataFrame()
        for index, row in cell_counts_analysis_csvs.iterrows():
            obj_url = row[f"output_files.{combined_f_kind}.object_url"]
            cell_counts_csv = context.fetch_artifact(obj_url, True)
            if not cell_counts_csv:
                raise Exception(
                    f"Failed to read {obj_url} building Cytof analysis derivation"
                )
            df = pd.read_csv(cell_counts_csv)
            # Each cell_counts_... file consist of just records for one sample.
            # The first column of each cell_counts_csv (CellSubset) contains cell group types
            # and the second contains counts for those types.
            # Create a new, transposed dataframe with cell group types as column headers
            # and a single row of cell count data.
            df = df.set_index("CellSubset")
            df = df.drop(
                columns="Unnamed: 0", axis=1
            )  # Cell counts files contain an unnamed index column
            df = df.transpose()
            # and adding metadata, so we can distinguish different samples
            # (the single transposed row is labeled "N" by pandas; rename it
            # to this sample's CIMAC id).
            df = df.rename(index={"N": row["cimac_id"]})
            df["cimac_id"] = row["cimac_id"]
            df["cimac_participant_id"] = participant_id_from_cimac(row["cimac_id"])
            df[prism.PROTOCOL_ID_FIELD_NAME] = row[prism.PROTOCOL_ID_FIELD_NAME]
            # finally combine them
            res_df = pd.concat([res_df, df])
        # and add as artifact
        artifacts.append(
            _build_artifact(
                context=context,
                file_name=f"combined_{combined_f_kind}.csv",
                data=res_df.to_csv(index=False),
                data_format="csv",  # confusing, but right
                file_type=combined_f_kind.replace("_", " "),
                include_upload_type=True,
            )
        )
    return DeriveFilesResult(
        artifacts, context.trial_metadata  # return metadata without updates
    )
def get_field(name, data):
    """
    Return a valid Field built from the given data.

    :param name: field name forwarded to the Field constructor.
    :param data: either an AbstractField instance (returned unchanged) or a
        mapping with an optional 'type' key (defaults to 'object').
    :raises RuntimeError: if the 'type' value is not recognized.
    """
    if isinstance(data, AbstractField):
        return data
    data = keys_to_string(data)
    # Renamed from `type` to avoid shadowing the builtin.
    field_type = data.get('type', 'object')
    if field_type == "object":
        # Mappings that carry an '_all' entry describe a whole document.
        if '_all' in data:
            return DocumentObjectField(name=name, **data)
        return ObjectField(name=name, **data)
    # Dispatch table replaces the long if/elif chain; one class per type.
    field_classes = {
        "string": StringField,
        "boolean": BooleanField,
        "short": ShortField,
        "integer": IntegerField,
        "long": LongField,
        "float": FloatField,
        "double": DoubleField,
        "ip": IpField,
        "date": DateField,
        "multi_field": MultiField,
        "geo_point": GeoPointField,
        "attachment": AttachmentField,
    }
    try:
        field_class = field_classes[field_type]
    except KeyError:
        raise RuntimeError("Invalid type: %s" % field_type)
    return field_class(name=name, **data)
def data_context_connectivity_context_connectivity_serviceuuid_namevalue_name_get(uuid, value_name):  # noqa: E501
    """Look up a name-and-value entry of a connectivity service.

    returns tapi.common.NameAndValue  # noqa: E501

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param value_name: Id of name
    :type value_name: str

    :rtype: TapiCommonNameAndValue
    """
    # Placeholder body emitted by the API scaffolding; no real lookup yet.
    return 'do some magic!'
def update_google_analytics(context, request, ga_config, filename, file_size_downloaded,
                            file_at_id, lab, user_uuid, user_groups, file_experiment_type, file_type='other'):
    """ Helper for @@download that updates GA in response to a download.

    Builds a Google Analytics Measurement Protocol (v1) payload combining an
    Event and a purchase Transaction/Product, then POSTs it to the GA collect
    endpoint. Failures are logged, never raised to the caller.

    :param context: downloaded item context; jsonld_type() supplies the
        product category.
    :param request: current request (cookies, host, range, remote_addr,
        user_agent and url are read from it).
    :param ga_config: parsed ga_config.json with hostnameTrackerIDMapping,
        dimensionNameMap and metricNameMap.
    :param filename: file name; used as event label and product name.
    :param file_size_downloaded: bytes downloaded; used as event value.
    :param file_at_id: the file's @id; used as product SKU.
    :param lab: lab dict; display_title becomes the product brand.
    :param user_uuid: authenticated user uuid, sent as GA `uid` when present.
    :param user_groups: user's groups; JSON-encoded into a custom dimension.
    :param file_experiment_type: experiment type custom dimension value.
    :param file_type: file type; used as product variant.
    :raises Exception: only if no tracker id can be resolved for the host.
    """
    ga_cid = request.cookies.get("clientIdentifier")
    if not ga_cid:  # Fallback, potentially can stop working as GA is updated
        ga_cid = request.cookies.get("_ga")
        if ga_cid:
            # Strip the "GA1.2." version/domain prefix from the _ga cookie.
            ga_cid = ".".join(ga_cid.split(".")[2:])
    ga_tid = ga_config["hostnameTrackerIDMapping"].get(request.host,
                                                       ga_config["hostnameTrackerIDMapping"].get("default"))
    if ga_tid is None:
        raise Exception("No valid tracker id found in ga_config.json > hostnameTrackerIDMapping")
    # We're sending 2 things here, an Event and a Transaction of a Product. (Reason 1 for redundancies)
    # Some fields/names are re-used for multiple things, such as filename for event label + item name dimension + product name + page title dimension (unusued) + ...
    ga_payload = {
        "v": 1,
        "tid": ga_tid,
        "t": "event",  # Hit type. Could also be event, transaction, pageview, etc.
        # Override IP address. Else will send detail about EC2 server which not too useful.
        "uip": request.remote_addr,
        "ua": request.user_agent,
        "dl": request.url,
        "dt": filename,
        # This cid below is a ~ random ID/number (?). Used as fallback, since one is required
        # if don't provided uid. While we still allow users to not be logged in,
        # should at least be able to preserve/track their anon downloads..
        # '555' is in examples and seemed to be referred to as example for anonymous sessions in some Google doc.
        # But not 100% sure and wasn't explicitly stated to be "555 for anonymous sessions" aside from usage in example.
        # Unsure if groups under 1 session or not.
        "cid": "555",
        "an": "4DN Data Portal EC2 Server",  # App name, unsure if used yet
        "ec": "Serverside File Download",  # Event Category
        "ea": "Range Query" if request.range else "File Download",  # Event Action
        "el": filename,  # Event Label
        "ev": file_size_downloaded,  # Event Value
        # Product fields
        "pa": "purchase",
        "ti": str(uuid4()),  # We need to send a unique transaction id along w. 'transactions' like purchases
        "pr1id": file_at_id,  # Product ID/SKU
        "pr1nm": filename,  # Product Name
        "pr1br": lab.get("display_title"),  # Product Branch
        "pr1qt": 1,  # Product Quantity
        # Product Category from @type, e.g. "File/FileProcessed"
        "pr1ca": "/".join([ty for ty in reversed(context.jsonld_type()[:-1])]),
        # Product "Variant" (supposed to be like black, gray, etc), we repurpose for filetype for reporting
        "pr1va": file_type
        # "other" MATCHES THAT IN `file_type_detaild` calc property, since file_type_detailed is used on frontend when performing "Select All" files.
    }
    # Custom dimensions
    # See https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#pr_cm_
    if "name" in ga_config["dimensionNameMap"]:
        ga_payload["dimension" + str(ga_config["dimensionNameMap"]["name"])] = filename
        ga_payload["pr1cd" + str(ga_config["dimensionNameMap"]["name"])] = filename
    if "experimentType" in ga_config["dimensionNameMap"]:
        ga_payload["dimension" + str(ga_config["dimensionNameMap"]["experimentType"])] = file_experiment_type or None
        ga_payload["pr1cd" + str(ga_config["dimensionNameMap"]["experimentType"])] = file_experiment_type or None
    if "filesize" in ga_config["metricNameMap"]:
        ga_payload["metric" + str(ga_config["metricNameMap"]["filesize"])] = file_size_downloaded
        ga_payload["pr1cm" + str(ga_config["metricNameMap"]["filesize"])] = file_size_downloaded
    if "downloads" in ga_config["metricNameMap"]:
        # Range queries don't count as downloads for the metric.
        ga_payload["metric" + str(ga_config["metricNameMap"]["downloads"])] = 0 if request.range else 1
        ga_payload["pr1cm" + str(ga_config["metricNameMap"]["downloads"])] = 0 if request.range else 1
    # client id (`cid`) or user id (`uid`) is required. uid shall be user uuid.
    # client id might be gotten from Google Analytics cookie, but not stable to use and wont work on programmatic requests...
    if user_uuid:
        ga_payload['uid'] = user_uuid
    if user_groups:
        groups_json = json.dumps(user_groups, separators=(',', ':'))  # Compact JSON; aligns w. what's passed from JS.
        # NOTE(review): unlike "name"/"experimentType" above, the "userGroups"
        # key is read without a membership check -- a KeyError here is caught
        # by nothing. Verify ga_config always defines it.
        ga_payload["dimension" + str(ga_config["dimensionNameMap"]["userGroups"])] = groups_json
        ga_payload["pr1cd" + str(ga_config["dimensionNameMap"]["userGroups"])] = groups_json
    if ga_cid:
        ga_payload['cid'] = ga_cid
    # Catch error here
    try:
        resp = requests.post(
            "https://ssl.google-analytics.com/collect?z=" + str(datetime.datetime.utcnow().timestamp()),
            data=urllib.parse.urlencode(ga_payload),
            timeout=5.0,
            headers={'user-agent': ga_payload['ua']}
        )
    except Exception as e:
        log.error('Exception encountered posting to GA: %s' % e)
def open_alleles_file(N0, n, U, Es, mmax, mwt, mutmax, rep):
    """
    Open the alleles output CSV for this simulation run and return the
    writable file handle (the caller is responsible for closing it).
    """
    # Encode every simulation parameter into the file name so runs never clash.
    sim_id = 'N%d_n%d_U%.6f_Es%.5f_mmax%.2f_mwt%.2f_mutmax%d_rep%d' % (N0, n, U, Es, mmax, mwt, mutmax, rep)
    data_dir = '../SIM_DATA'
    return open("%s/alleles_%s.csv" % (data_dir, sim_id), "w")
def approximateWcs(wcs, camera_wrapper=None, detector_name=None,
                   obs_metadata=None,
                   order=3, nx=20, ny=20, iterations=3,
                   skyTolerance=0.001*LsstGeom.arcseconds, pixelTolerance=0.02):
    """
    Approximate an existing WCS as a TAN-SIP WCS

    The fit is performed by evaluating the WCS at a uniform grid of
    points within a bounding box.

    @param[in] wcs  wcs to approximate
    @param[in] camera_wrapper  is an instantiation of GalSimCameraWrapper
    @param[in] detector_name  is the name of the detector
    @param[in] obs_metadata  is an ObservationMetaData characterizing
               the telescope pointing
    @param[in] order  order of SIP fit
    @param[in] nx  number of grid points along x
    @param[in] ny  number of grid points along y
    @param[in] iterations  number of times to iterate over fitting
    @param[in] skyTolerance  maximum allowed difference in world
               coordinates between input wcs and approximate wcs
               (default is 0.001 arcsec)
    @param[in] pixelTolerance  maximum allowed difference in pixel
               coordinates between input wcs and approximate wcs
               (default is 0.02 pixels)

    @return the fit TAN-SIP WCS

    NOTE(review): skyTolerance and pixelTolerance are accepted but never
    referenced in this implementation -- confirm whether tolerance checking
    was intended here.
    """
    tanWcs = wcs
    # create a matchList consisting of a grid of points covering the bbox
    refSchema = afwTable.SimpleTable.makeMinimalSchema()
    refCoordKey = afwTable.CoordKey(refSchema["coord"])
    refCat = afwTable.SimpleCatalog(refSchema)
    sourceSchema = afwTable.SourceTable.makeMinimalSchema()
    SingleFrameMeasurementTask(schema=sourceSchema)  # expand the schema
    sourceCentroidKey = afwTable.Point2DKey(sourceSchema["slot_Centroid"])
    sourceCat = afwTable.SourceCatalog(sourceSchema)
    # 20 March 2017
    # the 'try' block is how it works in swig;
    # the 'except' block is how it works in pybind11
    try:
        matchList = afwTable.ReferenceMatchVector()
    except AttributeError:
        matchList = []
    bbox = camera_wrapper.getBBox(detector_name)
    bboxd = LsstGeom.Box2D(bbox)
    # Build one reference/source pair per grid point: the reference carries
    # the sky position from the input WCS, the source carries the pixel
    # position, and the pair becomes a match for the SIP fitter.
    for x in np.linspace(bboxd.getMinX(), bboxd.getMaxX(), nx):
        for y in np.linspace(bboxd.getMinY(), bboxd.getMaxY(), ny):
            pixelPos = LsstGeom.Point2D(x, y)
            ra, dec = camera_wrapper.raDecFromPixelCoords(
                np.array([x]), np.array([y]),
                detector_name,
                obs_metadata=obs_metadata,
                epoch=2000.0,
                includeDistortion=True)
            skyCoord = LsstGeom.SpherePoint(ra[0], dec[0], LsstGeom.degrees)
            refObj = refCat.addNew()
            refObj.set(refCoordKey, skyCoord)
            source = sourceCat.addNew()
            source.set(sourceCentroidKey, pixelPos)
            matchList.append(afwTable.ReferenceMatch(refObj, source, 0.0))
    # The TAN-SIP fitter is fitting x and y separately, so we have to
    # iterate to make it converge
    for _ in range(iterations):
        sipObject = makeCreateWcsWithSip(matchList, tanWcs, order, bbox)
        tanWcs = sipObject.getNewWcs()
    # NOTE(review): tanWcs was already set to getNewWcs() in the last loop
    # iteration; this second call appears redundant -- confirm it has no
    # side effects before simplifying.
    fitWcs = sipObject.getNewWcs()
    return fitWcs
def parse_table_column_names(table_definition_text):
    """
    Parse the table and column names from the given SQL table
    definition. Return (table-name, (col1-name, col2-name, ...)).

    Naïvely assumes that ","s separate column definitions regardless of
    quoting, escaping, and context.
    """
    parsed = _table_def_pattern.match(table_definition_text)
    if parsed is None:
        raise ValueError('Cannot parse table definition from: {!r}'
                         .format(table_definition_text))
    table_name = parsed[1]
    # The first whitespace-delimited word of each comma-separated column
    # definition is taken as the column name.
    column_names = tuple(
        definition.split(maxsplit=1)[0]
        for definition in parsed[2].split(',')
    )
    return (table_name, column_names)
def temporary_macro(tag, macro, app, app_version, nevents):
    """Create temporary macro.

    Checks out *macro* from the TDRAnalysis git repo at *tag*, copies it to
    /tmp with run-control commands appended, yields the temp file path and
    removes the file when the consumer is done.

    :param tag: git tag to check out.
    :param macro: macro path relative to the TDRAnalysis repo.
    :param app: application name (mapped through app_map for the macro text).
    :param app_version: app version string; versions starting with '3' get no
        extra commands appended.
    :param nevents: number of events substituted into the macro template.
    :raises Exception: if the macro file does not exist at the tag.
    """
    app_map = {'BACCARAT': 'Bacc'}
    if app_version.startswith('3'):
        ## mdc2 no longer requires these
        macro_extras = Template("")  # dedent("""
        # /$app/beamOn $nevents
        # exit
        # """))
    else:
        macro_extras = Template(dedent("""
            /control/getEnv SEED
            /$app/randomSeed {SEED}
            /$app/beamOn $nevents
            exit
            """))
    lzprod_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    git_dir = os.path.join(lzprod_root, 'git', 'TDRAnalysis')
    macro = os.path.join(git_dir, macro)
    git = Git(git_dir)
    git.fetch('origin')
    git.checkout(tag)
    if not os.path.isfile(macro):
        raise Exception("Macro file '%s' doesn't exist in tag %s" % (macro, tag))
    with open(os.path.join('/tmp', os.path.basename(macro)), 'wb') as tmp_macro, \
            open(macro, 'rb') as macro_file:
        tmp_macro.write(macro_file.read())
        # BUG FIX: Template.safe_substitute() returns str, but tmp_macro is
        # opened in binary mode -- writing the str raised TypeError in
        # Python 3. Encode before writing.
        tmp_macro.write(macro_extras.safe_substitute(app=app_map.get(app, app),
                                                     nevents=nevents).encode())
    try:
        yield tmp_macro.name
    finally:
        # Always clean up the temp copy, even if the consumer raised.
        os.remove(tmp_macro.name)
def patch_debugtoolbar(settings):
    """
    Patches the pyramid_debugtoolbar (if installed) to display a link to the related rollbar item.
    """
    try:
        from pyramid_debugtoolbar import tbtools
    except ImportError:
        # Toolbar not installed; nothing to patch.
        return

    web_base = settings.get('rollbar.web_base', DEFAULT_WEB_BASE)
    if web_base.endswith('/'):
        web_base = web_base[:-1]

    def insert_rollbar_console(request, html):
        # Only annotate responses that rollbar tagged with an item uuid.
        item_uuid = request.environ.get('rollbar.uuid')
        if not item_uuid:
            return html
        url = '%s/item/uuid/?uuid=%s' % (web_base, item_uuid)
        link = '<a style="color:white;" href="%s">View in Rollbar</a>' % url
        new_data = "<h2>Rollbar: %s</h2>" % link
        # insert after the closing </h1>
        insertion_marker = "</h1>"
        return html.replace(insertion_marker, insertion_marker + new_data, 1)

    # Wrap tbtools.Traceback.render_full so every rendered traceback links
    # back to the corresponding rollbar item.
    old_render_full = tbtools.Traceback.render_full

    def new_render_full(self, request, *args, **kw):
        html = old_render_full(self, request, *args, **kw)
        return insert_rollbar_console(request, html)

    tbtools.Traceback.render_full = new_render_full
def show_all_positions():
    """
    This leads user to the position page when user clicks on the positions
    button on the top right and it is supposed to show user all the positions
    in the database.
    """
    db = main()
    conn = create_connection(db)
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM positions")
        # Materialize the rows so the connection can be closed before
        # rendering; previously the live cursor was passed to the template
        # and the connection was never closed (resource leak).
        positions = cursor.fetchall()
    finally:
        conn.close()
    return render_template('position-all.html', positions=positions)
def get_subtree_tips(terms: list, name: str, tree):
    """
    Get lists of subsubtree tip names for every duplicate of *name*.

    Returns a pair: (list of tip-name lists, list of matching duplicate ids).
    """
    # Duplicate sequences share the given name as a prefix.
    duplicates = [term for term in terms if term.startswith(name)]
    tip_groups = []
    for duplicate in duplicates:
        # Work on a copy so lookups cannot disturb the caller's tree.
        tree_copy = copy.deepcopy(tree)
        # Walk the path to the duplicate, then collect the tip names under
        # its parent node (second-to-last element of the path).
        path = tree_copy.get_path(duplicate)
        tip_groups.append([leaf.name for leaf in path[-2].get_terminals()])
    return tip_groups, duplicates
def save_text(datadir, split):
    """
    Load the captions JSON for *split*, group captions by image id, and
    write up to five captions per image into `<datadir>/<split>/<id>.txt`
    (zero-padded 12-digit names), matching the TextImageDataset format.
    """
    with open(f'{datadir}/annotations/captions_{split}.json', 'r') as f:
        captions = json.load(f)
    grouped = defaultdict(list)
    for annotation in captions['annotations']:
        grouped[annotation['image_id']].append(annotation['caption'])
    for image_id, caps in grouped.items():
        fname = f'{str(image_id).zfill(12)}.txt'
        with open(f'{datadir}/{split}/{fname}', 'w') as out:
            # Keep at most the first five captions per image.
            for line in caps[:5]:
                out.write(line + '\n')
def pressure_to_cm_h2o(press_in):
    """Convert pressure in [Pa] to [cm H2O].

    Returns a rounded integer.
    """
    # 1 cm H2O is 98.0665 Pa (conventional water-column conversion).
    PA_PER_CM_H2O = 98.0665
    return int(round(press_in / PA_PER_CM_H2O))
def write_triggers(trigger_file, function_file, model, is_direct, has_gid, **generator_args):
    """
    :param str file trigger_file: File where triggers will be written.
    :param str file function_file: File where functions will be written.
    :param model: A :ref:`declarative <sqla:declarative_toplevel>` class.
    :param bool is_direct: Whether this is an entity table or not.
    :param bool has_gid: Whether the entity table carries a gid (selects the
        GID-aware delete trigger generator).
    """
    # Mapper defines correlation of model class attributes to database table columns
    mapper = class_mapper(model)
    table_name = mapper.mapped_table.name
    # Foreign-key columns come from the many-to-one relationships.
    fk_columns = [
        list(rel.local_columns)[0].name
        for rel in mapper.relationships
        if rel.direction.name == 'MANYTOONE'
    ]
    # Pick the delete-trigger generator matching the table's role.
    if not is_direct:
        delete_trigger_generator = sql_generator.ReferencedDeleteTriggerGenerator
    elif has_gid:
        delete_trigger_generator = sql_generator.GIDDeleteTriggerGenerator
    else:
        delete_trigger_generator = sql_generator.DeleteTriggerGenerator
    # None when the table has no entry in the column map.
    update_columns = column_map.get(table_name)
    write_triggers_to_file(
        trigger_file=trigger_file,
        function_file=function_file,
        generators=[
            sql_generator.InsertTriggerGenerator,
            sql_generator.UpdateTriggerGenerator,
            delete_trigger_generator,
        ],
        table_name=table_name,
        pk_columns=[pk.name for pk in mapper.primary_key],
        fk_columns=fk_columns,
        update_columns=update_columns,
        **generator_args
    )
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
    """Truncates a pair of sequences (in place) to a maximum total length."""
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        # Always shrink whichever sequence is currently longer.
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(longer) >= 1
        # We want to sometimes truncate from the front and sometimes from the
        # back to add more randomness and avoid biases.
        if random.random() < 0.5:
            del longer[0]
        else:
            longer.pop()
def generate_config_file():
    """
    Generate configuration file by copying the default configuration.

    Exits the process with status 0 after a successful copy.
    """
    dest_path = os.path.join(CWD, "calendar.yml")
    if os.path.exists(dest_path):
        # Never clobber an existing configuration.
        raise ex.ExcalFileExistsError(
            'Aborting since config file already exists.')
    shutil.copyfile(DEFAULT_CONFIG_PATH, dest_path)
    sys.exit(0)
def parse_deceased_field(deceased_field):
    """
    Parse the deceased field.

    At this point the deceased field, if it exists, is garbage as it contains First Name, Last Name, Ethnicity,
    Gender, D.O.B. and Notes. We need to explode this data into the appropriate fields.

    :param list deceased_field: a list where each item is a word from the deceased field
    :return: a dictionary representing a deceased field.
    :rtype: dict
    """
    # Locate the DOB marker, trying the most specific spellings first
    # (same order as popping from the end of the original token list).
    dob_index = -1
    dob_markers = [Fields.DOB, '(D.O.B', '(D.O.B.', '(D.O.B:', '(DOB', '(DOB:', 'D.O.B.', 'DOB:']
    for marker in reversed(dob_markers):
        try:
            dob_index = deceased_field.index(marker)
        except ValueError:
            continue
        break
    if dob_index < 0:
        raise ValueError(f'Cannot parse {Fields.DECEASED}: {deceased_field}')
    parsed = {Fields.DOB: deceased_field[dob_index + 1]}
    trailing = deceased_field[dob_index + 2:]
    if trailing:
        parsed[Fields.NOTES] = ' '.join(trailing)
    # `fleg` stands for First, Last, Ethnicity, Gender. It represents the info stored before the DOB.
    fleg = deceased_field[:dob_index]
    # Pop the values off the end one by one; an IndexError means nothing is
    # left to retrieve (e.g. the name fields are absent).
    for field in (Fields.GENDER, Fields.ETHNICITY, Fields.LAST_NAME, Fields.FIRST_NAME):
        try:
            parsed[field] = fleg.pop().replace(',', '')
        except IndexError:
            break
    return parsed
def rate(epoch, rate_init, epochs_per_order):
    """Compute the learning rate for the given epoch.

    The rate decays exponentially from `rate_init`, dropping one order of
    magnitude every `epochs_per_order` epochs.
    """
    decay = 10.0 ** (-epoch / epochs_per_order)
    return rate_init * decay
def has_loop(net):
    """
    Return True when the network contains at least one cycle.
    """
    try:
        # find_cycle raises when the graph is acyclic.
        networkx.algorithms.cycles.find_cycle(net)
    except networkx.exception.NetworkXNoCycle:
        return False
    return True
def get_query_name(hmma):
    """
    Get the PANTHER family name from the query target.

    :param hmma: dot-separated hit id, e.g. "PTHR10000.SF12.mag".
    :return: "FAMILY:SUBFAMILY" when a subfamily ("SF...") component is
        present, otherwise just the family id.
    """
    hmma_list = hmma.split('.')
    # The family id is always the first dot-separated component.
    hmm_fam = hmma_list[0]
    # A subfamily component only appears in ids with three or more parts
    # (e.g. "FAM.SF1.mag"). Default to None so the check below is safe:
    # previously hmm_sf (and, for dot-free ids, hmm_fam) could be
    # referenced before assignment, raising NameError.
    hmm_sf = hmma_list[1] if len(hmma_list) > 2 else None
    hmm_id = hmm_fam
    if hmm_sf and hmm_sf.startswith("SF"):
        hmm_id = hmm_fam + ':' + hmm_sf
    return hmm_id
def staff_member_required(view_func, redirect_field_name=REDIRECT_FIELD_NAME, login_url='admin:login'):
    """
    Decorator for views that checks that the user is logged in and is a staff
    member, displaying the login page if necessary.
    """
    def _is_active_staff(user):
        # Inactive accounts never qualify, even when flagged as staff.
        return user.is_active and user.is_staff

    decorator = user_passes_test(
        _is_active_staff,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    return decorator(view_func)
def get_event_listeners(ctx: Configuration) -> Dict:
    """Return the mapping of events being listened for ({} on any failure)."""
    try:
        response = restapi(ctx, METH_GET, hass.URL_API_EVENTS)
        if response.status_code == 200:
            return response.json()  # type: ignore
        return {}
    except (HomeAssistantCliError, ValueError):
        # ValueError if response.json() can't parse the json
        _LOGGER.exception("Unexpected result retrieving event listeners")
        return {}
def process_tce(tce):
    """Processes the light curve for a Kepler TCE and returns processed data.

    Args:
        tce: Row of the input TCE table.

    Returns:
        Processed TCE data at each stage (flattening, folding, binning).

    Raises:
        IOError: If the light curve files for this Kepler ID cannot be found.
    """
    # Read the raw light curve and flatten it.
    time, flattened_flux = preprocess.read_and_process_light_curve(
        tce.kepid, KEPLER_DATA_DIR)
    # Phase-fold on the TCE's period and epoch.
    time, folded_flux = preprocess.phase_fold_and_sort_light_curve(
        time, flattened_flux, tce.tce_period, tce.tce_time0bk)
    # Bin into the transit-centered local view and the full-period global view.
    local_view = preprocess.local_view(
        time, folded_flux, tce.tce_period, tce.tce_duration,
        num_bins=201, bin_width_factor=0.16, num_durations=4)
    global_view = preprocess.global_view(
        time, folded_flux, tce.tce_period,
        num_bins=2001, bin_width_factor=1 / 2001)
    return flattened_flux, folded_flux, local_view, global_view
def validate_config_yaml(config):
    """Validates a Project config YAML against the schema.

    Args:
        config (dict): The parsed contents of the project config YAML file.

    Raises:
        jsonschema.exceptions.ValidationError: if the YAML contents do not
            match the schema.
    """
    jsonschema.validate(config, read_yaml_file(_PROJECT_CONFIG_SCHEMA))
def _build_kwic(docs, search_tokens, context_size, match_type, ignore_case, glob_method, inverse,
                highlight_keyword=None, with_metadata=False, with_window_indices=False,
                only_token_masks=False):
    """
    Helper function to build keywords-in-context (KWIC) results from documents `docs`.

    :param docs: list of tokenized documents, optionally as 2-tuple where each element in `docs` is a tuple
                 of (tokens list, tokens metadata dict)
    :param search_tokens: search pattern(s)
    :param context_size: either scalar int or tuple (left, right) -- number of surrounding words in keyword context.
                         if scalar, then it is a symmetric surrounding, otherwise can be asymmetric
    :param match_type: One of: 'exact', 'regex', 'glob'. If 'regex', `search_token` must be RE pattern. If `glob`,
                       `search_token` must be a "glob" pattern like "hello w*"
                       (see https://github.com/metagriffin/globre).
    :param ignore_case: If True, ignore case for matching.
    :param glob_method: If `match_type` is 'glob', use this glob method. Must be 'match' or 'search' (similar
                        behavior as Python's :func:`re.match` or :func:`re.search`).
    :param inverse: Invert the matching results.
    :param highlight_keyword: If not None, this must be a string which is used to indicate the start and end of the
                              matched keyword.
    :param with_metadata: add document metadata to KWIC results
    :param with_window_indices: add window indices to KWIC results
    :param only_token_masks: return only flattened token masks for filtering
    :return: list with KWIC results per document
    """
    tokens = docs
    if docs:
        # If docs are (tokens, metadata) pairs, unzip to get just the tokens.
        first_elem = next(iter(docs))
        if isinstance(first_elem, tuple) and len(first_elem) == 2:
            tokens = list(zip(*docs))[0]
    # find matches for search criteria -> list of NumPy boolean mask arrays
    matches = _token_pattern_matches(tokens, search_tokens, match_type=match_type,
                                     ignore_case=ignore_case, glob_method=glob_method)
    # In mask-only mode inversion is applied later, on the window mask.
    if not only_token_masks and inverse:
        matches = [~m for m in matches]
    left, right = context_size
    kwic_list = []
    for mask, dtok in zip(matches, docs):
        if isinstance(dtok, tuple):
            dtok, dmeta = dtok
        else:
            dmeta = None
        dtok_arr = np.array(dtok, dtype=str)
        ind = np.where(mask)[0]
        ind_windows = make_index_window_around_matches(mask, left, right,
                                                       flatten=only_token_masks, remove_overlaps=True)
        if only_token_masks:
            assert ind_windows.ndim == 1
            assert len(ind) <= len(ind_windows)
            # from indices back to binary mask; this only works with remove_overlaps=True
            win_mask = np.repeat(False, len(dtok))
            win_mask[ind_windows] = True
            if inverse:
                win_mask = ~win_mask
            kwic_list.append(win_mask)
        else:
            assert len(ind) == len(ind_windows)
            windows_in_doc = []
            for match_ind, win in zip(ind, ind_windows):  # win is an array of indices into dtok_arr
                tok_win = dtok_arr[win].tolist()
                if highlight_keyword is not None:
                    # Exactly one token inside the window is the match itself;
                    # wrap it with the highlight marker on both sides.
                    highlight_mask = win == match_ind
                    assert np.sum(highlight_mask) == 1
                    highlight_ind = np.where(highlight_mask)[0][0]
                    tok_win[highlight_ind] = highlight_keyword + tok_win[highlight_ind] + highlight_keyword
                win_res = {'token': tok_win}
                if with_window_indices:
                    win_res['index'] = win
                if with_metadata and dmeta is not None:
                    # Slice each metadata list to the window's token positions.
                    for meta_key, meta_vals in dmeta.items():
                        win_res[meta_key] = np.array(meta_vals)[win].tolist()
                windows_in_doc.append(win_res)
            kwic_list.append(windows_in_doc)
    assert len(kwic_list) == len(docs)
    return kwic_list
def get_sqnet(model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create SQNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Fixed SQNet architecture hyper-parameters.
    net = SQNet(
        channels=[[128, 256, 512], [256, 128, 96]],
        init_block_channels=96,
        layers=[2, 2, 3],
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def filter_records(records,
                   arns_to_filter_for=None,
                   from_date=datetime.datetime(1970, 1, 1, tzinfo=pytz.utc),
                   to_date=None):
    """Filter records so they match the given condition.

    :param records: iterable of records to filter.
    :param arns_to_filter_for: optional role ARNs to keep.
    :param from_date: keep records at/after this aware datetime.
    :param to_date: keep records at/before this aware datetime; defaults to
        "now" evaluated at call time.
    :return: list of records passing both filters; logs a warning when the
        filters removed every record.
    """
    if to_date is None:
        # BUG FIX: the previous default `datetime.now(tz=pytz.utc)` was
        # evaluated once at import time, silently freezing the upper bound.
        to_date = datetime.datetime.now(tz=pytz.utc)
    result = list(pipe(records,
                       filterz(_by_timeframe(from_date, to_date)),
                       filterz(_by_role_arns(arns_to_filter_for))))
    if not result and records:
        logging.warning(ALL_RECORDS_FILTERED)
    return result
def _check_dataframe(dataframe, dtypes):
"""Assert dataframe columns are of the correct type."""
expected = dataframe.dtypes.apply(lambda x: x.name)
expected.name = "pandas_type"
expected.index.name = "column_name"
expected = expected.reset_index()
expected = expected.sort_values(by="column_name", ignore_index=True)
actual = dtypes["pandas_type"].reset_index()
actual = actual.sort_values(by="column_name", ignore_index=True)
assert actual.equals(expected) | 35,331 |
def generate_prob_matrix(A: int, D: int)\
        -> Tuple[Dict[Tuple[int, int], int], Dict[Tuple[int, int], int], np.ndarray]:
    """Generate the probability outcome matrix.

    Builds the transition (Q) and absorption (R) matrices over the
    (attackers, defenders) states produced by ``generate_states`` and returns
    the absorption-probability matrix F = (I - Q)^-1 * R (the standard
    absorbing-Markov-chain formula), together with lookups from state tuple
    to row/column index.

    :param A: number of attackers.
    :param D: number of defenders.
    :return: (transient-state index lookup, absorbing-state index lookup, F).
    """
    transient_state, absorbing_state = generate_states(A, D)
    transient_state_lookup = {s: i for i, s in enumerate(transient_state)}
    absorbing_state_lookup = {s: i for i, s in enumerate(absorbing_state)}
    transient_length, absorbing_length = len(transient_state), len(absorbing_state)
    # Add probability to transition elements
    # (COO-style triplets, assembled into sparse matrices below).
    Qrow = []
    Qcol = []
    Qdata = []
    Rrow = []
    Rcol = []
    Rdata = []
    for i, (a, d) in enumerate(transient_state):
        # Two losses are only possible while both sides have >1 unit.
        max_deaths = 2 if a > 1 and d > 1 else 1
        for dl in range(0, max_deaths + 1):
            al = max_deaths - dl  # attacker losses complement defender losses
            na, nd = a - al, d - dl
            if a - al > 0 and d - dl > 0:
                # Both sides survive: transition to another transient state.
                Qrow.append(i)
                Qcol.append(transient_state_lookup[(na, nd)])
                Qdata.append(probable_outcome(min(a, 3), min(d, 2), dl))
            else:
                # One side is wiped out: transition to an absorbing state.
                Rrow.append(i)
                Rcol.append(absorbing_state_lookup[(na, nd)])
                Rdata.append(probable_outcome(min(a, 3), min(d, 2), dl))
    Q = csc_matrix((Qdata, (Qrow, Qcol)), shape=(transient_length, transient_length))
    R = csc_matrix((Rdata, (Rrow, Rcol)), shape=(transient_length, absorbing_length))
    iden = identity(transient_length)
    F = inv(iden - Q) * R
    return transient_state_lookup, absorbing_state_lookup, F
def test_common():
    """Test that all the attribution methods work as explainers and that
    every metric evaluates them to a plain float score."""
    input_shape, nb_labels, samples = ((16, 16, 3), 10, 20)
    x, y = generate_data(input_shape, nb_labels, samples)
    model = generate_model(input_shape, nb_labels)
    explainers = _default_methods(model)
    # Small steps/sample counts keep the test fast.
    metrics = [
        Deletion(model, x, y, steps=3),
        Insertion(model, x, y, steps=3),
        MuFidelity(model, x, y, nb_samples=3),
        AverageStability(model, x, y, nb_samples=3)
    ]
    for explainer in explainers:
        explanations = explainer(x, y)
        for metric in metrics:
            assert hasattr(metric, 'evaluate')
            # ExplainerMetric instances consume the explainer itself; the
            # others consume precomputed explanations.
            if isinstance(metric, ExplainerMetric):
                score = metric(explainer)
            else:
                score = metric(explanations)
            # (Removed a leftover debug print of the score's type.)
            assert type(score) in [np.float32, np.float64, float]
def FileTemplateGetNext():
    """
    Get the preset template file name that will be used in the next Save/Load action, if it has been preset.

    Output: Template file name that will be used in the next Save/Load action.
    Returned Variable will be empty if no next template file name is preset.
    """
    # Stub: no implementation in this API surface (presumably provided by the
    # host application at runtime -- TODO confirm).
    pass
def _prepare_func(app_id, run_id, train_fn, args_dict, local_logdir):
    """Build the per-partition wrapper that runs one training task.

    Args:
        app_id: application id used to construct experiment log paths.
        run_id: run id used to construct experiment log paths.
        train_fn: user training function invoked on the executor.
        args_dict: mapping of hyperparameter names to per-executor value
            lists; when falsy, ``train_fn`` is called with no arguments.
        local_logdir: forwarded to TensorBoard registration.

    Returns:
        A function suitable for mapping over partitions of executor indices.
    """
    def _wrapper_fun(iter):
        """Run the training task for the executor number yielded by ``iter``.

        Args:
            iter: iterator of executor indices (the last value wins).

        Returns:
            None.
        """
        # Drain the iterator; the final element is this executor's number.
        for i in iter:
            executor_num = i
        experiment_utils._set_ml_id(app_id, run_id)
        tb_hdfs_path = ''
        hdfs_exec_logdir = experiment_utils._get_logdir(app_id, run_id)
        # Periodically log GPU utilization in the background while training.
        t = threading.Thread(target=devices._print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()
        try:
            #Arguments
            if args_dict:
                # Resolve this executor's hyperparameter combination, then
                # create a per-combination log subdirectory.
                param_string, params, args = experiment_utils.build_parameters(train_fn, executor_num, args_dict)
                hdfs_exec_logdir, hdfs_appid_logdir = experiment_utils._create_experiment_subdirectories(app_id, run_id, param_string, 'grid_search', params=params)
                logfile = experiment_utils._init_logger(hdfs_exec_logdir)
                tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
                print(devices._get_gpu_info())
                print('-------------------------------------------------------')
                print('Started running task ' + param_string)
                task_start = time.time()
                retval = train_fn(*args)
                task_end = time.time()
                experiment_utils._handle_return_simple(retval, hdfs_exec_logdir, logfile)
                time_str = 'Finished task ' + param_string + ' - took ' + experiment_utils._time_diff(task_start, task_end)
                print(time_str)
                print('-------------------------------------------------------')
            else:
                # No hyperparameters: run the function once with no args.
                tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_exec_logdir, executor_num, local_logdir=local_logdir)
                logfile = experiment_utils._init_logger(hdfs_exec_logdir)
                print(devices._get_gpu_info())
                print('-------------------------------------------------------')
                print('Started running task')
                task_start = time.time()
                retval = train_fn()
                task_end = time.time()
                experiment_utils._handle_return_simple(retval, hdfs_exec_logdir, logfile)
                time_str = 'Finished task - took ' + experiment_utils._time_diff(task_start, task_end)
                print(time_str)
                print('-------------------------------------------------------')
        except:
            # NOTE(review): this bare `except: raise` is a no-op -- the
            # try/finally alone would behave identically.
            raise
        finally:
            # Always stop the GPU-monitor thread and TensorBoard bookkeeping.
            experiment_utils._cleanup(tensorboard, t)
    return _wrapper_fun
def convert_camel_to_snake(string, remove_non_alphanumeric=True):
    """
    Convert CamelCase to snake_case.

    :param string: text to convert
    :type string: str
    :param remove_non_alphanumeric: when True, non-alphanumeric characters
        are first replaced with underscores (underscores are kept).
    :rtype: str
    """
    if remove_non_alphanumeric:
        string = remove_non_alpha(string, replace_with='_', keep_underscore=True)
    # Insert an underscore at each lower/Upper case boundary, then lowercase.
    s1 = _first_cap_re.sub(r'\1_\2', string)
    result = _all_cap_re.sub(r'\1_\2', s1).lower()
    # Collapse runs of whitespace/underscores into a single underscore.
    # FIX: use a raw string for the pattern - '\s' in a plain string is an
    # invalid escape sequence (DeprecationWarning, SyntaxError in future).
    result = re.sub(pattern=r'\s*_+', repl="_", string=result)
    return result
def log_to_syslog(message: Message) -> None:
    """
    Writes logs to syslog.

    Parameters:
        message (Message): Message that contains log information; its
            `status.value` is used as the syslog priority and `detail`
            as the log text.

    Returns:
        None
    """
    syslog.syslog(message.status.value, message.detail) | 35,337 |
async def configure_hacs(hass, configuration, hass_config_dir):
    """Configure HACS.

    Populates the HacsBase singleton (imported as `hacs`) with the user
    configuration, the GitHub API client, storage, and Home Assistant
    references. Note: state is set on the class itself, so this is a
    process-wide, one-time setup.
    """
    from .aiogithub import AIOGitHub
    from .hacsbase import HacsBase as hacs
    from .hacsbase.configuration import HacsConfiguration
    from .hacsbase.data import HacsData
    from . import const as const
    from .hacsbase import const as hacsconst
    from .hacsbase.migration import HacsMigration
    #from .hacsbase.storage import HacsStorage
    hacs.config = HacsConfiguration(configuration)
    # Optional element types are appended to the module-level ELEMENT_TYPES
    # list based on the user's configuration flags.
    if hacs.config.appdaemon:
        ELEMENT_TYPES.append("appdaemon")
    if hacs.config.python_script:
        ELEMENT_TYPES.append("python_script")
    if hacs.config.theme:
        ELEMENT_TYPES.append("theme")
    # Print DEV warning
    if hacs.config.dev:
        _LOGGER.error(
            const.DEV_MODE
        )
        hass.components.persistent_notification.create(
            title="HACS DEV MODE",
            message=const.DEV_MODE,
            notification_id="hacs_dev_mode"
        )
    hacs.migration = HacsMigration()
    #hacs.storage = HacsStorage()
    hacs.aiogithub = AIOGitHub(
        hacs.config.token, hass.loop, async_create_clientsession(hass)
    )
    # Reference repository used for HACS's own updates.
    hacs.hacs_github = await hacs.aiogithub.get_repo("custom-components/hacs")
    hacs.hass = hass
    hacs.const = const
    hacs.hacsconst = hacsconst
    hacs.config_dir = hass_config_dir
    hacs.store = HacsData(hass_config_dir)
    # Restore previously persisted element/repository state from disk.
    hacs.store.restore_values()
    hacs.element_types = sorted(ELEMENT_TYPES) | 35,338 |
def attrdict(d: dict) -> AttrDict:
    """Add attribute access to a dict.

    Recursively wraps *d* (a dict possibly containing nested dicts) in
    AttrDict instances so every key is also readable as an attribute.

    Returns:
        A dict-like object with attribute access to keys.
    """
    def _convert(value):
        # Non-dict leaves pass through untouched.
        if not isinstance(value, dict):
            return value
        wrapped = AttrDict()
        for key, inner in value.items():
            converted = _convert(inner)
            # Store the same object under both item and attribute access.
            wrapped[key] = converted
            wrapped.__dict__[key] = converted
        return wrapped

    result = AttrDict()
    result.update(d)
    result.__dict__.update(_convert(d))
    return result
def form(cik, year):
    """Returns form 13F for specified CIK number. From https://fmpcloud.io/documentation#thirteenForm

    Input:
        cik : CIK number for which you'd like the 13F form
        year = year for which you'd like the 13F form.
    Returns:
        Form 13F for specified company
    """
    # Assemble the endpoint URL from the configured API root and key.
    base = settings.get_urlroot()
    key = settings.get_apikey()
    url = base + "form-thirteen/" + cik + "?year=" + year + "&apikey=" + key
    raw = urlopen(url).read().decode("utf-8")
    return safe_read_json(raw)
def check_bel_script_line_by_line(bel_script_path, error_report_file_path, bel_version):
    """Check statements in file or string for correct.

    result['trees'][line_number] = {'statement': statement, 'tree': tree}
    result['errors'][line_number] = {'statement': statement, 'error': ex}
    # can be used as comments
    empty lines will be skipped
    every statement should be in a new line

    :param str bel_script_path: path to BEL script
    :param str error_report_file_path: path to report; if falsy, the result
        DataFrame is returned instead of written to disk
    :param str bel_version: BEL version
    :return: dict (a DataFrame when no report path is given, else None)
    """
    parser = _BELParser()
    data_frame = parser.check_bel_script_line_by_line(bel_script_path, bel_version)
    # Either persist the report or hand the raw results back to the caller.
    if error_report_file_path:
        write_error_report(data_frame=data_frame, file_path=error_report_file_path)
    else:
        return data_frame | 35,341 |
def create(cls, **data):
    """Create a single instance of a Resource.

    Arguments:
        cls (Resource class): The resource to create.

    All other keyword arguments will be provided to the request
    when POSTing. For example::

        create(Foo, name="bar", email="baz@foo.com")

    ...would try to create an instance of the Foo resource
    with a name set to "bar" and an email set to "baz@foo.com".
    """
    # Validate locally before hitting the API.
    instance = cls()
    instance.run_validation(data)
    response = request("post", url=cls.get_collection_url(), data=data)
    try:
        return utils.parse_resources(cls=cls, response=response, many=False)
    except IndexError:
        # The server returned no resource in its response body.
        return None | 35,342 |
def mixup_batch(holder):
    """
    Mixup samples in an array.

    Inputs:
        holder: list of datapoints preprocessed with format ['images', 'weights', 'labels']
    Outputs:
        mixup_samples: samples mixed up from consecutive pairs of `holder`
    """
    # NOTE(review): `gamma` is drawn but never used here - presumably mixup()
    # draws its own Beta coefficient; confirm against its implementation.
    gamma = np.random.beta(0.4, 0.4)
    mixup_samples = []
    # BUG FIX: the original iterated range(self.batch_size) - `self` is
    # undefined in a free function (NameError) - and read holder[i + 1]
    # past the end of the list. Iterate in-bounds adjacent pairs instead.
    for i in range(len(holder) - 1):
        mixup_samples.append(mixup(holder[i], holder[i + 1]))
    # BUG FIX: the original never returned the result.
    return mixup_samples
def load_test_data(intop_order=1):
    """
    Load testing data from TEST_PATH.

    :param intop_order: interpolation order passed to skimage.transform.resize
        (also part of the cache file name, so each order gets its own cache).
    :return: (image_test, size_test, test_ids) - resized RGB images as a
        uint8 array, original (height, width) per image, and the image ids.
    """
    print('Loading testing data')
    img_name = TEST_PATH + '/image_test' + str(IMG_HEIGHT) + '_' \
               + str(IMG_WIDTH) + '_' + str(intop_order) + '.npy'
    # Use the cached numpy/pickle files only if all three exist.
    reload_flag = os.path.exists(img_name) and \
                  os.path.exists(TEST_PATH + '/size_test.npy') and \
                  os.path.exists(TEST_PATH + '/test_ids.bin')
    if (reload_flag):   # Reload resized images(from numpy file) if exist.
        print('Loading exist numpy files')
        image_test = np.load(img_name)
        size_test = np.load(TEST_PATH + '/size_test.npy')
        with open(TEST_PATH + '/test_ids.bin', 'rb') as file:
            test_ids = pickle.load(file)
        print('Loading finished')
    else:# If not, read raw images then resize them.
        test_ids = next(os.walk(TEST_PATH))[1]
        test_num = len(test_ids)
        image_test = np.zeros((test_num, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
        size_test = np.zeros((test_num, 2), dtype=np.int32)
        print('Getting and resizing images and masks')
        for n, id in tqdm(enumerate(test_ids), total=len(test_ids)):
            image_path = TEST_PATH + '/' + id + '/images'
            img = imread(image_path + '/' + id + '.png')
            # Grayscale images are broadcast into 3 identical channels.
            if len(img.shape) == 2:
                img_t = np.zeros((img.shape[0], img.shape[1], IMG_CHANNELS), dtype=np.uint8)
                img_t[:, :, 0] = img
                img_t[:, :, 1] = img
                img_t[:, :, 2] = img
                img = img_t
            # Drop any alpha channel; remember the original size for later
            # upscaling of predictions.
            img = img[:, :, :IMG_CHANNELS]
            size_test[n, :] = img.shape[:2]
            img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                         preserve_range=True, order=intop_order)
            image_test[n, :, :, :] = img
        # NOTE(review): images are cached under TEST_PATH but size/ids are
        # written to the hard-coded '../data/stage1_test' - if TEST_PATH
        # differs, the reload_flag check above will never find them; confirm.
        np.save(img_name, image_test)
        np.save('../data/stage1_test/size_test', size_test)
        with open('../data/stage1_test/test_ids.bin', 'wb') as file:
            pickle.dump(test_ids, file)
    return image_test, size_test, test_ids | 35,344 |
def get_cluster_role_template_binding(cluster_id=None,name=None,role_template_id=None,opts=None):
    """
    Use this data source to retrieve information about a Rancher v2 cluster role template binding.

    > This content is derived from https://github.com/terraform-providers/terraform-provider-rancher2/blob/master/website/docs/d/clusterRole.html.markdown.

    :param str cluster_id: The cluster id where bind cluster role template (string)
    :param str name: The name of the cluster role template binding (string)
    :param str role_template_id: The role template id from create cluster role template binding (string)
    :param opts: optional pulumi.InvokeOptions controlling the invoke.
    """
    # Pulumi invoke arguments use the provider's camelCase keys.
    __args__ = dict()
    __args__['clusterId'] = cluster_id
    __args__['name'] = name
    __args__['roleTemplateId'] = role_template_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = utilities.get_version()
    __ret__ = pulumi.runtime.invoke('rancher2:index/getClusterRoleTemplateBinding:getClusterRoleTemplateBinding', __args__, opts=opts).value
    # Repackage the raw invoke result into the awaitable result type.
    return AwaitableGetClusterRoleTemplateBindingResult(
        annotations=__ret__.get('annotations'),
        cluster_id=__ret__.get('clusterId'),
        group_id=__ret__.get('groupId'),
        group_principal_id=__ret__.get('groupPrincipalId'),
        id=__ret__.get('id'),
        labels=__ret__.get('labels'),
        name=__ret__.get('name'),
        role_template_id=__ret__.get('roleTemplateId'),
        user_id=__ret__.get('userId'),
        user_principal_id=__ret__.get('userPrincipalId')) | 35,345 |
def np_where(cond, x, y):
    """
    Thin wrapper around ``np.where`` so callers can pass keyword arguments.
    """
    result = np.where(cond, x, y)
    return result
def score_feedback_comp_micro_shujun(pred_df, gt_df, discourse_type):
    """
    A function that scores for the kaggle
    Student Writing Competition

    Uses the steps in the evaluation page here:
    https://www.kaggle.com/c/feedback-prize-2021/overview/evaluation

    :param pred_df: predictions with columns ['id', 'class', 'predictionstring']
    :param gt_df: ground truth with columns ['id', 'discourse_type', 'predictionstring']
    :param discourse_type: the single class to score
    :return: micro F1 for this class
    """
    gt_df = gt_df.loc[gt_df['discourse_type'] == discourse_type,
                      ['id', 'predictionstring']].reset_index(drop=True)
    pred_df = pred_df.loc[pred_df['class'] == discourse_type,
                      ['id', 'predictionstring']].reset_index(drop=True)
    pred_df['pred_id'] = pred_df.index
    gt_df['gt_id'] = gt_df.index
    # Reduce each space-separated token-index string to its (first, last) span.
    pred_df['predictionstring'] = [(int(pred.split(' ')[0]),int(pred.split(' ')[-1])) for pred in pred_df['predictionstring']]
    gt_df['predictionstring'] = [(int(pred.split(' ')[0]),int(pred.split(' ')[-1])) for pred in gt_df['predictionstring']]
    # print(pred_df[pred_df['predictionstring']!=pred_df['predictionstring']])
    # exit()
    #gt_strings=
    # Step 1. all ground truths and predictions for a given class are compared.
    joined = pred_df.merge(gt_df,
                           left_on='id',
                           right_on='id',
                           how='outer',
                           suffixes=('_pred','_gt')
                          )
    # Boolean per pair: True when both directional overlaps are >= 0.5.
    overlaps = [calc_overlap_shujun(*args) for args in zip(list(joined.predictionstring_pred),
                                                     list(joined.predictionstring_gt))]
    # 2. If the overlap between the ground truth and prediction is >= 0.5,
    #    and the overlap between the prediction and the ground truth >= 0.5,
    #    the prediction is a match and considered a true positive.
    #    If multiple matches exist, the match with the highest pair of overlaps is taken.
    # we don't need to compute the match to compute the score
    TP = joined.loc[overlaps]['gt_id'].nunique()
    # 3. Any unmatched ground truths are false negatives
    #    and any unmatched predictions are false positives.
    TPandFP = len(pred_df)
    TPandFN = len(gt_df)
    #calc microf1 (= 2TP / (2TP + FP + FN))
    my_f1_score = 2*TP / (TPandFP + TPandFN)
    return my_f1_score | 35,347 |
def _url_for_language_resolve_view(url, new_language):
    """
    Figure out the new URL by resolving the old URL and re-reversing it using
    the new language.

    :param url: path to translate (must be resolvable by the URLconf)
    :param new_language: language code to activate while reversing
    :return: the URL for the same view under the new language
    """
    view = urlresolvers.resolve(url)
    # Reverse inside the language context so i18n URL patterns pick up
    # the new language prefix/translation.
    with language_context(new_language):
        new_url = urlresolvers.reverse(view.url_name, args=view.args, kwargs=view.kwargs)
    return new_url | 35,348 |
def rsp_matrix(m, k):
    """
    Description: Build the matrix used for finding the parameters of the real
    signal perceptron via a system of linear equations.

    is_Implemented:
        True
    Args:
        (m:int): The domain size, the number of values each variable can take
        (k:int): The arity, the number of variables each signal receives
    Shape:
        - Input: integers that define the functional space
        - Output: an (m**k, m**k) float32 matrix A where
          A[i, j] = cos(pi * <x_i, w_j> / (m - 1)) and x_i, w_j are the
          base-m digit vectors of i and j.
    Examples::
        matrix = rsp_matrix(2, 1)
        print(matrix)
        [[ 1.  1.]
         [ 1. -1.]]
    """
    n_points = m ** k          # |m^k| = rows/cols of the system
    divfrec = m - 1            # frequency divisor scaling products into [0, pi]
    A = np.zeros([n_points, n_points], dtype=np.float32)
    # Cleanup vs. original: removed unused locals (n, nnn, v) and dead
    # debug comments; the computed matrix is unchanged.
    aix = np.zeros([k])        # base-m digits of the input index
    aiw = np.zeros([k])        # base-m digits of the frequency index
    for i in range(n_points):
        # Decompose i into base-m digits (least significant first).
        kx = i
        for xj in range(k):
            aix[xj] = int(kx % m)
            kx = int(kx / m)
        for j in range(n_points):
            # Decompose j into base-m digits (least significant first).
            kw = j
            for wj in range(k):
                aiw[wj] = int(kw % m)
                kw = int(kw / m)
            # Integer inner product of the two digit vectors.
            exponente = 0
            for ii in range(k):
                exponente = exponente + aix[ii] * aiw[ii]
            exponente = int(exponente)
            A[i, j] = np.cos(np.pi * exponente / divfrec)
    return A
def pretty_format_dict(dct):
    """
    Render a dictionary as a 4-space-indented JSON string.

    Parameters
    ----------
    dct: dict[Any, Any]

    Returns
    -------
    str
    """
    formatted = json.dumps(dct, indent=4)
    return str(formatted)
def report_account_status(context):
    """报告账户持仓状态 (report account/position status).

    Logs the account NAV, available cash, floating P&L and a per-position
    summary line using the strategy context's logger.
    """
    logger = context.logger
    latest_dt = context.now.strftime(r"%Y-%m-%d %H:%M:%S")
    logger.info("=" * 30 + f" 账户状态【{latest_dt}】 " + "=" * 30)
    account = context.account()
    cash = account.cash
    # Empty symbol/side selects all positions.
    positions = account.positions(symbol="", side="")
    cash_report = f"净值:{int(cash.nav)},可用资金:{int(cash.available)}," \
                  f"浮动盈亏:{int(cash.fpnl)},标的数量:{len(positions)}"
    logger.info(cash_report)
    # One line per open position: symbol, volume, cost, side, price, value, opened-at.
    for p in positions:
        p_report = f"持仓标的:{p.symbol},数量:{p.volume},成本:{round(p.vwap, 2)},方向:{p.side}," \
                   f"当前价:{round(p.price, 2)},成本市值:{int(p.volume * p.vwap)},建仓时间:{p.created_at}"
        logger.info(p_report) | 35,351 |
def extract_encodings(
    args, text_file, tok_file, embed_file, lang="en", max_seq_length=None
):
    """Get final encodings (not all layers, as extract_embeddings does).

    Args:
        args: namespace providing batch_size, embed_size, max_seq_length,
            device, model_type, pool_skip_special_token and model options.
        text_file: path to raw input text, one sentence per line.
        tok_file: path where the tokenized text is cached.
        embed_file: path prefix for the cached ``.npy`` embeddings.
        lang: language code passed to the tokenizer/model.
        max_seq_length: optional override of ``args.max_seq_length``.

    Returns:
        numpy.ndarray of shape (num_sentences, args.embed_size), float32.
    """
    embed_file_path = f"{embed_file}.npy"
    # Reuse cached embeddings if they exist.
    if os.path.exists(embed_file_path):
        logger.info("loading file from {}".format(embed_file_path))
        return load_embeddings(embed_file_path)
    config, model, tokenizer, langid = load_model(
        args, lang, output_hidden_states=False
    )
    sent_toks = tokenize_text(text_file, tok_file, tokenizer, lang)
    max_length = max([len(s) for s in sent_toks])
    logger.info("max length of tokenized text = {}".format(max_length))
    batch_size = args.batch_size
    num_batch = int(np.ceil(len(sent_toks) * 1.0 / batch_size))
    num_sents = len(sent_toks)
    embeds = np.zeros(shape=(num_sents, args.embed_size), dtype=np.float32)
    for i in tqdm(range(num_batch), desc="Batch"):
        start_index = i * batch_size
        end_index = min((i + 1) * batch_size, num_sents)
        # If given, custom sequence length overrides the args value.
        max_seq_length = max_seq_length or args.max_seq_length
        batch, pool_mask = prepare_batch(
            sent_toks[start_index:end_index],
            tokenizer,
            args.model_type,
            args.device,
            max_seq_length,
            lang=lang,
            langid=langid,
            pool_skip_special_token=args.pool_skip_special_token,
        )
        with torch.no_grad():
            if args.model_type in ("bert-retrieval", "xlmr-retrieval"):
                batch["inference"] = True
                outputs = model(**batch)
                batch_embeds = outputs
            else:
                # BUG FIX: the original format string had a '%s' placeholder
                # but passed no argument, so the offending model type was
                # never included in the log message.
                logger.fatal(
                    "Unsupported model-type '%s' - "
                    "perhaps extract_embeddings() must be used?",
                    args.model_type,
                )
        embeds[start_index:end_index] = (
            batch_embeds.cpu().numpy().astype(np.float32)
        )
        torch.cuda.empty_cache()
    if embed_file is not None:
        logger.info("save embed {} to file {}".format(embeds.shape, embed_file_path))
        np.save(embed_file_path, embeds)
    return embeds
def get_default_release():
    # type: () -> Optional[str]
    """Try to guess a default release.

    Order of precedence: the SENTRY_RELEASE environment variable, the
    current git HEAD commit, then a list of well-known CI/PaaS variables.
    Returns None when nothing can be determined.
    """
    release = os.environ.get("SENTRY_RELEASE")
    if release:
        return release
    # Fall back to the git commit hash, silencing git's stderr.
    with open(os.path.devnull, "w+") as null:
        try:
            proc = subprocess.Popen(
                ["git", "rev-parse", "HEAD"],
                stdout=subprocess.PIPE,
                stderr=null,
                stdin=null,
            )
            stdout, _ = proc.communicate()
            release = stdout.strip().decode("utf-8")
        except (OSError, IOError):
            # git not installed or not runnable; keep release as-is.
            pass
    if release:
        return release
    # Finally, probe environment variables set by common platforms.
    fallback_vars = (
        "HEROKU_SLUG_COMMIT",
        "SOURCE_VERSION",
        "CODEBUILD_RESOLVED_SOURCE_VERSION",
        "CIRCLE_SHA1",
        "GAE_DEPLOYMENT_ID",
    )
    for var in fallback_vars:
        candidate = os.environ.get(var)
        if candidate:
            return candidate
    return None
def session_try_readonly(dbtype, dbfile, echo=False):
    """Creates a read-only session to an SQLite database.

    If read-only sessions are not supported by the underlying sqlite3 python DB
    driver, then a normal session is returned. A warning is emitted in case the
    underlying filesystem does not support locking properly.

    :param dbtype: database type; only 'sqlite' is supported.
    :param dbfile: path to the SQLite database file.
    :param echo: forwarded to SQLAlchemy to log emitted SQL.

    Raises:
        NotImplementedError: if the dbtype is not supported.
    """
    if dbtype != 'sqlite':
        raise NotImplementedError(
            "Read-only sessions are only currently supported for SQLite databases")
    # 'unix-none' disables POSIX locking, which read-only access tolerates.
    connector = SQLiteConnector(dbfile, readonly=True, lock='unix-none')
    return connector.session(echo=echo) | 35,354 |
def generate_lab_saliva(directory, file_date, records):
    """
    Generate lab saliva file.

    :param directory: output directory (pathlib.Path-like).
    :param file_date: date string embedded in the output file name.
    :param records: number of synthetic rows to generate.
    :return: the generated DataFrame (also written as CSV to `directory`).
    """
    # Mimesis field schema: masked ids plus a categorical IgG result.
    lab_saliva_description = (
        lambda: {
            'ORDPATNAME': _('random.custom_code', mask='SIS########', digit='#'),
            'SAMPLEID': _('random.custom_code', mask='H#########', digit='#'),
            'IgG Capture Result': _('choice', items=['#r', '#n', '#e'])
        }
    )
    schema = Schema(schema=lab_saliva_description)
    lab_saliva = pd.DataFrame(schema.create(iterations=records))
    lab_saliva.to_csv(directory / f"lab_saliva_{file_date}.csv", index=False)
    return lab_saliva | 35,355 |
def clear_screen(tty=""):
    """Clear the screen.

    With no `tty`, shells out to `clear -x`; otherwise writes the ANSI
    home+erase sequence directly to the given tty device.
    """
    global __gef_redirect_output_fd__
    if not tty:
        gdb.execute("shell clear -x")
        return
    # Since the tty can be closed at any time, a PermissionError exception can
    # occur when `clear_screen` is called. We handle this scenario properly
    try:
        with open(tty, "wt") as f:
            # ESC[H moves the cursor home, ESC[J erases to end of screen.
            f.write("\x1b[H\x1b[J")
    except PermissionError:
        # Drop the broken redirection so later calls go to the default output.
        __gef_redirect_output_fd__ = None
        set_gef_setting("context.redirect", "")
    return | 35,356 |
def run_identical_doubleint_2D(dx, du, statespace, x0, ltidyn, poltrack, apol,
        assignment_epoch, nagents, ntargets, collisions, collision_tol, dt=0.01, maxtime=10):
    """ Setup the engine and simulation scenario

    Input:
    - dx:                   agent statesize
    - du:                   agent control input size
    - statespace:           dict describing agent position, velocity etc. components
    - x0:                   initial agent, target, target terminal states
    - ltidyn:               agent dynamics model (homogeneous across agent swarm)
    - poltrack:             per-agent control policies (indexed by agent)
    - apol:                 assignment policy
    - assignment_epoch:     number of ticks at which to perform assignment
    - nagents:              number of agents
    - ntargets:             number of targets
    - collisions:           collisions on/off
    - collision_tol:        absolute distance between an agent and tolerance to count as collision
    - dt:                   engine tick size
    - maxtime:              simulation time

    Output: Returns simulation results and diagnostics
    """
    dim = 2
    agents = [ag.TrackingAgent(dx, du, statespace, dim, ltidyn, poltrack[ii]) for ii in range(nagents)]
    # Targets are modelled as static points in this scenario.
    targets = [ag.Point(dx, du, statespace, dim) for ii in range(ntargets)]
    # setup the scenario and engine
    sys = systems.OneVOneFormation(agents, targets, apol, assignment_epoch)
    eng = engine.Engine(dim=dim, dt=dt, maxtime=maxtime, collisions=collisions, collision_tol=collision_tol)
    # Wall-clock the simulation run for diagnostics.
    start_run_time = process_time()
    eng.run(x0, sys)
    elapsed_run_time = process_time() - start_run_time
    opt_asst = sys.optimal_assignment
    # post processing
    polagents = [agent.pol for agent in agents]
    # Bundle everything downstream plotting/analysis needs.
    output = [agents, targets, eng.df, poltrack, nagents, ntargets, sys.costs, polagents, opt_asst, apol]
    ### diagnostics
    runtime_diagnostics = eng.diagnostics
    runtime = pd.DataFrame([elapsed_run_time])
    runtime_diagnostics = pd.concat([runtime_diagnostics, runtime], axis=1, ignore_index=True)
    diagnostics = [runtime_diagnostics]
    return output, diagnostics | 35,357 |
def rebuild_field_path(sort_field, resource):
    """
    Convert dot-connected sort fields into a valid field reference.

    :param sort_field: sort expression, possibly carrying a sort indicator
        prefix that is stripped first.
    :param resource: resource definition; only components present in it are
        kept in the rebuilt path.
    :return: path_to_field string concatenated from the matching components.
    """
    # FIX(idiom): the original bound the stripped value to a local named
    # `sorted`, shadowing the builtin; renamed locals, behavior unchanged.
    stripped = strip_sort_indicator(sort_field)
    components = stripped.split()
    sort_with_this = ""
    for component in components:
        if component in resource:
            sort_with_this = sort_with_this + component
    return sort_with_this
def load_data(pkl_paths, use_attr, no_img, batch_size, uncertain_label=False, n_class_attr=2, image_dir='images', resampling=False, resol=299):
    """
    Note: Inception needs (299,299,3) images with inputs scaled between -1 and 1

    Loads data with transformations applied, and upsample the minority class if there is class imbalance and weighted loss is not used
    NOTE: resampling is customized for first attribute only, so change sampler.py if necessary

    :param pkl_paths: pickle files describing the dataset split(s); a path
        containing 'train.pkl' switches on training augmentation.
    :param use_attr: whether attribute labels are returned by the dataset.
    :param no_img: whether to omit the image tensor from samples.
    :param batch_size: loader batch size.
    :param uncertain_label: use soft/uncertain attribute labels.
    :param n_class_attr: number of classes per attribute.
    :param image_dir: root directory of the images.
    :param resampling: balance classes via ImbalancedDatasetSampler.
    :param resol: square input resolution fed to the model.
    :return: a torch DataLoader over the CUBDataset.
    """
    resized_resol = int(resol * 256/224)
    is_training = any(['train.pkl' in f for f in pkl_paths])
    if is_training:
        # Training: color jitter + random crop/flip for augmentation.
        transform = transforms.Compose([
            #transforms.Resize((resized_resol, resized_resol)),
            #transforms.RandomSizedCrop(resol),
            transforms.ColorJitter(brightness=32/255, saturation=(0.5, 1.5)),
            transforms.RandomResizedCrop(resol),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), #implicitly divides by 255
            transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])
            #transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]),
            ])
    else:
        # Evaluation: deterministic center crop only.
        transform = transforms.Compose([
            #transforms.Resize((resized_resol, resized_resol)),
            transforms.CenterCrop(resol),
            transforms.ToTensor(), #implicitly divides by 255
            transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])
            #transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]),
            ])
    dataset = CUBDataset(pkl_paths, use_attr, no_img, uncertain_label, image_dir, n_class_attr, transform)
    # Shuffle/drop-last only when training.
    if is_training:
        drop_last = True
        shuffle = True
    else:
        drop_last = False
        shuffle = False
    if resampling:
        sampler = BatchSampler(ImbalancedDatasetSampler(dataset), batch_size=batch_size, drop_last=drop_last)
        loader = DataLoader(dataset, batch_sampler=sampler)
    else:
        loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)
    return loader | 35,359 |
def scalar_function(x, y):
    """
    Returns the f(x,y) defined in the problem statement.

    f(x, y) = x * y  when x <= y
            = x / y  otherwise (y must be non-zero in that case)
    """
    # FIX: removed the unreachable `raise NotImplementedError` that
    # followed the return statement in the original.
    if x <= y:
        out = x * y
    else:
        out = x / y
    return out
def getImageParticles(imagedata, stackid, noDie=True):
    """
    Provided a Stack Id & imagedata, find the particles of that image
    belonging to the stack.

    :param imagedata: ApImageData record whose particles are requested.
    :param stackid: database id of the stack to search.
    :param noDie: when True, return empty results instead of erroring out
        when no stack particles are found.
    :return: (particles, stackps) - lists of particle data and stack
        particle data, or ([], None) when nothing is found and noDie is True.
    """
    particleq = appiondata.ApParticleData(image=imagedata)
    stackpdata = appiondata.ApStackParticleData()
    stackpdata['particle'] = particleq
    stackpdata['stack'] = appiondata.ApStackData.direct_query(stackid)
    stackps = stackpdata.query()
    particles = []
    if not stackps:
        if noDie is True:
            return particles, None
        # BUG FIX: the original message referenced an undefined name
        # `particleid`, raising NameError instead of the intended printError.
        apDisplay.printError("no particles for image=" + str(imagedata)
            + " were found in stackid=" + str(stackid))
    for stackp in stackps:
        particles.append(stackp['particle'])
    return particles, stackps
def build_evaluation(
    resource_id,
    compliance_type,
    event,
    resource_type=DEFAULT_RESOURCE_TYPE,
    annotation=None,
):
    """Form an evaluation as a dictionary. Usually suited to report on scheduled rules.

    Keyword arguments:
    resource_id -- the unique id of the resource to report
    compliance_type -- either COMPLIANT, NON_COMPLIANT or NOT_APPLICABLE
    event -- the event variable given in the lambda handler
    resource_type -- the CloudFormation resource type (or AWS::::Account) to report on the rule
                     (default DEFAULT_RESOURCE_TYPE)
    annotation -- an annotation to be added to the evaluation (default None). It will be truncated to 255 if longer.
    """
    evaluation = {}
    # Annotation is optional; it is truncated inside build_annotation.
    if annotation:
        evaluation["Annotation"] = build_annotation(annotation)
    evaluation["ComplianceResourceType"] = resource_type
    evaluation["ComplianceResourceId"] = resource_id
    evaluation["ComplianceType"] = compliance_type
    # The ordering timestamp comes from the AWS Config invoking event payload.
    invoking_event = json.loads(event["invokingEvent"])
    evaluation["OrderingTimestamp"] = str(invoking_event["notificationCreationTime"])
    return evaluation
def match_countries(df_to_match, olympics):
    """Changes the names of the countries in the df_to_match df so that they match
    the names of the countries in the olympics df.

    Parameters
    -----------
    df_to_match : either of the two dataframes:
        - gdp
        - pop
    olympics : the olympics dataframe

    Returns
    -----------
    df_to_match : the dataframe given as first parameter that now its countries
        match the countries in the olympics df
    common_countries : a list with the common countries in the two dataframes
    """
    # countries in the to_match df
    df_countries = set(df_to_match.columns.tolist())
    # countries in the olympics df
    ol_regions = set(sorted(olympics.region.unique().tolist()))
    # countries in the to_match df that are not in the olympics df
    not_in_ol = df_countries.difference(ol_regions)
    # countries in the olympics df that are not in the to_match df
    not_in_df = ol_regions.difference(df_countries)
    # After printing not_in_il and not_int_df, we see that some countries are simply named differently
    # Therefore, I renames these countries in the to_match df so that they match the countries from the olympics df
    # NOTE(review): "Bolivia" -> "Boliva" looks like it deliberately matches a
    # misspelling present in the olympics dataset - confirm before "fixing".
    df_to_match.rename(columns={"United States": "USA",
                                "United Kingdom": "UK",
                                "Antigua and Barbuda": "Antigua",
                                "Congo, Dem. Rep.": "Democratic Republic of the Congo",
                                "Lao": "Laos",
                                "North Macedonia": "Macedonia",
                                "Cote d'Ivoire": "Ivory Coast",
                                "Trinidad and Tobago": "Trinidad",
                                "Micronesia, Fed. Sts.": "Micronesia",
                                "St. Vincent and the Grenadines": "Saint Vincent",
                                "St. Lucia": "Saint Lucia",
                                "St. Kitts and Nevis": "Saint Kitts",
                                "Slovak Republic": "Slovakia",
                                "Kyrgyz Republic": "Kyrgyzstan",
                                "Bolivia": "Boliva",
                                "Congo, Rep.": "Republic of Congo"},
                       inplace=True)
    # Check which countries still remain unmatched
    df_countries = set(df_to_match.columns.tolist())
    ol_regions = set(sorted(olympics.region.unique().tolist()))
    # Countries in the to_match df that are still not in the olympics df
    not_in_ol = df_countries.difference(ol_regions)
    # Countries in the olympics df that are still not in the to_match df
    not_in_df = ol_regions.difference(df_countries)
    # Printing not_in_ol and not_in_df shows which countries are still not matched. Used as a check.
    # save the resulting common countries
    common_countries = ol_regions.intersection(df_countries)
    return df_to_match, common_countries | 35,363 |
def main():
    """
    Entry point: collect all integers in [2, 1000000) that equal the sum of
    the fifth powers of their digits, and print their total and values.
    """
    matches = []
    for candidate in range(2, 1000000):
        if equals_sum_fifth_powers(candidate):
            matches.append(candidate)
    print(f"Total: {sum(matches)} (values: {matches})")
def get_reply(session, url, post=False, data=None, headers=None, quiet=False):
    """
    Download an HTML page using the requests session. Low-level function
    that allows for flexible request configuration.

    @param session: Requests session.
    @type session: requests.Session

    @param url: URL pattern with optional keywords to format.
    @type url: str

    @param post: Flag that indicates whether POST request should be sent.
    @type post: bool

    @param data: Payload data that is sent with request (in request body).
    @type data: object

    @param headers: Additional headers to send with request.
    @type headers: dict

    @param quiet: Flag that tells whether to print error message when status
        code != 200.
    @type quiet: bool

    @return: Requests response.
    @rtype: requests.Response
    """
    request_headers = {} if headers is None else headers
    # Build and prepare explicitly so session-level defaults (cookies, auth)
    # are merged into the request before sending.
    request = requests.Request('POST' if post else 'GET',
                               url,
                               data=data,
                               headers=request_headers)
    prepared_request = session.prepare_request(request)
    reply = session.send(prepared_request)
    try:
        reply.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # Log the failure (unless quiet), then propagate to the caller.
        if not quiet:
            logging.error("Error %s getting page %s", e, url)
            logging.error("The server replied: %s", reply.text)
        raise
    return reply | 35,365 |
def get_stock_rack_size():
    """
    Returns the number of available positions in a stock rack.

    Delegates to get_stock_rack_shape() and reports its total size.
    """
    return get_stock_rack_shape().size | 35,366 |
def is_valid_constant_type(x):
    """
    @return: True if the name is a legal constant type. Only simple
        (primitive) types are allowed - membership in PRIMITIVE_TYPES.
    @rtype: bool
    """
    return x in PRIMITIVE_TYPES | 35,367 |
def logsumexp(tensor: torch.Tensor, dim: int = -1, keepdim: bool = False) -> torch.Tensor:
    """
    A numerically stable computation of logsumexp.

    Mathematically equivalent to `tensor.exp().sum(dim, keep=keepdim).log()`;
    the maximum along `dim` is subtracted before exponentiating so large
    log-probabilities do not overflow, then added back.

    Parameters
    ----------
    tensor : `torch.FloatTensor`, required.
        A tensor of arbitrary size.
    dim : `int`, optional (default = `-1`)
        The dimension of the tensor to apply the logsumexp to.
    keepdim: `bool`, optional (default = `False`)
        Whether to retain a dimension of size one at the dimension we reduce over.
    """
    max_score, _ = tensor.max(dim, keepdim=keepdim)
    # Align the max for broadcasting when the reduced dim was dropped.
    if keepdim:
        shifted = tensor - max_score
    else:
        shifted = tensor - max_score.unsqueeze(dim)
    return max_score + shifted.exp().sum(dim, keepdim=keepdim).log()
def testmod(module=None, run=True, optionflags=None,):
    """
    Tests a doctest modules with numba functions. When run in nosetests, only
    populates module.__test__, when run as main, runs the doctests.

    :param module: module to test; when None, the caller's module is
        inferred from the calling stack frame.
    :param run: whether to actually execute the doctests.
    :param optionflags: accepted for API compatibility; currently unused.
    """
    if module is None:
        # Inspect the caller's frame to find the module that invoked us.
        mod_globals = sys._getframe(1).f_globals
        modname = mod_globals['__name__']
        module = __import__(modname)
        # module = types.ModuleType(modname)
        # vars(module).update(mod_globals)
    # Patch the module so doctest discovery works on numba-wrapped functions.
    fix_module_doctest_py3(module)
    doctest_support.testmod(module, run_doctests=run) | 35,369 |
def test_check_whitespace_glyphnames():
    """ Font has **proper** whitespace glyph names? """
    check = CheckTester(universal_profile,
                        "com.google.fonts/check/whitespace_glyphnames")

    def deleteGlyphEncodings(font, cp):
        """ This routine is used on to introduce errors
            in a given font by removing specific entries
            in the cmap tables.
        """
        for subtable in font['cmap'].tables:
            if subtable.isUnicode():
                subtable.cmap = {
                    codepoint: name for codepoint, name in subtable.cmap.items()
                    if codepoint != cp
                }

    def editCmap(font, cp, name):
        """ Corrupt the cmap by changing the glyph name
            for a given code point.
        """
        for subtable in font['cmap'].tables:
            if subtable.isUnicode():
                # Copy the map
                subtable.cmap = subtable.cmap.copy()
                # edit it
                subtable.cmap[cp] = name

    # Our reference Mada Regular font is good here:
    ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
    assert_PASS(check(ttFont),
                'with a good font...')

    # post table format 3.0 has no glyph names, so the check must skip.
    value = ttFont["post"].formatType
    ttFont["post"].formatType = 3.0
    assert_SKIP(check(ttFont),
                'with post.formatType == 3.0 ...')
    # restore good value:
    ttFont["post"].formatType = value

    deleteGlyphEncodings(ttFont, 0x0020)
    assert_results_contain(check(ttFont),
                           FAIL, 'missing-0020',
                           'with missing glyph name for char 0x0020 ...')

    # restore the original font object in preparation for the next test-case:
    ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))

    deleteGlyphEncodings(ttFont, 0x00A0)
    assert_results_contain(check(ttFont),
                           FAIL, 'missing-00a0',
                           'with missing glyph name for char 0x00A0 ...')

    # restore the original font object in preparation for the next test-case:
    ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))

    # See https://github.com/googlefonts/fontbakery/issues/2624
    # nbsp is not Adobe Glyph List compliant.
    editCmap(ttFont, 0x00A0, "nbsp")
    assert_results_contain(check(ttFont),
                           FAIL, 'non-compliant-00a0',
                           'with not AGL-compliant glyph name "nbsp" for char 0x00A0...')

    editCmap(ttFont, 0x00A0, "nbspace")
    assert_results_contain(check(ttFont),
                           WARN, 'not-recommended-00a0',
                           'for naming 0x00A0 "nbspace"...')

    editCmap(ttFont, 0x0020, "foo")
    assert_results_contain(check(ttFont),
                           FAIL, 'non-compliant-0020',
                           'with not AGL-compliant glyph name "foo" for char 0x0020...')

    editCmap(ttFont, 0x0020, "uni0020")
    assert_results_contain(check(ttFont),
                           WARN, 'not-recommended-0020',
                           'for naming 0x0020 "uni0020"...')

    # Canonical names for both whitespace glyphs pass again.
    editCmap(ttFont, 0x0020, "space")
    editCmap(ttFont, 0x00A0, "uni00A0")
    assert_PASS(check(ttFont)) | 35,370 |
def shrink_piecwise_linear(r,rvar,theta):
    """Implement the piecewise linear shrinkage function.
    With minor modifications and variance normalization.
    theta[...,0] : abscissa of first vertex, scaled by sqrt(rvar)
    theta[...,1] : abscissa of second vertex, scaled by sqrt(rvar)
    theta[...,2] : slope from origin to first vertex
    theta[...,3] : slope from first vertex to second vertex
    theta[...,4] : slope after second vertex
    Returns (xhat, dxdr) where xhat is the shrunken estimate of r and
    dxdr is the pointwise slope, averaged over axis 0.
    """
    ab0 = theta[...,0]
    ab1 = theta[...,1]
    sl0 = theta[...,2]
    sl1 = theta[...,3]
    sl2 = theta[...,4]
    # scale each column by sqrt(rvar)
    scale_out = tf.sqrt(rvar)
    scale_in = 1/scale_out
    # Work on the normalized magnitude; reapply sign and scale at the end.
    rs = tf.sign(r*scale_in)
    ra = tf.abs(r*scale_in)
    # split the piecewise linear function into regions
    # NOTE: tf.to_float is a deprecated TF1 alias for tf.cast(..., tf.float32).
    rgn0 = tf.to_float( ra<ab0)
    rgn1 = tf.to_float( ra<ab1) - rgn0
    rgn2 = tf.to_float( ra>=ab1)
    # Evaluate each linear segment from its vertex, so the pieces join
    # continuously at ab0 and ab1.
    xhat = scale_out * rs*(
            rgn0*sl0*ra +
            rgn1*(sl1*(ra - ab0) + sl0*ab0 ) +
            rgn2*(sl2*(ra - ab1) + sl0*ab0 + sl1*(ab1-ab0) )
    )
    dxdr = sl0*rgn0 + sl1*rgn1 + sl2*rgn2
    dxdr = tf.reduce_mean(dxdr,0)
    return (xhat,dxdr) | 35,371 |
def LoadAuth(decoratee):
    """Decorator ensuring ``self.auth`` is valid and authorized before the call.

    Creates the GoogleAuth object lazily, refreshes expired credentials via
    the local-webserver flow, and builds the Drive service if needed.
    """
    @wraps(decoratee)
    def _decorated(self, *args, **kwargs):
        auth = self.auth
        if auth is None:
            # First use: create and remember the auth object.
            auth = GoogleAuth()
            self.auth = auth
        if auth.access_token_expired:
            # Re-run the local webserver OAuth flow to refresh the token.
            auth.LocalWebserverAuth()
        if auth.service is None:
            # Drive API service not built yet.
            auth.Authorize()
        return decoratee(self, *args, **kwargs)
    return _decorated
def cols_with_nulls(df):
    """Convert whitespace-only entries to NaN in place and return the
    columns that actually contain missing values.

    :param df: pandas DataFrame; mutated in place by the replacement.
    :return: list of column labels with at least one NaN entry.
    """
    # Whitespace-only (and empty) strings are not treated as missing by
    # pandas, so normalise them to NaN first.
    df.replace(r'^\s*$', np.nan, regex=True, inplace=True)
    # BUGFIX: the previous version returned ``list(df.isnull().any().index)``,
    # i.e. the index of the boolean Series -- every column name regardless of
    # nulls. Filter the columns by the boolean mask instead.
    return df.columns[df.isnull().any()].tolist()
def recursive_feature_selection_roc_auc(clf,
                                        X,
                                        y,
                                        sample_weight=None,
                                        n_features=10,
                                        cv_steps=10,
                                        n_jobs=1,
                                        forward=True,
                                        matching_features=True):
    """Method building a feature set in a recursive fashion. Depending
    on the setting it is run as a forward selection/backward elimination
    searching for a set of n features with the highest/lowest mismatch.
    To get the set with the size n starting from n_total features the
    following approaches are used:
    Forward Selection:
    To get the k+1 set every not yet selected feature is used to
    generate (n_total - k sets). The set with the best score is the
    k + 1 set. Those steps are repeated until n features are selected
    Backward Elimination:
    To get k+1 eliminated features every not yet eleminated feature is used
    to generate (n_total - k) sets. The sets consist of all not yet
    eliminated features minus the one that is tested. The set with the
    best score determines the next feature to eliminate. Those steps are
    repeated until n features are eliminated.
    What the best score depends also on the settings:
    matching_features:
        forward: min(|auc - 0.5|)
        not forward: max(|aux - 0.5|)
    not matching_features:
        forward: max(auc )
        not forward: min(aux)
    Parameters
    ----------
    clf: object
        Classifier that should be used for the classification.
        It needs a fit and a predict_proba function.
    X : numpy.float32array, shape=(n_samples, n_obs)
        Values describing the samples.
    y : numpy.float32array, shape=(n_samples)
        Array of the true labels.
    sample_weight : None or numpy.float32array, shape=(n_samples)
        If weights are used this has to contains the sample weights.
        None in the case of no weights.
    n_features : int, optional (default=10)
        Number of feature that are selected (forward=True) or eliminated
        (forward=False)
    cv_steps : int, optional (default=10)
        Number of cross-validation iterations used to evaluate each
        candidate feature set (forwarded to get_all_auc_scores).
    n_jobs: int, optional (default=1)
        Number of parallel jobs spawned in each a classification in run.
        Total number of used cores is the product of n_jobs from the clf
        and the n_jobs of this function.
    forward: bool, optional (default=True)
        If True it is a 'forward selection'. If False it is a 'backward
        elimination'.
    matching_features: bool, optional (default=True)
        Wether for matching or mismatching feature should be searched
    Returns
    -------
    selected_features: list of ints
        Return a list containing the indeces of X, that were
        selected/eliminated. The order corresponds to the order the
        features were selected/eliminated.
    auc_scores: np.array float shape(n_features_total, n_features)
        Return a array containing the auc values for all steps.
        np.nan is the feature was already selected in the specific run.
    """
    # Sanity-check that clf exposes the required sklearn-like interface.
    desired_characteristics = ClassifierCharacteristics()
    desired_characteristics.opts['callable:fit'] = True
    desired_characteristics.opts['callable:predict_proba'] = True
    clf_characteristics = ClassifierCharacteristics(clf)
    assert clf_characteristics.fulfilling(desired_characteristics), \
        'Classifier sanity check failed!'
    if n_features > X.shape[1]:
        logger.info(' \'n_features\' higher than total number of features.'
                    ' \'n_features\' reduced!')
        n_features = X.shape[1]
    auc_scores = np.zeros((X.shape[1], n_features))
    selected_features = []
    # Greedily select (or mark for elimination) one feature per iteration.
    while len(selected_features) != n_features:
        # One AUC per candidate feature; np.nan for already-chosen ones.
        auc_scores_i = get_all_auc_scores(clf,
                                          selected_features,
                                          X,
                                          y,
                                          sample_weight=sample_weight,
                                          cv_steps=cv_steps,
                                          n_jobs=n_jobs,
                                          forward=forward)
        value_best = None
        index_best = None
        # Track the best candidate according to the selection mode
        # (see "What the best score depends" in the docstring).
        for idx, auc in enumerate(auc_scores_i):
            if not np.isfinite(auc):
                continue
            if value_best is None:
                value_best = auc
                index_best = idx
            if matching_features:
                if forward:
                    if np.abs(auc - 0.5) < np.abs(value_best - 0.5):
                        value_best = auc
                        index_best = idx
                else:
                    if np.abs(auc - 0.5) > np.abs(value_best - 0.5):
                        value_best = auc
                        index_best = idx
            else:
                if forward:
                    if auc > value_best:
                        value_best = auc
                        index_best = idx
                else:
                    if auc < value_best:
                        value_best = auc
                        index_best = idx
        # Record this round's scores before committing the chosen feature.
        auc_scores[:, len(selected_features)] = auc_scores_i
        selected_features.append(index_best)
    return selected_features, auc_scores | 35,374 |
def mermin_klyshko_quantum_bound(n):
    """Quantum bound for the Mermin-Klyshko inequality, :math:`2^{3(n-1)/2}`.

    :param n: The number of measurement nodes.
    :type n: Int
    :returns: The quantum bound.
    :rtype: Float
    """
    exponent = 3 * (n - 1) / 2
    return 2 ** exponent
def get_serial_port_selected():
    """Get the selected serial port from the Settings.
    :return: The currently selected serial port in the Settings.
    """
    # ServerCompilerSettings() appears to be a settings accessor defined
    # elsewhere -- presumably a singleton; confirm before caching the value.
    return ServerCompilerSettings().serial_port | 35,376 |
def main(args):
    """Produce library bundle.

    Dispatches to the platform-specific archiver and returns its result.
    """
    if platform.system() == "Darwin":
        # macOS archiving takes the library list directly.
        return gen_archive_darwin(args.output, args.libs)
    # Elsewhere, build an ar script from the expanded library paths first.
    script = gen_archive_script(
        args.output, [expand_path(lib) for lib in args.libs]
    )
    return gen_archive(script)
async def set_version_response_headers(response: Response) -> None:
    """Set Opentrons-Version headers on the response, without checking the request.
    This function should be used inside a `fastapi.Depends` as a router
    or application dependency.
    """
    # Header values must be strings, so format the version constants.
    response.headers[API_VERSION_HEADER] = f"{API_VERSION}"
    response.headers[MIN_API_VERSION_HEADER] = f"{MIN_API_VERSION}" | 35,378 |
def test_result_error_failure():
    """Ensures that ResultE can be typecasted to failure."""
    # ResultE[int] wraps an Exception on the failure track;
    # .failure() unwraps the contained error.
    container: ResultE[int] = Failure(ValueError('1'))
    assert str(container.failure()) == '1' | 35,379 |
def run_in_background(func: callable, *args, **kwargs) -> Future:
    """ run func(*args, **kwargs) in background and return Future for its outputs """
    # Submits to the module-level GLOBAL_EXECUTOR; exceptions raised by func
    # only surface when the caller inspects the returned Future.
    return GLOBAL_EXECUTOR.submit(func, *args, **kwargs) | 35,380 |
def setup_env():
    """ Sets required environment variables for GAE datastore library """
    # GAE's datastore stubs read these from the process environment.
    defaults = {
        'AUTH_DOMAIN': "appscale.com",
        'USER_EMAIL': "",
        'USER_NICKNAME': "",
        'APPLICATION_ID': "",
    }
    os.environ.update(defaults)
def remove_injector():
    """Remove a thread-local injector."""
    # Only delete when the attribute exists AND is truthy, matching the
    # original getattr-based guard.
    current = getattr(_LOCAL, "injector", None)
    if current:
        del _LOCAL.injector
def vlan_no_trunk_allowed(dut, hs1, hs2, step):
    """
    This test verifies that even though we have sub-interfaces with
    vlan id 2 on both interfaces, if the switch interfaces don't support
    vlan trunk allowed for vlan id 2 on both the interfaces then packet
    transfer is not possible.

    :param dut: switch (device under test) node.
    :param hs1: first host/workstation node.
    :param hs2: second host/workstation node.
    :param step: test-framework step logger callable.
    """
    dut_port1 = dut.ports['3']
    dut_port2 = dut.ports['4']
    e1 = hs1.ports['1']
    d1 = hs2.ports['1']
    step("Create a sub-interface with vlan id 2 in wrkstn 1")
    # Configure IP and bring UP host 1 interfaces
    hs1.libs.ip.add_link_type_vlan('1', e1 + ".2", 2)
    hs1.libs.ip.sub_interface('1', '2', addr="2.2.2.2/24", up=True)
    step("Create a sub-interface with vlan id 3 in wrkstn 2")
    # Configure IP and bring UP host 2 interfaces
    # NOTE: host 2 deliberately uses vlan id 3 so the tagged traffic
    # cannot match across the switch.
    hs2.libs.ip.add_link_type_vlan('1', d1 + ".3", 3)
    hs2.libs.ip.sub_interface('1', '3', addr="2.2.2.3/24", up=True)
    dut.libs.vtysh.show_running_config()
    # Wait until interfaces are up
    for switch, portlbl in [(dut, dut_port1), (dut, dut_port2)]:
        wait_until_interface_up(switch, portlbl)
    # Settle time for the links before generating traffic.
    sleep(5)
    step("Ping should fail as vlan id 2 tagged packets are not allowed in"
         " Switch interface " + dut_port2)
    # Try to ping from host1 to host2, verify that non of the ICMP requests
    # are getting any replies
    ping = hs1.libs.ping.ping(10, "2.2.2.3")
    assert ping['loss_pc'] == 100, "ping should not happen as switch doesn't"\
        " allow vlan 2 tagged packets on " + dut_port2 | 35,383 |
def get_eigenvectors(
    q,
    dm: Union[DynamicalMatrix, DynamicalMatrixNAC],
    ddm: DerivativeOfDynamicalMatrix,
    perturbation=None,
    derivative_order=None,
    nac_q_direction=None,
):
    """Return degenerated eigenvalues and rotated eigenvalues.

    When a perturbation is given, the eigenvectors of degenerate modes are
    rotated so that they diagonalize the perturbed dynamical matrix
    derivative dD.
    """
    # At (or numerically near) the Gamma point, apply the non-analytical
    # term correction along the supplied q direction.
    if nac_q_direction is not None and (np.abs(q) < 1e-5).all():
        dm.run(q, q_direction=nac_q_direction)
    else:
        dm.run(q)
    eigvals, eigvecs = np.linalg.eigh(dm.dynamical_matrix)
    # eigh guarantees real eigenvalues; strip any residual imaginary part.
    eigvals = eigvals.real
    if perturbation is None:
        return eigvals, eigvecs
    if derivative_order is not None:
        ddm.set_derivative_order(derivative_order)
    # dD: derivative of the dynamical matrix projected on the perturbation.
    dD = _get_dD(q, ddm, perturbation)
    rot_eigvecs, _ = rotate_eigenvectors(eigvals, eigvecs, dD)
    return eigvals, rot_eigvecs | 35,384 |
def _get_formatted_atom_types_names_for(connection):
    """Return formatted atom_type names for a connection.

    Members without an atom_type contribute an empty label; labels are
    joined with ' --- '.
    """
    labels = [
        member.atom_type.name if member.atom_type else ""
        for member in connection.connection_members
    ]
    return " --- ".join(labels)
def anti_commutator(H1,H2):
    """ Calculates the anticommutator of two Hamiltonians :math:`H_1` and :math:`H_2`.
    .. math::
        \\{H_1,H_2\\}_+ = H_1 H_2 + H_2 H_1
    Examples
    --------
    The following script shows how to compute the anticommutator of two `hamiltonian` objects.
    .. literalinclude:: ../../doc_examples/anti_commutator-example.py
        :linenos:
        :language: python
        :lines: 7-
    Parameters
    -----------
    H1 : obj
        `numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
    H2 : obj
        `numpy.ndarray` or `hamiltonian` class object to define the Hamiltonian operator as a matrix.
    Returns
    --------
    obj
        Anticommutator: :math:`\\{H_1,H_2\\}_+ = H_1 H_2 + H_2 H_1`
    """
    # hamiltonian objects overload '*'; plain arrays need explicit .dot()
    # so that '*' is not interpreted as elementwise multiplication.
    uses_hamiltonian = ishamiltonian(H1) or ishamiltonian(H2)
    if uses_hamiltonian:
        return H1 * H2 + H2 * H1
    return H1.dot(H2) + H2.dot(H1)
def test_endpoint_without_authentication():
    """
    This test makes sure that we are able to have endpoints without any authentication.
    """
    secret_key = 'example'
    app = create_app()
    # The JWT backend is installed app-wide, but /no-auth must stay reachable
    # without credentials.
    app.add_middleware(AuthenticationMiddleware, backend=JWTAuthenticationBackend(secret_key=secret_key, algorithm='RS256'))
    client = TestClient(app)
    response = client.get("/no-auth")
    # Unauthenticated requests get auth=null rather than a 401.
    assert response.text == '{"auth":null}'
    assert response.status_code == 200 | 35,387 |
def download(self, auth=False):
    """Download a sample image while reporting task progress.

    :param auth: optional identifier used to name the output file; a falsy
        value falls back to 'test2.jpeg'.
    :return: dict with 'current', 'total' and 'status' describing the
        completed download.
    :raises urllib2.HTTPError: if the server rejects the request.
    """
    source_url = 'http://www.spitzer.caltech.edu/uploaded_files/images/0006/3034/ssc2008-11a12_Huge.jpg'
    # Browser-like headers; some servers reject the default urllib2 agent.
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'none',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    req = urllib2.Request(source_url, headers=hdr)
    if auth:
        out_file = str(auth) + '.jpeg'
    else:
        out_file = 'test2.jpeg'
    try:
        opened = urllib2.urlopen(req)
    except urllib2.HTTPError as e:
        # BUGFIX: previously the error was printed and execution fell
        # through to the unbound 'opened' (NameError). Propagate instead
        # so the task fails explicitly.
        print(e)
        raise
    try:
        total_size = int(opened.info().getheader('Content-Length').strip())
        progress = 0
        self.update_state(state='PROGRESS')
        with open(out_file, 'wb') as f:
            while True:
                chunk = opened.read(CHUNK)
                if not chunk:
                    break
                f.write(chunk)
                progress += CHUNK
                self.update_state(state='PROGRESS',
                                  meta={'current': progress, 'total': total_size, 'status': 'asdfghjk'})
    finally:
        # urllib2 responses are not context managers; close explicitly.
        opened.close()
    return {'current': total_size, 'total': total_size, 'status': 'Download completed!'}
def windowed_dataset(dataset, size, shift=None, stride=1, drop_remainder=True):
    """Create a windowed `Dataset`.
    Arguments:
    dataset: A `Dataset` of output shape ((...), (...), ... (...)) or a `dict`
        of the same.
    size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
        of the input dataset to combine into a window.
    shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the forward
        shift of the sliding window in each iteration. Defaults to `size`.
    stride: A `tf.int64` scalar `tf.Tensor`, representing the stride of the
        input elements in the sliding window.
    drop_remainder: Whether the last window should be dropped if it is
        smaller than `size` (passed through to `Dataset.window`).
    Returns:
    A windowed `Dataset`.
    """
    dataset = dataset.window(size, shift=shift, stride=stride, drop_remainder=drop_remainder)
    def map_fn(nested_structure_of_datasets):
        """nested_structure_of_datasets -> dataset

        `Dataset.window` yields nested structures of sub-datasets; recurse
        through dicts/tuples, batching each leaf dataset into one tensor
        per window, and zip the results back into the original structure.
        """
        structure_type = type(nested_structure_of_datasets)
        if structure_type is dict:
            for k, v in nested_structure_of_datasets.items():
                nested_structure_of_datasets[k] = map_fn(v)
            return tf.data.Dataset.zip(nested_structure_of_datasets)
        if structure_type is tuple:
            return tf.data.Dataset.zip(tuple(map(map_fn, nested_structure_of_datasets)))
        return nested_structure_of_datasets.batch(size)
    # Tuple elements arrive as *args in flat_map; repack them for map_fn.
    if type(dataset.element_spec) is tuple:
        return dataset.flat_map(lambda *e: map_fn(e))
    return dataset.flat_map(map_fn) | 35,389 |
def dnds(seq1, seq2):
    """Main function to calculate dN/dS between two DNA sequences per Nei &
    Gojobori 1986. This includes the per site conversion adapted from Jukes &
    Cantor 1967.

    Returns (pn, ps, dn, ds) rounded to 3 decimals, where pn/ps are the raw
    substitution proportions and dn/ds their Jukes-Cantor corrections.
    """
    # Strip any whitespace from both strings
    seq1 = clean_sequence(seq1)
    seq2 = clean_sequence(seq2)
    # Check that both sequences have the same length
    # assert len(seq1) == len(seq2)
    # Check that sequences are codons
    # assert len(seq1) % 3 == 0
    # Expected synonymous site count (helper defined elsewhere); the rest
    # of the positions are non-synonymous sites.
    syn_sites = syn_sum(seq1, seq2)
    non_sites = len(seq1)-syn_sites
    syn_subs, non_subs = substitutions(seq1, seq2)
    # Proportions of non-synonymous / synonymous substitutions per site.
    pn = float(non_subs)/non_sites
    ps = float(syn_subs)/syn_sites
    # Jukes & Cantor (1967) distance correction.
    # NOTE(review): log(1 - 4p/3) raises a math domain error when p >= 0.75;
    # callers should guard against highly diverged sequences.
    dn = -0.75 * log(1 - (4 * pn / 3))
    ds = -0.75 * log(1 - (4 * ps / 3))
    return round(float(pn), 3), round(float(ps), 3), round(float(dn), 3), round(float(ds), 3) | 35,390 |
def send_t1_to_server_with_action(ptfhost, ptfadapter, tbinfo):
    """
    Starts IO test from T1 router to server.
    As part of IO test the background thread sends and sniffs packets.
    As soon as sender and sniffer threads are in running state, a callback action is performed.
    When action is finished, the sender and sniffer threads are given time to complete.
    Finally, the collected packets are sniffed, and the disruptions are measured.
    As part of teardown, the ARP table is cleared and ptf dataplane is flushed.
    Args:
        ptfhost (fixture): Fixture for PTF instance to be used during the test
        ptfadapter (fixture): Fixture which provides helper utility to use ptf ptf testutils
        tbinfo (fixture): Fixture for testebd inventory information
    Yields:
        function: A helper function to run and monitor the IO test
    """
    arp_setup(ptfhost)
    # Collects every DUT exercised by the helper so teardown can clear ARP
    # on each of them after the yield.
    duthosts = []
    def t1_to_server_io_test(activehost, standbyhost=None, tor_port=None, delay=0, action=None, verify=False):
        """
        Helper method for `send_t1_to_server_with_action`.
        Starts sender and sniffer before performing the action on the tor host.
        Args:
            tor_port (int): Port index (as in minigraph_ptf_indices) which corresponds to PortChannel member port of the activehost.
                default - None. If set to None, the test chooses random PortChannel member port for this test.
            delay (int): Maximum acceptable delay for traffic to continue flowing again.
            action (function): A Lambda function (with optional args) which performs the desired action while the traffic is flowing from server to T1.
                default - `None`: No action will be performed and traffic will run between server to T1 router.
            verify (boolean): If set to True, test will automatically verify packet drops/duplication based on given qualification critera
        """
        duthosts.append(activehost)
        # io_ready is set by DualTorIO once both sender and sniffer run.
        io_ready = threading.Event()
        tor_IO = DualTorIO(activehost, standbyhost, ptfhost, ptfadapter, tbinfo, io_ready, tor_port=tor_port)
        send_and_sniff = threading.Thread(target=tor_IO.start_io_test, kwargs={'traffic_generator': tor_IO.generate_from_t1_to_server})
        send_and_sniff.start()
        if action:
            # do not perform the provided action until IO threads (sender and sniffer) are ready
            io_ready.wait()
            logger.info("Sender and sniffer threads started, ready to execute the callback action")
            action()
        # Wait for the IO to complete before doing checks
        logger.info("Waiting for sender and sniffer threads to finish..")
        send_and_sniff.join()
        generate_test_report(tor_IO)
        if verify:
            # With zero acceptable delay, any disruption is a failure.
            allowed_disruption = 0 if delay == 0 else 1
            validate_no_traffic_loss(tor_IO, allowed_disruption=allowed_disruption, delay=delay)
    yield t1_to_server_io_test
    # cleanup torIO
    ptfadapter.dataplane.flush()
    for duthost in duthosts:
        logger.info('Clearing arp entries on DUT  {}'.format(duthost.hostname))
        duthost.shell('sonic-clear arp') | 35,391 |
def create_LED_indicator_rect(**kwargs) -> QPushButton:
    """Return a rectangular LED-style indicator button.

    False: dim red
    True : green
    """
    # Assemble the Qt stylesheet from its individual rules.
    rules = [
        "QPushButton {",
        "background-color: " + COLOR_INDIAN_RED_2 + ";",
        "color: black;",
        "border: 1px solid black;",
        "border-radius: 0px;",
        "min-height: 30px;",
        "min-width: 76px;}",
        "QPushButton:checked {",
        "background-color: " + COLOR_SPRING_GREEN_2 + ";}",
    ]
    led = QPushButton(checkable=True, enabled=False, **kwargs)
    led.setStyleSheet("".join(rules))
    return led
def make_obstime_plot(data_file, period, ref_mjd=58369.30, save=False,
        show=False, max_freq=2500, min_freq=200):
    """
    Generates observation exposure plot
    :param data_file: json file with data
    :param period: period to use for phase calculation
    :param ref_mjd: reference MJD to use
    :param save: to save the plot (NOTE(review): currently unused)
    :param show: to show the plot (NOTE(review): plt.show() is always called)
    :param max_freq: upper frequency bound (MHz) for the colormap scaling
    :param min_freq: lower frequency bound (MHz) for the colormap scaling
    """
    burst_dict, snr_dict, obs_duration_dict, obs_startmjds_dict, fmin_dict, fmax_dict, fcen_dict = open_json(data_file)
    # Defining duty cycle
    frequency_hours = '%fH' % (24 * period)
    t = Time(ref_mjd, format='mjd')
    t0 = t+((period/2)*u.day)
    tf = datetime.datetime.now()
    # Active window is +/- 16% of the period around each activity peak.
    t0_low = t+((period/2)*u.day) - (0.16 * period * u.day)
    t0_high = t+((period/2)*u.day) + (0.16 * period * u.day)
    df_period = [t0]
    df_duty_low = [t0_low]
    df_duty_high = [t0_high]
    t_activity, t_low, t_high = t0, t0_low, t0_high
    # Step the activity windows forward one period at a time until today.
    while t_activity < tf:
        t_activity += period
        t_low += period
        t_high += period
        df_period.append(t_activity)
        df_duty_low.append(t_low)
        df_duty_high.append(t_high)
    n_periods = len(df_period)
    # DEFINING COLORS
    cm = plt.cm.get_cmap('Spectral_r')
    burst_hist_colors = []
    obs_hist_colors = {}
    # Map each instrument's centre frequency (log scale) onto the colormap.
    for i,k in enumerate(obs_duration_dict.keys()):
        freq = np.log10(fcen_dict[k])
        col = (np.log10(max_freq)-freq)/(np.log10(max_freq)-np.log10(min_freq))
        # c = i/len(obs_duration_dict.keys())
        color = cm(col)
        # if k in burst_dict.keys():
        #     burst_hist_colors.append(color)
        obs_hist_colors[k] = color
    # PLOTTING
    fig = plt.figure(figsize=(7,7))
    gs = gridspec.GridSpec(2,1, wspace=0.01, height_ratios=[3,1])
    # Top panel: burst SNR vs MJD, colored by instrument frequency.
    ax1 = fig.add_subplot(gs[0, 0]) #ax[0]
    for i,k in enumerate(burst_dict.keys()):
        ax1.scatter(burst_dict[k], snr_dict[k],
                color=obs_hist_colors[k], label=k, marker='o', edgecolor='k',
                linewidth=0.5, zorder=10, s=12)
    max_snr = max([m for k in snr_dict.keys()
            for m in snr_dict[k]])*1.1
    ax1.set_ylim(0, max_snr)
    ax1.set_ylabel('SNR')
    # Bottom panel: observation exposure as frequency-band rectangles.
    ax2 = fig.add_subplot(gs[1, 0], sharex=ax1) #ax[1]
    for i, k in enumerate(obs_duration_dict.keys()):
        #d = duration_per_phase_dict[k]
        # ax.scatter(obs_startmjds_dict[k],
        #         [fcen_dict[k] for i in range(len(obs_startmjds_dict[k]))],
        #         color=obs_hist_colors[i])
        obs_patches = []
        for j,start in enumerate(obs_startmjds_dict[k]):
            # Width is the duration converted from hours to days.
            obs = Rectangle((start,fmin_dict[k]), obs_duration_dict[k][j]/24,
                    fmax_dict[k]-fmin_dict[k])
            obs_patches.append(obs)
        pc = PatchCollection(obs_patches, facecolor=obs_hist_colors[k],
                alpha=0.7, edgecolor=obs_hist_colors[k], label=k)
        ax2.add_collection(pc)
    max_mjdstart = max([m for k in obs_startmjds_dict.keys()
            for m in obs_startmjds_dict[k]])
    min_mjdstart = min([m for k in obs_startmjds_dict.keys()
            for m in obs_startmjds_dict[k]])
    max_freq = max(fmax_dict.values())+1e3
    min_freq = min(fmin_dict.values())-10
    ax2.set_xlim(int(min_mjdstart-2), int(max_mjdstart+2))
    ax2.set_ylim(min_freq, max_freq)
    ax2.set_yscale('log')
    ax2.set_xlabel('MJD')
    ax2.set_ylabel('Frequency (MHz)')
    # duty cycle
    # Shade the predicted activity windows and mark the activity peaks.
    for low, high in zip(df_duty_low, df_duty_high):
        ax1.axvspan(low.value, high.value, facecolor='#0f0f0f', alpha=0.1)
        ax2.axvspan(low.value, high.value, facecolor='#0f0f0f', alpha=0.1)
    for peak in df_period:
        ax1.vlines(peak.value, [0 for i in range(n_periods)],
                [max_snr for i in range(n_periods)], linestyles='dashed', alpha=0.2)
        ax2.vlines(peak.value, [min_freq for i in range(n_periods)],
                [max_freq for i in range(n_periods)], linestyles='dashed', alpha=0.2)
    ax1.legend()
    plt.show() | 35,393 |
def handle_activity(bot, ievent):
    """ no arguments - show running threads. """
    # Python 2 code (comma-style except clauses below).
    try: import threading
    except ImportError:
        ievent.reply("threading is not enabled.")
        return
    result = {}
    todo = threadloops
    # Report how long ago each thread loop last iterated.
    for thread in threadloops:
        name = "%s_%s" % (getname(type(thread)), thread.name)
        try: result[name] = date.duration(thread.lastiter, plain=True)
        except Exception, ex: logging.warn("%s - %s" % (name, str(ex)))
    # Same for every bot in the fleet.
    for b in getfleet().bots:
        try: result[b.cfg.name] = date.duration(b.lastiter, plain=True)
        except Exception, ex: logging.warn("%s - %s" % (name, str(ex)))
    ievent.reply("last iterations: ", result) | 35,394 |
async def test_ingress_post(
    fixture_name,
    data_format,
    request,
    async_test_client,
    mock_async_kafka_producer,
    monkeypatch,
    settings,
):
    """
    Parameterized /ingress [POST] test with X12, FHIR, and HL7 inputs
    :param fixture_name: The name of the pytest fixture used for parameterized testing.
    :param data_format: The expected data format for the test case.
    :param request: The pytest request fixture used to dynamically access test case fixtures
    :param async_test_client: An async test client
    :param mock_async_kafka_producer: Mock async kafka producer used to simulate messaging interactions
    :param monkeypatch: The pytest monkeypatch fixture.
    :param settings: Mocked connect configuration settings.
    """
    fixture = request.getfixturevalue(fixture_name)
    with monkeypatch.context() as m:
        # Replace messaging/storage collaborators so no external services run.
        m.setattr(kafka, "ConfluentAsyncKafkaProducer", mock_async_kafka_producer)
        m.setattr(CoreWorkflow, "synchronize", AsyncMock())
        m.setattr(nats, "get_nats_client", AsyncMock(return_value=AsyncMock()))
        m.setattr(nats, "get_jetstream_context", AsyncMock(return_value=AsyncMock()))
        async with async_test_client as ac:
            # remove external server setting
            settings.connect_external_fhir_servers = []
            ac._transport.app.dependency_overrides[get_settings] = lambda: settings
            actual_response = await ac.post("/ingress", json={"data": fixture})
            assert actual_response.status_code == 200
            actual_json = actual_response.json()
            assert actual_json["uuid"]
            assert actual_json["operation"] == "POST"
            assert actual_json["creation_date"]
            assert actual_json["store_date"]
            assert actual_json["consuming_endpoint_url"] == "/ingress"
            assert actual_json["data"]
            assert actual_json["data_format"] == data_format
            assert actual_json["status"] == "success"
            assert data_format in actual_json["data_record_location"]
            assert actual_json["target_endpoint_urls"] == []
            assert actual_json["ipfs_uri"] is None
            assert actual_json["elapsed_storage_time"] > 0
            # NOTE(review): duplicated assertion below -- the second line was
            # probably intended to check a different elapsed_* field.
            assert actual_json["elapsed_storage_time"] > 0
            assert actual_json["transmit_date"] is None
            assert actual_json["elapsed_transmit_time"] is None
            assert actual_json["elapsed_total_time"] > 0
            assert actual_json["transmission_attributes"] is None | 35,395 |
def check_data_struct():
    """
    Check that all data is in place first
    Raises FileNotFoundError for the first missing required path.
    """
    required_paths = (
        PROJECT_ROOT+'/data',
        PROJECT_ROOT+'/data/CUB_200_2011',
        PROJECT_ROOT+'/data/segmentations',
        PROJECT_ROOT+'/data/attributes.txt',
    )
    # Fail fast on the first missing item, preserving the original check order.
    for path in required_paths:
        if not os.path.exists(path):
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
def bind(task):
    """Bind a task method for use in a pipeline.

    The bound task receives a Result. On a successful Result the task is
    invoked with the Result's payload; a failure Result is passed through
    untouched, short-circuiting the task entirely.

    Args:
        task: A task method that returns a Result

    Returns:
        function: Bound task that accepts and returns a Result
    """
    @wraps(task)
    def inner(result):
        # Failures bypass the task and propagate unchanged.
        if not result.success:
            return result
        return task(result.payload)
    return inner
def display_all_spellings(phone_num):
    """ (str) ->
    Displays all posible phone numbers with the last four digits
    (characters 8-11) replaced with corresponding letters from the
    phone keypad; the first 8 characters are printed unchanged.
    """
    keypad = {'0':'0', '1':'1', '2': ('a','b','c'), '3': ('d','e','f'),
              '4': ('g','h','i'), '5': ('j','k','l'), '6': ('m','n','o'),
              '7': ('p','q','r','s'), '8': ('t','u','v'), '9': ('w','x','y','z')}
    prefix = phone_num[0:8]
    # Enumerate every keypad-letter combination for the last four digits.
    for c1 in keypad[phone_num[8]]:
        for c2 in keypad[phone_num[9]]:
            for c3 in keypad[phone_num[10]]:
                for c4 in keypad[phone_num[11]]:
                    print('{0}{1}{2}{3}{4}'.format(prefix, c1, c2, c3, c4))
def test_fast_3():
    """Test for fast prediction time, using knn, series 3"""
    # Builds the 'fast' (knn-based) pipeline on a 15-minute-resampled series.
    model_3 = Pipeline(filename='./tests/test_series/Serie3.csv',
                       type='fast',
                       freq='15T',
                       targetcol='INSTALACIONES [kWh]',
                       datecol='MSJO_DATUM',
                       sep=';',
                       decimal='.',
                       date_format="%d/%m/%Y %H:%M",
                       plot=True)
    model_3.fit()
    # r_error is presumably a relative/percentage error -- TODO confirm units.
    assert model_3.r_error < 100 | 35,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.