| content (string) | id (int64) |
|---|---|
def get_paramvals_percentile(table, percentile, chi2_arr):
"""
    Isolate the lowest chi^2 values within the given percentile and draw a
    random sample of 10 of them
    Parameters
    ----------
    table: pandas dataframe
        Mcmc chain dataframe
    percentile: int
        Percentile to use
    chi2_arr: array
        Array of chi^2 values
    Returns
    ---------
    mcmc_table_pctl: pandas dataframe
        Random sample of 10 rows drawn from the lowest chi^2 values within
        the given percentile
    bf_params: array
        Parameters corresponding to the single lowest chi^2 value
"""
percentile = percentile/100
table['chi2'] = chi2_arr
table = table.sort_values('chi2').reset_index(drop=True)
slice_end = int(percentile*len(table))
mcmc_table_pctl = table[:slice_end]
# Best fit params are the parameters that correspond to the smallest chi2
bf_params = mcmc_table_pctl.drop_duplicates().reset_index(drop=True).\
values[0][:5]
    # Sample a random 10 of the lowest chi2 rows
mcmc_table_pctl = mcmc_table_pctl.drop_duplicates().sample(10)
return mcmc_table_pctl, bf_params
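# Hedged usage sketch (column names and sizes below are illustrative only,
# not from the original analysis):
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
chain = pd.DataFrame(rng.normal(size=(200, 5)),
                     columns=["p1", "p2", "p3", "p4", "p5"])
chi2 = rng.uniform(10, 100, size=200)
sample, best_fit = get_paramvals_percentile(chain, 68, chi2)
# sample: 10 random rows from the lowest-chi^2 68%; best_fit: the 5 parameters
# of the single lowest-chi^2 row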
| 16,300
|
def get_text_from_span(s, span):
    """
    Return the text for the given (start, end) span of s, a list of words.
    """
    start, end = span
return " ".join(s[start: end])
| 16,301
|
def __timestamp():
"""Generate timestamp data for pyc header."""
today = time.time()
ret = struct.pack(b'=L', int(today))
return ret
| 16,302
|
def reverse_index(alist, value):
"""Finding the index of last occurence of an element"""
return len(alist) - alist[-1::-1].index(value) -1
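# Minimal check of the behavior (illustrative values): the reversed-list trick
# returns the last occurrence, while list.index returns the first.
assert reverse_index([1, 2, 3, 2], 2) == 3
assert [1, 2, 3, 2].index(2) == 1  # first occurrence, for contrast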
| 16,303
|
def check_subset(set_a, set_b, fail_action, *args, **kwargs):
"""
"""
for x in set_a:
if x not in set_b:
fail_action(x, *args, **kwargs)
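# Hedged usage sketch; collect_missing is a hypothetical fail_action that
# records every element of the first set absent from the second.
def collect_missing(x, missing):
    missing.append(x)

missing_items = []
check_subset({"a", "b", "c"}, {"a"}, collect_missing, missing_items)
# missing_items now holds "b" and "c" (in arbitrary set order)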
| 16,304
|
def generate_twist(loops, non_interacting=False):
"""Generate initial configuration to start braid moves where the active end has crossed outside the loops and they have an initial twist.
Format: ' โ โ โโ'
'โโโโโโโ'
'โโ โ โ '
'โโโโ โ '
' โโโ โ '
'โโโโ โ '
'โโ โ โ '
Keyword arguments:
non_interacting -- loops which the active end cannot interact with (default False)
-- if False, all loops are interactable
-- if Integer (n), n loops randomly selected to be non-interactive
-- if List (j,k,l), loops j, k and l (from left) are non-interactive
"""
# we can use the peppino generator for the first part of this configuration
# we just add the additional lines
spaces = (loops * 2) + 1
row_1, row_2, row_3 = generate_peppino(loops, non_interacting)
if row_3[1] == "โ":
first_loop = "โ"
else:
first_loop = "โ"
# row 4
row_4 = list(row_3)
# add first crossing
row_4[0] = "โ"
row_4[1] = "โ"
row_4[2] = "โ"
# row 5
row_5 = list(row_3)
row_5[0] = " "
row_5[1] = first_loop
row_5[2] = "โ"
# row 6
row_6 = list(row_3)
row_6[0] = "โ"
row_6[1] = first_loop
row_6[2] = "โ"
# row 7
row_7 = list(row_3)
return (
row_1,
row_2,
row_3,
"".join(row_4),
"".join(row_5),
"".join(row_6),
"".join(row_7),
)
| 16,305
|
def get_pathway_id_names_dict():
"""
Given a pathway ID get its name
:return: pathway_id_names_dict
"""
# Fixme: This is not analysis specfic (I think, KmcL) I believe any analysis should do
# A fix is for this is probably wise.
analysis = Analysis.objects.get(name='Tissue Comparisons')
pals_df = get_cache_df(MIN_HITS, analysis)
pathway_id_names_dict = {}
for ix, row in pals_df.iterrows():
pathway_id_names_dict[row.pw_name] = ix
return pathway_id_names_dict
| 16,306
|
def IsShuttingDown(_shutting_down=_shutting_down):
"""
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
return _shutting_down[0]
| 16,307
|
def ellipse_center(a):
"""
Parameters
----------
a : fitted_ellipse_obj
"""
    # coefficients of the general conic A*x^2 + B*x*y + C*y^2 + D*x + F*y + G = 0,
    # mapped onto the a, 2b, c, 2d, 2f, g convention used by the center formula
    b, c, d, f, g, a = a[1] / 2, a[2], a[3] / 2, a[4] / 2, a[5], a[0]
    num = b * b - a * c
    x0 = (c * d - b * f) / num
    y0 = (a * f - b * d) / num
    return np.array([x0, y0])
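# Worked example (hedged: assumes the input holds the general-conic coefficients
# [A, B, C, D, F, G] of A*x^2 + B*x*y + C*y^2 + D*x + F*y + G = 0, as implied by
# the halving above). The circle x^2 + y^2 - 2*x - 4*y + 1 = 0 is centered at (1, 2):
# ellipse_center(np.array([1.0, 0.0, 1.0, -2.0, -4.0, 1.0])) -> array([1., 2.])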
| 16,308
|
def test_task_cl_bump():
"""Test task_cl_bump."""
result = task_cl_bump()
actions = result['actions']
assert len(actions) == 4
assert isinstance(actions[1][0], type(_move_cl))
assert 'poetry run cz bump --annotated-tag' in str(actions[2])
assert actions[3] == 'git push origin --tags --no-verify'
| 16,309
|
def callDiffLoops(
tl,
cl,
td,
cd,
output,
cut=0,
mcut=-1,
cpu=1,
pcut=1e-2,
igp=False,
noPCorr=False,
fdrcut=0.05,
juicebox=False,
washU=False,
customize=False,
cacut=0.0,
cmcut=0.0,
vmin=None,
vmax=None,
cmap=None,
):
"""
Call differentially enriched loops
@param tl: str, file of _loops.txt for treatment sample
@param cl: str, file of _loops.txt for control sample
@param td: str, directory generated by cLoops2 pre for treatment sample
@param cd: str, directory generated by cLoops2 pre for control sample
@param output: str, prefix for output file
    @param cut: int, distance cutoff for estimation of difference significance, >=cut
    @param mcut: int, distance cutoff for estimation of difference significance, <=mcut
    @param cpu: int, number of cpus used
    @param pcut: float, p-value cutoff after Bonferroni correction
    @param igp: bool, whether to ignore the p-value cutoff
    @param noPCorr: bool, whether to skip Bonferroni correction of p-values, default is to correct
    @param fdrcut: float, FDR cutoff for the background used to estimate Mcut and Acut
    @param juicebox: bool, whether to also write the loops as juicebox 2D annotation files
    @param washU: bool, whether to also write the loops as washU track files
    @param customize: bool, if True, use user-provided MA M cut and A cut
    @param cacut: float, used if customize is True, A cutoff for the MA plot
    @param cmcut: float, used if customize is True, M cutoff for the MA plot
    @param vmin: float, minimum value for the aggregated differential loops heatmap
    @param vmax: float, maximum value for the aggregated differential loops heatmap
    @param cmap: str, color map string option
"""
#data name
if td.endswith("/"):
td = td[:-1]
if cd.endswith("/"):
cd = cd[:-1]
tname = td.split("/")[-1]
cname = cd.split("/")[-1]
#read in loops
tloops = parseTxt2Loops(tl)
cloops = parseTxt2Loops(cl)
#process meta information
na = td.split("/")[-1] #name of sample directory
tmetaf = td + "/petMeta.json"
tmeta = json.loads(open(tmetaf).read())
nb = cd.split("/")[-1]
cmetaf = cd + "/petMeta.json"
cmeta = json.loads(open(cmetaf).read())
#total PETs
ta = tmeta["Unique PETs"]
tb = cmeta["Unique PETs"]
#chromosomes for testing
keys = set(tmeta["data"]["cis"].keys()).intersection(
set(cmeta["data"]["cis"].keys()))
# step 1, merge the overlapped loops
mloops = mergeLoops(tloops, na, cloops, nb)
keys = list(keys.intersection(mloops.keys()))
# step 2, quantify the loops in two conditions
ds = Parallel(n_jobs=cpu, backend="multiprocessing")(delayed(quantDloops)(
key,
mloops[key],
tmeta["data"]["cis"][key]["ixy"],
cmeta["data"]["cis"][key]["ixy"],
cut=cut,
mcut=mcut,
) for key in keys)
ts, cs = [], []
dloops = {}
for d in ds:
if d is None:
continue
ts.extend(d[1])
cs.extend(d[2])
dloops[d[0]] = d[3]
# step 3, estimate the fitting parameters, cutoffs based on MANorm
sf, acut, mcut = getBgNorm(cs, ts, output, fdrcut=fdrcut)
# check whether to use customized cutoffs
if customize:
acut = cacut
mcut = cmcut
# step 4, estimate the difference significance
ds = Parallel(n_jobs=cpu,
backend="multiprocessing")(delayed(estLoopDiffSig)(
key,
sf,
ta,
tb,
dloops[key],
) for key in keys)
dloops = []
cs, ts, ts2 = [], [], []
for d in ds:
if d is None:
continue
dloops.extend(d[0])
cs.extend(d[1])
ts.extend(d[2])
ts2.extend(d[3])
#step 5, p-values Bonferroni correction and determine whether significant
dloops = markDiffSig(dloops, acut, mcut, pcut=pcut,igp=igp,noPCorr=noPCorr)
sigIndex = [i for i, loop in enumerate(dloops) if loop.significant > 0]
# step 6, write the result
dloops2txt(dloops, output + "_dloops.txt")
# step 7, write the result as washU or juicebox
tloops = [
dloop for dloop in dloops
if dloop.significant > 0 and dloop.scaled_fc > 0
]
cloops = [
dloop for dloop in dloops
if dloop.significant > 0 and dloop.scaled_fc < 0
]
dloops2txt( tloops, output + "_" + tname +"_specific_dloops.txt")
dloops2txt( cloops, output + "_" + cname +"_specific_dloops.txt")
comloops = [dloop for dloop in dloops if dloop.significant <1]
if juicebox:
dloops2juiceTxt(tloops, output + "_" + tname + "_loops_juicebox.txt")
dloops2juiceTxt(cloops, output + "_" + cname + "_loops_juicebox.txt")
dloops2juiceTxt(comloops, output + "_common_loops_juicebox.txt",significant=0)
if washU:
loops2washuTxt(tloops, output + "_" + tname + "_loops_legacyWashU.txt")
loops2washuTxt(cloops, output + "_" + cname + "_loops_legacyWashU.txt")
loops2washuTxt(comloops, output + "_common_loops_legacyWashU.txt",significant=0)
dloops2NewWashuTxt(tloops,
output + "_" + tname + "_loops_newWashU.txt")
dloops2NewWashuTxt(cloops,
output + "_" + cname + "_loops_newWashU.txt")
dloops2NewWashuTxt(comloops, output + "_common_loops_newWashU.txt",significant=0)
# step 8, show plot
#ma plot
plotDiffLoopsMA(sigIndex, cs, ts, ts2, tname, cname, mcut, acut, output)
#volcano plot
plotDiffLoopsVolcano(output + "_dloops.txt",
output,
tname,
cname,
fccut=mcut,
pcut=pcut)
#plot aggregated differential loops
plotDiffAggLoops(dloops, output, tl, cl, td, cd, cpu=cpu, norm=True,vmin=vmin,vmax=vmax,cmap=cmap)
| 16,310
|
def _to_average_temp(name, temperature_map):
"""
    Converts the list of temperatures associated with a label to a list of average temperatures.
If the sensor does not exist, it will return _default_temperature. If the high or critical temperature thresholds
are invalid, it will use the values from _default_temperature instead.
:param name: Name of the sensor to check.
:param temperature_map: Dictionary of temperatures, as returned by psutil.sensors_temperatures
:return: List containing the current, high and critical temperatures of the label.
"""
if name not in temperature_map:
return _default_temperature
temps = [0.0, 0.0, 0.0]
for temp in temperature_map[name]:
current = temp.current if temp.current is not None and temp.current > -50.0 else _default_temperature[0]
high = temp.high if temp.high is not None and temp.high > 0.0 else _default_temperature[1]
critical = temp.critical if temp.critical is not None and temp.critical > 0.0 else _default_temperature[2]
temps[0] += current
temps[1] += high
temps[2] += critical
size = float(len(temperature_map[name]))
temps[0] = _round(temps[0] / size)
temps[1] = _round(temps[1] / size)
temps[2] = _round(temps[2] / size)
return temps
| 16,311
|
def segments_decode(aseg):
"""
Decode segments.
Parameters
----------
    aseg : numpy.ndarray of uint32
Returns
-------
segments : list of list of int
"""
    sentinel = 2 ** 32 - 1  # segment terminator; avoids shadowing the builtin max
    segments = []
    seg = []
    for x in list(aseg):
        if x == sentinel:
            segments.append(seg)
            seg = []
        else:
            seg.append(x)
return segments
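# Hedged usage sketch: 2**32 - 1 acts as the segment terminator, so the array
# below decodes into two segments, [7, 8] and [9] (illustrative values only).
# segments_decode(np.array([7, 8, 2**32 - 1, 9, 2**32 - 1], dtype=np.uint32))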
| 16,312
|
def generate_sbm_network(input_file: "yaml configuration file") -> None:
"""
This function generates a simulated network, using the block model matrix given as input and saves both the network and the cluster nodes.
All parameters must be specified in a yaml file.
    This function allows creating a network and geneset for any type of SBM.
"""
ym = YamlConfig()
config = ym.load_config(input_file)
print(config)
bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"],
nodes_percentage=config["BlockModel"]["nodes_percentage"])
outpath = config["Simulations"]["output_folder"]
suffix = config["Simulations"]["suffix"]
for i in range(config["Simulations"]["n_simulated"]):
bm.create_graph()
bm.write_network(outpath + suffix + "_s_" + str(i) + "_network.tsv")
bm.write_cluster_genelist(outpath + suffix + "_s_" + str(i) + "_genes.gmt")
# bm.plot_graph(outpath+suffix+"_s_"+str(i))
| 16,313
|
def cvGetReal3D(*args):
"""cvGetReal3D(CvArr arr, int idx0, int idx1, int idx2) -> double"""
return _cv.cvGetReal3D(*args)
| 16,314
|
def parse_root_firefox(root, root_folder):
"""
Function to parse the root of the firefox bookmark tree
"""
# create bookmark menu folder
bookmarks = Folder(title="Bookmarks Menu", parent_id=root_folder.id)
bookmarks.insert()
for node in root:
# skip node if not <DT>
if node.name != "dt":
continue
# get tag of first node child
element = node.contents[0]
tag = element.name
if tag == "h3":
# check for special folders (Other Bookmarks / Toolbar)
# add them to root level instead of inside bookmarks
if element.get("personal_toolbar_folder") or element.get(
"unfiled_bookmarks_folder"
):
recursive_parse(node, root_folder.id)
else:
recursive_parse(node, bookmarks.id)
elif tag == "a":
parse_url(node.contents[0], bookmarks.id)
| 16,315
|
def request_cluster(argv):
"""
only request cluster on GCE, and output all configuration information
:param argv: sys.argv
:return: None
"""
if len(argv) < 7:
print_help()
exit(1)
cluster_name = argv[2]
ambari_agent_vm_num = int(argv[3])
docker_num = int(argv[4])
service_server_num = int(argv[5])
with_ambari_server = False
ambari_server_num = int(argv[6])
if ambari_server_num > 0:
with_ambari_server = True
cluster = Cluster()
cluster.request_gce_cluster(ambari_agent_vm_num, docker_num, service_server_num,
with_ambari_server, cluster_name)
time_to_wait = Config.ATTRIBUTES["gce_boot_time"]
print "wait ", str(time_to_wait), " seconds for the cluster to boot ... ..."
time.sleep(int(time_to_wait))
data = Data()
data.add_new_cluster(cluster)
print "complete"
| 16,316
|
def get_wf_double_FF_opt(
molecule,
pcm_dielectric,
linked=False,
qchem_input_params=None,
name="douple_FF_opt",
db_file=">>db_file<<",
**kwargs,
):
"""
Firework 1 : write QChem input for an FF optimization,
run FF_opt QCJob,
parse directory and insert into db,
pass relaxed molecule to fw_spec and on to fw2,
Firework 2 : write QChem input for an optimization in the
presence of a PCM, using the molecule passed
from fw1,
run FF_opt QCJob,
parse directory and insert into db
Args:
molecule (Molecule): input molecule to be optimized and run.
pcm_dielectric (float): The PCM dielectric constant.
max_cores (int): Maximum number of cores to parallelize over. Defaults to 32.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For instance,
if a user wanted to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set qchem_input_params =
{"dft_rung": 5, "pcm_dielectric": 30, "basis_set": "6-311++g**"}. However, more
advanced customization of the input is also possible through the overwrite_inputs
key which allows the user to directly modify the rem, pcm, smd, and solvent
dictionaries that QChemDictSet passes to inputs.py to print an actual input file.
For instance, if a user wanted to set the sym_ignore flag in the rem section of the
input file to true, then they would set qchem_input_params = {"overwrite_inputs":
"rem": {"sym_ignore": "true"}}. Of course, overwrite_inputs could be used in
conjunction with more typical modifications, as seen in the test_double_FF_opt
workflow test.
qchem_cmd (str): Command to run QChem.
db_file (str): path to file containing the database credentials.
kwargs (keyword arguments): additional kwargs to be passed to Workflow
Returns:
Workflow
"""
first_qchem_input_params = qchem_input_params or {}
# Optimize the molecule in vacuum
fw1 = FrequencyFlatteningOptimizeFW(
molecule=molecule,
name="first_FF_no_pcm",
qchem_cmd=">>qchem_cmd<<",
max_cores=">>max_cores<<",
qchem_input_params=first_qchem_input_params,
linked=linked,
db_file=db_file,
)
# Optimize the molecule in PCM
second_qchem_input_params = {"pcm_dielectric": pcm_dielectric}
for key in first_qchem_input_params:
second_qchem_input_params[key] = first_qchem_input_params[key]
fw2 = FrequencyFlatteningOptimizeFW(
name="second_FF_with_pcm",
qchem_cmd=">>qchem_cmd<<",
max_cores=">>max_cores<<",
qchem_input_params=second_qchem_input_params,
linked=linked,
db_file=db_file,
parents=fw1,
)
fws = [fw1, fw2]
wfname = f"{molecule.composition.reduced_formula}:{name}"
return Workflow(fws, name=wfname, **kwargs)
| 16,317
|
def return_stack():
"""
    Create the stack trace of the obtained exception.
    :return: string containing the first two lines of the formatted traceback.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
return lines[0] + lines[1]
| 16,318
|
def get_element_action_names(element):
"""Get a list of all the actions the specified accessibility object can perform.
Args:
element: The AXUIElementRef representing the accessibility object
Returns: an array of actions the accessibility object can perform
(empty if the accessibility object supports no actions)
"""
error_code, names = AXUIElementCopyActionNames(element, None)
error_messages = {
kAXErrorIllegalArgument: "One or both of the arguments is an illegal value.",
kAXErrorInvalidUIElement: "The AXUIElementRef is invalid.",
kAXErrorFailure: "There was some sort of system memory failure.",
kAXErrorCannotComplete: "The function cannot complete "
"because messaging has failed in some way.",
kAXErrorNotImplemented: "The process does not fully support the accessibility API.",
}
check_ax_error(error_code, error_messages)
return names
| 16,319
|
def merge_task_entity_yamls(
task_config_path: str, entity_config_path: str, merged_path: str
):
"""
Merge task yaml file and entity yaml file into a task-entity yaml
file.
:param task_config_path: the path to the task yaml file
:param entity_config_path: the path to the entity yaml file
:param merged_path: the output path of the merged yaml file
:return: None, the merged yaml file will be saved
"""
task_dict = load_yaml(task_config_path)
entity_dict = load_yaml(entity_config_path)
for task in task_dict["Task"]:
if "entities" in task_dict["Task"][task]:
for entity in task_dict["Task"][task]["entities"]:
if "type" in entity_dict["Entity"][entity]:
task_dict["Task"][task]["entities"][entity]["type"] = entity_dict[
"Entity"
][entity]["type"]
if "methods" in entity_dict["Entity"][entity]:
task_dict["Task"][task]["entities"][entity]["methods"] = deepcopy(
entity_dict["Entity"][entity]["methods"]
)
if "suggest_value" in entity_dict["Entity"][entity]:
task_dict["Task"][task]["entities"][entity][
"suggest_value"
] = entity_dict["Entity"][entity]["suggest_value"]
save_yaml(task_dict, merged_path)
| 16,320
|
def accuracy_on_imagenet_c(data_loaders, model, args, writer, num_iteration):
"""Computes model accuracy and mCE on ImageNet-C"""
print("Performance on ImageNet-C:")
model.eval()
ce_alexnet = get_ce_alexnet()
with torch.no_grad():
top1_in_c = AverageMeter('Acc_IN_C@1', ':6.2f')
top5_in_c = AverageMeter('Acc_IN_C@5', ':6.2f')
top1_in_c_wo_noises = AverageMeter('Acc_IN_C_wo_Noises@1', ':6.2f')
top5_in_c_wo_noises = AverageMeter('Acc_IN_C_wo_Noises@5', ':6.2f')
mce, counter = 0, 0
for name, data_loader in data_loaders.items():
top1_tmp = AverageMeter('Acc_tmp@1', ':6.2f')
top5_tmp = AverageMeter('Acc_tmp@5', ':6.2f')
for severity, loader in data_loader.items():
top1_sev_tmp = AverageMeter('Acc_sev_tmp@1', ':6.2f')
ct = 0
for data, labels in loader:
data, labels = data.to(args.device), labels.to(args.device)
logits = model(data)
acc1, acc5 = get_accuracy(logits, labels, (1, 5))
top1_in_c.update(acc1[0], data.size(0))
top5_in_c.update(acc5[0], data.size(0))
top1_sev_tmp.update(acc1[0], data.size(0))
top1_tmp.update(acc1[0], data.size(0))
top5_tmp.update(acc5[0], data.size(0))
if name not in ['Gaussian Noise', 'Shot Noise', 'Impulse Noise']:
top1_in_c_wo_noises.update(acc1[0], data.size(0))
top5_in_c_wo_noises.update(acc5[0], data.size(0))
ct += 1
if ct == 50 and not args.evaluate:
break
args.IN_C_Results[name][int(severity)+1] = top1_sev_tmp.avg.item()
print("{0}: Severity: {1}, Top1 accuracy {2:.2f}".format(name, severity, top1_sev_tmp.avg.item()),
file=args.file)
# get Corruption Error CE:
CE = get_mce_from_accuracy(top1_tmp.avg.item(), ce_alexnet[name])
mce += CE
counter += 1
# Logging:
print("{0}: Top1 accuracy {1:.2f}, Top5 accuracy: {2:.2f}, CE: {3:.2f}\n".format(
name, top1_tmp.avg.item(), top5_tmp.avg.item(), 100. * CE))
writer.add_scalar(f'IN-C/Accuracy {name}', top1_tmp.avg.item(), num_iteration)
writer.add_scalar(f'IN-C/Accuracy {name}top5', top5_tmp.avg.item(), num_iteration)
args.IN_C_Results[name][int(num_iteration/10)] = top1_tmp.avg.item()
mce /= counter
print("Full ImageNet-C: Top1 accuracy {0:.2f}, Top5 accuracy: {1:.2f}, mCE: {2:.2f}\n".format(
top1_in_c.avg.item(),
top5_in_c.avg.item(),
mce * 100.), file=args.file)
print("ImageNet-C w/o Noises: : Top1 accuracy: Top1 accuracy {0:.2f}, Top5 accuracy: {1:.2f}\n".format(
top1_in_c_wo_noises.avg.item(),
top5_in_c_wo_noises.avg.item()), file=args.file)
writer.add_scalar('IN-C/mCE', mce * 100., num_iteration)
writer.add_scalar('IN-C/Accuracy Full ImageNet-C', top1_in_c.avg.item(), num_iteration)
writer.add_scalar('IN-C/Accuracy ImageNet-C w/o noises', top1_in_c_wo_noises.avg.item(), num_iteration)
writer.add_scalar('IN-C/Accuracy Full ImageNet-C top5', top5_in_c.avg.item(), num_iteration)
writer.add_scalar('IN-C/Accuracy ImageNet-C w/o noises top5', top5_in_c_wo_noises.avg.item(), num_iteration)
writer.add_scalar('Val/Accuracy Full ImageNet-C', top1_in_c.avg.item(), num_iteration)
path = args.exp_path + '/IN_C_Results_resnet_50.npy'
np.save(path, args.IN_C_Results)
return
| 16,321
|
def ParseFile(fname):
"""Parse a micrcode.dat file and return the component parts
Args:
fname: Filename to parse
Returns:
3-Tuple:
date: String containing date from the file's header
license_text: List of text lines for the license file
microcodes: List of Microcode objects from the file
"""
    re_date = re.compile(r'/\* *(.* [0-9]{4}) *\*/$')
    re_license = re.compile(r'/[^-*+] *(.*)$')
    re_name = re.compile(r'/\* *(.*)\.inc *\*/', re.IGNORECASE)
microcodes = {}
license_text = []
date = ''
data = []
name = None
with open(fname) as fd:
for line in fd:
line = line.rstrip()
m_date = re_date.match(line)
m_license = re_license.match(line)
m_name = re_name.match(line)
if m_name:
if name:
microcodes[name] = Microcode(name, data)
name = m_name.group(1).lower()
data = []
elif m_license:
license_text.append(m_license.group(1))
elif m_date:
date = m_date.group(1)
else:
data.append(line)
if name:
microcodes[name] = Microcode(name, data)
return date, license_text, microcodes
| 16,322
|
def run_server():
"""Runs the Tornado Server and begins Kafka consumption"""
if topic_check.topic_exists("TURNSTILE_SUMMARY") is False:
logger.fatal(
"Ensure that the KSQL Command has run successfully before running the web server!"
)
exit(1)
if topic_check.topic_exists("org.chicago.cta.stations.table.v1") is False:
logger.fatal(
"Ensure that Faust Streaming is running successfully before running the web server!"
)
exit(1)
weather_model = Weather()
lines = Lines()
application = tornado.web.Application(
[(r"/", MainHandler, {"weather": weather_model, "lines": lines})]
)
application.listen(8888)
# Build kafka consumers
consumers = [
KafkaConsumer(
topic_name_pattern="^mh_weather_channel", # "org.chicago.cta.weather.v1",
message_handler=weather_model.process_message,
offset_earliest=True,
),
KafkaConsumer(
"mh_station_db_stations", # "org.chicago.cta.stations.table.v1",
lines.process_message,
offset_earliest=True,
is_avro=False,
),
KafkaConsumer(
"^mh_station_arrival_", # "^org.chicago.cta.station.arrivals.",
lines.process_message,
offset_earliest=True,
),
KafkaConsumer(
"TURNSTILE_SUMMARY",
lines.process_message,
offset_earliest=True,
is_avro=False,
),
]
try:
logger.info(
"Open a web browser to http://localhost:8888 to see the Transit Status Page"
)
for consumer in consumers:
tornado.ioloop.IOLoop.current().spawn_callback(consumer.consume)
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt as e:
logger.info("shutting down server")
tornado.ioloop.IOLoop.current().stop()
for consumer in consumers:
consumer.close()
| 16,323
|
def create_mesh_node(world, node, mesh_ids, parent=None):
""" Creates a node of type Mesh.
"""
with underworlds.Context("edit-tool") as ctx:
target_world = ctx.worlds[world]
new_node = Mesh()
new_node.properties["mesh_ids"] = mesh_ids
new_node.name = node
new_node.parent = _get_id(target_world, parent) # if parent is None, will be automatically parented to root by the server
logger.info("Created Mesh Node %s with meshes %s in world %s"%(repr(new_node),str(new_node.properties["mesh_ids"]), world))
target_world.scene.nodes.append(new_node)
| 16,324
|
def cal_q_vel(guidance_v):
"""
    For now, use the default reference velocity for the optimization; once debugging has matured, use the rough velocity instead.
:return:
"""
q_vel = numpy.zeros((1, n_t + 1))
if flag_obs == 0:
q_vel[0][0] = -ref_v
q_vel[0][n_t] = ref_v
if flag_obs == 1:
for i in range(n_t + 1):
if i < 1:
q_vel[0][i] = -guidance_v[0][i]
elif i >= n_t:
q_vel[0][i] = guidance_v[0][i - 1]
else:
q_vel[0][i] = guidance_v[0][i - 1] - guidance_v[0][i]
# print('q_vel:', numpy.shape(q_vel), q_vel)
return q_vel
| 16,325
|
def tf_decode(
ref_pts,
ref_theta,
bin_x,
res_x_norm,
bin_z,
res_z_norm,
bin_theta,
res_theta_norm,
res_y,
res_size_norm,
mean_sizes,
Ss,
DELTAs,
R,
DELTA_THETA,
):
"""Turns bin-based box3d format into an box_3d
Input:
ref_pts: (B,p,3) [x,y,z]
ref_theta: (B,p) [ry] or a constant value
bin_x: (B,p,K), bin assignments along X-axis
res_x_norm: (B,p,K), normalized residual corresponds to bin_x
bin_z: (B,p,K), bin assignments along Z-axis
res_z_norm: (B,p,K), normalized residual corresponds to bin_z
bin_theta: (B,p,K), bin assignments for orientation
res_theta_norm: (B,p,K), normalized residual corresponds to bin_theta
res_y: (B,p,K), residual w.r.t. ref_pts along Y-axis
res_size_norm: (B,p,K,3), residual w.r.t. the average object size [l,w,h]
mean_sizes, (B,p,K,3), average object size [l,w,h]
Ss: XZ search range for different classes [-Ss, +Ss]
DELTAs: XZ_BIN_LENs for different classes
R: THETA search range [-R, +R]
DELTA_THETA: THETA_BIN_LEN = 2 * R / NUM_BIN_THETA
Output:
boxes_3d: (B,p,K,7) 3D box in box_3d format [x, y, z, l, w, h, ry]
"""
ndims = ref_pts.shape.ndims
dx = (tf.to_float(bin_x) + 0.5) * DELTAs - Ss + res_x_norm * DELTAs
dz = (tf.to_float(bin_z) + 0.5) * DELTAs - Ss + res_z_norm * DELTAs
if ndims == 3: # rpn
K = tf.shape(bin_x)[2]
if isinstance(ref_theta, tf.Tensor):
# rotate along y
all_rys = ref_theta
ry_sin = tf.sin(all_rys)
ry_cos = tf.cos(all_rys)
rot_mats = tf.stack(
[
tf.stack([ry_cos, ry_sin], axis=2),
tf.stack([-ry_sin, ry_cos], axis=2),
],
axis=3,
)
rot_mats = tf.tile(tf.expand_dims(rot_mats, 2), [1, 1, K, 1, 1])
dxz_rot = tf.matmul(
rot_mats,
tf.expand_dims(tf.stack([dx, dz], axis=3), axis=3),
transpose_a=True,
transpose_b=True,
)
dxz_rot = tf.squeeze(tf.matrix_transpose(dxz_rot), axis=3)
dx = dxz_rot[:, :, :, 0]
dz = dxz_rot[:, :, :, 1]
else:
assert ref_theta == 0
ref_pts_tiled = tf.tile(tf.expand_dims(ref_pts, axis=2), [1, 1, K, 1])
x = dx + ref_pts_tiled[:, :, :, 0]
z = dz + ref_pts_tiled[:, :, :, 2]
y = res_y + ref_pts_tiled[:, :, :, 1]
elif ndims == 2: # rcnn
K = tf.shape(bin_x)[1]
if isinstance(ref_theta, tf.Tensor):
# rotate along y
all_rys = ref_theta
ry_sin = tf.sin(all_rys)
ry_cos = tf.cos(all_rys)
rot_mats = tf.stack(
[
tf.stack([ry_cos, ry_sin], axis=1),
tf.stack([-ry_sin, ry_cos], axis=1),
],
axis=2,
)
rot_mats = tf.tile(tf.expand_dims(rot_mats, 1), [1, K, 1, 1])
dxz_rot = tf.matmul(
rot_mats,
tf.expand_dims(tf.stack([dx, dz], axis=2), axis=2),
transpose_a=True,
transpose_b=True,
)
dxz_rot = tf.squeeze(tf.matrix_transpose(dxz_rot), axis=2)
dx = dxz_rot[:, :, 0]
dz = dxz_rot[:, :, 1]
else:
assert ref_theta == 0
ref_pts_tiled = tf.tile(tf.expand_dims(ref_pts, axis=1), [1, K, 1])
x = dx + ref_pts_tiled[:, :, 0]
z = dz + ref_pts_tiled[:, :, 2]
y = res_y + ref_pts_tiled[:, :, 1]
ref_theta = tf.tile(tf.expand_dims(ref_theta, axis=1), [1, K])
theta = (
ref_theta
+ (tf.to_float(bin_theta) + 0.5) * DELTA_THETA
- R
+ res_theta_norm * 0.5 * DELTA_THETA
)
size = mean_sizes + res_size_norm * mean_sizes
if ndims == 3:
l = size[:, :, :, 0]
w = size[:, :, :, 1]
h = size[:, :, :, 2]
# combine all
boxes_3d = tf.stack([x, y, z, l, w, h, theta], axis=3) # y+h/2
elif ndims == 2:
l = size[:, :, 0]
w = size[:, :, 1]
h = size[:, :, 2]
# combine all
boxes_3d = tf.stack([x, y, z, l, w, h, theta], axis=2) # y+h/2
return boxes_3d
| 16,326
|
def _histogram_discretize(target, num_bins=gin.REQUIRED):
"""Discretization based on histograms."""
discretized = np.zeros_like(target)
for i in range(target.shape[0]):
discretized[i, :] = np.digitize(target[i, :], np.histogram(
target[i, :], num_bins)[1][:-1])
return discretized
| 16,327
|
def apply_acl(instance, content):
"""Apply ACLs."""
any_acl_applied = False
if not isinstance(instance, roleable.Roleable):
return any_acl_applied
instance_acl_dict = {(l.ac_role_id, p.id): l
for p, l in instance.access_control_list}
person_ids = set()
for role_id, data in content.get("access_control_list", {}).iteritems():
person_ids |= {i["id"] for i in data["added"] + data["deleted"]}
person_dict = {p.id: p for p in all_models.Person.query.filter(
all_models.Person.id.in_(person_ids))
}
acr_dict = {r.id: r for r in ACR.get_ac_roles_for(instance.type).values()}
for role_id, data in content.get("access_control_list", {}).iteritems():
role_id = int(role_id)
if role_id not in acr_dict:
continue
for add in data["added"]:
if (role_id, add["id"]) not in instance_acl_dict:
instance.add_person_with_role_id(person_dict[add["id"]], role_id)
any_acl_applied = True
for delete in data["deleted"]:
if (role_id, delete["id"]) in instance_acl_dict:
instance.acr_id_acl_map[role_id].remove_person(
person_dict[delete["id"]]
)
any_acl_applied = True
return any_acl_applied
| 16,328
|
def serialize(results):
"""Serialize a ``QueryDict`` into json."""
serialized = {}
for result in results:
serialized.update(result.to_dict())
return json.dumps(serialized, indent=4)
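# Hedged usage sketch; FakeResult is hypothetical, any object exposing
# to_dict() behaves the same way.
class FakeResult:
    def __init__(self, key, value):
        self._d = {key: value}

    def to_dict(self):
        return self._d

print(serialize([FakeResult("a", 1), FakeResult("b", 2)]))
# -> '{"a": 1, "b": 2}' rendered as indented JSON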
| 16,329
|
def allowed_once (cave, visited):
"""Only allows small caves to be visited once. Returns False if `cave`
is small and already in `visited`.
"""
return big(cave) or (small(cave) and cave not in visited)
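# Hedged sketch: big/small are assumed helpers that classify caves by case
# (upper-case names are big, lower-case names are small), as in the usual
# cave-traversal puzzle this function appears to come from.
# allowed_once("AA", {"aa", "AA"}) -> True   (big caves are always allowed)
# allowed_once("bb", {"aa"})       -> True   (small cave not yet visited)
# allowed_once("aa", {"aa"})       -> False  (small cave already visited)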
| 16,330
|
def isna(obj: Literal["CAT0"]):
"""
usage.dask: 2
"""
...
| 16,331
|
def _serialize_examstruct(exam):
""" Serialize the exam structure for, eg. cache.
The dates, especially, need work before JSON
"""
assert isinstance(exam, dict)
date_fmt = '%Y-%m-%d %H:%M:%S'
assert isinstance(exam['start'], datetime.datetime)
assert isinstance(exam['end'], datetime.datetime)
safe = exam.copy()
safe['start'] = exam['start'].strftime(date_fmt)
safe['end'] = exam['end'].strftime(date_fmt)
return json.dumps(safe)
| 16,332
|
def _analysis_test_impl(ctx):
"""Implementation function for analysis_test. """
_ignore = [ctx]
return [AnalysisTestResultInfo(
success = True,
message = "All targets succeeded analysis",
)]
| 16,333
|
def main(args):
"""
Main function for the script
:param args: parsed command line arguments
:return: None
"""
from config import cfg as opt
opt.merge_from_file(args.config)
opt.freeze()
print("Creating generator object ...")
# create the generator object
gen = Generator(resolution=opt.dataset.resolution,
num_channels=opt.dataset.channels,
structure=opt.structure,
**opt.model.gen)
print("Loading the generator weights from:", args.generator_file)
# load the weights into it
gen.load_state_dict(torch.load(args.generator_file))
# path for saving the files:
save_path = args.output_dir
os.makedirs(save_path, exist_ok=True)
latent_size = opt.model.gen.latent_size
out_depth = int(np.log2(opt.dataset.resolution)) - 2
print("Generating scale synchronized images ...")
# generate the images:
with torch.no_grad():
point = torch.randn(args.n_row * args.n_col, latent_size)
point = (point / point.norm()) * (latent_size ** 0.5)
ss_image = gen(point, depth=out_depth, alpha=1)
# color adjust the generated image:
ss_image = adjust_dynamic_range(ss_image)
# save the ss_image in the directory
save_image(ss_image, os.path.join(save_path, "grid.png"), nrow=args.n_row,
normalize=True, scale_each=True, pad_value=128, padding=1)
print('Done.')
| 16,334
|
def filename_to_scienceurl(filename, suffix=None, source="irsa", verbose=False, check_suffix=True):
"""
"""
_, filefracday, paddedfield, filtercode, ccd_, imgtypecode, qid_, *suffix_ = os.path.basename(filename).split("_")
suffix_ = "_".join(suffix_)
year,month, day, fracday = filefrac_to_year_monthday_fracday(filefracday)
paddedccdid = ccd_.replace("c","")
qid = qid_.replace("q","")
if suffix is None:
suffix = suffix_
return science_path(year, month, day, fracday,
paddedfield, filtercode,
paddedccdid, qid, # added in input
imgtypecode=imgtypecode, suffix=suffix,
source=source, verbose=verbose,
check_suffix=check_suffix)
| 16,335
|
def CD_Joint(CD_J_AS = None,
Ypred = None,
beta = None,
zeta = None,
active_set = None,
lam = None,
P = None,
P_interaction = None,
Y = None,
B = None,
B_interaction = None,
S = None,
S_interaction = None,
I = None,
interaction_terms = None,
r = None,
max_iter = None,
tol = 1e-4,
full_set = None,
MaxSuppSize_main = None,
MaxSuppSize_interaction = None,
verbose = False,
path = None):
"""Cyclic Block Coordinate Descent over the full set of main/interaction effects.
Args:
CD_J_AS: a callable function that optimizes over a reduced set of main effects, callable.
Ypred: numpy array of shape (N, ).
beta: coefficients for main/interaction effects, 2 lists of arrays of shapes [ [(Ki+1, 1), ...], [(Kij+1, 1), ...]]
zeta: binary vector to track which main effects are in the active set, 2 bool arrays of shape [(1, d), (1, Imax)]
active_set: indices of main effects to optimize over, a numpy int array.
lam: regularization parameters [lam_1, lam_2], list of floats.
P: B^T*B + 2*N*(lam_1*S_i + eps*I) matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].
eps is a small epsilon for numerical stability.
P_interaction: B^T*B + 2*N*(lam_1*S_ij + eps*I) matrices for main effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].
eps is a small epsilon for numerical stability.
Y: training target responses, a float numpy array of shape (N,).
B: B-spline transformed sparse matrices for main effects, list of sparse matrices of shapes [(N, Ki+1), ...].
B_interaction: B-spline transformed sparse matrices for interaction effects, list of sparse matrices of shapes [(N, Kij+1), ...].
S: Smoothness matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].
S_interaction: Smoothness matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].
        I: number of possible main/interaction effects, int scalars.
        interaction_terms: list of interaction effects to consider if only a subset need to be considered,
            a 2D numpy array of shape (Imax, 2).
        r: relative scaling factor for L0 penalty between main and interaction effects.
            We consider r=1.0 (corresponds to the alpha symbol in the paper), float scalar.
        max_iter: maximum number of cyclic BCD passes over the active set, int scalar.
        tol: relative loss termination criterion for stopping, a float scalar.
        full_set: indices of all main effects, a numpy int array.
        main_terms: list of main effects to consider if only a subset need to be considered,
            not supported yet.
        MaxSuppSize_main: stop L0 regularization if the active set of main effects grows larger than MaxSuppSize_main,
            move to the next smoothing lambda setting and restart L0 regularization, int scalar.
        MaxSuppSize_interaction: stop L0 regularization if the active set of interaction effects grows larger than MaxSuppSize_interaction,
            move to the next smoothing lambda setting and restart L0 regularization, int scalar.
        verbose: for printing optimization steps, bool scalar.
        path: for logging, str.
Returns:
Ypred: Updated prediction, numpy array of shape (N, ).
beta: Updated coefficients for main effects, list of arrays of shapes [(Ki+1, 1), ...].
zeta: Updated binary vector to track which main effects are in the active set, a bool array of shape (1, d).
delta: Updated coefficients for interaction effects, list of arrays of shapes [(Kij+1, 1), ...].
alpha: Updated binary vector to track which interaction effects are in the active set, a bool array of shape (1, Imax).
active_set: Updated indices of nonzero main effects, a numpy int array.
active_interaction_set: Updated indices of nonzero interaction effects, a numpy int array.
MaxSuppSize_flag: indicates Maximum Support size is reached, bool scaler.
"""
N = Y.shape[0]
delta = beta[1]
beta = beta[0]
alpha = zeta[1]
zeta = zeta[0]
active_interaction_set = active_set[1]
active_set = active_set[0]
full_interaction_set = full_set[1]
full_set = full_set[0]
Bspam = B
Bspam_interaction = B_interaction
Pspam = P
Pspam_interaction = P_interaction
d = I[0]
dinteraction = I[1]
MaxSuppSize_flag = 0
eps = 1e-8
warnings.filterwarnings("error")
res = Y-Ypred
beta_p = [(P.solve((B.transpose()).dot(res))).reshape(-1,1) for B, P in zip(Bspam, Pspam)]
res_p = np.array([np.linalg.norm(res-B.dot(bp)) for B, bp in zip(Bspam, beta_p)])
active_set = np.arange(d)
# if active_set is None:
# A = int(np.ceil(0.1*d))
# active_set = res_p.argsort()[:A]
# else:
# A = np.minimum(np.maximum(int(np.ceil(0.2*len(active_set))),10), 50)
# active_set = np.union1d(active_set, res_p.argsort()[:A])
res = Y-Ypred
delta_p = [(P.solve((B.transpose()).dot(res))).reshape(-1,1) for B, P in zip(Bspam_interaction, Pspam_interaction)]
res_p = np.array([np.linalg.norm(res-B.dot(dp)) for B, dp in zip(Bspam_interaction, delta_p)])
if active_interaction_set is None:
A = int(np.ceil(0.01*dinteraction))
active_interaction_set = res_p.argsort()[:A]
else:
A = np.minimum(np.maximum(int(np.ceil(0.2*len(active_interaction_set))),10), 50)
active_interaction_set = np.union1d(active_interaction_set, res_p.argsort()[:A])
'''
Coordinate Descent over full set
'''
for it in range(max_iter):
Ypred, beta, zeta, delta, alpha = CD_J_AS(Ypred = Ypred,
beta = [beta, delta],
zeta = [zeta, alpha],
active_set = [active_set, active_interaction_set],
lam = [lam[0], lam[1]],
P = Pspam,
P_interaction = Pspam_interaction)
active_set = np.where(zeta[0,:] == 1)[0]
active_interaction_set = np.where(alpha[0,:] == 1)[0]
if (len(np.where(zeta[0,:] == 1)[0]) > MaxSuppSize_main) or (len(np.where(alpha[0,:] == 1)[0]) > MaxSuppSize_interaction):
MaxSuppSize_flag = 1
break
J = 0.5*mean_squared_error(Y, Ypred)+\
lam[0]*sum([(np.transpose(beta[k])).dot(S[k].dot(beta[k]))[0,0] for k in active_set])+\
lam[0]*sum([(np.transpose(delta[k])).dot(S_interaction[k].dot(delta[k]))[0,0] for k in active_interaction_set])+\
eps*sum([np.dot(beta[k][:,0],beta[k][:,0]) for k in active_set])+\
eps*sum([np.dot(delta[k][:,0],delta[k][:,0]) for k in active_interaction_set])+\
lam[1]*(np.count_nonzero(zeta[0,:]))+\
r*lam[1]*(np.count_nonzero(alpha[0,:]))
if verbose == True:
display(Math(r'Iteration: {}, Obj: {:.0f}, '.format(it, J)+', \sum_{j \in S^c} z_j: '+'{} \leq {}.'.format(np.count_nonzero(zeta[0,:]), len(active_set))+'\sum_{ij \in S^c} z_{ij}: '+'{} \leq {}.'.format(np.count_nonzero(alpha[0,:]),len(active_interaction_set))))
for j in [x for x in full_set if x not in active_set]:
if zeta[0,j]==1:
Ypred -= Bspam[j].dot(beta[j])
res = Y-Ypred
beta[j], zeta[:,j] = utilities.solve(B=Bspam[j], P=Pspam[j], y=res, beta=beta[j], S=S[j], lam=[lam[0], lam[1]])
if zeta[0,j]==1:
Ypred += Bspam[j].dot(beta[j])
for j in [x for x in full_interaction_set if x not in active_interaction_set]:
if alpha[0,j]==1:
Ypred -= Bspam_interaction[j].dot(delta[j])
res = Y-Ypred
delta[j], alpha[:,j] = utilities.solve(B=Bspam_interaction[j], P=Pspam_interaction[j], y=res, beta=delta[j], S=S_interaction[j], lam=[lam[0], r*lam[1]])
if alpha[0,j]==1:
Ypred += Bspam_interaction[j].dot(delta[j])
if np.count_nonzero(zeta[0,:])==active_set.shape[0] and np.count_nonzero(alpha[0,:])==active_interaction_set.shape[0]:
if np.sum(sorted(active_set) == np.where(zeta[0,:] == 1)[0])==active_set.shape[0] and np.sum(sorted(active_interaction_set) == np.where(alpha[0,:] == 1)[0])==active_interaction_set.shape[0]:
#print('Active set converged')
active_set = np.where(zeta[0,:] == 1)[0]
active_interaction_set = np.where(alpha[0,:] == 1)[0]
break
active_set = np.where(zeta[0,:] == 1)[0]
active_interaction_set = np.where(alpha[0,:] == 1)[0]
# for i in active_set:
# Pspam[i] = sp.linalg.splu((Bspam[i].transpose()).dot(Bspam[i])+2*N*(lam[0]*S[i]+eps*sp.csr_matrix(np.identity(Bspam[i].shape[1]))))
# for i in active_interaction_set:
# Pspam_interaction[i] = sp.linalg.splu((Bspam_interaction[i].transpose()).dot(Bspam_interaction[i])+2*N*(lam[0]*S_interaction[i]+eps*sp.csr_matrix(np.identity(Bspam_interaction[i].shape[1]))))
if(it == max_iter-1):
with open(path+'/Warning.txt', "a") as f:
f.write('Warning: CD over full set did not converge within the chosen max_iter!')
f.write('\lambda_1: {:.7f},\lambda_2: {:.7f}'.format(lam[0], lam[1]))
return Ypred, beta, zeta, delta, alpha, active_set, active_interaction_set, MaxSuppSize_flag
| 16,336
|
def testAllCallbacksSmokeTest(
args_count: int, type_checker: TypeCheckerFixture
) -> None:
"""
Parametrized test to do basic checking over all Callbacks (except Callback0).
    We generate functions with too many arguments, too few, and the correct number, and check
that the errors are as expected.
This should be enough to catch copy/paste errors when declaring the
Callback overloads.
"""
def gen_signature_and_args(count: int) -> Tuple[str, str, str]:
# Generates "v1: int, v2: int" etc
signature = ", ".join(f"v{i}: int" for i in range(count))
# Generates "10, 20" etc
args = ", ".join(f"{i+1}0" for i in range(count))
# Generates "int, int" etc
types = ", ".join("int" for _ in range(count))
return signature, args, types
sig_too_few, args_too_few, types_too_few = gen_signature_and_args(args_count - 1)
sig_too_many, args_too_many, types_too_many = gen_signature_and_args(args_count + 1)
sig_ok, args_ok, types_ok = gen_signature_and_args(args_count)
type_checker.make_file(
f"""
from oop_ext.foundation.callback import Callback{args_count}
c = Callback{args_count}[{types_ok}]()
def too_few_func({sig_too_few}) -> None: ...
c.Register(too_few_func)
c({args_too_few})
def too_many_func({sig_too_many}) -> None: ...
c.Register(too_many_func)
c({args_too_many})
def ok_func({sig_ok}) -> None: ...
c.Register(ok_func)
c({args_ok})
"""
)
result = type_checker.run()
result.assert_errors(
[
"has incompatible type",
"Missing positional argument",
"has incompatible type",
"Too many arguments",
]
)
| 16,337
|
def transform_unnamed_cols_range(df: pd.DataFrame, columns_range: range,
new_column_name_prefix: str, inplace=False) -> object:
"""
    This function transforms a range of columns, assuming the presence of the following schema in the dataframe:
|base_column_name|Unnamed_n|Unnamed_n+1|Unnamed_n+2|---
|option_1 |NaN |NaN |NaN |---
|----------------|NaN |option_3 |NaN |---
|----------------|option_2 |NaN |NaN |---
|----------------|NaN |NaN |option_4 |---
    The options need not appear in any particular row order; each Unnamed column carries exactly one "option_x" cell.
    The following schema will be given as output:
|base_column_name_option_1|base_column_name_option_2 |base_column_name_option_3|base_column_name_option_4|---
Also, it will replace cell values from this columns with binary data (1, 0) according to the presence or not
of the corresponding categorical value.
:param df: input dataframe to be processed
:param columns_range: range of columns from input dataframe to be transformed
:param new_column_name_prefix: new column_name to be added as base_name to rename map
:param inplace: If False, return a copy. Otherwise, do operation inplace and return None.
:return: input dataframe with Unnamed columns dropped and string values transformed to binary values (0,1)
"""
# extracting columns of interest
df_target_columns = df.iloc[:, columns_range]
return _even_out_categorical_as_binaries(df, df_target_columns.columns,
new_column_name_prefix=new_column_name_prefix, inplace=inplace)
| 16,338
|
def shingles(tokens, n):
"""
Return n-sized shingles from a list of tokens.
>>> assert list(shingles([1, 2, 3, 4], 2)) == [(1, 2), (2, 3), (3, 4)]
"""
return zip(*[tokens[i:-n + i + 1 or None] for i in range(n)])
| 16,339
|
def load_json(filename):
"""Load JSON file as dict."""
with open(join(dirname(__file__), filename), "rb") as fp:
return json.load(fp)
| 16,340
|
def getLayerList(layer_list, criterionFn):
"""Returns a list of all of the layers in the stack that match the given criterion function, including substacks."""
matching_layer = []
for layer in layer_list:
if criterionFn(layer):
matching_layer.append(layer)
if hasattr(layer, 'layerStack'):
matching_layer.extend(getLayerList(layer.layerStack().layerList(), criterionFn))
if layer.hasMaskStack():
matching_layer.extend(getLayerList(layer.maskStack().layerList(), criterionFn))
if hasattr(layer, 'hasAdjustmentStack') and layer.hasAdjustmentStack():
matching_layer.extend(getLayerList(layer.adjustmentStack().layerList(), criterionFn))
return matching_layer
| 16,341
|
def getBiLinearMap(edge0, edge1, edge2, edge3):
"""Get the UV coordinates on a square defined from spacing on the edges"""
if len(edge0) != len(edge1):
raise ValueError("getBiLinearMap: The len of edge0 and edge1 are not the same")
if len(edge2) != len(edge3):
raise ValueError("getBiLinearMap: The len of edge2 and edge3 are no the same")
N = len(edge0)
M = len(edge2)
UV = np.zeros((N, M, 2))
UV[:, 0, 0] = edge0
UV[:, 0, 1] = 0.0
UV[:, -1, 0] = edge1
UV[:, -1, 1] = 1.0
UV[0, :, 0] = 0.0
UV[0, :, 1] = edge2
UV[-1, :, 0] = 1.0
UV[-1, :, 1] = edge3
for i in range(1, N - 1):
x1 = edge0[i]
y1 = 0.0
x2 = edge1[i]
y2 = 1.0
for j in range(1, M - 1):
x3 = 0
y3 = edge2[j]
x4 = 1.0
y4 = edge3[j]
UV[i, j] = calcIntersection(x1, y1, x2, y2, x3, y3, x4, y4)
return UV
| 16,342
|
def _parse_args():
"""parse arguments"""
parser = argparse.ArgumentParser(description='train and export wdsr on modelarts')
# train output path
parser.add_argument('--train_url', type=str, default='', help='where training log and ckpts saved')
# dataset dir
parser.add_argument('--data_url', type=str, default='', help='where datasets located')
# train config
parser.add_argument('--data_train', type=str, default='DIV2K', help='train dataset name')
parser.add_argument('--device_target', type=str, default='Ascend', help='target device to run')
parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16, help='input batch size for training')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--init_loss_scale', type=float, default=65536., help='scaling factor')
parser.add_argument('--loss_scale', type=float, default=1024.0, help='loss_scale')
parser.add_argument('--scale', type=str, default='2+3+4', help='super resolution scale')
parser.add_argument('--ckpt_save_path', type=str, default='ckpt', help='path to save ckpt')
parser.add_argument('--ckpt_save_interval', type=int, default=10, help='save ckpt frequency, unit is epoch')
parser.add_argument('--ckpt_save_max', type=int, default=5, help='max number of saved ckpt')
parser.add_argument('--task_id', type=int, default=0)
# export config
parser.add_argument("--export_batch_size", type=int, default=1, help="batch size")
parser.add_argument("--export_file_name", type=str, default="wdsr", help="output file name.")
parser.add_argument("--export_file_format", type=str, default="AIR",
choices=['MINDIR', 'AIR', 'ONNX'], help="file format")
args, _ = parser.parse_known_args()
return args
| 16,343
|
def listkeys():
""" Show stored keys
"""
stm = shared_steem_instance()
t = PrettyTable(["Available Key"])
t.align = "l"
for key in stm.wallet.getPublicKeys():
t.add_row([key])
print(t)
| 16,344
|
def missing_frame_features(files, pipeline: PipelineContext):
"""Get file paths with missing frame-level features."""
frame_features = pipeline.repr_storage.frame_level
for i, file_path in enumerate(files):
if not frame_features.exists(pipeline.reprkey(file_path)):
yield file_path
| 16,345
|
def _parse_orientation(response: HtmlResponse):
"""Parse Orientation.
Returns None if not available or is unknown.
"""
value = response.css('th:contains("Ausrichtung") + td ::text').get()
if value:
if value == "unbekannt" or value == "verschieden":
return None
fk_value = {
"Nord": "N",
"Nordost": "NO",
"Ost": "O",
"Sรผdost": "SO",
"Sรผd": "S",
"Sรผdwest": "SW",
"West": "W",
"Nordwest": "NW",
}
return Orientation.objects.get(name=fk_value[value])
else:
return None
| 16,346
|
def binaryread(file, vartype, shape=(1,), charlen=16):
"""
Uses numpy to read from binary file. This was found to be faster than the
struct approach and is used as the default.
"""
# read a string variable of length charlen
if vartype == str:
result = file.read(charlen * 1)
else:
# find the number of values
nval = np.prod(shape)
result = np.fromfile(file, vartype, nval)
if nval == 1:
result = result # [0]
else:
result = np.reshape(result, shape)
return result
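# Hedged usage sketch: round-trip six float64 values through a temporary file
# (tempfile and numpy are standard imports, not part of the original module).
import tempfile
import numpy as np

with tempfile.TemporaryFile() as fh:
    np.arange(6, dtype=np.float64).tofile(fh)
    fh.seek(0)
    arr = binaryread(fh, np.float64, shape=(2, 3))
    # arr is a (2, 3) float64 array: [[0., 1., 2.], [3., 4., 5.]]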
| 16,347
|
def subsample_data(neuron_data, sample_size = 10000):
"""
Acquires a subsample of the Neuron dataset.
This function samples a set of neurons without replacement.
    Params
    -----------
    neuron_data (array-like):
        Array of neuron traces; rows (neurons) are sampled.
    sample_size (int):
        Number of neurons to sample without replacement (default 10000).
Returns
-----------
rand_ix (array-like):
Array containing the chosen indices
sample_neurons (array-like ):
Array with shape (sample_size, neuron_data.shape[1])
containing a subset of the neuron traces.
"""
# Get random indices sampling without replacement
rand_ix = np.random.choice(
np.arange(neuron_data.shape[0]), size= sample_size, replace=False
)
# Get subsample by choosing indices along rows
sample_neurons = neuron_data[rand_ix, :]
return rand_ix, sample_neurons
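# Hedged usage sketch with synthetic data (shapes are illustrative only):
import numpy as np

demo_traces = np.random.default_rng(0).normal(size=(100, 50))  # (n_neurons, n_timepoints)
demo_ix, demo_sample = subsample_data(demo_traces, sample_size=10)
assert demo_sample.shape == (10, 50) and len(set(demo_ix)) == 10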
| 16,348
|
def appGet(*args, **kwargs):
"""
.. deprecated:: 0.42.0
Use :func:`app_get()` instead.
"""
print("dxpy.appGet is deprecated; please use app_get instead.", file=sys.stderr)
return app_get(*args, **kwargs)
| 16,349
|
def connect_db():
"""Connects to the specific database."""
mongo = MongoClient(DATABASE_URL,replicaset=MONGO_REPLICASET)
#if COLLECTION_NAME in mongo[DATABASE_NAME].collection_names():
collection = mongo[DATABASE_NAME][COLLECTION_NAME]
#else:
# mongo[DATABASE_NAME].create_collection(COLLECTION_NAME)
# collection = mongo[DATABASE_NAME][COLLECTION_NAME]
# collection.createIndex( { "timestamp": 1 }, { 'unique': True } )
return collection
| 16,350
|
def save_params(network, epoch_i, base_save_path="./checkpoints"):
"""
Save current params of the network.
Args:
network: the lagrangian network.
epoch_i: the current training epoch.
base_save_path: base save path for saving the parameters.
"""
save_path = Path(base_save_path + "/network_params_e" + str(epoch_i))
if not save_path.exists():
save_path.mkdir(parents=True, exist_ok=True)
network.save(save_path)
| 16,351
|
def transient(func):
"""
decorator to make a function execution transient.
meaning that before starting the execution of the function, a new session with a
new transaction will be started, and after the completion of that function, the
new transaction will be rolled back without the consideration or affecting the
parent transaction which by default is scoped to request. the corresponding new
session will also be removed after function execution.
note that you *should not* commit, flush or rollback anything inside a transient
function, the `@transient` decorator will handle rollback operation when needed.
otherwise, unexpected behaviors may occur.
also note that you *should not* remove the corresponding session from session factory
when using `@transient` decorator. the removal operation will be handled by decorator
itself and if you remove session manually, it will cause broken chain of sessions
and unexpected behaviour.
this decorator also supports multiple `@transient` usage in a single call hierarchy.
for example:
def service_root():
store = get_current_store()
value = EntityRoot()
store.add(value)
service_a()
@atomic
def service_a():
store = get_current_store()
value = EntityA()
store.add(value)
service_b()
@transient
def service_b():
store = get_current_store()
value = EntityB()
store.add(value)
service_c()
@transient
def service_c():
value = EntityC()
value.save()
in the above example, if the call hierarchy starts with `service_root()`, at
the end, the data of `service_root` and `service_a` will be persisted into database.
but the data of `service_b` and `service_c` will not be persisted because they are
decorated as transient.
:param function func: function.
:returns: function result.
"""
def decorator(*args, **kwargs):
"""
decorates the given function and makes its execution transient.
:param object args: function arguments.
:param object kwargs: function keyword arguments.
:returns: function result.
"""
store = database_services.get_atomic_store()
try:
result = func(*args, **kwargs)
return result
finally:
store.rollback()
factory = database_services.get_current_session_factory()
factory.remove(atomic=True)
return update_wrapper(decorator, func)
| 16,352
|
def make_mapping(environ, start_response):
"""
Establishing a mapping, storing the provided URI
as a field on a tiddler in the PRIVATEER bag.
    Accepted data is either a JSON dictionary with a uri
    key or a POST CGI form with a uri query parameter.
Respond with a location header containing the uri
of the mapping.
"""
uri = None
try:
content_type = environ['tiddlyweb.type']
except KeyError:
content_type = None
if content_type == 'application/json':
try:
length = environ['CONTENT_LENGTH']
content = environ['wsgi.input'].read(int(length))
data = simplejson.loads(content)
uri = data['uri']
        except (KeyError, IOError, simplejson.JSONDecodeError) as exc:
raise HTTP400('Unable to parse input: %s' % exc)
else:
try:
uri = environ['tiddlyweb.query']['uri'][0]
        except (KeyError, IndexError) as exc:
raise HTTP400('Unable to parse input: %s' % exc)
if uri:
title_uuid = _make_mapping_tiddler(environ, uri)
else:
raise HTTP400('No uri for mapping provided')
start_response('201 Created', [
('Location', _mapping_uri(environ, title_uuid))])
return []
| 16,353
|
def test_aws_binary_file(host):
"""
Tests if aws binary is a file type.
"""
assert host.file(PACKAGE_BINARY).is_file
| 16,354
|
def create_space_magnitude_region(region, magnitudes):
"""Simple wrapper to create space-magnitude region """
    if not (isinstance(region, CartesianGrid2D) or isinstance(region, QuadtreeGrid2D)):
        raise TypeError("region must be CartesianGrid2D or QuadtreeGrid2D")
# bind to region class
if magnitudes is None:
raise ValueError("magnitudes should not be None if creating space-magnitude region.")
region.magnitudes = magnitudes
region.num_mag_bins = len(region.magnitudes)
return region
| 16,355
|
def autocorrelation_plot(series, label, lower_lim=1, n_samples=None, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(lower_lim, n_samples), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) *
(data[h:] - mean)).sum() / float(n) / c0
x = (np.arange(n) + 1).astype(int)
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
if n_samples:
ax.plot(x[:n_samples], y[:n_samples], label=label, **kwds)
else:
ax.plot(x, y, label=label, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
| 16,356
|
def process_input_using_awk(inputf, outputf):
"""
Square the value read from inputf and save in outputf
:param inputf: file to read number from
:param outputf: file to save squere to
:return: void
"""
cmd = "sleep 5;\n awk '{print $1*$1}' " + \
("{} > {};\n").format(inputf, outputf)
script_dir = os.path.join(os.path.dirname(inputf), 'drmaa')
print(outputf)
#run_job(cmd, run_locally=True)
run_slurm_job(cmd, script_dir)
| 16,357
|
def get_productivity(coin_endowments):
"""Returns the total coin inside the simulated economy.
Args:
coin_endowments (ndarray): The array of coin endowments for each of the
agents in the simulated economy.
Returns:
Total coin endowment (float).
"""
return np.sum(coin_endowments)
| 16,358
|
def prefix_attrs(source, keys, prefix):
"""Rename some of the keys of a dictionary by adding a prefix.
Parameters
----------
source : dict
Source dictionary, for example data attributes.
keys : sequence
Names of keys to prefix.
prefix : str
Prefix to prepend to keys.
Returns
-------
dict
Dictionary of attributes with some keys prefixed.
"""
out = {}
for key, val in source.items():
if key in keys:
out[f"{prefix}{key}"] = val
else:
out[key] = val
return out
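# Hedged usage sketch with made-up attribute names:
# prefix_attrs({"units": "K", "standard_name": "t"}, ["units"], "src_")
# -> {"src_units": "K", "standard_name": "t"}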
| 16,359
|
def add_polygon_to_image(image: np.ndarray, object: dict) -> np.ndarray:
""" Add the polynom of the given object to the image.
Since using CV, order is (B,G,R)
Parameters
----------
img : np.ndarray
Opencv image
object : dict
        Dictionary describing the polygon with meta info.
Returns
-------
np.ndarray
        image with the polygon drawn in (B,G,R)
"""
x_points = object['points']['x_points']
y_points = object['points']['y_points']
pts = np.array([a for a in zip(x_points, y_points)])
pts = pts[:, None, :]
logging.debug(f'Polylines with shape {pts.shape} : {pts}')
cv.polylines(image, [pts], True, (0, 0, 255), 5)
return image
| 16,360
|
def add(coefficient_1, value_1, coefficient_2, value_2):
"""Provides an addition algebra for various types, including scalars and
histogram objects.
Incoming values are not modified.
Args:
coefficient_1: The first coefficient, a scalar
value_1: The first value, a histogram or scalar
coefficient_2: The second coefficient, a scalar
value_2: The second value, a histogram or scalar
Returns:
The value of the expression:
((coefficient_1 * value_1) + (coefficient_2 * value_2))
"""
# Verify that the incoming types match
if type(value_1) != type(value_2):
raise ValueError('values must be of the same type')
# Handle based on type
if isinstance(value_1, TH1):
# Create the result
result = value_1.Clone(uuid4().hex)
# Add the histograms
result.Add(value_1, value_2, coefficient_1, coefficient_2)
else:
# Create the result
result = ((coefficient_1 * value_1) + (coefficient_2 * value_2))
# All done
return result
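# Usage sketch (added for illustration, not part of the original snippet):
# the scalar branch of the addition algebra; TH1 histograms use the same call.
# Assumes the module-level TH1 import (ROOT) used by the function above.
def _example_add_scalars():
    return add(2, 3.0, -1, 4.0)  # 2 * 3.0 + (-1) * 4.0 == 2.0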
| 16,361
|
def kotlin_object_type_summary(lldb_val, internal_dict = {}):
"""Hook that is run by lldb to display a Kotlin object."""
start = time.monotonic()
log(lambda: f"kotlin_object_type_summary({lldb_val.unsigned:#x}: {lldb_val.GetTypeName()})")
fallback = lldb_val.GetValue()
if lldb_val.GetTypeName() != "ObjHeader *":
if lldb_val.GetValue() is None:
bench(start, lambda: "kotlin_object_type_summary:({:#x}) = NULL".format(lldb_val.unsigned))
return NULL
bench(start, lambda: "kotlin_object_type_summary:({:#x}) = {}".format(lldb_val.unsigned, lldb_val.signed))
return lldb_val.value
if lldb_val.unsigned == 0:
bench(start, lambda: "kotlin_object_type_summary:({:#x}) = NULL".format(lldb_val.unsigned))
return NULL
tip = internal_dict["type_info"] if "type_info" in internal_dict.keys() else type_info(lldb_val)
if not tip:
        bench(start, lambda: "kotlin_object_type_summary:({0:#x}) = fallback:{0:#x}".format(lldb_val.unsigned))
return fallback
value = select_provider(lldb_val, tip, internal_dict)
bench(start, lambda: "kotlin_object_type_summary:({:#x}) = value:{:#x}".format(lldb_val.unsigned, value._valobj.unsigned))
start = time.monotonic()
str0 = value.to_short_string()
bench(start, lambda: "kotlin_object_type_summary:({:#x}) = str:'{}...'".format(lldb_val.unsigned, str0[:3]))
return str0
| 16,362
|
def parse_arguments(root_dir):
    """ Will parse the command line arguments and return the arg object.
"""
# Create top level parser. TODO: add description, usage, etc
parser = argparse.ArgumentParser(prog="aware.py",
        description="Probabilistic demultiplexer for Illumina bcl files. Works "
        "with single or dual-indexed reads, and single- or paired-"
        "end reads. (github.com/edm1/aware-demultiplexer)",
epilog="Enter sub-command to see specific options.",
usage="pypy3 aware.py [-h] [-v] <subcommand> [options]")
subparsers = parser.add_subparsers(title="The aware.py sub-commands include",
prog="pypy3 aware.py",
metavar="<subcommand>")
# Create parser for the bcl2fastq (extracting reads from illumina folder)
parser_b2f = subparsers.add_parser('bcl2fastq',
description="Wrapper for picard-tools. Extracts multiplexed reads and "
"barcodes from Illumina bcl files.",
help="Extracts multiplexed reads and barcodes from Illumina bcl files.")
# Required positional arguments
parser_b2f.add_argument('baseCallDir', metavar='<baseCallDir>', type=str,
help='Directory containing base call intensitites')
parser_b2f.add_argument('runParamXML', metavar='<runParameters.xml>',
type=str, help='runParameters.xml file')
parser_b2f.add_argument('lane', metavar='<lane>',
type=int, help='Lane number')
# Optional arguments
parser_b2f.add_argument('--outDir', '-o', metavar='<str>', type=str,
default=os.path.join(root_dir, "output"),
help='Location to create output files. (output)')
parser_b2f.add_argument('--numCPU', '-p', metavar='<int>', type=int,
default=1, help='Number of CPUs to use. (1)')
parser_b2f.add_argument('--readsPerTile', '-r', metavar='<int>', type=int,
default=120000, help=('Max number of reads in RAM per tile, reduce if '
'you have problems with memory. (120000)'))
parser_b2f.add_argument('--MaxInRam', '-m', metavar='<int>', type=int,
default=500000, help=('Maximum number of records that are stored in the'
' RAM. (500000)'))
parser_b2f.add_argument('--JavaRAM', '-mem', metavar='<int>', type=int,
default=2, help='Amount of RAM (GB) allocated to the Java heap. (2)')
parser_b2f.add_argument('--PicardJar', '-jar', metavar='<path>',
type=str, default=os.path.join(root_dir, 'libs/picard.jar'),
help='Location of picard.jar (libs/picard.jar)')
# Add function to call if selected
parser_b2f.set_defaults(func=basecalls2fastq.run)
# Create parser for the demultiplexer
parser_demux = subparsers.add_parser('demux',
description="Demultiplexes multiplexed fastqs that are extracted "
"by sub-command bcl2fastq.",
help="Demultiplex the fastqs extracted by bcl2fastq using indexes "
"provided in sampleSheet.csv.")
# Required positional args
parser_demux.add_argument('inDir', metavar='<inDir>',
type=str, help='Directory created by bcl2fastq in output folder.')
parser_demux.add_argument('sampleSheet', metavar='<SampleSheet.csv>',
type=str, help='MiSeq SampleSheet.csv file, containing index info.')
# Optional args
parser_demux.add_argument('--uniqID', '-u', metavar='<str>', type=str,
default=None, help='Unique ID to append to output folder. (None)')
# parser_demux.add_argument('--numCPU', '-p', metavar='<int>', type=int,
# default=1, help='Number of CPUs to use. (1)')
parser_demux.add_argument('--minProb', '-min', metavar='<float>',
type=float, default=0.05, help=('Minimum probability of a match else'
' discard. (0.05)'))
parser_demux.add_argument('--phredOffset', '-s', metavar='<int>', type=int,
required=False, default=33, help='FASTQ phred score offset (33)')
parser_demux.add_argument('--indexQual', '-i', metavar='<int>', type=int,
default=30, help='Phred-score given to barcode indexes (30)')
# Add function to call if selected
parser_demux.set_defaults(func=demultiplexer.run)
# Add version number to the parser
parser.add_argument('-v', '--version', action='version', version='v1.0.3')
# Parse the arguments
args = parser.parse_args()
# Workaround for sub-parser bug (http://bugs.python.org/issue16308)
try:
a = getattr(args, "func")
except AttributeError:
parser.print_help()
sys.exit(0)
# Parse the arguments
return args
| 16,363
|
def _grep_first_pair_of_parentheses(s):
"""
Return the first matching pair of parentheses in a code string.
INPUT:
A string
OUTPUT:
A substring of the input, namely the part between the first
    (outermost) matching pair of parentheses (including the
parentheses).
Parentheses between single or double quotation marks do not
count. If no matching pair of parentheses can be found, a
``SyntaxError`` is raised.
EXAMPLES::
sage: from sage.misc.sageinspect import _grep_first_pair_of_parentheses
sage: code = 'def foo(a="\'):", b=4):\n return'
sage: _grep_first_pair_of_parentheses(code)
'(a="\'):", b=4)'
sage: code = 'def foo(a="%s):", \'b=4):\n return'%("'")
sage: _grep_first_pair_of_parentheses(code)
Traceback (most recent call last):
...
SyntaxError: The given string does not contain balanced parentheses
"""
out = []
single_quote = False
double_quote = False
escaped = False
level = 0
for c in s:
if level>0:
out.append(c)
if c=='(' and not single_quote and not double_quote and not escaped:
level += 1
elif c=='"' and not single_quote and not escaped:
double_quote = not double_quote
elif c=="'" and not double_quote and not escaped:
single_quote = not single_quote
elif c==')' and not single_quote and not double_quote and not escaped:
if level == 1:
return '('+''.join(out)
level -= 1
elif c=="\\" and (single_quote or double_quote):
escaped = not escaped
else:
escaped = False
raise SyntaxError("The given string does not contain balanced parentheses")
| 16,364
|
def login():
    """
    Log in to the local REST API with the test credentials and return the access token.
    """
url = "http://127.0.0.1:5001/rest/login"
data = {"username": "kivanc", "password": "1234"}
r = requests.post(url, json=data)
output = r.json()
return output["access_token"]
| 16,365
|
def get_all_ops(ifshortcut=True, ifse=True, strides=[1, 2, 2, 2, 1, 2, 1]):
"""Get all possible ops of current search space
Args:
ifshortcut: bool, shortcut or not
ifse: bool, se or not
strides: list, list of strides for bottlenecks
Returns:
op_params: list, a list of all possible params
"""
op_params = []
# conv1_1
op_params.append(('conv', 0, 0, 1, image_shape[0], image_shape[1],
image_shape[2], 32, 1, 3, 1, 2, 1))
op_params.append(('batch_norm', 'None', 1, 32, int(image_shape[1] / 2),
int(image_shape[2] / 2)))
op_params.append(('activation', 'relu6', 1, 32, int(image_shape[1] / 2),
int(image_shape[2] / 2)))
# bottlenecks, TODO: different h and w for images
in_c, in_shape = [32], int(image_shape[1] / 2)
for i in range(len(NAS_FILTER_SIZE) + 2):
if i == 0:
expansion, kernels, num_filters, s = [1], [3], [16], strides[i]
elif i == len(NAS_FILTER_SIZE) + 1:
expansion, kernels, num_filters, s = [6], [3], [320], strides[i]
else:
expansion, kernels, num_filters, s = NAS_FILTERS_MULTIPLIER, \
NAS_KERNEL_SIZE, \
NAS_FILTER_SIZE[i-1], \
strides[i]
# first block
tmp_ops = ops_of_inverted_residual_unit(
in_c, in_shape, expansion, kernels, num_filters, s, False, ifse)
op_params = op_params + tmp_ops
in_c, in_shape = num_filters, int(in_shape / s)
# repeated block: possibly more ops, but it is ok
tmp_ops = ops_of_inverted_residual_unit(in_c, in_shape, expansion,
kernels, num_filters, 1,
ifshortcut, ifse)
op_params = op_params + tmp_ops
# last conv
op_params.append(('conv', 0, 0, 1, 320, in_shape, in_shape, 1280, 1, 1, 0,
1, 1))
op_params.append(('batch_norm', 'None', 1, 1280, in_shape, in_shape))
op_params.append(('activation', 'relu6', 1, 1280, in_shape, in_shape))
op_params.append(('pooling', 1, 1, 1280, in_shape, in_shape, in_shape, 0, 1,
0, 3))
# fc, converted to 1x1 conv
op_params.append(('conv', 0, 0, 1, 1280, 1, 1, class_dim, 1, 1, 0, 1, 1))
op_params.append(('eltwise', 2, 1, 1000, 1, 1))
op_params.append(('softmax', -1, 1, 1000, 1, 1))
op_params.append(('eltwise', 1, 1, 1, 1, 1))
op_params.append(('eltwise', 2, 1, 1, 1, 1))
return list(set(op_params))
| 16,366
|
def evaluation_per_relation(triples: dict, model: EvaluationModel, batch_size: int = 4):
"""
:param triples: It should be a dict in form (Relation id):[(s_1,p_1,o_1)...(s_n,p_n,o_n)]
"""
# Evaluate per relation and store scores/evaluation measures
score_per_rel = dict()
for k in tqdm.tqdm(triples.keys()):
# use API to evaluate model and generate model output for error analysis
sub = torch.tensor(triples[k][:, 0]).cuda()
pra = torch.tensor(triples[k][:, 1]).cuda()
obj = torch.tensor(triples[k][:, 2]).cuda()
score_per_rel[k] = model.evaluate_only_metrics(sub, pra, obj, batch_size=batch_size)
return score_per_rel
| 16,367
|
def apply_executor(executor, path, parameters):
"""Non-interactively run a given executor."""
args = executor["input_arguments"] if "input_arguments" in executor else {}
final_parameters = set_parameters(args, parameters)
launcher = convert_launcher(executor["executor"]["name"])
command = executor["executor"]["command"]
built_command = build_command(launcher, command, final_parameters)
# begin execution with the above parameters.
execute_command(launcher, built_command, path)
| 16,368
|
def check_upload():
"""
ๅคๆญไปๅคฉ็ไปฃ็ ๆฏๅฆไธไผ
:return:
"""
ctime = datetime.date.today() # ๅฝๅๆฅๆ
data = db_helper.fetchone('select id from record where ctime = %s and user_id = %s',
(ctime, session['user_info']['id']))
return data
| 16,369
|
def get_package_object():
"""Gets a sample package for the submission in Dev Center."""
package = {
# The file name is relative to the root of the uploaded ZIP file.
"fileName" : "bin/super_dev_ctr_api_sim.appxupload",
# If you haven't begun to upload the file yet, set this value to "PendingUpload".
"fileStatus" : "PendingUpload"
}
return package
| 16,370
|
def check_for_cmd():
""" Returns tuple of [Type] [Data] where type is the shuffle type and data
will contain either random shuffle parameters or the top deck order required """
try:
with open(CMD_FILE, 'r+') as f:
data = f.readline()
f.truncate(0)
            # DEBUGGING TODO: remove
# data = 'HOLD,4,true,A,Diamond,Q,Heart,K,,,Diamond,A,Club,7,,,,6,Heart,9,,A,,A,,,Spade,,Spade,,,,,,,,,,,,,,,,\n'
# Clean and format data
data = data.replace('\n','')
data = data.replace('Diamond','D')
data = data.replace('Heart','H')
data = data.replace('Club','C')
data = data.replace('Spade','S')
rawdata = data.split(',')
# Process data based on shuffle type key
if rawdata[0] in SHUFFLES:
shuffletype = SHUFFLES.index(rawdata[0])
                if shuffletype == 0:
                    return (rawdata[0], format_rand(rawdata[1:]))
                elif shuffletype == 1:
                    return (rawdata[0], format_bjack(rawdata[1:]))
                elif shuffletype == 2:
                    return (rawdata[0], format_holdem(rawdata[1:]))
except:
pass
return (None, None)
| 16,371
|
def _set_quantity(request, force_delete=False):
"""Set the quantity for a specific cartitem.
Checks to make sure the item is actually in the user's cart.
"""
cart = Cart.objects.from_request(request, create=False)
if isinstance(cart, NullCart):
return (False, None, None, _("No cart to update."))
cartplaces = config_value('SHOP', 'CART_PRECISION')
if force_delete:
qty = Decimal('0')
else:
try:
roundfactor = config_value('SHOP', 'CART_ROUNDING')
qty = round_decimal(request.POST.get('quantity', 0), places=cartplaces, roundfactor=roundfactor, normalize=True)
        except RoundedDecimalError:
return (False, cart, None, _("Bad quantity."))
if qty < Decimal('0'):
qty = Decimal('0')
try:
itemid = int(request.POST.get('cartitem'))
except (TypeError, ValueError):
return (False, cart, None, _("Bad item number."))
try:
cartitem = CartItem.objects.get(pk=itemid, cart=cart)
except CartItem.DoesNotExist:
return (False, cart, None, _("No such item in your cart."))
if qty == Decimal('0'):
cartitem.delete()
cartitem = NullCartItem(itemid)
else:
from satchmo_store.shop.models import Config
config = Config.objects.get_current()
if config_value('PRODUCT','NO_STOCK_CHECKOUT') == False:
stock = cartitem.product.items_in_stock
log.debug('checking stock quantity. Have %d, need %d', stock, qty)
if stock < qty:
return (False, cart, cartitem, _("Not enough items of '%s' in stock.") % cartitem.product.translated_name())
cartitem.quantity = round_decimal(qty, places=cartplaces)
cartitem.save()
satchmo_cart_changed.send(cart, cart=cart, request=request)
return (True, cart, cartitem, "")
| 16,372
|
def load_typos_file(file_name, char_vocab = {}, filter_OOA_chars = False):
"""
Loads typos from a given file.
Optionally, filters all entries that contain out-of-alphabet characters.
"""
basename, ext = os.path.splitext(file_name)
replacement_rules = list()
if ext == ".tsv":
typos = load_typos_moe(file_name)
else:
typos = load_typos_belinkov_bisk(file_name)
if "extracted" in basename:
print("> applying replacement rules..")
replacement_rules.append((chr(172), ' '))
typos = _normalize_typos(typos, replacement_rules)
if filter_OOA_chars:
typos = _filter_typos(typos, char_vocab)
return typos
| 16,373
|
def load_config(config_file="config.yaml"):
    """Load config file to initialize fragment factories.
    A config file is a YAML file.
    Example config file:
# config.yaml
name: My LDF server
maintainer: chuck Norris <me@gmail.com>
datasets:
-
name: DBpedia-2016-04
description: DBpedia dataset, version 2016-04
backend: hdt-file
file: /home/chuck-norris/dbpedia-2016-04.hdt
-
name: Chuck-Norris-facts
description: Best Chuck Norris facts ever
backend: rdf-file
format: nt
file: /home/chuck-norris/facts.nt
"""
config = load(open(config_file))
# set page size, i.e. the number of triples per page
quota = config['quota'] if 'quota' in config else 75
max_results = config['max_results'] if 'max_results' in config else inf
config['quota'] = quota
for c in config["datasets"]:
if 'quota' not in c:
c['quota'] = quota
if 'max_results' not in c:
c['max_results'] = max_results
if 'queries' not in c:
c['queries'] = []
# build graphs
graphs = {c["name"]: Graph(c) for c in config["datasets"]}
return (config, graphs)
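# Usage sketch (added for illustration, not part of the original snippet):
# the path is a placeholder and is assumed to follow the YAML layout shown
# in the docstring above.
def _example_load_config(path="config.yaml"):
    config, graphs = load_config(path)
    for name in graphs:
        print(name, "quota:", config["quota"])
    return config, graphs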
| 16,374
|
def run_s3_test():
"""run_s3_test
Run the S3 verification test
"""
access_key = os.getenv(
'S3_ACCESS_KEY',
'trexaccesskey')
secret_key = os.getenv(
'S3_SECRET_KEY',
'trex123321')
    region_name = os.getenv(
        'S3_REGION_NAME',
        'us-east-1')
service_address = os.getenv(
'S3_ADDRESS',
'minio-service:9000')
filename = os.getenv(
'S3_UPLOAD_FILE',
'run-s3-test.txt')
bucket_name = os.getenv(
'S3_BUCKET',
's3-verification-tests')
bucket_key = os.getenv(
'S3_BUCKET_KEY',
's3-worked-on-{}'.format(
datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')))
secure = bool(os.getenv(
'S3_SECURE',
'0') == '1')
if len(sys.argv) > 1:
service_address = sys.argv[1]
endpoint_url = 'http://{}'.format(
service_address)
if secure:
endpoint_url = 'https://{}'.format(
service_address)
download_filename = 'download-{}'.format(
filename)
key_contents = 'tested on: {}'.format(
datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'))
if not os.path.exists(filename):
print((
'creating test file: {}').format(
filename))
with open(filename, 'w') as key_file:
key_file.write(key_contents)
print((
'connecting: {}').format(
endpoint_url))
s3 = boto3.resource(
's3',
endpoint_url=endpoint_url,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
region_name=region_name,
config=boto3.session.Config(
signature_version='s3v4')
)
# Once the client is created.
try:
print((
'checking bucket={} exists').format(
bucket_name))
if s3.Bucket(bucket_name) not in s3.buckets.all():
print((
'creating bucket={}').format(
bucket_name))
s3.create_bucket(
Bucket=bucket_name)
except Exception as e:
print((
'failed creating bucket={} with ex={}').format(
bucket_name,
e))
# end of try/ex for creating bucket
# Upload the file to S3
print((
'upload_file({}, {}, {})').format(
filename,
bucket_name,
bucket_key))
s3.Bucket(bucket_name).upload_file(filename, bucket_key)
# Download the file from S3
print((
'upload_file({}, {}, {})').format(
bucket_name,
bucket_key,
download_filename))
s3.Bucket(bucket_name).download_file(bucket_key, download_filename)
print((
'download_filename={} contents: {}').format(
download_filename,
open(download_filename).read()))
| 16,375
|
def test_2():
""" Test the backlog
"""
ann = lox.Announcement(backlog=0)
x_in = [1, 2, 3, 4, 5]
foo_soln, bar_soln = [], []
foo_q = ann.subscribe()
def foo():
x = foo_q.get()
foo_soln.append(x**2)
def bar():
x = bar_q.get()
bar_soln.append(x**3)
threads = []
for _ in x_in:
threads.append(Thread(target=foo))
threads.append(Thread(target=bar))
for x in x_in:
ann.put(x)
bar_q = ann.subscribe()
ann.finalize()
for t in threads:
t.start()
for t in threads:
t.join()
assert(len(foo_soln) == len(x_in))
assert(len(bar_soln) == len(x_in))
for x, r in zip(x_in, foo_soln):
assert(r == x**2)
for x, r in zip(x_in, bar_soln):
assert(r == x**3)
| 16,376
|
def execute_actor(actor_id,
worker_id,
execution_id,
image,
msg,
user=None,
d={},
privileged=False,
mounts=[],
leave_container=False,
fifo_host_path=None,
socket_host_path=None):
"""
Creates and runs an actor container and supervises the execution, collecting statistics about resource consumption
from the Docker daemon.
:param actor_id: the dbid of the actor; for updating worker status
:param worker_id: the worker id; also for updating worker status
:param execution_id: the id of the execution.
:param image: the actor's image; worker must have already downloaded this image to the local docker registry.
:param msg: the message being passed to the actor.
:param user: string in the form {uid}:{gid} representing the uid and gid to run the command as.
:param d: dictionary representing the environment to instantiate within the actor container.
:param privileged: whether this actor is "privileged"; i.e., its container should run in privileged mode with the
docker daemon mounted.
:param mounts: list of dictionaries representing the mounts to add; each dictionary mount should have 3 keys:
host_path, container_path and format (which should have value 'ro' or 'rw').
:param fifo_host_path: If not None, a string representing a path on the host to a FIFO used for passing binary data to the actor.
:param socket_host_path: If not None, a string representing a path on the host to a socket used for collecting results from the actor.
:return: result (dict), logs (str) - `result`: statistics about resource consumption; `logs`: output from docker logs.
"""
logger.debug("top of execute_actor()")
# initial stats object, environment, binds and volumes
result = {'cpu': 0,
'io': 0,
'runtime': 0 }
# instantiate docker client
cli = docker.APIClient(base_url=dd, version="auto")
# don't try to pass binary messages through the environment as these can cause
# broken pipe errors. the binary data will be passed through the FIFO momentarily.
if not fifo_host_path:
d['MSG'] = msg
binds = {}
volumes = []
# if container is privileged, mount the docker daemon so that additional
# containers can be started.
logger.debug("privileged: {}".format(privileged))
if privileged:
binds = {'/var/run/docker.sock':{
'bind': '/var/run/docker.sock',
'ro': False }}
volumes = ['/var/run/docker.sock']
# add a bind key and dictionary as well as a volume for each mount
for m in mounts:
binds[m.get('host_path')] = {'bind': m.get('container_path'),
'ro': m.get('format') == 'ro'}
volumes.append(m.get('host_path'))
host_config = cli.create_host_config(binds=binds, privileged=privileged)
# write binary data to FIFO if it exists:
if fifo_host_path:
try:
fifo = os.open(fifo_host_path, os.O_RDWR)
os.write(fifo, msg)
except Exception as e:
logger.error("Error writing the FIFO. Exception: {}".format(e))
os.remove(fifo_host_path)
raise DockerStartContainerError("Error writing to fifo: {}".format(e))
# set up results socket
try:
server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
server.bind(socket_host_path)
server.settimeout(RESULTS_SOCKET_TIMEOUT)
except Exception as e:
logger.error("could not instantiate or bind socket. Exception: {}".format(e))
raise e
# instantiate the results channel:
results_ch = ExecutionResultsChannel(actor_id, execution_id)
# create and start the container
logger.debug("Final container environment: {}".format(d))
logger.debug("Final binds: {} and host_config: {} for the container.".format(binds, host_config))
container = cli.create_container(image=image,
environment=d,
user=user,
volumes=volumes,
host_config=host_config)
    # get the UTC timestamp
start_time = get_current_utc_time()
# start the timer to track total execution time.
start = timeit.default_timer()
logger.debug("right before cli.start: {}".format(start))
try:
cli.start(container=container.get('Id'))
except Exception as e:
# if there was an error starting the container, user will need to debug
logger.info("Got exception starting actor container: {}".format(e))
raise DockerStartContainerError("Could not start container {}. Exception {}".format(container.get('Id'), str(e)))
# local bool tracking whether the actor container is still running
running = True
logger.debug("right before creating stats_cli: {}".format(timeit.default_timer()))
# create a separate cli for checking stats objects since these should be fast and we don't want to wait
stats_cli = docker.APIClient(base_url=dd, timeout=1, version="auto")
logger.debug("right after creating stats_cli: {}".format(timeit.default_timer()))
# under load, we can see UnixHTTPConnectionPool ReadTimeout's trying to create the stats_obj
# so here we are trying up to 3 times to create the stats object for a possible total of 3s
# timeouts
ct = 0
while ct < 3:
try:
stats_obj = stats_cli.stats(container=container.get('Id'), decode=True)
break
except ReadTimeout:
ct += 1
except Exception as e:
logger.error("Unexpected exception creating stats_obj. Exception: {}".format(e))
# in this case, we need to kill the container since we cannot collect stats;
running = False
logger.debug("right after creating stats_obj: {}".format(timeit.default_timer()))
while running:
datagram = None
try:
datagram = server.recv(MAX_RESULT_FRAME_SIZE)
except socket.timeout:
pass
except Exception as e:
logger.error("got exception from server.recv: {}".format(e))
logger.debug("right after try/except datagram block: {}".format(timeit.default_timer()))
if datagram:
try:
results_ch.put(datagram)
except Exception as e:
logger.error("Error trying to put datagram on results channel. Exception: {}".format(e))
logger.debug("right after results ch.put: {}".format(timeit.default_timer()))
try:
logger.debug("waiting on a stats obj: {}".format(timeit.default_timer()))
stats = next(stats_obj)
logger.debug("got the stats obj: {}".format(timeit.default_timer()))
except ReadTimeoutError:
# this is a ReadTimeoutError from docker, not requests. container is finished.
logger.debug("next(stats) just timed out: {}".format(timeit.default_timer()))
# container stopped before another stats record could be read, just ignore and move on
running = False
break
try:
result['cpu'] += stats['cpu_stats']['cpu_usage']['total_usage']
except KeyError as e:
logger.info("Got a KeyError trying to fetch the cpu object: {}".format(e))
try:
result['io'] += stats['networks']['eth0']['rx_bytes']
except KeyError as e:
logger.info("Got KeyError exception trying to grab the io object. running: {}; Exception: {}".format(running, e))
if running:
logger.debug("about to check container status: {}".format(timeit.default_timer()))
# we need to wait for the container id to be available
i = 0
while i < 10:
try:
c = cli.containers(all=True, filters={'id': container.get('Id')})[0]
break
except IndexError:
logger.error("Got an IndexError trying to get the container object.")
time.sleep(0.1)
i += 1
logger.debug("done checking status: {}; i: {}".format(timeit.default_timer(), i))
if i == 10:
logger.error("Never could retrieve the container object! container id: {}".format(container.get('Id')))
try:
cli.stop(container.get('Id'))
except Exception as e:
logger.error("Got another exception trying to stop the actor container. Exception: {}".format(e))
finally:
running = False
continue
state = c.get('State')
if not state == 'running':
logger.debug("container finished, final state: {}".format(state))
running = False
else:
# container still running; check if we are beyond the max_run_time
runtime = timeit.default_timer() - start
if max_run_time > 0 and max_run_time < runtime:
logger.info("hit runtime limit: {}".format(timeit.default_timer()))
cli.stop(container.get('Id'))
running = False
logger.debug("right after checking container state: {}".format(timeit.default_timer()))
logger.info("container stopped:{}".format(timeit.default_timer()))
stop = timeit.default_timer()
# get info from container execution, including exit code
try:
container_info = cli.inspect_container(container.get('Id'))
try:
container_state = container_info['State']
try:
exit_code = container_state['ExitCode']
except KeyError as e:
logger.error("Could not determine ExitCode for container {}. e: {}".format(container.get('Id'), e))
exit_code = 'undetermined'
except KeyError as e:
            logger.error("Could not determine final state for container {}. e: {}".format(container.get('Id'), e))
container_state = {'unavailable': True}
except docker.errors.APIError as e:
logger.error("Could not inspect container {}. e: {}".format(container.get('Id'), e))
logger.debug("right after getting container_info: {}".format(timeit.default_timer()))
# get logs from container
logs = cli.logs(container.get('Id'))
logger.debug("right after getting container logs: {}".format(timeit.default_timer()))
# get any additional results from the execution:
while True:
datagram = None
try:
datagram = server.recv(MAX_RESULT_FRAME_SIZE)
except socket.timeout:
break
except Exception as e:
logger.error("Got exception from server.recv: {}".format(e))
if datagram:
try:
results_ch.put(datagram)
except Exception as e:
logger.error("Error trying to put datagram on results channel. Exception: {}".format(e))
logger.debug("right after getting last execution results from datagram socket: {}".format(timeit.default_timer()))
if socket_host_path:
server.close()
os.remove(socket_host_path)
logger.debug("right after removing socket: {}".format(timeit.default_timer()))
# remove container, ignore errors
if not leave_container:
try:
cli.remove_container(container=container)
logger.info("Container removed.")
except Exception as e:
logger.error("Exception trying to remove actor: {}".format(e))
else:
logger.debug("leaving actor container since leave_container was True.")
logger.debug("right after removing actor container: {}".format(timeit.default_timer()))
if fifo_host_path:
os.close(fifo)
os.remove(fifo_host_path)
result['runtime'] = int(stop - start)
logger.debug("right after removing fifo; about to return: {}".format(timeit.default_timer()))
return result, logs, container_state, exit_code, start_time
| 16,377
|
def add_library(command):
"""
tests if the add library command is running properly
"""
from src.praxxis.library import list_library
namespace = app.main(command)
assert namespace.command == 'al' or namespace.command == "addlibrary"
assert namespace.path == "test"
| 16,378
|
def find_start_end(grid):
"""
Finds the source and destination block indexes from the list.
Args
grid: <list> the world grid blocks represented as a list of blocks (see Tutorial.pdf)
    Returns
        start: <int> source (emerald) block index in the list
        end: <int> destination (redstone) block index in the list
        air_block: <list> indexes of air blocks in the list
        diamond_block: <list> indexes of diamond blocks in the list
"""
#------------------------------------
#
# Fill and submit this code
#
# return (None, None)
#-------------------------------------
counter = 0
eb_index = None
rb_index = None
air_block=[]
diamond_block=[]
state=[]
for i in grid:
if i =='diamond_block':
diamond_block.append(counter)
if i =='air':
air_block.append(counter)
if i == 'emerald_block':
eb_index = counter
if i == 'redstone_block':
rb_index = counter
state.append(counter)
counter+=1
return (eb_index, rb_index,air_block,diamond_block)
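# Usage sketch (added for illustration, not part of the original snippet):
# on this 4-block grid the emerald (start) block is at index 1, the redstone
# (end) block at index 3, and the air blocks at indexes 0 and 2.
def _example_find_start_end():
    grid = ["air", "emerald_block", "air", "redstone_block"]
    return find_start_end(grid)  # -> (1, 3, [0, 2], [])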
| 16,379
|
def find_method_signature(klass, method: str) -> Optional[inspect.Signature]:
    """Look through a class' ancestors and fill out the method's signature.
    A class method has a signature. But it might not always be complete. When a parameter is not
    annotated, we might want to look through the ancestors and determine the annotation. This is
    very useful when you have a base class that has annotations and child classes that do not.
Examples
--------
>>> class Parent:
...
... def foo(self, x: int) -> int:
... ...
>>> find_method_signature(Parent, 'foo')
<Signature (self, x: int) -> int>
>>> class Child(Parent):
...
... def foo(self, x, y: float) -> str:
... ...
>>> find_method_signature(Child, 'foo')
<Signature (self, x: int, y: float) -> str>
"""
m = getattr(klass, method)
sig = inspect.signature(m)
params = []
for param in sig.parameters.values():
if param.name == "self" or param.annotation is not param.empty:
params.append(param)
continue
for ancestor in inspect.getmro(klass):
try:
ancestor_meth = inspect.signature(getattr(ancestor, m.__name__))
except AttributeError:
break
try:
ancestor_param = ancestor_meth.parameters[param.name]
except KeyError:
break
if ancestor_param.annotation is not param.empty:
param = param.replace(annotation=ancestor_param.annotation)
break
params.append(param)
return_annotation = sig.return_annotation
if return_annotation is inspect._empty:
for ancestor in inspect.getmro(klass):
try:
ancestor_meth = inspect.signature(getattr(ancestor, m.__name__))
except AttributeError:
break
if ancestor_meth.return_annotation is not inspect._empty:
return_annotation = ancestor_meth.return_annotation
break
return sig.replace(parameters=params, return_annotation=return_annotation)
| 16,380
|
def format_long_calc_line(line: LongCalcLine) -> LongCalcLine:
"""
    Return line with .latex attribute formatted with line breaks suitable
    for positioning within the latex "aligned" environment.
"""
latex_code = line.latex
long_latex = latex_code.replace("=", "\\\\&=") # Change all...
long_latex = long_latex.replace("\\\\&=", "&=", 1) # ...except the first one
line_break = "\\\\\n"
comment_space = ""
comment = ""
if line.comment:
comment_space = "\\;"
comment = format_strings(line.comment, comment=True)
line.latex = f"{long_latex}{comment_space}{comment}{line_break}"
return line
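# Usage sketch (added for illustration, not part of the original snippet):
# LongCalcLine comes from handcalcs; a minimal stand-in with .latex and
# .comment attributes is enough to show the line-break rewriting.
def _example_format_long_calc_line():
    class _FakeLine:
        latex = "x = a + b = 1 + 2 = 3"
        comment = ""
    line = format_long_calc_line(_FakeLine())
    # the first '=' becomes '&=', later ones become line-broken '\\&='
    return line.latex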
| 16,381
|
def run(data_fn, prop_missing=0., max_num_feature=-1,
feature_selection='random', k=10, data_dir='_data', out_dir='_out'):
"""Run RIDDLE classification interpretation pipeline.
Arguments:
data_fn: string
data file filename
prop_missing: float
proportion of feature observations which should be randomly masked;
values in [0, 1)
max_num_feature: int
maximum number of features to use
feature_selection: string
feature selection method; values = {'random', 'frequency', 'chi2'}
k: int
number of partitions for k-fold cross-validation
        data_dir: string
            directory where data files are located
out_dir: string
outer directory where outputs (e.g., results) should be saved
"""
from keras.models import load_model
from riddle import emr, feature_importance
from riddle.models import MLP
start = time.time()
base_out_dir = get_base_out_dir(out_dir, 'riddle', data_fn, prop_missing,
max_num_feature, feature_selection)
recursive_mkdir(base_out_dir)
# get common data
x_unvec, y, idx_feat_dict, idx_class_dict, icd9_descript_dict, perm_indices = (
get_preprocessed_data(data_dir, data_fn, prop_missing=prop_missing))
num_feature = len(idx_feat_dict)
num_class = len(idx_class_dict)
list_sums_D, list_sums_D2, list_sums_contribs = [], [], []
for k_idx in range(k):
full_out_dir = '{}/k_idx={}'.format(base_out_dir, k_idx)
print('\nPartition k = {}'.format(k_idx))
x_train_unvec, y_train, _, _, x_test_unvec, y_test = emr.get_k_fold_partition(
x_unvec, y, k_idx=k_idx, k=k, perm_indices=perm_indices)
if max_num_feature > 0: # select features and re-encode
feat_encoding_dict, idx_feat_dict = select_features(
x_train_unvec, y_train, idx_feat_dict,
method=feature_selection, num_feature=num_feature,
max_num_feature=max_num_feature)
x_test_unvec = subset_reencode_features(
x_test_unvec, feat_encoding_dict)
num_feature = max_num_feature
# interpret
start = time.time()
temp_mlp = MLP(num_feature=num_feature, num_class=num_class)
hdf5_path = full_out_dir + '/model.h5'
sums_D, sums_D2, sums_contribs, pairs = \
feature_importance.get_diff_sums(
hdf5_path,
x_test_unvec,
process_x_func=temp_mlp.process_x,
num_feature=num_feature,
num_class=num_class)
with open(full_out_dir + '/sums_D.pkl', 'wb') as f:
pickle.dump(sums_D, f)
with open(full_out_dir + '/sums_D2.pkl', 'wb') as f:
pickle.dump(sums_D2, f)
with open(full_out_dir + '/sums_contribs.pkl', 'wb') as f:
pickle.dump(sums_contribs, f)
list_sums_D.append(sums_D)
list_sums_D2.append(sums_D2)
list_sums_contribs.append(sums_contribs)
def compute_total_sums(list_sums):
total_sums = list_sums[0]
for i in range(1, len(list_sums)):
for j in range(len(total_sums)):
total_sums[j] = np.add(total_sums[j], list_sums[i][j])
return total_sums
total_sums_D = compute_total_sums(list_sums_D)
total_sums_D2 = compute_total_sums(list_sums_D2)
total_sums_contribs = compute_total_sums(list_sums_contribs)
num_sample = len(x_unvec)
run_interpretation_summary(
x_unvec, y, total_sums_D, total_sums_D2, total_sums_contribs,
idx_feat_dict=idx_feat_dict, idx_class_dict=idx_class_dict,
icd9_descript_dict=icd9_descript_dict, pairs=pairs,
num_sample=num_sample, full_out_dir=base_out_dir)
print('Computed DeepLIFT scores and analysis in {:.4f} seconds'
.format(time.time() - start))
print('-' * 72)
print()
| 16,382
|
def h_lgn(t, mu, sigma, normalize=False):
""" Log-normal density
Args:
t: input argument (array)
mu: mean parameter (-infty,infty)
sigma: std parameter > 0
normalize: trapz integral normalization over t
Returns:
function values
"""
y = np.zeros(len(t))
y[t>0] = 1/(t[t>0]*sigma*np.sqrt(2*np.pi)) * np.exp(-(np.log(t[t>0]) - mu)**2 / (2*sigma**2))
y[np.isinf(y) | np.isnan(y)] = 0 # Protect underflows
if normalize:
y /= np.abs(trapz(x=t, y=y)) # abs for numerical protection
return y
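# Usage sketch (added for illustration, not part of the original snippet):
# evaluate a normalized log-normal density on a grid; normalize=True relies on
# the module-level `trapz` import assumed by the function above.
def _example_h_lgn():
    import numpy as np
    t = np.linspace(0.0, 10.0, 1000)
    y = h_lgn(t, mu=0.0, sigma=0.5, normalize=True)
    return t, y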
| 16,383
|
def align_background(data, align='auto'):
"""
Determine the Qz value associated with the background measurement.
The *align* flag determines which background points are matched
to the sample points. It can be 'sample' if background is
measured using an offset from the sample angle, or 'detector'
if it is offset from detector angle. If *align* is 'auto', then
use 'Qz_target' to align the background scan.
For 'auto' alignment without Qz_target set, we can only distinguish
relative and constant offsets, and cannot determine which of sample
and detector is offset from the specular condition, so we must rely
on convention. If the offset is constant for each angle, then it is
    assumed to be a sample offset. If the offset is proportional to
the angle (and therefore offset divided by angle is constant), then
it is assumed to be a detector offset. If neither condition is met,
it is assumed to be a sample offset.
The 'auto' test is robust: 90% of the points should be within 5% of the
median value of the vector for the offset to be considered a constant.
**Inputs**
data (refldata) : background data with unknown $q$
align (opt:auto|sample|detector) : angle which determines $q_z$
**Returns**
output (refldata) : background with known $q$
2015-12-17 Paul Kienzle
2020-10-16 Paul Kienzle rename 'offset' to 'align'
"""
from .background import set_background_alignment
data = copy(data)
set_background_alignment(data, align)
return data
| 16,384
|
def another_function_requiring_decoration():
"""Hey you! Decorate me!"""
print(
"I am the function which needs some decoration to remove my foul smell"
)
| 16,385
|
def get_bounds_from_config(b, state, base_units):
"""
Method to take a 3- or 4-tuple state definition config argument and return
tuples for the bounds and default value of the Var object.
Expects the form (lower, default, upper, units) where units is optional
Args:
b - StateBlock on which the state vars are to be constructed
state - name of state var as a string (to be matched with config dict)
base_units - base units of state var to be used if conversion required
Returns:
bounds - 2-tuple of state var bounds in base units
default_val - default value of state var in base units
"""
try:
var_config = b.params.config.state_bounds[state]
except (KeyError, TypeError):
# State definition missing
return (None, None), None
if len(var_config) == 4:
# Units provided, need to convert values
bounds = (pyunits.convert_value(var_config[0],
from_units=var_config[3],
to_units=base_units),
pyunits.convert_value(var_config[2],
from_units=var_config[3],
to_units=base_units))
default_val = pyunits.convert_value(var_config[1],
from_units=var_config[3],
to_units=base_units)
else:
bounds = (var_config[0], var_config[2])
default_val = var_config[1]
return bounds, default_val
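# Illustration (added, not part of the original snippet) of the two accepted
# `state_bounds` layouts: a 3-tuple (lower, default, upper) already in base
# units, or a 4-tuple with explicit units to convert from. The Pyomo units
# import shown here is an assumption about the surrounding property package.
def _example_state_bounds_config():
    from pyomo.environ import units as pyunits
    return {
        "flow_mol": (0, 100, 1000),  # (lower, default, upper) in base units
        "temperature": (273.15, 298.15, 373.15, pyunits.K),  # with units
    }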
| 16,386
|
def notify_user_activation(user, request=None):
"""Send mail for user activation"""
security = query_utility(ISecurityManager)
settings = INotificationSettings(security)
if not settings.enable_notifications: # pylint: disable=assignment-from-no-return
LOGGER.info("Security notifications disabled, no message sent...")
return
mailer = settings.get_mailer() # pylint: disable=assignment-from-no-return
if mailer is None:
LOGGER.warning("Can't find mailer utility, no notification message sent!")
return
if request is None:
request = check_request()
translate = request.localizer.translate
i18n_settings = II18n(settings)
message_text, template_name = None, None
if user.self_registered:
# pylint: disable=assignment-from-no-return
message_text = i18n_settings.query_attribute('registration_template', request=request)
if not message_text:
template_name = 'templates/register-message.pt'
elif user.wait_confirmation:
# pylint: disable=assignment-from-no-return
message_text = i18n_settings.query_attribute('confirmation_template', request=request)
if not message_text:
template_name = 'templates/register-info.pt'
site = get_parent(request.context, ISite)
if message_text is not None:
message_text = message_text.format(**user.to_dict())
elif template_name is not None:
message_text = render(template_name, request=request, value={
'user': user,
'site': site,
'settings': settings
})
html_body = render('templates/register-body.pt', request=request, value={
'user': user,
'site': site,
'settings': settings,
'message': message_text
})
message = Message(
subject=translate(_("{prefix}Please confirm registration")).format(
prefix="{prefix} ".format(prefix=settings.subject_prefix)
if settings.subject_prefix else ''),
sender='{name} <{email}>'.format(name=settings.sender_name,
email=settings.sender_email),
recipients=("{firstname} {lastname} <{email}>".format(firstname=user.firstname,
lastname=user.lastname,
email=user.email),),
html=Attachment(data=html_body,
content_type='text/html; charset=utf-8',
disposition='inline',
transfer_encoding='quoted-printable'),
body=Attachment(data=html_to_text(html_body),
content_type='text/plain; charset=utf-8',
disposition='inline',
transfer_encoding='quoted-printable'))
mailer.send(message)
| 16,387
|
def read_train_data():
"""
train_data.shape = (73257, 32, 32, 3)
train_label.shape = (73257,)
extra_data.shape = (531131, 32, 32, 3)
extra_label.shape = (531131,)
data.shape = (604388, 32, 32, 3)
labels.shape = (604388,)
"""
train_data, train_label = read_images(full_data_dir+'train_32x32.mat')
extra_data, extra_label = read_images(full_data_dir+'extra_32x32.mat')
data = np.concatenate( (train_data, extra_data) )
label = np.concatenate( (train_label, extra_label) )
return data, label
| 16,388
|
def laguerreFunction(n, alpha, t, normalized=True):
"""Evaluate Laguerre function using scipy.special"""
if normalized:
Z = np.exp( .5*sps.gammaln(n+1) - .5*sps.gammaln(n+alpha+1) )
else:
Z = 1
return Z * np.sqrt(mu(alpha,t)) * sps.eval_genlaguerre(n, alpha, t)
| 16,389
|
def dispatch_scan(asset_id, user_id, policy):
"""Main method for periodic scan task dispatching."""
asset = AssetInstance.objects.get(id=asset_id)
str_asset_id = str(asset.id)
str_user_id = str(user_id)
scheduled = PeriodicTask.objects.filter(name__contains='ps-'+str(asset.id))
if policy.repeat and not scheduled:
# Period cron definitions
if policy.repeat_freq == 'DAILY':
print('Adding DAILY periodic scan for '+str(asset_id))
schedule, _ = CrontabSchedule.objects.get_or_create(
minute='0',
hour='8',
day_of_week='*',
day_of_month='*',
month_of_year='*',
)
elif policy.repeat_freq == 'WEEKLY':
print('Adding WEEKLY periodic scan for ' + str(asset_id))
schedule, _ = CrontabSchedule.objects.get_or_create(
minute='0',
hour='8',
day_of_week='mon',
day_of_month='*',
month_of_year='*',
)
else:
print('Adding MONTHLY periodic scan for ' + str(asset_id))
schedule, _ = CrontabSchedule.objects.get_or_create(
minute='0',
hour='8',
day_of_week='mon',
day_of_month='1-7',
month_of_year='*',
)
PeriodicTask.objects.create(
crontab=schedule,
name='ps-'+str(asset.id)+'-'+str(policy.repeat_freq),
task='pulsar.tasks.scan_wrapper',
args=json.dumps([str_asset_id, str_user_id]),
)
| 16,390
|
def end(s):
"""Select the mobile or weight hanging at the end of a side."""
assert is_side(s), "must call end on a side"
return branches(s)[0]
| 16,391
|
def main():
"""
The main function (runs the GUI and the Crawler).
"""
enable_js_preference() # fix javascript to be enabled on chrome browser
g = GuiMenu() # build GuiMenu object
fb_urls = g.fb_urls # facebook urls to crawl
fb_mobile_urls = g.fb_mobile_urls # facebook urls to crawl in mobile version (when js disabled)
ids_to_check_on_tineye = []
if g.check_on_tineye:
ids_to_check_on_tineye = g.profiles_lst # profiles to upload & search their photos on TinEye
if fb_urls or fb_mobile_urls or ids_to_check_on_tineye:
print(fb_urls)
print(fb_mobile_urls)
print(ids_to_check_on_tineye)
# build 'FacebookCrawler' object
scraper = FacebookCrawler(fb_urls, fb_mobile_urls, ids_to_check_on_tineye)
scraper.crawl_all_targets()
if g.build_profiles and g.profiles_lst: # build profile is True
sleep(3)
build_profiles(g.profiles_lst)
| 16,392
|
def bam_to_fasta(source_bam, dest_fasta, sam_flag=4,
debug=False, intermediate_sam=True):
"""
Convert a .bam file to a .fasta using samtools and the specified
samtools flag.
:param source_bam: file path to .bam file to extract reads from
:param dest_fasta: file path to save resulting .fasta file to
:param sam_flag: samtools flag to use.
:param debug: run in debug mode? Passed to all envoy shell commands.
:param intermediate_sam: Write an intermediate .sam file (that gets
deleted) on the way to the .fasta file? Currently only True is
supported; envoy truncated my piped commands at 8.0kb in tests.
:return:
"""
# dictionary that converts description of a samtools flag into a
# numerical value used in the samtools call.
samtools_flag_converter = {'unmapped': 4, 'multiple': 256}
# if the samtools flag passed was a string, convert it to a bit flag.
if type(sam_flag) == str:
try:
sam_flag = samtools_flag_converter[sam_flag]
except LookupError:
print("error: sam string {} could not be converted to " \
"a samtools command".format(sam_flag))
if intermediate_sam:
print("run bam_to_fasta() by making an intermediate .sam file")
else:
print("run bam_to_fasta() without making an intermediate .sam file")
# make sure the .bam file exists
assert(os.path.exists(source_bam))
if intermediate_sam:
# run just the first command and save to an intermediate file
# write the temp file into the .fasta dir.
        intermediate_sam_path = os.path.splitext(dest_fasta)[0] + '.sam'
command_1 = \
"/work/software/samtools/bin/samtools view -f {} {}".format(
sam_flag, source_bam)
print("run shell command that makes intermediate .sam file:")
print(command_1)
print("save to: {}".format(intermediate_sam_path))
shell(command_1, intermediate_sam_path, debug=debug)
# confirm the .sam file was made.
print('file {} exists: {}'.format(
intermediate_sam_path, check_file_exists(intermediate_sam_path)))
# can use triple quotes to have mixed ' and " in python.
# source:
# http://stackoverflow.com/questions/15280050/calling-awk-from-python
command_2 = """ awk '{OFS="\\t"; print ">"$1"\\n"$10}' """
if intermediate_sam:
# run samtools on the intermediate .sam file
command = command_2 + intermediate_sam_path
print('run sam to fasta command:')
print(command)
print('save to: {}'.format(dest_fasta))
shell(command, dest_fasta, debug=debug)
# remove the intermediate .sam file
print('remove the intermediate .sam file: {}'.format(
intermediate_sam_path))
command = 'rm {}'.format(intermediate_sam_path)
        print('rm command: \n {}'.format(command))
shell(command, debug=debug)
else:
# SKIPPING INTERMEDIATE .sam FILE DOESN'T WORK! Piping w/ envoy
# limits the file to 8kb in my experience.
# This loop runs if you don't make an intermediate file.
print("intermediate_file set to {}: don't write .sam "
"on way to .fasta".format(intermediate_sam))
command_1 = \
"/work/software/samtools/bin/samtools view -f {} {}".format(
sam_flag, source_bam)
command_string = command_1 + " | " + command_2
print("run this shell command that takes .bam to .fasta: ")
print(command_string)
print("save standard out to: {}".format(dest_fasta))
# run the command.
shell(command_string, outfile=dest_fasta, debug=debug)
print("!!!!!!!! WARNING !!!!!!!!!! Files are truncated to 8.0MB "
"if you don't write an intermediate .sam file")
# WORKS:
# command_string = """ /work/software/samtools/bin/samtools view -f 4
# /gscratch/lidstrom/meta4_bins/workspace/LakWasM112_LOW13_2/bwa/
# LakWasM112_LOW13_2.sorted.bam | awk '{OFS="\t"; print ">"$1"\\n"$10}' """
# # don't run the >. Use envoy, wrapped in shell() to do this.
# # - > ./fasta_files/112_LOW13_unmapped.fasta """
pass
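# Usage sketch (added for illustration, not part of the original snippet):
# file names are placeholders; extracts unmapped reads via an intermediate
# .sam file, which is the only mode that currently works reliably. Relies on
# the module-level shell() helper used by the function above.
def _example_bam_to_fasta():
    bam_to_fasta("sample.sorted.bam", "sample_unmapped.fasta",
                 sam_flag="unmapped", intermediate_sam=True)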
| 16,393
|
def get_ca_pos_from_atoms(df, atoms):
"""Look up alpha carbon positions of provided atoms."""
ca = df[df['atom_name'] == 'CA'].reset_index()
nb = ca.reindex(atoms)
nb = nb.reset_index().set_index('index')
return nb
| 16,394
|
def split_inline_box(context, box, position_x, max_x, skip_stack, containing_block, containing_page, absolute_boxes,
fixed_boxes, line_placeholders, waiting_floats, line_children):
"""Same behavior as split_inline_level."""
# In some cases (shrink-to-fit result being the preferred width)
# max_x is coming from Pango itself,
# but floating point errors have accumulated:
# width2 = (width + X) - X # in some cases, width2 < width
# Increase the value a bit to compensate and not introduce
# an unexpected line break. The 1e-9 value comes from PEP 485.
max_x *= 1 + 1e-9
is_start = skip_stack is None
initial_position_x = position_x
initial_skip_stack = skip_stack
assert isinstance(box, (boxes.LineBox, boxes.InlineBox))
left_spacing = (box.padding_left + box.margin_left +
box.border_left_width)
right_spacing = (box.padding_right + box.margin_right +
box.border_right_width)
content_box_left = position_x
children = []
waiting_children = []
preserved_line_break = False
first_letter = last_letter = None
float_widths = {'left': 0, 'right': 0}
float_resume_at = 0
if box.style['position'] == 'relative':
absolute_boxes = []
if is_start:
skip = 0
else:
skip, skip_stack = skip_stack
for i, child in enumerate(box.children[skip:]):
index = i + skip
child.position_y = box.position_y
if child.is_absolutely_positioned():
child.position_x = position_x
placeholder = AbsolutePlaceholder(child)
line_placeholders.append(placeholder)
waiting_children.append((index, placeholder))
if child.style['position'] == 'absolute':
absolute_boxes.append(placeholder)
else:
fixed_boxes.append(placeholder)
continue
elif child.is_floated():
child.position_x = position_x
float_width = shrink_to_fit(context, child, containing_block.width)
# To retrieve the real available space for floats, we must remove
# the trailing whitespaces from the line
non_floating_children = [
child_ for _, child_ in (children + waiting_children)
if not child_.is_floated()]
if non_floating_children:
float_width -= trailing_whitespace_size(
context, non_floating_children[-1])
if float_width > max_x - position_x or waiting_floats:
# TODO: the absolute and fixed boxes in the floats must be
# added here, and not in iter_line_boxes
waiting_floats.append(child)
else:
child = float_layout(context, child, containing_block, containing_page, absolute_boxes, fixed_boxes)
waiting_children.append((index, child))
# Translate previous line children
dx = max(child.margin_width(), 0)
float_widths[child.style['float']] += dx
if child.style['float'] == 'left':
if isinstance(box, boxes.LineBox):
# The parent is the line, update the current position
# for the next child. When the parent is not the line
# (it is an inline block), the current position of the
# line is updated by the box itself (see next
# split_inline_level call).
position_x += dx
elif child.style['float'] == 'right':
# Update the maximum x position for the next children
max_x -= dx
for _, old_child in line_children:
if not old_child.is_in_normal_flow():
continue
if ((child.style['float'] == 'left' and
box.style['direction'] == 'ltr') or
(child.style['float'] == 'right' and
box.style['direction'] == 'rtl')):
old_child.translate(dx=dx)
float_resume_at = index + 1
continue
elif child.is_running():
running_name = child.style['position'][1]
page = context.current_page
context.running_elements[running_name][page].append(child)
continue
last_child = (index == len(box.children) - 1)
available_width = max_x
child_waiting_floats = []
new_child, resume_at, preserved, first, last, new_float_widths = (
split_inline_level(context, child, position_x, available_width, skip_stack, containing_block,
containing_page,
absolute_boxes, fixed_boxes, line_placeholders, child_waiting_floats, line_children))
if last_child and right_spacing and resume_at is None:
# TODO: we should take care of children added into absolute_boxes,
# fixed_boxes and other lists.
if box.style['direction'] == 'rtl':
available_width -= left_spacing
else:
available_width -= right_spacing
new_child, resume_at, preserved, first, last, new_float_widths = (
split_inline_level(context, child, position_x, available_width, skip_stack, containing_block,
containing_page,
absolute_boxes, fixed_boxes, line_placeholders, child_waiting_floats, line_children))
if box.style['direction'] == 'rtl':
max_x -= new_float_widths['left']
else:
max_x -= new_float_widths['right']
skip_stack = None
if preserved:
preserved_line_break = True
can_break = None
if last_letter is True:
last_letter = ' '
elif last_letter is False:
            last_letter = '\xa0'  # no-break space
elif box.style['white_space'] in ('pre', 'nowrap'):
can_break = False
if can_break is None:
if None in (last_letter, first):
can_break = False
else:
can_break = can_break_text(
last_letter + first, child.style['lang'])
if can_break:
children.extend(waiting_children)
waiting_children = []
if first_letter is None:
first_letter = first
if child.trailing_collapsible_space:
last_letter = True
else:
last_letter = last
if new_child is None:
# May be None where we have an empty TextBox.
assert isinstance(child, boxes.TextBox)
else:
if isinstance(box, boxes.LineBox):
line_children.append((index, new_child))
# TODO: we should try to find a better condition here.
trailing_whitespace = (
isinstance(new_child, boxes.TextBox) and
not new_child.text.strip())
margin_width = new_child.margin_width()
new_position_x = new_child.position_x + margin_width
if new_position_x > max_x and not trailing_whitespace:
if waiting_children:
# Too wide, let's try to cut inside waiting children,
# starting from the end.
# TODO: we should take care of children added into
# absolute_boxes, fixed_boxes and other lists.
waiting_children_copy = waiting_children[:]
break_found = False
while waiting_children_copy:
child_index, child = waiting_children_copy.pop()
# TODO: should we also accept relative children?
if (child.is_in_normal_flow() and
can_break_inside(child)):
# We break the waiting child at its last possible
# breaking point.
# TODO: The dirty solution chosen here is to
# decrease the actual size by 1 and render the
# waiting child again with this constraint. We may
# find a better way.
max_x = child.position_x + child.margin_width() - 1
child_new_child, child_resume_at, _, _, _, _ = (
split_inline_level(context, child, child.position_x, max_x, None, box, containing_page,
absolute_boxes, fixed_boxes, line_placeholders, waiting_floats,
line_children))
# As PangoLayout and PangoLogAttr don't always
# agree, we have to rely on the actual split to
# know whether the child was broken.
# https://github.com/Kozea/WeasyPrint/issues/614
break_found = child_resume_at is not None
if child_resume_at is None:
# PangoLayout decided not to break the child
child_resume_at = (0, None)
# TODO: use this when Pango is always 1.40.13+:
# break_found = True
children = children + waiting_children_copy
if child_new_child is None:
# May be None where we have an empty TextBox.
assert isinstance(child, boxes.TextBox)
else:
children += [(child_index, child_new_child)]
# As this child has already been broken
# following the original skip stack, we have to
# add the original skip stack to the partial
# skip stack we get after the new rendering.
# Combining skip stacks is a bit complicated
# We have to:
# - set `child_index` as the first number
# - append the new stack if it's an absolute one
# - otherwise append the combined stacks
# (resume_at + initial_skip_stack)
# extract the initial index
if initial_skip_stack is None:
current_skip_stack = None
initial_index = 0
else:
initial_index, current_skip_stack = (
initial_skip_stack)
# child_resume_at is an absolute skip stack
if child_index > initial_index:
resume_at = (child_index, child_resume_at)
break
# combine the stacks
current_resume_at = child_resume_at
stack = []
while current_skip_stack and current_resume_at:
skip, current_skip_stack = (
current_skip_stack)
resume, current_resume_at = (
current_resume_at)
stack.append(skip + resume)
if resume != 0:
break
resume_at = current_resume_at
while stack:
resume_at = (stack.pop(), resume_at)
# insert the child index
resume_at = (child_index, resume_at)
break
if break_found:
break
if children:
# Too wide, can't break waiting children and the inline is
# non-empty: put child entirely on the next line.
resume_at = (children[-1][0] + 1, None)
child_waiting_floats = []
break
position_x = new_position_x
waiting_children.append((index, new_child))
waiting_floats.extend(child_waiting_floats)
if resume_at is not None:
children.extend(waiting_children)
resume_at = (index, resume_at)
break
else:
children.extend(waiting_children)
resume_at = None
is_end = resume_at is None
new_box = box.copy_with_children(
[box_child for index, box_child in children])
new_box.remove_decoration(start=not is_start, end=not is_end)
if isinstance(box, boxes.LineBox):
# We must reset line box width according to its new children
in_flow_children = [
box_child for box_child in new_box.children
if box_child.is_in_normal_flow()]
if in_flow_children:
new_box.width = (
in_flow_children[-1].position_x +
in_flow_children[-1].margin_width() -
new_box.position_x)
else:
new_box.width = 0
else:
new_box.position_x = initial_position_x
if box.style['box_decoration_break'] == 'clone':
translation_needed = True
else:
translation_needed = (
is_start if box.style['direction'] == 'ltr' else is_end)
if translation_needed:
for child in new_box.children:
child.translate(dx=left_spacing)
new_box.width = position_x - content_box_left
new_box.translate(dx=float_widths['left'], ignore_floats=True)
line_height, new_box.baseline = strut_layout(box.style, context)
new_box.height = box.style['font_size']
half_leading = (line_height - new_box.height) / 2.
# Set margins to the half leading but also compensate for borders and
# paddings. We want margin_height() == line_height
new_box.margin_top = (half_leading - new_box.border_top_width -
new_box.padding_top)
new_box.margin_bottom = (half_leading - new_box.border_bottom_width -
new_box.padding_bottom)
if new_box.style['position'] == 'relative':
for absolute_box in absolute_boxes:
absolute_layout(context, absolute_box, new_box, containing_page, fixed_boxes)
if resume_at is not None:
if resume_at[0] < float_resume_at:
resume_at = (float_resume_at, None)
return (
new_box, resume_at, preserved_line_break, first_letter, last_letter,
float_widths)
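# Illustrative sketch (not part of the fragment above): the skip-stack
# combination performed in the inner loop, pulled out as a standalone helper.
# Skip stacks here are nested (skip, substack) tuples; the two stacks are
# walked in parallel, the skips at each shared level are added, and the tail
# of the resume stack is kept once a non-zero resume index is reached. The
# name combine_skip_stacks is hypothetical.
def combine_skip_stacks(skip_stack, resume_at):
    stack = []
    current_skip_stack, current_resume_at = skip_stack, resume_at
    while current_skip_stack and current_resume_at:
        skip, current_skip_stack = current_skip_stack
        resume, current_resume_at = current_resume_at
        stack.append(skip + resume)
        if resume != 0:
            break
    combined = current_resume_at
    while stack:
        combined = (stack.pop(), combined)
    return combined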
| 16,395
|
def LoadComponent(self,filename): # real signature unknown; restored from __doc__
"""
LoadComponent(self: object,filename: str) -> object
LoadComponent(self: object,stream: Stream) -> object
LoadComponent(self: object,xmlReader: XmlReader) -> object
LoadComponent(self: object,filename: TextReader) -> object
LoadComponent(self: object,reader: XamlXmlReader) -> object
"""
return object()
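# Hedged usage sketch (not part of the stub above): the overloads look like
# the auto-generated signatures of IronPython/WPF's wpf.LoadComponent. A
# typical call, assuming a Window subclass with a matching 'MyWindow.xaml'
# file, would be:
#
#     import wpf
#     from System.Windows import Window
#
#     class MyWindow(Window):
#         def __init__(self):
#             wpf.LoadComponent(self, 'MyWindow.xaml')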
| 16,396
|
def split_bits(word: int, amounts: list):
"""
    Take a 16-bit word and a list of bit-field widths and return the fields
    split out of the word, most significant bits first. See the doctests
    for concrete examples.
>>> [bin(x) for x in split_bits(0b1001111010000001, [16])]
['0b1001111010000001']
>>> [bin(x) for x in split_bits(0b1001111010000001, [8,8])]
['0b10011110', '0b10000001']
    The widths must add up to the whole 16 bits:
>>> [bin(x) for x in split_bits(0b1001111010000001, [8])]
Traceback (most recent call last):
AssertionError: expected to split exactly one word
This is a test splitting MOVE.B (A1),D4
>>> [bin(x) for x in split_bits(0b0001001010000100, [2,2,3,3,3,3])]
['0b0', '0b1', '0b1', '0b10', '0b0', '0b100']
"""
nums = []
pos = 0
for amount in amounts:
# get a group of "amount" 1's
mask = 2**amount - 1
# shift mask to the left so it aligns where the last
# iteration ended off
shift = 16 - amount - pos
mask = mask << shift
# update location in the word
pos += amount
        # extract the relevant bits
bits = word & mask
# shift back and insert the list to be returned
nums.append(bits >> shift)
assert pos == 16, 'expected to split exactly one word'
return nums
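# Illustrative counterpart (not in the original source): packing fields back
# into a 16-bit word, the inverse of split_bits above, assuming the same
# most-significant-bits-first ordering. Round trip:
# join_bits(split_bits(word, amounts), amounts) == word.
def join_bits(fields, amounts):
    assert sum(amounts) == 16, 'expected to build exactly one word'
    word = 0
    for value, amount in zip(fields, amounts):
        # shift the word left and append the masked field in the low bits
        word = (word << amount) | (value & (2 ** amount - 1))
    return word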
| 16,397
|
def rasterio_to_gdir(gdir, input_file, output_file_name,
resampling='cubic'):
"""Reprojects a file that rasterio can read into the glacier directory.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory
input_file : str
path to the file to reproject
output_file_name : str
name of the output file (must be in cfg.BASENAMES)
resampling : str
        'nearest', 'bilinear', 'cubic', 'cubic_spline', or one of
https://rasterio.readthedocs.io/en/latest/topics/resampling.html
"""
output_file = gdir.get_filepath(output_file_name)
assert '.tif' in output_file, 'output_file should end with .tif'
if not gdir.has_file('dem'):
raise InvalidWorkflowError('Need a dem.tif file to reproject to')
with rasterio.open(input_file) as src:
kwargs = src.meta.copy()
data = src.read(1)
with rasterio.open(gdir.get_filepath('dem')) as tpl:
kwargs.update({
'crs': tpl.crs,
'transform': tpl.transform,
'width': tpl.width,
'height': tpl.height
})
with rasterio.open(output_file, 'w', **kwargs) as dst:
for i in range(1, src.count + 1):
dest = np.zeros(shape=(tpl.height, tpl.width),
dtype=data.dtype)
reproject(
source=rasterio.band(src, i),
destination=dest,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=tpl.transform,
dst_crs=tpl.crs,
resampling=getattr(Resampling, resampling)
)
dst.write(dest, indexes=i)
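# Hedged usage sketch (not from the OGGM source): assuming 'gridded_data' is a
# hypothetical name already registered in cfg.BASENAMES, a call reprojecting
# an arbitrary GeoTIFF onto the glacier grid would look like:
#
#     rasterio_to_gdir(gdir, 'some_raster.tif', 'gridded_data',
#                      resampling='bilinear')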
| 16,398
|
def global_tracer(ot_tracer):
"""A function similar to one OpenTracing users would write to initialize
their OpenTracing tracer.
"""
set_global_tracer(ot_tracer)
return ot_tracer
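# Hedged usage sketch: typical bootstrap code built on the helper above,
# assuming set_global_tracer registers the tracer with the opentracing
# package and ot_tracer is any OpenTracing-compatible Tracer instance:
#
#     import opentracing
#     tracer = global_tracer(opentracing.Tracer())  # no-op tracer by default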
| 16,399
|