| content (string, length 22–815k) | id (int64, 0–4.91M) |
|---|---|
def render_content(template, context={}, request=None):
"""Renderiza el contenido para un email a partir de la plantilla y el contexto.
Deben existir las versiones ".html" y ".txt" de la plantilla.
Adicionalmente, si se recibe el request, se utilizará para el renderizado.
"""
if request:
context_class = RequestContext(request, context)
else:
context_class = Context(context)
template = Template(template)
return {
"text_content": template.render(context_class),
"html_content": template.render(context_class)
}
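Note that the snippet renders the same compiled template string for both keys, while the docstring asks for separate ".txt" and ".html" versions. A minimal sketch of how that intent could look with Django's template loader (the helper name and template base path are hypothetical):

from django.template.loader import render_to_string

def render_content_from_files(template_base, context=None, request=None):
    # Hypothetical variant: render "<base>.txt" and "<base>.html" separately,
    # passing the request so that context processors apply when it is available.
    context = context or {}
    return {
        "text_content": render_to_string(f"{template_base}.txt", context, request=request),
        "html_content": render_to_string(f"{template_base}.html", context, request=request),
    }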
| 22,300
|
def calculate_psi(expected, actual, buckettype="bins", breakpoints=None, buckets=10, axis=0):
"""Calculate the PSI (population stability index) across all variables
Args:
expected: numpy matrix of original values
actual: numpy matrix of new values
buckettype: type of strategy for creating buckets: "bins" splits into even-width splits,
"quantiles" splits into quantile buckets, "customize" splits into customized buckets
breakpoints: if buckettype is "customize", pass a numpy array as breakpoints
buckets: number of quantiles to use in bucketing variables
axis: axis by which variables are defined, 0 for vertical, 1 for horizontal
Returns:
psi_values: ndarray of psi values for each variable
"""
def psi(expected_array, actual_array, buckets, breaks=None):
"""Calculate the PSI for a single variable
Args:
expected_array: numpy array of original values
actual_array: numpy array of new values
buckets: number of percentile ranges to bucket the values into
breaks: default None, customize breakpoints
Returns:
psi_value: calculated PSI value
"""
breakpoints = np.arange(0, buckets + 1) / (buckets) * 100
if buckettype == 'bins':
breakpoints = scale_range(breakpoints, np.min(expected_array), np.max(expected_array))
elif buckettype == 'quantiles':
breakpoints = np.stack([np.percentile(expected_array, b) for b in breakpoints])
elif buckettype == 'customize':
assert breaks is not None, "buckettype is customize, breakpoints should not be None"
breakpoints = breaks
expected_percents = np.histogram(expected_array, breakpoints)[0] / len(expected_array)
actual_percents = np.histogram(actual_array, breakpoints)[0] / len(actual_array)
psi_value = sum(sub_psi(expected_percents[i], actual_percents[i]) for i in range(0, len(expected_percents)))
return psi_value
if len(expected.shape) == 1:
psi_values = np.empty(len(expected.shape))
else:
psi_values = np.empty(expected.shape[axis])
for i in range(0, len(psi_values)):
if len(psi_values) == 1:
psi_values = psi(expected, actual, buckets, breakpoints)
elif axis == 0:
psi_values[i] = psi(expected[:,i], actual[:,i], buckets, breakpoints)
elif axis == 1:
psi_values[i] = psi(expected[i,:], actual[i,:], buckets, breakpoints)
return psi_values
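The snippet relies on sub_psi and scale_range, which are not shown here. A minimal sketch of what they might look like, assuming the usual PSI term (actual% - expected%) * ln(actual% / expected%) and clamping empty bins with a small epsilon:

import numpy as np

def scale_range(breakpoints, low, high):
    # Map percentile-style breakpoints (0..100) onto the value range [low, high].
    breakpoints = np.asarray(breakpoints, dtype=float)
    return breakpoints / 100.0 * (high - low) + low

def sub_psi(e_perc, a_perc, eps=1e-4):
    # One PSI contribution; clamp empty bins so the logarithm stays finite.
    e_perc = max(e_perc, eps)
    a_perc = max(a_perc, eps)
    return (a_perc - e_perc) * np.log(a_perc / e_perc)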
| 22,301
|
def test_blur_effect_h_size():
"""Test that the blur metric decreases with increasing size of the
re-blurring filter.
"""
image = cp.array(astronaut())
B0 = blur_effect(image, h_size=3, channel_axis=-1)
B1 = blur_effect(image, channel_axis=-1) # default h_size is 11
B2 = blur_effect(image, h_size=30, channel_axis=-1)
assert 0 <= B0 < 1
assert B0 > B1 > B2
| 22,302
|
async def test_report_state_instance(hass, aioclient_mock):
"""Test proactive state reports with instance."""
aioclient_mock.post(TEST_URL, text="", status=202)
hass.states.async_set(
"fan.test_fan",
"off",
{
"friendly_name": "Test fan",
"supported_features": 15,
"oscillating": False,
"preset_mode": None,
"preset_modes": ["auto", "smart"],
"percentage": None,
},
)
await state_report.async_enable_proactive_mode(hass, DEFAULT_CONFIG)
hass.states.async_set(
"fan.test_fan",
"on",
{
"friendly_name": "Test fan",
"supported_features": 15,
"oscillating": True,
"preset_mode": "smart",
"preset_modes": ["auto", "smart"],
"percentage": 90,
},
)
# To trigger event listener
await hass.async_block_till_done()
assert len(aioclient_mock.mock_calls) == 1
call = aioclient_mock.mock_calls
call_json = call[0][2]
assert call_json["event"]["header"]["namespace"] == "Alexa"
assert call_json["event"]["header"]["name"] == "ChangeReport"
change_reports = call_json["event"]["payload"]["change"]["properties"]
checks = 0
for report in change_reports:
if report["name"] == "toggleState":
assert report["value"] == "ON"
assert report["instance"] == "fan.oscillating"
assert report["namespace"] == "Alexa.ToggleController"
checks += 1
if report["name"] == "mode":
assert report["value"] == "preset_mode.smart"
assert report["instance"] == "fan.preset_mode"
assert report["namespace"] == "Alexa.ModeController"
checks += 1
if report["name"] == "percentage":
assert report["value"] == 90
assert report["namespace"] == "Alexa.PercentageController"
checks += 1
if report["name"] == "powerLevel":
assert report["value"] == 90
assert report["namespace"] == "Alexa.PowerLevelController"
checks += 1
assert checks == 4
assert call_json["event"]["endpoint"]["endpointId"] == "fan#test_fan"
| 22,303
|
def accsum(reports):
"""
Runs accsum, returning a ClassReport (the final section in the report).
"""
report_bytes = subprocess.check_output(
[ACCSUM_BIN] + reports,
stderr=subprocess.STDOUT
)
contents = report_bytes.decode('UTF-8')
return ClassReport.from_accuracy_report(contents)
| 22,304
|
def test_target_status_processing(
vws_client: VWS,
high_quality_image: io.BytesIO,
mock_database: VuforiaDatabase,
) -> None:
"""
An error is given when trying to delete a target which is processing.
"""
runner = CliRunner(mix_stderr=False)
target_id = vws_client.add_target(
name='x',
width=1,
image=high_quality_image,
active_flag=True,
application_metadata=None,
)
commands = [
'delete-target',
'--target-id',
target_id,
'--server-access-key',
mock_database.server_access_key,
'--server-secret-key',
mock_database.server_secret_key,
]
result = runner.invoke(vws_group, commands, catch_exceptions=False)
assert result.exit_code == 1
expected_stderr = (
f'Error: The target "{target_id}" cannot be deleted as it is in the '
'processing state.\n'
)
assert result.stderr == expected_stderr
assert result.stdout == ''
| 22,305
|
def get_sid(token):
"""
Obtain the sid from a given token; returns None on a failed connection or any other error preventing success.
Do not use manually.
"""
r = requests.get(url=str(URL + "app"), headers={'Accept': 'text/plain',
'authorization': token,
'Content-Type': 'application/json;charset=utf-8'})
cookie = r.headers.get('set-cookie')
# If successful, then the cookie was set
if cookie:
# take everything after "connect.sid=" and before the next ";" (the cookie attributes)
return cookie.split("connect.sid=", 1)[1].split(";", 1)[0]
return None
| 22,306
|
def generer_lien(mots, commande="http://www.lextutor.ca/cgi-bin/conc/wwwassocwords.pl?lingo=French&KeyWordFormat=&Maximum=10003&LineWidth=100&Gaps=no_gaps&store_dic=&is_refire=true&Fam_or_Word=&Source=http%3A%2F%2Fwww.lextutor.ca%2Fconc%2Ffr%2F&unframed=true&SearchType=equals&SearchStr={0}&Corpus=Fr_le_monde.txt&ColloSize=&SortType=right&AssocWord=&Associate={1}",contextes=["right","left"]):
"""
Returns a dict of links keyed by context, e.g.
{'right': {'ce': 'right-context link'}, 'left': {'ce': 'left-context link'}}
"""
liens = {}
for mot in mots:
for contexte in contextes:
command = commande.format(quote_plus(mot,encoding="ISO 8859-1"),contexte)
# accumulate per-context entries instead of overwriting them for each word
liens.setdefault(contexte, {})[mot] = command
return liens
| 22,307
|
def ring_forming_scission_grid(zrxn, zma, npoints=(7,)):
""" Build forward WD grid for a ring forming scission reaction
# the following allows for a 2-d grid search in the initial ts_search
# for now try 1-d grid and see if it is effective
"""
# Obtain the scan coordinate
scan_name = ring_forming_scission_scan_coordinate(zrxn, zma)
# Build the grid
npoints1 = npoints[0]
brk_bnd_len = _ts_bnd_len(zma, scan_name)
if brk_bnd_len is not None:
r1min = brk_bnd_len + 0.1 * phycon.ANG2BOHR
r1max = brk_bnd_len + 0.7 * phycon.ANG2BOHR
else:
r1min = (1.54 + 0.1) * phycon.ANG2BOHR
r1max = (1.54 + 0.7) * phycon.ANG2BOHR
grid1 = numpy.linspace(r1min, r1max, npoints1)
grid = tuple(val.item() for val in grid1)
return grid
| 22,308
|
def parse_proj(lines):
""" parse a project file, looking for section definitions """
section_regex_start = re.compile(
r'\s*([0-9A-F]+) /\* ([^*]+) \*/ = {$', re.I)
section_regex_end = re.compile(r'\s*};$')
children_regex = re.compile(r'\s*([0-9A-F]+) /\* ([^*]+) \*/,', re.I)
children_regex_start = re.compile(r'\s*children = \(')
children_regex_end = re.compile(r'\s*\);')
group_regex = re.compile(r'\s*sourceTree = ([^;]+);')
file_reference_regex = re.compile(
r'\s*([0-9A-F]+) /\* ([^*]+) \*/ = .* ' +
r'path = "?([^;"]+)"?; sourceTree = ([^;]+);',
re.I)
entries = {}
current_section = None
got_children = False
for line in lines:
if current_section:
end = section_regex_end.match(line)
if end:
current_section = None
continue
# look for the children marker, or append to children
if got_children:
if children_regex_end.match(line):
got_children = False
else:
child_match = children_regex.match(line)
if child_match:
id = child_match.groups()[0]
name = child_match.groups()[1]
current_section.add_link(Link(id, name))
elif children_regex_start.match(line):
got_children = True
else:
# no children, try to match a sourceTree = ...; line
group = group_regex.match(line)
if group:
current_section.location = group.groups()[0]
else:
# try for a new section
new_section_matches = section_regex_start.match(line)
if new_section_matches:
id = new_section_matches.groups()[0]
name = new_section_matches.groups()[1]
current_section = Section(id, name)
entries[id] = current_section
else:
# no new section, check for a plain FileReference
file_ref_captures = file_reference_regex.match(line)
if file_ref_captures:
id = file_ref_captures.groups()[0]
name = file_ref_captures.groups()[1]
path = file_ref_captures.groups()[2]
location = file_ref_captures.groups()[3]
entries[id] = FileReference(id, name, path, location)
return entries
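For context, the project.pbxproj lines these regular expressions target look roughly like the hypothetical sample below (the hex IDs and file names are made up); running parse_proj over such lines would produce a Section for the group and a FileReference for the file, keyed by their IDs:

SAMPLE_PBXPROJ_LINES = '''
0A1B2C3D4E5F60718293A4B5 /* Sources */ = {
    isa = PBXGroup;
    children = (
        1122334455667788990011AA /* main.swift */,
    );
    sourceTree = "<group>";
};
1122334455667788990011AA /* main.swift */ = {isa = PBXFileReference; path = "main.swift"; sourceTree = "<group>"; };
'''.splitlines()

# entries = parse_proj(SAMPLE_PBXPROJ_LINES)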
| 22,309
|
def doctest_MongoDataManager_complex_sub_objects():
"""MongoDataManager: Never store objects marked as _p_mongo_sub_object
Let's construct a complex object with several levels of containment.
_p_mongo_doc_object will point to an object that is itself a subobject.
>>> foo = Foo('one')
>>> sup = Super('super')
>>> bar = Bar('bar')
>>> bar._p_mongo_sub_object = True
>>> bar._p_mongo_doc_object = sup
>>> sup.bar = bar
>>> sup._p_mongo_sub_object = True
>>> sup._p_mongo_doc_object = foo
>>> foo.sup = sup
>>> dm.root['one'] = foo
>>> dm.tpc_finish(None)
>>> sorted(conn[DBNAME].collection_names())
[u'mongopersist.tests.test_datamanager.Foo',
u'persistence_root',
u'system.indexes']
Now, save foo first, and then add subobjects
>>> foo = Foo('two')
>>> dm.root['two'] = foo
>>> dm.tpc_finish(None)
>>> sup = Super('second super')
>>> bar = Bar('second bar')
>>> bar._p_mongo_sub_object = True
>>> bar._p_mongo_doc_object = sup
>>> sup.bar = bar
>>> sup._p_mongo_sub_object = True
>>> sup._p_mongo_doc_object = foo
>>> foo.sup = sup
>>> dm.tpc_finish(None)
>>> sorted(conn[DBNAME].collection_names())
[u'mongopersist.tests.test_datamanager.Foo',
u'persistence_root',
u'system.indexes']
>>> dm.root['two'].sup.bar
<Bar second bar>
>>> pprint(list(conn[DBNAME]['mongopersist.tests.test_datamanager.Foo'].
... find({'name': 'one'})))
[{u'_id': ObjectId('...'),
u'name': u'one',
u'sup': {u'_py_persistent_type': u'mongopersist.tests.test_datamanager.Super',
u'bar': {u'_py_persistent_type': u'mongopersist.tests.test_datamanager.Bar',
u'name': u'bar'},
u'name': u'super'}}]
Now, make changes to the subobjects and then commit
>>> foo = dm.root['one']
>>> foo.sup.name = 'new super'
>>> foo.sup.bar.name = 'new bar'
>>> dm.tpc_finish(None)
>>> foo = dm.root['one']
>>> foo.sup
<Super new super>
>>> foo.sup._p_mongo_sub_object
True
>>> foo.sup._p_mongo_doc_object
<Foo one>
>>> foo.sup.bar
<Bar new bar>
>>> foo.sup.bar._p_mongo_sub_object
True
>>> foo.sup.bar._p_mongo_doc_object
<Foo one>
>>> sorted(conn[DBNAME].collection_names())
[u'mongopersist.tests.test_datamanager.Foo',
u'persistence_root',
u'system.indexes']
Even if _p_mongo_doc_object points to a subobject, the subobject does not get
saved to its own collection:
>>> foo.sup.bar._p_mongo_doc_object = foo.sup
>>> foo.sup.bar.name = 'newer bar'
>>> foo.sup.name = 'newer sup'
>>> dm.tpc_finish(None)
>>> sorted(conn[DBNAME].collection_names())
[u'mongopersist.tests.test_datamanager.Foo',
u'persistence_root',
u'system.indexes']
"""
| 22,310
|
def make_daemon():
"""
Daemonize to run in background
"""
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
pid = os.fork()
if pid > 0:
# exit second parent
sys.exit(0)
if STREAM:
# Create sillystream server
output = sillystream.server()
# Start the server
output.start_thread()
else:
output = open("/dev/null", 'wb')
sys.stdout = output
sys.stderr = output
| 22,311
|
def print_exception(msg=None):
"""Print exceptions with/without traceback."""
manually_set_trace, show_trace = _get_manual_env_var("XONSH_SHOW_TRACEBACK", False)
manually_set_logfile, log_file = _get_manual_env_var("XONSH_TRACEBACK_LOGFILE")
if (not manually_set_trace) and (not manually_set_logfile):
# Notify about the traceback output possibility if neither of
# the two options have been manually set
sys.stderr.write(
"xonsh: For full traceback set: " "$XONSH_SHOW_TRACEBACK = True\n"
)
# convert show_trace to bool if necessary
if not is_bool(show_trace):
show_trace = to_bool(show_trace)
# if the trace option has been set, print all traceback info to stderr
if show_trace:
# notify user about XONSH_TRACEBACK_LOGFILE if it has
# not been set manually
if not manually_set_logfile:
sys.stderr.write(
"xonsh: To log full traceback to a file set: "
"$XONSH_TRACEBACK_LOGFILE = <filename>\n"
)
traceback.print_exc()
# additionally, check if a file for traceback logging has been
# specified and convert to a proper option if needed
log_file = to_logfile_opt(log_file)
if log_file:
# if log_file is neither '' nor None, append the
# traceback log there as well
with open(os.path.abspath(log_file), "a") as f:
traceback.print_exc(file=f)
if not show_trace:
# if traceback output is disabled, print the exception's
# error message on stderr.
display_error_message()
if msg:
msg = msg if msg.endswith("\n") else msg + "\n"
sys.stderr.write(msg)
| 22,312
|
def mot_decode(heat,
wh,
reg=None,
cat_spec_wh=False,
K=100):
"""
Decode multi-object detection results.
"""
batch, cat, height, width = heat.size() # N×C×H×W
# heat = torch.sigmoid(heat)
# perform nms on heatmaps
heat = _nms(heat) # by default applies 3x3 max pooling; the number of candidate detections becomes 1/9 of the feature map
scores, inds, clses, ys, xs = _topk(scores=heat, K=K)
if reg is not None:
reg = _tranpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
wh = _tranpose_and_gather_feat(wh, inds)
if cat_spec_wh:
wh = wh.view(batch, K, cat, 2)
clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
wh = wh.gather(2, clses_ind).view(batch, K, 2)
else:
wh = wh.view(batch, K, 2)
clses = clses.view(batch, K, 1).float() # target classes
scores = scores.view(batch, K, 1)
bboxes = torch.cat([xs - wh[..., 0:1] / 2, # left
ys - wh[..., 1:2] / 2, # top
xs + wh[..., 0:1] / 2, # right
ys + wh[..., 1:2] / 2], # bottom
dim=2)
detections = torch.cat([bboxes, scores, clses], dim=2)
return detections, inds
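A shape-level sketch of how this decoder is typically driven (CenterNet/FairMOT-style head outputs are assumed; the shapes below are illustrative, not taken from the snippet):

# heat: N x C x H x W class heatmap, wh: N x 2 x H x W box sizes,
# reg: N x 2 x H x W sub-pixel centre offsets
# detections, inds = mot_decode(heat, wh, reg=reg, K=128)
# detections: N x K x 6 -> (x1, y1, x2, y2, score, class); inds: N x K flat heatmap indices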
| 22,313
|
def create_database(path, host='localhost', port=8080):
"""For Tests purpose"""
app.config.setdefault('host', host)
app.config.setdefault('port', port)
global db
db = Database(path)
| 22,314
|
def raise_for_status_with_detail(resp):
"""
wrap raise_for_status and attempt to give a detailed reason for an API failure;
re-raise HTTPError for the normal flow
:param resp: python requests response
:return:
"""
try:
resp.raise_for_status()
except HTTPError as http_exception:
try:
log_message(msg=resp.json(), log=get_logger('http_status'), level=logging.ERROR)
except Exception as e:
pass # resp.json() failed
finally:
raise http_exception
| 22,315
|
def realign_exons(args):
"""Entry point."""
memlim = float(args["memlim"]) if args["memlim"] != "Auto" else None
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE" # otherwise it could crash
# read gene-related data
bed_data = read_bed(
args["gene"], args["bdb_bed_file"]
) # extract gene data from bed file
# bed_exons_num = len(bed_data["blocks"])
# parse gene bed-track: get exon coordinates, sequences and splice sites
exon_coordinates, exon_sequences, s_sites, exon_flanks = get_exons(
bed_data, args["tDB"]
)
# read chain IDs list:
chains = (
[int(x) for x in args["chains"].split(",") if x != ""]
if args["chains"] != "region"
else []
)
# get path to the chain file (most likely just arg)
chain_file = (
find_chain_file(args["ref"], args["bdb_chain_file"])
if args["chains"] != "region"
else args["bdb_chain_file"]
)
# check if there are stop codons in reference -> we either mask them or halt execution
exon_sequences, sec_codons = check_ref_exons(exon_sequences, args["mask_stops"])
# CESAR requires some formatting of the reference exon sequences:
prepared_exons = prepare_exons_for_cesar(exon_sequences)
# read chain-related data
query_sequences, query_loci, inverts = {}, {}, {}
gene_range = "{0}:{1}-{2}".format(
bed_data["chrom"], bed_data["chromStart"], bed_data["chromEnd"]
)
chain_exon_gap, chain_exon_class, chain_exon_exp_reg, chain_missed = {}, {}, {}, {}
aa_block_sat_chain = {} # one of dicts to mark exceptionally good exon predictions
fragments_data = [] # required for fragmented genomes
chains_in_input = True
force_include_regions_opt_v = (
[]
) # regions we force to include in the optimized version
verbose("Reading query regions")
chain_to_predefined_regions = read_predefined_regions(args["predefined_regions"])
# if chains and args["fragments"]:
# the normal branch: call CESAR vs 1+ query sequences
for chain_id in chains: # in region mode this part is skipped
verbose(f"\nLoading chain {chain_id}") # only one place where I need chain data
# extract chain and coordinates of locus; extract sequence from query genome
chain_str = get_chain(chain_file, chain_id)
verbose(f"Chain {chain_id} extracted")
chain_header = chain_str.split("\n")[0].split()
# most likely we need only the chain part that intersects the gene
# and skip the rest:
verbose("Cutting the chain...")
search_locus, subch_locus, chain_data = chain_cut(
chain_str, gene_range, args["gene_flank"], args["extra_flank"]
)
# chain data: t_strand, t_size, q_strand, q_size
chain_qStrand = chain_data[2]
chain_qSize = chain_data[3]
verbose("Chain cut is done.")
# this call of make_query_seq is actually for extracting
# query sequence for CESAR:
verbose("Extracting query sequence...")
query_seq, directed = make_query_seq(
chain_id, search_locus, args["qDB"], chain_qStrand, bed_data["strand"]
)
verbose("Query sequence extracted")
q_seq_len = len(query_seq)
# this is extended query seq (larger locus) for assembly gaps search only!
# We do not call CESAR for this _query_seq_ext sequence
_query_seq_ext, directed = make_query_seq(
chain_id, subch_locus, args["qDB"], chain_qStrand, bed_data["strand"]
)
if args["ic"]: # invert complement required for some reason
query_seq = invert_complement(query_seq)
_query_seq_ext = invert_complement(_query_seq_ext)
if len(query_seq) > args["query_len_limit"] > 0:
# query length limit exceeded:
verbose(f"Skipping chain {chain_id} - too long")
continue
# extract gaps and block coordinates
gap_coordinates = find_gaps(
_query_seq_ext, subch_locus, args["gap_size"], directed
)
# blocks are [target_start, target_end, query_start, query_end]
subchain_blocks_raw = extract_subchain(chain_str, subch_locus)
# swap blocks in correct orientation and fill interblock ranges
subchain_blocks = orient_blocks(subchain_blocks_raw, chain_data)
# intersect exon: chain blocks and chain blocks: gaps, get exons not covered by chain
block_intersection_out = intersect_exons_blocks_gaps(
exon_coordinates,
subchain_blocks,
gap_coordinates,
args["exon_flank"],
args["uhq_flank"],
)
# parse block_intersection_out -> there are many different data:
exon_blocks = block_intersection_out[0]
# flanked_exon_blocks = block_intersection_out[1]
blocks_gaps = block_intersection_out[2]
missing_exons = block_intersection_out[3]
# exon_flank_coordinates = block_intersection_out[4]
margin_cases = block_intersection_out[5]
aa_block_sat = block_intersection_out[6]
verbose(f"AA sat: {aa_block_sat}")
# classify exons, get expected regions
exon_class, exon_exp_region = classify_predict_exons(
exon_blocks, subchain_blocks, margin_cases
)
relative_regions_for_chain = get_relative_coordinates(
exon_exp_region, search_locus, directed
)
relative_regions_for_chain_extended = extend_rel_regions(
relative_regions_for_chain, len(query_seq)
)
relative_regions_formatted = fmt_incl_reg(
chain_id, relative_regions_for_chain_extended
)
force_include_regions_opt_v.extend(relative_regions_formatted)
# if there are predefined regions (by lastz for example)
extra_regions = chain_to_predefined_regions[chain_id]
force_include_regions_opt_v.extend(extra_regions)
# check whether any exon intersects assembly gap in the corresponding region
exon_gap = find_exons_gaps(
exon_coordinates, exon_blocks, subchain_blocks, blocks_gaps, gap_coordinates
)
# possibly there are multiple chains
# save data for this particular chain:
chain_exon_gap[chain_id] = exon_gap
chain_exon_class[chain_id] = exon_class
chain_exon_exp_reg[chain_id] = exon_exp_region
chain_missed[chain_id] = missing_exons
query_sequences[chain_id] = query_seq
query_loci[chain_id] = search_locus
inverts[chain_id] = directed
aa_block_sat_chain[chain_id] = aa_block_sat
# some features that we need in case of fragmented gene
t_start = int(chain_header[5])
t_end = int(chain_header[6])
q_chrom, q_start_end_str = search_locus.split(":")
q_start_end_tup = q_start_end_str.split("-")
q_start = int(q_start_end_tup[0])
q_end = int(q_start_end_tup[1])
q_strand = True if chain_header[9] == "+" else False
# chain_qSize -> query chromosome/scaffold length
# q_seq_len -> query sequence (that comes to CESAR) length
fragment_data = (
chain_id,
q_chrom,
q_strand,
q_start,
q_end,
t_start,
t_end,
q_seq_len,
chain_qSize,
)
fragments_data.append(fragment_data)
if not chains:
# it is possible in the case of "region" mode
# possible if CESAR wrapper is used as a standalone script
# a region given directly
verbose("Working in the region mode")
chains_in_input = False
region_chrom, region_start_end = chain_file.replace(",", "").split(":")
region_start, region_end = [int(x) for x in region_start_end.split("-")]
if region_start < region_end:
region_strand = True
else:
region_strand = False
region_start, region_end = region_end, region_start
search_locus = "{}:{}-{}".format(region_chrom, region_start, region_end)
query_seq, directed = make_query_seq(
"-1", search_locus, args["qDB"], region_strand, bed_data["strand"]
)
# mimic the chain parsing result:
chain_exon_gap = None
chain_exon_class[-1] = {}
chain_exon_exp_reg[-1] = {}
chain_missed[-1] = {}
query_sequences[-1] = query_seq
query_loci[-1] = search_locus
inverts[-1] = directed
if chains_in_input and args["fragments"]:
# sort chains, get proper chain_id sorting
if bed_data["strand"]: # gene is + -> sort directly
fragments_data = sorted(fragments_data, key=lambda x: x[5])
else: # gene is - -> reverse sort of chains
fragments_data = sorted(fragments_data, key=lambda x: x[6], reverse=True)
# merge query feat dictionaries
exon_gap = merge_dicts(chain_exon_gap.values())
exon_class = merge_dicts(chain_exon_class.values())
exon_exp_region = merge_dicts(chain_exon_exp_reg.values())
aa_block_sat = merge_dicts(aa_block_sat_chain.values())
missing_exons = intersect_lists(chain_missed.values())
query_seq_chunks = []
for elem in fragments_data:
# stitch query seq in a proper order; elem[0] -> chain_id
query_seq_chunks.append(query_sequences[elem[0]])
query_seq = "".join(query_seq_chunks)
# remove chain_id data from dicts
chain_ids = list(chain_exon_gap.keys())
for chain_id in chain_ids:
del chain_exon_gap[chain_id]
del chain_exon_class[chain_id]
del chain_exon_exp_reg[chain_id]
del aa_block_sat_chain[chain_id]
del chain_missed[chain_id]
del query_sequences[chain_id]
# load new values
chain_exon_gap[FRAGMENT] = exon_gap
chain_exon_class[FRAGMENT] = exon_class
chain_exon_exp_reg[FRAGMENT] = exon_exp_region
aa_block_sat_chain[FRAGMENT] = aa_block_sat
chain_missed[FRAGMENT] = missing_exons
query_sequences[FRAGMENT] = query_seq
inverts[FRAGMENT] = None
# some queries might be skipped -> we can eventually skip all of them
# which means that there is nothing to call CESAR on
# then it's better to halt the execution
die("No queries left") if len(query_sequences.keys()) == 0 else None
# predict the amount of memory
qlength_max = max([len(v) for v in query_sequences.values()])
memory = memory_check(bed_data["block_sizes"], qlength_max, args["estimate_memory"])
verbose(f"\nExpecting a memory consumption of: {memory} GB")
# arrange input for CESAR and save it
# is_temp is True if /dev/shm is in use; flag to remove that
cesar_in_filename, is_temp = make_in_filename(args["cesar_input_save_to"])
# create temp file for force-include regions, if required
force_include_reg_file = make_reg_file(
args["opt_cesar"],
force_include_regions_opt_v,
predefined_file=args["opt_regions_save"],
)
# for LASTZ-optimised CESAR we may also add +/-10 bp exon flanks
exon_flanks_file = make_exon_flanks_file(
args["opt_cesar"], exon_flanks, predefined_file=args["exon_flanks_file"]
)
# check whether some reference splice sites are non-canonical
# doesn't apply to single-exon genes
ref_ss_data = analyse_ref_ss(s_sites) if len(exon_sequences) != 1 else None
# there are two sources of U12 introns data:
# 1) U12 file provided at the very beginning
# 2) If a splice site in the reference is non-canonical -> we also treat this as U12
# even if this splice site is not in the U12 data
append_u12(args["u12"], args["gene"], ref_ss_data)
make_cesar_in(prepared_exons, query_sequences, cesar_in_filename, ref_ss_data)
# run cesar itself
cesar_bin = args.get("cesar_binary") if args.get("cesar_binary") else DEFAULT_CESAR
if not args["cesar_output"]:
cesar_raw_out = run_cesar(
cesar_in_filename,
memory,
is_temp,
memlim,
cesar_bin,
force_include_reg_file,
exon_flanks_file,
args["predefined_regions"],
args["opt_precompute"],
)
if args["opt_precompute"]:
print(cesar_raw_out)
exit(0)
else: # very specific case, load already saved CESAR output
with open(args["cesar_output"], "r") as f:
cesar_raw_out = f.read()
# if force_include_reg_file:
# os.remove(force_include_reg_file) if os.path.isfile(force_include_reg_file) else None
os.remove(cesar_in_filename) if is_temp else None # wipe temp if temp
# save raw CESAR output and close if required
save(cesar_raw_out, args["raw_output"], t0) if args["raw_output"] else None
# process the output, extract different features per exon
if args["fragments"]:
# bit more complicated parsing of fragmented output
proc_out = process_cesar_out__fragments(
cesar_raw_out, fragments_data, query_loci, inverts
)
else: # not fragmented: use classic procedure
proc_out = process_cesar_out(cesar_raw_out, query_loci, inverts)
query_exon_sequences = proc_out[0] # sequences of predicted exons in query
ref_exon_sequences_ali = proc_out[1] # reference sequences -> aligned
pIDs = proc_out[2] # nucleotide %IDs
pBl = proc_out[3] # BLOSUM scores for protein sequences
query_coords = proc_out[4] # genomic coordinates in the query
exon_num_corr = proc_out[5] # in case of intron del: ref/que correspondence
prot_s = proc_out[6] # protein sequences in query
codon_s = proc_out[7]
aa_cesar_sat = proc_out[8] # says whether an exon has outstanding quality
aa_eq_len = aa_eq_len_check(exon_sequences, query_exon_sequences)
if chains:
# if and only if there are chains, it's possible to extract
# another check for exceptional exons
chain_exon_class = get_a_plus(
chain_exon_class, aa_cesar_sat, aa_block_sat_chain, aa_eq_len
)
# time to arrange all these data altogether
final_output, chain_ex_inc = arrange_output(
args["gene"],
ref_exon_sequences_ali,
query_exon_sequences,
pIDs,
pBl,
query_coords,
chain_exon_gap,
chain_exon_class,
chain_exon_exp_reg,
exon_num_corr,
chain_missed,
args["paral"],
)
exon_to_len = {k + 1: len(v) for k, v in exon_sequences.items()}
verbose(f"Exon lens are: {exon_to_len}")
# this is for inact mutations check:
chain_to_exon_to_properties = (
chain_exon_class,
chain_exon_gap,
pIDs,
pBl,
chain_missed,
chain_ex_inc,
exon_to_len,
)
verbose(f"Chain to exon to properties = {chain_to_exon_to_properties}")
if args["check_loss"]: # call inact mutations scanner,
loss_report = inact_mut_check(
cesar_raw_out,
v=VERBOSE,
gene=args["gene"],
ex_prop=chain_to_exon_to_properties,
ref_ss=ref_ss_data,
sec_codons=sec_codons,
no_fpi=args["no_fpi"],
)
else: # do not call inact mut scanner
loss_report = None
# save protein/codon ali and text output
save_prot(args["gene"], prot_s, args["prot_out"])
save_codons(args["gene"], codon_s, args["codon_out"])
save(final_output, args["output"], t0, loss_report)
sys.exit(0)
| 22,316
|
def get_descriptors(smiles):
""" Use RDkit to get molecular descriptors for the given smiles string """
mol = Chem.MolFromSmiles(smiles)
return pd.Series({name: func(mol) for name, func in descList.items()})
| 22,317
|
def mro(*bases):
"""Calculate the Method Resolution Order of bases using the C3 algorithm.
Suppose you intended creating a class K with the given base classes. This
function returns the MRO which K would have, *excluding* K itself (since
it doesn't yet exist), as if you had actually created the class.
Another way of looking at this, if you pass a single class K, this will
return the linearization of K (the MRO of K, *including* itself).
"""
seqs = [list(C.__mro__) for C in bases] + [list(bases)]
res = []
while True:
non_empty = list(filter(None, seqs))
if not non_empty:
# Nothing left to process, we're done.
return tuple(res)
for seq in non_empty: # Find merge candidates among seq heads.
candidate = seq[0]
not_head = [s for s in non_empty if candidate in s[1:]]
if not_head:
# Reject the candidate.
candidate = None
else:
break
if not candidate:
raise TypeError("inconsistent hierarchy, no C3 MRO is possible")
res.append(candidate)
for seq in non_empty:
# Remove candidate.
if seq[0] == candidate:
del seq[0]
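A quick illustration of the two uses described in the docstring, using a small diamond hierarchy:

class A: pass
class B(A): pass
class C(A): pass

# The MRO a hypothetical class K(B, C) would have, minus K itself:
assert mro(B, C) == (B, C, A, object)
# Linearization of a single class, including itself:
assert mro(B) == (B, A, object)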
| 22,318
|
def ends_with(s, suffix, ignore_case=False):
"""
suffix: str, list, or tuple
"""
if is_str(suffix):
suffix = [suffix]
suffix = list(suffix)
if ignore_case:
for idx, suf in enumerate(suffix):
suffix[idx] = to_lowercase(suf)
s = to_lowercase(s)
suffix = tuple(suffix)
return s.endswith(suffix)
| 22,319
|
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
# worth studying
version1 = [int(val) for val in version1.split(".")]
version2 = [int(val) for val in version2.split(".")]
if len(version1) > len(version2):
min_version = version2
max_version = version1
else:
min_version = version1
max_version = version2
# Compare up to min character
for i in range(len(min_version)):
if version1[i] > version2[i]:
return 1
elif version1[i] < version2[i]:
return -1
if len(version1) == len(version2):
return 0
for j in range(i + 1, len(max_version)):
if max_version[j] > 0:
return 1 if max_version == version1 else - 1
return 0
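A few usage checks consistent with the algorithm above (it is written as a method, but since self is unused it can be called directly with None for illustration):

assert compareVersion(None, "1.01", "1.001") == 0   # segments compare as integers
assert compareVersion(None, "1.0", "1.0.0") == 0    # trailing zero segments compare equal
assert compareVersion(None, "0.1", "1.1") == -1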
| 22,320
|
def test_set_device_id():
""" test_set_device_id """
with pytest.raises(TypeError):
context.set_context(device_id=1)
context.set_context(device_id="cpu")
assert context.get_context("device_id") == 1
| 22,321
|
def imagined_reward_data(data,col,book,network):
"""
This function is not a standalone function. An excel spreadsheet must
be created with the appropriate sheet name. This exists to make the code
in other functions more readable, since there was a lot of repetition.
"""
imagined_sheet = book['Imagined Rewards']
imagined_sheet.cell(row=1, column=col+1).value = col-1
row_count = 2
for senator in network:
imagined_sheet.cell(row=row_count,column=col+1).value = data[senator]
row_count+=1
| 22,322
|
def create_capital():
""" Use fy and p-t-d capital sets and ref sets to make capital datasets """
adopted = glob.glob(conf['temp_data_dir'] \
+ "/FY*_ADOPT_CIP_BUDGET.xlsx")
proposed = glob.glob(conf['temp_data_dir'] \
+ "/FY*_PROP_CIP_BUDGET.xlsx")
todate = glob.glob(conf['temp_data_dir'] \
+ "/FY*_2DATE_CIP_BUDGET.xlsx")
budgets = adopted + proposed + todate
fund_ref = pd.read_csv(prod_path \
+ "/budget_reference_funds_datasd_v1.csv",dtype={'fund_number':str})
proj_ref = pd.read_csv(prod_path \
+ "/budget_reference_projects_datasd_v1.csv",dtype={'project_number':str})
accounts_ref = pd.read_csv(prod_path \
+ "/budget_reference_accounts_datasd_v1.csv",dtype={'account_number':str})
for count, budget in enumerate(budgets):
fy_pattern = re.compile(r'([0-9][0-9])')
this_fy = fy_pattern.findall(budget)
if "2DATE" in budget:
out_fname = prod_path \
+ "/budget_capital_ptd_FY{}_datasd_v1.csv".format(this_fy[0])
elif "PROP" in budget:
out_fname = prod_path \
+ "/budget_capital_FY{}_prop_datasd_v1.csv".format(this_fy[0])
else:
out_fname = prod_path \
+ "/budget_capital_FY{}_datasd_v1.csv".format(this_fy[0])
df = pd.read_excel(budget)
df = df.iloc[:, [0,1,2,3]]
df.columns = ['amount','code','project_number','object_number']
df['code'] = df['code'].astype(str)
df['project_number'] = df['project_number'].astype(str)
df['object_number'] = df['object_number'].astype(str)
df = pd.merge(df,
fund_ref[['fund_type','fund_number']],
left_on='code',
right_on='fund_number',
how='left')
df = pd.merge(df,
proj_ref[['asset_owning_dept','project_name','project_number']],
left_on='project_number',
right_on='project_number',
how='left')
df = pd.merge(df,
accounts_ref[['account','account_number']],
left_on='object_number',
right_on='account_number',
how='left')
df = df[['amount',
'fund_type',
'fund_number',
'asset_owning_dept',
'project_name',
'project_number',
'account',
'account_number']]
general.pos_write_csv(df,out_fname)
return "Successfully created capital budgets"
| 22,323
|
def gen_gt_from_quadrilaterals(gt_quadrilaterals, input_gt_class_ids, image_shape, width_stride, box_min_size=3):
"""
Generate fixed-width gt boxes from gt quadrilaterals.
:param gt_quadrilaterals: GT quadrilateral coordinates, [n,(x1,y1,x2,y2,x3,y3,x4,y4)]
:param input_gt_class_ids: GT quadrilateral class ids, usually just 1; [n]
:param image_shape:
:param width_stride: stride of the split, usually 16
:param box_min_size: minimum size of the GT boxes after splitting
:return:
gt_boxes:[m,(y1,x1,y2,x2)]
gt_class_ids: [m]
"""
h, w = list(image_shape)[:2]
x_array = np.arange(0, w + 1, width_stride, np.float32) # x coordinates at fixed-width intervals
# min and max x of each quadrilateral
x_min_np = np.min(gt_quadrilaterals[:, ::2], axis=1)
x_max_np = np.max(gt_quadrilaterals[:, ::2], axis=1)
gt_boxes = []
gt_class_ids = []
for i in np.arange(len(gt_quadrilaterals)):
xs = get_xs_in_range(x_array, x_min_np[i], x_max_np[i]) # get the x coordinates that fall inside the quadrilateral
ys_min, ys_max = get_min_max_y(gt_quadrilaterals[i], xs)
# print("xs:{}".format(xs))
# generate fixed-width gt boxes for each quadrilateral
for j in range(len(xs) - 1):
x1, x2 = xs[j], xs[j + 1]
y1, y2 = np.min(ys_min[j:j + 2]), np.max(ys_max[j:j + 2])
gt_boxes.append([y1, x1, y2, x2])
gt_class_ids.append(input_gt_class_ids[i])
gt_boxes = np.reshape(np.array(gt_boxes), (-1, 4))
gt_class_ids = np.reshape(np.array(gt_class_ids), (-1,))
# filter out boxes whose height is too small
height = gt_boxes[:, 2] - gt_boxes[:, 0]
width = gt_boxes[:, 3] - gt_boxes[:, 1]
indices = np.where(np.logical_and(height >= 8, width >= 2))
return gt_boxes[indices], gt_class_ids[indices]
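A small worked illustration of the splitting idea (using a plain rectangle and ignoring the get_xs_in_range / get_min_max_y helpers for simplicity):

import numpy as np

# hypothetical rectangle from (0, 10) to (48, 30), width_stride = 16
xs = np.arange(0, 48 + 1, 16)                                   # -> [0, 16, 32, 48]
boxes = [[10, int(xs[j]), 30, int(xs[j + 1])] for j in range(len(xs) - 1)]
# -> [[10, 0, 30, 16], [10, 16, 30, 32], [10, 32, 30, 48]], i.e. three fixed-width gt boxes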
| 22,324
|
def monotonic(l: list):
"""Return True is list elements are monotonically increasing or decreasing.
>>> monotonic([1, 2, 4, 20])
True
>>> monotonic([1, 20, 4, 10])
False
>>> monotonic([4, 1, 0, -10])
True
"""
#[SOLUTION]
if l == sorted(l) or l == sorted(l, reverse=True):
return True
return False
| 22,325
|
def ploterror(ErrorArray,PSyndrome,paths,vertices,CorrectionArray):
"""Given an error array, this will graphically plot which qubits have error"""
height = len(ErrorArray[:,0,0])
width = len(ErrorArray[0,0,:])
scale = 100
radius = 5
image = Image.new('RGB', (width*scale, height*scale ))
draw = ImageDraw.Draw(image)
draw.rectangle((0,0, width*scale-1,height*scale-1), outline = 'white')
l=3
#draw qubits
for i in range(len(ErrorArray[:,0,0])):
for j in range(2):
for k in range(len(ErrorArray[0,0,:])):
#draw qubits with error
if ErrorArray[i,j,k] == -1:
if j ==0:#vertical lines
draw.ellipse(((k+0.75)*scale-radius,(i+0.25)*scale-radius,(k+0.75)*scale+radius,(i+0.25)*scale+radius), fill = 'red', outline ='red')
draw.line(((k+0.75)*scale-l,(i+0.25-0.5)*scale,(k+0.75)*scale-l,(i+0.25+0.5)*scale), fill = 'red',width = 2)
if i == 0 :
draw.line(((k+0.75)*scale-l,height*scale,(k+0.75)*scale-l,height*scale-0.25*scale), fill = 'red',width = 2)
else:#horizontal lines
draw.ellipse(((k+0.25)*scale-radius,(i+0.75)*scale-radius,(k+0.25)*scale+radius,(i+0.75)*scale+radius), fill = 'red', outline ='red')
draw.line(((k+0.25-0.5)*scale,(i+0.75)*scale-l,(k+0.25+0.5)*scale,(i+0.75)*scale-l), fill = 'red',width = 2)
if k == 0 :
draw.line((width*scale,(i+0.75)*scale-l,width*scale - 0.2*scale,(i+0.75)*scale-l), fill = 'red',width = 2)
#draw qubits free of error
else:
if j ==0:
draw.ellipse(((k+0.75)*scale-radius,(i+0.25)*scale-radius,(k+0.75)*scale+radius,(i+0.25)*scale+radius), fill = 'blue', outline ='blue')
else:
draw.ellipse(((k+0.25)*scale-radius,(i+0.75)*scale-radius,(k+0.25)*scale+radius,(i+0.75)*scale+radius), fill = 'blue', outline ='blue')
#draw line between matching points
# doesn't yet draw matches over boundaries correctly
for i in paths:
list2=[]
for x in i:
for y in x:
list2.append((y+(0.75))*scale)
# test if the distance moved is larger than 1 -> jumps over the edge
#need to run for all pairs of points
list3=list(list2)
part1=[]
part2=[]
colour = randcolour()
#draw corrections
for i in range(len(CorrectionArray[:,0,0])):
for j in range(2):
for k in range(len(CorrectionArray[0,0,:])):
#draw qubits with error
if CorrectionArray[i,j,k] == -1:
if j ==0:#vertical lines
draw.line(((k+0.75)*scale+l,(i+0.25-0.5)*scale,(k+0.75)*scale+l,(i+0.25+0.5)*scale), fill = 'yellow',width = 2)
if i == 0 :
draw.line(((k+0.75)*scale+l,height*scale,(k+0.75)*scale+l,height*scale-0.25*scale), fill = 'yellow',width = 2)
else:#horizontal lines
draw.line(((k+0.25-0.5)*scale,(i+0.75)*scale+l,(k+0.25+0.5)*scale,(i+0.75)*scale+l), fill = 'yellow',width = 2)
if k == 0 :
draw.line((width*scale,(i+0.75)*scale+l,width*scale - 0.2*scale,(i+0.75)*scale+l), fill = 'yellow',width = 2)
#draw stabilizers
for i in range(len(PSyndrome[:,0])):
for j in range(len(PSyndrome[0,:])):
if PSyndrome[i,j] == -1:
draw.rectangle(((j+0.75)*scale-radius,(i+0.75)*scale-radius,(j+0.75)*scale+radius,(i+0.75)*scale+radius), fill = 'green', outline = 'green')
#draw.text(((j+0.75)*scale,(i+0.75)*scale),'-')
else:
draw.text(((j+0.75)*scale,(i+0.75)*scale),'+')
draw.text(((j+0.6)*scale,(i+0.85)*scale),'('+str(i)+','+str(j)+')')
image.save('graphicaloutput.png')
| 22,326
|
def get_region_dimm_list(region):
"""
returns list of pmem dimms associated with pmem region
"""
name = 'get_region_dimm_list()'
tic = time.perf_counter()
global ndctl
dimm_list = []
# if DEBUG: print("DEBUG: Function:", __name__, "Region:", region )
# if VERBOSE: print(' getting:', __name__, end="...")
for r in range(len(ndctl['regions'])):
# if this region matches arg, get DIMM mappings
if ndctl['regions'][r]['dev'] == region:
for d in range(len(ndctl['regions'][r]['mappings'])):
if DEBUG: print(' ndctl[regions][r]mappings', ndctl['regions'][r]['mappings'][d]['dimm'])
dimm_list.append(ndctl['regions'][r]['mappings'][d]['dimm'])
continue
# if VERBOSE: print('Done')
# if DEBUG: print("Debug:", __name__, region, "DIMMS", dimm_list)
toc = time.perf_counter()
delta_t = toc - tic
td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
timers.append(td)
return dimm_list
| 22,327
|
def readme():
"""Get the long description from the README file."""
with open(path.join(project_path, 'README.rst'), encoding='utf-8') as f:
return f.read()
| 22,328
|
def RFR_dict(input_date: str = None, cache: dict = {}) -> dict:
"""
Returns a dict with url and filenames from the EIOPA website based on the
input_date
>>> RFR_dict(datetime(2018,1,1))
{'input_date': datetime.datetime(2018, 1, 1, 0, 0),
'reference_date': '20171231',
'url': 'https://eiopa.europa.eu/Publications/Standards/',
'path_zipfile': '',
'name_zipfile': 'EIOPA_RFR_20171231.zip',
'path_excelfile': '',
'name_excelfile': 'EIOPA_RFR_20171231_Term_Structures.xlsx'}
Args:
input_date: required date
cache: the cache with the data
Returns
The updated cache with the data
"""
cache = RFR_reference_date(input_date, cache)
reference_date = cache["reference_date"]
full_url = eiopa_link(cache["input_date"], data_type="rfr")
cache["url"] = os.path.dirname(full_url)
cache["name_zipfile"] = os.path.basename(full_url)
cache["name_excelfile"] = (
"EIOPA_RFR_" + reference_date + "_Term_Structures" + ".xlsx"
)
cache["name_excelfile_spreads"] = (
"EIOPA_RFR_" + reference_date + "_PD_Cod" + ".xlsx"
)
return cache
| 22,329
|
def get_weight(stats):
"""
Return a data point weight for the result.
"""
if stats is None or 'ci_99_a' not in stats or 'ci_99_b' not in stats:
return None
try:
a = stats['ci_99_a']
b = stats['ci_99_b']
if math.isinf(a) or math.isinf(b):
# Infinite interval is due to too few samples --- consider
# weight as missing
return None
return 2 / abs(b - a)
except ZeroDivisionError:
return None
| 22,330
|
def drop_table():
"""as the name implies..."""
print("WARNING: Dropping table")
TBL_SRV.delete_table(TBL_NAME)
| 22,331
|
def get_true_posterior(X: Tensor, y: Tensor) -> (Tensor, Tensor, float, float, float):
"""
Get the parameters of the true posterior of a linear regression model fit to the given data.
Args:
X: The features, of shape (n_samples, n_features).
y: The targets, of shape (n_samples,).
Returns:
mean: The posterior mean, of shape (n_features,).
covar: The posterior covariance, of shape (n_features, n_features).
bias: The posterior bias.
alpha: The precision of the Gaussian prior.
beta: The precision of Gaussian target noise.
"""
br = BayesianRidge()
br.fit(X.numpy(), y.numpy())
mean = torch.from_numpy(br.coef_).float()
covar = torch.from_numpy(br.sigma_).float()
bias = br.intercept_
alpha = br.lambda_
beta = br.alpha_
return mean, covar, bias, alpha, beta
| 22,332
|
def findmax(engine,user,measure,depth):
"""Returns a list of top (user,measure) pairs, sorted by measure, up to a given :depth"""
neighbors = engine.neighbors(user)
d = {v:measure(user,v) for v in neighbors}
ranked = sorted(neighbors,key=lambda v:d[v],reverse=True)
return list((v,d[v]) for v in ranked[:depth])
| 22,333
|
def cart_update(request, pk):
"""
Add/remove a single product (possibly multiple quantities of it) to/from the cart
:param request: Django's HTTP Request object
:param pk: Primary key of the product to be added to the cart
:return: Success message
"""
if request.method == 'GET':
sess = request.session
qty = request.GET.get('qty', False)
if qty:
# Initialize a cart and its qty in session if they don't exist
sess['cart_qty'] = sess.get('cart_qty', 0) + int(qty)
sess['cart'] = sess.get('cart', OrderedDict())
# In case this is an add-to-cart and the product is not already in the cart
new_cart_item = {'qty': 0, 'pk': str(pk)}
# Update cart item quantity of new/existing item
sess['cart'][str(pk)] = sess['cart'].get(str(pk), new_cart_item)
new_qty = sess['cart'][str(pk)]['qty'] + int(qty)
new_qty_above_max = Product.objects.get(pk=pk).quantity < new_qty
# import pdb; pdb.set_trace()
if not new_qty_above_max:
# Sets new quantity to 0 in case quantity has gone negative
sess['cart'][str(pk)]['qty'] = int((abs(new_qty) + new_qty) / 2)
return JsonResponse({'success': True})
return JsonResponse({
'success': False,
'msg': 'Max quantity of this product has already been added.'
})
| 22,334
|
def hyperopt_cli(
config: Union[str, dict],
dataset: str = None,
training_set: str = None,
validation_set: str = None,
test_set: str = None,
training_set_metadata: str = None,
data_format: str = None,
experiment_name: str = "experiment",
model_name: str = "run",
# model_load_path=None,
# model_resume_path=None,
skip_save_training_description: bool = False,
skip_save_training_statistics: bool = False,
skip_save_model: bool = False,
skip_save_progress: bool = False,
skip_save_log: bool = False,
skip_save_processed_input: bool = False,
skip_save_unprocessed_output: bool = False,
skip_save_predictions: bool = False,
skip_save_eval_stats: bool = False,
skip_save_hyperopt_statistics: bool = False,
output_directory: str = "results",
gpus: Union[str, int, List[int]] = None,
gpu_memory_limit: int = None,
allow_parallel_threads: bool = True,
callbacks: List[Callback] = None,
backend: Union[Backend, str] = None,
random_seed: int = default_random_seed,
debug: bool = False,
**kwargs,
):
"""Searches for optimal hyperparameters.
# Inputs
:param config: (Union[str, dict]) in-memory representation of
config or string path to a YAML config file.
:param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing the entire dataset to be used for training.
If it has a split column, it will be used for splitting (0 for train,
1 for validation, 2 for test), otherwise the dataset will be
randomly split.
:param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing training data.
:param validation_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing validation data.
:param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing test data.
:param training_set_metadata: (Union[str, dict], default: `None`)
metadata JSON file or loaded metadata. Intermediate preprocessed
structure containing the mappings of the input
dataset created the first time an input file is used in the same
directory with the same name and a '.meta.json' extension.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param experiment_name: (str, default: `'experiment'`) name for
the experiment.
:param model_name: (str, default: `'run'`) name of the model that is
being used.
:param skip_save_training_description: (bool, default: `False`) disables
saving the description JSON file.
:param skip_save_training_statistics: (bool, default: `False`) disables
saving training statistics JSON file.
:param skip_save_model: (bool, default: `False`) disables
saving model weights and hyperparameters each time the model
improves. By default Ludwig saves model weights after each epoch
the validation metric improves, but if the model is really big
that can be time consuming. If you do not want to keep
the weights and just find out what performance a model can get
with a set of hyperparameters, use this parameter to skip it,
but the model will not be loadable later on and the returned model
will have the weights obtained at the end of training, instead of
the weights of the epoch with the best validation performance.
:param skip_save_progress: (bool, default: `False`) disables saving
progress each epoch. By default Ludwig saves weights and stats
after each epoch for enabling resuming of training, but if
the model is really big that can be time consuming and will uses
twice as much space, use this parameter to skip it, but training
cannot be resumed later on.
:param skip_save_log: (bool, default: `False`) disables saving
TensorBoard logs. By default Ludwig saves logs for the TensorBoard,
but if it is not needed turning it off can slightly increase the
overall speed.
:param skip_save_processed_input: (bool, default: `False`) if input
dataset is provided it is preprocessed and cached by saving an HDF5
and JSON files to avoid running the preprocessing again. If this
parameter is `False`, the HDF5 and JSON file are not saved.
:param skip_save_unprocessed_output: (bool, default: `False`) by default
predictions and their probabilities are saved in both raw
unprocessed numpy files containing tensors and as postprocessed
CSV files (one for each output feature). If this parameter is True,
only the CSV ones are saved and the numpy ones are skipped.
:param skip_save_predictions: (bool, default: `False`) skips saving test
predictions CSV files
:param skip_save_eval_stats: (bool, default: `False`) skips saving test
statistics JSON file
:param skip_save_hyperopt_statistics: (bool, default: `False`) skips saving
hyperopt stats file.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param gpus: (list, default: `None`) list of GPUs that are available
for training.
:param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
allocate per GPU device.
:param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
to use multithreading parallelism to improve performance at
the cost of determinism.
:param callbacks: (list, default: `None`) a list of
`ludwig.callbacks.Callback` objects that provide hooks into the
Ludwig pipeline.
:param backend: (Union[Backend, str]) `Backend` or string name
of backend to use to execute preprocessing / training steps.
:param random_seed: (int: default: 42) random seed used for weights
initialization, splits and any other random function.
:param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
**kwargs:
# Return
:return: (`None`)
"""
return hyperopt(
config=config,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
# model_load_path=model_load_path,
# model_resume_path=model_resume_path,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
skip_save_hyperopt_statistics=skip_save_hyperopt_statistics,
output_directory=output_directory,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
callbacks=callbacks,
backend=backend,
random_seed=random_seed,
debug=debug,
**kwargs,
)
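A hedged invocation sketch; the file names are placeholders, and the YAML config is expected to carry a hyperopt section as described in the Ludwig docs:

# hyperopt_cli(
#     config="config.yaml",          # placeholder path to a config with a hyperopt section
#     dataset="train.csv",           # placeholder dataset path
#     experiment_name="demo",
#     output_directory="results",
#     skip_save_model=True,
# )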
| 22,335
|
def mlp_gradient(x, y, ws, bs, phis, alpha):
"""
Return the gradients of the cost with respect to the weights and biases of each layer.
:param x: a list of lists representing the x matrix.
:param y: a list of lists of output values.
:param ws: a list of weight matrices (one for each layer)
:param bs: a list of biases (one for each layer)
:param phis: a list of activation functions
:param alpha: regularization coefficient; alpha * W is added to each weight gradient
:return: a tuple (weight gradients, bias gradients), one entry per layer
"""
hs = mlp_feed_forward(x, ws, bs, phis)
D = mlp_propagate_error(x, y, ws, bs, phis, hs)
result_w = []
result_b = []
w_1 = np.dot(np.transpose(x), D[0])
step = np.multiply(alpha, ws[0])
w_1 = np.add(w_1, step)
w_1 = np.ndarray.tolist(w_1)
result_w.append(w_1)
for layers in range(1, len(ws)):
w_2 = np.dot(np.transpose(hs[layers]), D[layers])
w_2 = np.add(w_2, np.multiply(alpha, ws[layers]))
result_w.append(w_2)
for layers in range(len(ws)):
ones = np.ones((len(x), 1))
b_1 = np.dot(np.transpose(ones), D[layers])
result_b.append(b_1)
result_w = np.reshape(result_w, (1, -1))
return result_w, result_b
| 22,336
|
def test_tracks_changes_from_multiple_actions():
"""Tests that it tracks the changes as a result of actions correctly"""
agent = DQN_HER(config)
agent.reset_game()
for ix in range(4):
previous_obs = agent.observation
previous_desired_goal = agent.desired_goal
previous_achieved_goal = agent.achieved_goal
agent.action = ix
agent.conduct_action_in_changeable_goal_envs(agent.action)
assert agent.next_state.shape[0] == 8
assert isinstance(agent.next_state_dict, dict)
assert not all(agent.observation == previous_obs)
assert not all(agent.achieved_goal == previous_achieved_goal)
assert all(agent.desired_goal == previous_desired_goal)
agent.track_changeable_goal_episodes_data()
agent.save_experience()
if agent.done: agent.save_alternative_experience()
agent.state_dict = agent.next_state_dict # this is to set the state for the next iteration
agent.state = agent.next_state
states, actions, rewards, next_states, dones = agent.HER_memory.sample(4)
assert all(states[1] == torch.Tensor([1.0, 1., 1., 1., 0., 0., 0. , 0.]))
assert all(actions == torch.Tensor([[1.], [0.], [3.], [2.]]))
assert all(rewards == torch.Tensor([[-1.], [-1.], [4.], [-1.]]))
assert all(dones == torch.Tensor([[0.], [0.], [1.], [0.]]))
| 22,337
|
def src_one(y: torch.Tensor, D: torch.Tensor, *,
k=None, device=None) -> torch.Tensor:
"""
y = Dx
:param y: image (h*w)
:param D: dict (class_sz, train_im_sz, h*w)
:param k:
:param device: pytorch device
:return: predict tensor(int)
"""
assert y.dim() == 1
assert D.dim() == 3
assert y.size(dim=0) == D.size(dim=2)
class_sz, train_im_sz, n_features = D.shape # n_features=h*w
D_x = D.view(class_sz * train_im_sz, n_features)
D_x = D_x.permute([1, 0]) # D_x(n_features, class_sz*train_im_sz)
# y(n_features)
a = omp(D_x, y, k=k, device=device) # a(class_sz*train_im_sz)
X_i = D.permute([0, 2, 1]) # X_i(class_sz, h*w, train_im_sz)
a_i = a.view(class_sz, train_im_sz, 1) # a(class_sz, train_im_sz, 1)
y_p = torch.matmul(X_i, a_i).view(class_sz, n_features)
e_y = torch.mean((y - y_p) ** 2, dim=1)
return torch.argmin(e_y)
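A minimal calling sketch, assuming an omp implementation is importable and using random tensors purely to show the expected shapes:

import torch

class_sz, train_im_sz, n_features = 10, 5, 28 * 28
D = torch.randn(class_sz, train_im_sz, n_features)   # per-class dictionary of flattened training images
y = torch.randn(n_features)                          # flattened test image
# pred = src_one(y, D, k=5, device="cpu")            # tensor holding the predicted class index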
| 22,338
|
def test_get_compliment_file(dummy_data):
"""Ensure Colony object is able to get the correct complimentary file."""
bam_files, vcf_files = dummy_data
registry = Registry(bam_files, vcf_files)
random_start = bam_files[0]
test_colony = Colony(random_start, registry)
assert os.path.splitext(test_colony.vcf_file)[0] == os.path.splitext(
test_colony.bam_file)[0], ("Function does not provide the "
"correct file.")
| 22,339
|
def average_false_positive_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
"""Calculates the average false positive score. Used for when we have more than 2 classes and want our models'
average performance for each class
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each false positive score for each group/class
"""
if len(np.unique(y_true)) < 3:
return false_positive_score(y_true, y_pred)
else:
overall_score = 0
unique_classes = np.unique(y_true)
for pos_class in unique_classes:
overall_score += false_positive_score(
y_true, y_pred, problem="multiclass", positive_class=pos_class
)
return overall_score / len(unique_classes)
| 22,340
|
def display_text_paragraph(text: str):
"""Displays paragraph of text (e.g. explanation, plot interpretation)
Args:
text (str): Informational text
Returns:
html.Small: Wrapper for text paragraph
"""
return html.P(children=[text],
style={'font-size': '14px',
'white-space': 'pre-wrap'
})
| 22,341
|
def is_numeric(_type) -> bool:
"""
Check if sqlalchemy _type is derived from Numeric
"""
return issubclass(_type.__class__, Numeric)
| 22,342
|
def make_generic_time_plotter(
retrieve_data,
label,
dt,
time_unit=None,
title=None,
unit=None,
):
"""Factory function for creating plotters that can plot data over time.
The function returns a function which can be called whenever the plot should be drawn.
This function takes no arguments and will create a new figure and plot the given data when called.
This function doesn't call plt.show() so this must be done by the calling code.
:param retrieve_data: function that returns data to plot over time when called with no arguments.
:param str label: Label representing the data.
:param number dt: delta time between time steps in data.
:param str time_unit: unit of time, e.g. 'fs'.
:param str title: title of plot.
:param str unit: unit of data, e.g. 'K'.
"""
def plotter():
data = retrieve_data()
t = np.arange(0, len(data)*dt, dt)
fig = plt.figure()
ax = plt.axes()
plt.title(title if title else label)
plt.xlabel(f"Time [{time_unit}]" if time_unit else f"Time")
plt.ylabel(f"{label} [{unit}]" if unit else f"{label}")
ax.plot(t, data, marker = 'o')
return plotter
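A usage sketch based on the documented parameters; the temperature series here is made-up data:

import numpy as np
import matplotlib.pyplot as plt

temperatures = 300.0 + np.random.randn(100)          # fake data, one value per time step
plot_temperature = make_generic_time_plotter(
    retrieve_data=lambda: temperatures,
    label="Temperature",
    dt=0.5,
    time_unit="fs",
    unit="K",
)
plot_temperature()   # builds the figure; showing it is left to the caller
plt.show()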
| 22,343
|
def start(update: Update, context: CallbackContext) -> None:
"""Displays info on how to trigger an error."""
update.effective_message.reply_html(
'Use /bad_command to cause an error.\n'
f'Your chat id is <code>{update.effective_chat.id}</code>.'
)
| 22,344
|
def registration(request):
"""Registration product page
"""
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(user_form.cleaned_data['password'])
# Save the User object
new_user.save()
            return render(request, 'registration/register_done.html', {'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request, 'registration/registr.html', {'user_form': user_form})
| 22,345
|
def error_measure(predictions, labels):
""" calculate sum squared error of predictions """
return np.sum(np.power(predictions - labels, 2)) / (predictions.shape[0])
| 22,346
|
def add3(self, x, y):
"""Celery task: add numbers."""
return x + y
| 22,347
|
def assignMonitoringTo(owner, objName):
""" Assign monitoring to an object """
obj = MonitoredEnviron(getattr(owner, objName))
setattr(owner, objName, obj)
| 22,348
|
def dummy_function(*args, **kwargs):
"""A dummy function that doesn't do anything and just returns.
Used for making functions dryable.
"""
return
| 22,349
|
def zscore(dat, mean, sigma):
"""Calculates zscore of a data point in (or outside of) a dataset
zscore: how many sigmas away is a value from the mean of a dataset?
Parameters
----------
dat: float
Data point
mean: float
Mean of dataset
    sigma: float
Sigma of dataset
"""
zsc = (dat-mean)/sigma
return zsc
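# Worked example (added for illustration): a value of 12.0 in a dataset with mean
# 10.0 and sigma 2.0 sits exactly one standard deviation above the mean, while 7.0
# sits 1.5 standard deviations below it.
assert zscore(12.0, 10.0, 2.0) == 1.0
assert zscore(7.0, 10.0, 2.0) == -1.5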
| 22,350
|
def _create_ast_bilinear_form(terminal_expr, atomic_expr_field,
tests, d_tests,
trials, d_trials,
fields, d_fields, constants,
nderiv, dim, mapping, d_mapping, is_rational_mapping, spaces, mapping_space, mask, tag, is_parallel,
**kwargs):
"""
This function creates the assembly function of a bilinearform
Parameters
----------
terminal_expr : <Matrix>
atomic representation of the bilinear form
atomic_expr_field: <dict>
dict of atomic expressions of fields
tests : <list>
list of tests functions
d_tests : <dict>
dictionary that contains the symbolic spans and basis values of each test function
trials : <list>
list of trial functions
d_trials: <list>
dictionary that contains the symbolic spans and basis values of each trial function
fields : <list>
list of fields
constants : <list>
list of constants
nderiv : int
the order of the bilinear form
dim : int
number of dimension
mapping : <Mapping>
Sympde Mapping object
d_mapping : <dict>
dictionary that contains the symbolic spans and basis values of the mapping
is_rational_mapping : <bool>
takes the value of True if the mapping is rational
spaces : <list>
list of sympde symbolic test and trial spaces
mask : <int|None>
the masked direction in case of boundary domain
tag : <str>
tag to be added to variable names
is_parallel : <bool>
True if the domain is distributed
Returns
-------
node : DefNode
represents the a function definition node that computes the assembly
"""
pads = variables(('pad1, pad2, pad3'), dtype='int')[:dim]
b0s = variables(('b01, b02, b03'), dtype='int')[:dim]
e0s = variables(('e01, e02, e03'), dtype='int')[:dim]
g_quad = GlobalTensorQuadrature(False)
l_quad = LocalTensorQuadrature(False)
quad_order = kwargs.pop('quad_order', None)
# ...........................................................................................
g_span = OrderedDict((u,d_tests[u]['span']) for u in tests)
f_span = OrderedDict((f,d_fields[f]['span']) for f in fields)
if mapping_space:
m_span = OrderedDict((f,d_mapping[f]['span']) for f in d_mapping)
else:
m_span = {}
m_trials = OrderedDict((u,d_trials[u]['multiplicity']) for u in trials)
m_tests = OrderedDict((v,d_tests[v]['multiplicity']) for v in tests)
lengths_trials = OrderedDict((u,LengthDofTrial(u)) for u in trials)
lengths_tests = OrderedDict((v,LengthDofTest(v)) for v in tests)
lengths_outer_tests = OrderedDict((v,LengthOuterDofTest(v)) for v in tests)
lengths_inner_tests = OrderedDict((v,LengthInnerDofTest(v)) for v in tests)
lengths_fields = OrderedDict((f,LengthDofTest(f)) for f in fields)
# ...........................................................................................
quad_length = LengthQuadrature()
el_length = LengthElement()
lengths = [el_length, quad_length]
# ...........................................................................................
geo = GeometryExpressions(mapping, nderiv)
g_coeffs = {f:[MatrixGlobalBasis(i,i) for i in expand([f])] for f in fields}
l_mats = BlockStencilMatrixLocalBasis(trials, tests, terminal_expr, dim, tag)
g_mats = BlockStencilMatrixGlobalBasis(trials, tests, pads, m_tests, terminal_expr, l_mats.tag)
# ...........................................................................................
if quad_order is not None:
ind_quad = index_quad.set_range(stop=Tuple(*quad_order))
else:
ind_quad = index_quad.set_range(stop=quad_length)
ind_element = index_element.set_range(stop=el_length)
if mapping_space:
ind_dof_test = index_dof_test.set_range(stop=Tuple(*[d+1 for d in list(d_mapping.values())[0]['degrees']]))
# ...........................................................................................
eval_mapping = EvalMapping(ind_quad, ind_dof_test, list(d_mapping.values())[0]['global'],
mapping, geo, mapping_space, nderiv, mask, is_rational_mapping)
eval_fields = []
for f in fields:
f_ex = expand([f])
coeffs = [CoefficientBasis(i) for i in f_ex]
l_coeffs = [MatrixLocalBasis(i) for i in f_ex]
ind_dof_test = index_dof_test.set_range(stop=lengths_fields[f]+1)
eval_field = EvalField(atomic_expr_field[f], ind_quad, ind_dof_test, d_fields[f]['global'],
coeffs, l_coeffs, g_coeffs[f], [f], mapping, nderiv, mask)
eval_fields += [eval_field]
g_stmts = []
if mapping_space:
g_stmts.append(eval_mapping)
g_stmts += [*eval_fields]
g_stmts_texpr = []
# sort tests and trials by their space type
test_groups = regroup(tests)
trial_groups = regroup(trials)
# expand every VectorFunction into IndexedVectorFunctions
ex_tests = expand(tests)
ex_trials = expand(trials)
#=========================================================begin kernel======================================================
for _, sub_tests in test_groups:
for _, sub_trials in trial_groups:
tests_indices = [ex_tests.index(i) for i in expand(sub_tests)]
trials_indices = [ex_trials.index(i) for i in expand(sub_trials)]
sub_terminal_expr = terminal_expr[tests_indices,trials_indices]
if is_zero(sub_terminal_expr):
continue
q_basis_tests = OrderedDict((v,d_tests[v]['global']) for v in sub_tests)
q_basis_trials = OrderedDict((u,d_trials[u]['global']) for u in sub_trials)
m_tests = OrderedDict((v,d_tests[v]['multiplicity']) for v in sub_tests)
m_trials = OrderedDict((u,d_trials[u]['multiplicity']) for u in sub_trials)
tests_degree = OrderedDict((v,d_tests[v]['degrees']) for v in sub_tests)
trials_degrees = OrderedDict((u,d_trials[u]['degrees']) for u in sub_trials)
bs = OrderedDict()
es = OrderedDict()
for v in sub_tests:
v_str = str(SymbolicExpr(v))
bs[v] = variables(('b_{v}_1, b_{v}_2, b_{v}_3'.format(v=v_str)), dtype='int')[:dim] if is_parallel else [S.Zero]*dim
es[v] = variables(('e_{v}_1, e_{v}_2, e_{v}_3'.format(v=v_str)), dtype='int')[:dim] if is_parallel else [S.Zero]*dim
if all(a==1 for a in m_tests[sub_tests[0]]+m_trials[sub_trials[0]]):
stmts = []
for v in sub_tests+sub_trials:
stmts += construct_logical_expressions(v, nderiv)
l_sub_mats = BlockStencilMatrixLocalBasis(sub_trials, sub_tests, sub_terminal_expr, dim, l_mats.tag,
tests_degree=tests_degree, trials_degree=trials_degrees,
tests_multiplicity=m_tests, trials_multiplicity=m_trials)
# Instructions needed to retrieve the precomputed values of the
# fields (and their derivatives) at a single quadrature point
stmts += flatten([eval_field.inits for eval_field in eval_fields])
loop = Loop((l_quad, *q_basis_tests.values(), *q_basis_trials.values(), geo), ind_quad, stmts=stmts, mask=mask)
loop = Reduce('+', ComputeKernelExpr(sub_terminal_expr, weights=False), ElementOf(l_sub_mats), loop)
# ... loop over trials
length = Tuple(*[d+1 for d in trials_degrees[sub_trials[0]]])
ind_dof_trial = index_dof_trial.set_range(stop=length)
loop1 = Loop((), ind_dof_trial, [loop])
# ... loop over tests
length = Tuple(*[d+1 for d in tests_degree[sub_tests[0]]])
ends = Tuple(*[d+1-e for d,e in zip(tests_degree[sub_tests[0]], es[sub_tests[0]])])
starts = Tuple(*bs[sub_tests[0]])
ind_dof_test = index_dof_test.set_range(start=starts, stop=ends, length=length)
loop = Loop((), ind_dof_test, [loop1])
# ...
body = (Reset(l_sub_mats), loop)
stmts = Block(body)
g_stmts += [stmts]
if is_parallel:
ln = Tuple(*[d-1 for d in tests_degree[sub_tests[0]]])
start_expr = TensorMax(TensorMul(TensorAdd(TensorMul(ind_element, Tuple(*[-1]*dim)), ln), Tuple(*b0s)),Tuple(*[S.Zero]*dim))
start_expr = TensorAssignExpr(Tuple(*bs[sub_tests[0]]), start_expr)
end_expr = TensorMax(TensorMul(TensorAdd(TensorMul(Tuple(*[-1]*dim), ind_element.length), TensorAdd(ind_element, Tuple(*tests_degree[sub_tests[0]]))), Tuple(*e0s)), Tuple(*[S.Zero]*dim))
end_expr = TensorAssignExpr(Tuple(*es[sub_tests[0]]), end_expr)
g_stmts_texpr += [start_expr, end_expr]
else:
l_stmts = []
mask_inner = [[False, True] for i in range(dim)]
for mask_inner_i in product(*mask_inner):
mask_inner_i = Tuple(*mask_inner_i)
not_mask_inner_i = Tuple(*[not i for i in mask_inner_i])
stmts = []
for v in sub_tests+sub_trials:
stmts += construct_logical_expressions(v, nderiv)
# Instructions needed to retrieve the precomputed values of the
# fields (and their derivatives) at a single quadrature point
stmts += flatten([eval_field.inits for eval_field in eval_fields])
multiplicity = Tuple(*m_tests[sub_tests[0]])
length = Tuple(*[(d+1)%m if T else (d+1)//m for d,m,T in zip(tests_degree[sub_tests[0]], multiplicity, mask_inner_i)])
ind_outer_dof_test = index_outer_dof_test.set_range(stop=length)
outer = Tuple(*[d//m for d,m in zip(tests_degree[sub_tests[0]], multiplicity)])
outer = TensorAdd(TensorMul(ind_outer_dof_test, not_mask_inner_i),TensorMul(outer, mask_inner_i))
l_sub_mats = BlockStencilMatrixLocalBasis(sub_trials, sub_tests, sub_terminal_expr, dim, l_mats.tag, outer=outer,
tests_degree=tests_degree, trials_degree=trials_degrees,
tests_multiplicity=m_tests, trials_multiplicity=m_trials)
loop = Loop((l_quad, *q_basis_tests.values(), *q_basis_trials.values(), geo), ind_quad, stmts=stmts, mask=mask)
loop = Reduce('+', ComputeKernelExpr(sub_terminal_expr, weights=False), ElementOf(l_sub_mats), loop)
# ... loop over trials
length_t = Tuple(*[d+1 for d in trials_degrees[sub_trials[0]]])
ind_dof_trial = index_dof_trial.set_range(stop=length_t)
loop = Loop((), ind_dof_trial, [loop])
rem_length = Tuple(*[(d+1)-(d+1)%m for d,m in zip(tests_degree[sub_tests[0]], multiplicity)])
ind_inner_dof_test = index_inner_dof_test.set_range(stop=multiplicity)
expr1 = TensorAdd(TensorMul(ind_outer_dof_test, multiplicity),ind_inner_dof_test)
expr2 = TensorAdd(rem_length, ind_outer_dof_test)
expr = TensorAssignExpr(index_dof_test, TensorAdd(TensorMul(expr1,not_mask_inner_i),TensorMul(expr2, mask_inner_i)))
loop = Loop((expr,), ind_inner_dof_test, [loop], mask=mask_inner_i)
loop = Loop((), ind_outer_dof_test, [loop])
l_stmts += [loop]
g_stmts += [Reset(l_sub_mats), *l_stmts]
#=========================================================end kernel=========================================================
# ... loop over global elements
loop = Loop((g_quad, *g_span.values(), *m_span.values(), *f_span.values(), *g_stmts_texpr),
ind_element, stmts=g_stmts, mask=mask)
body = [Reduce('+', l_mats, g_mats, loop)]
# ...
args = OrderedDict()
args['tests_basis'] = tuple(d_tests[v]['global'] for v in tests)
args['trial_basis'] = tuple(d_trials[u]['global'] for u in trials)
args['spans'] = g_span.values()
args['quads'] = g_quad
args['tests_degrees'] = lengths_tests
args['trials_degrees'] = lengths_trials
args['quads_degree'] = lengths
args['global_pads'] = pads
args['local_pads'] = Pads(tests, trials)
args['mats'] = [l_mats, g_mats]
if mapping_space:
args['mapping'] = eval_mapping.coeffs
args['mapping_degrees'] = LengthDofTest(list(d_mapping.keys())[0])
args['mapping_basis'] = list(d_mapping.values())[0]['global']
args['mapping_spans'] = list(d_mapping.values())[0]['span']
if fields:
args['f_span'] = f_span.values()
args['f_coeffs'] = flatten(list(g_coeffs.values()))
args['field_basis'] = tuple(d_fields[f]['global'] for f in fields)
args['fields_degrees'] = lengths_fields.values()
args['f_pads'] = [f.pads for f in eval_fields]
fields = tuple(f.base if isinstance(f, IndexedVectorFunction) else f for f in fields)
args['fields'] = tuple(dict.fromkeys(fields))
if constants:
args['constants'] = constants
args['starts'] = b0s
args['ends'] = e0s
local_vars = []
node = DefNode('assembly', args, local_vars, body, 'bilinearform')
return node
| 22,351
|
def get_package_for_module(module):
"""Get package name for a module.
Helper calculates the package name of a module.
Args:
module: Module to get name for. If module is a string, try to find
module in sys.modules.
Returns:
If module contains 'package' attribute, uses that as package name.
Else, if module is not the '__main__' module, the module __name__.
Else, the base name of the module file name. Else None.
"""
if isinstance(module, six.string_types):
try:
module = sys.modules[module]
except KeyError:
return None
try:
return six.text_type(module.package)
except AttributeError:
if module.__name__ == '__main__':
try:
file_name = module.__file__
except AttributeError:
pass
else:
base_name = os.path.basename(file_name)
split_name = os.path.splitext(base_name)
if len(split_name) == 1:
return six.text_type(base_name)
else:
return u'.'.join(split_name[:-1])
return six.text_type(module.__name__)
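# Hedged usage sketch (assumes the module-level imports `six`, `sys` and `os` used by
# the function above are present): a module name is resolved through sys.modules, a
# module object is used directly, and an unknown name yields None.
import json

assert get_package_for_module('json') == 'json'
assert get_package_for_module(json) == 'json'
assert get_package_for_module('not.a.real.module') is None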
| 22,352
|
def asen(x):
"""
    The arcsine of a number.
    The result is expressed in radians.
    .. math::
        \\arcsin(x)
    Args:
        x (float): Argument.
    Returns:
        The angle expressed in radians.
"""
return math.asin(x)
| 22,353
|
def get_mock_response(status_code: int, reason: str, text: str):
"""
Return mock response.
:param status_code: An int representing status_code.
:param reason: A string to represent reason.
:param text: A string to represent text.
:return: MockResponse object.
"""
MockResponse = namedtuple("MockResponse", ["status_code", "reason", "text"])
mock_response = MockResponse(status_code, reason, text)
return mock_response
| 22,354
|
def get_user_stack_depth(tb: TracebackType, f: StackFilter) -> int:
"""Determines the depth of the stack within user-code.
Takes a 'StackFilter' function that filters frames by whether
they are in user code or not and returns the number of frames
in the traceback that are within user code.
The return value can be negated for use with the limit argument
to functions in the traceback module.
"""
depth = 0
for s, _ in traceback.walk_tb(tb):
if depth or f(s):
depth += 1
return depth
| 22,355
|
def unused(attr):
"""
    Check whether an attribute is not set (i.e. has no value).
    """
    return attr is None
| 22,356
|
def compute_npipelines_xgbrf_5_6():
"""Compute the total number of XGB/RF pipelines evaluated"""
df = _load_pipelines_df()
npipelines_rf = np.sum(df['pipeline'].str.contains('random_forest'))
npipelines_xgb = np.sum(df['pipeline'].str.contains('xgb'))
total = npipelines_rf + npipelines_xgb
result = pd.DataFrame(
[npipelines_rf, npipelines_xgb, total],
index=['RF', 'XGB', 'total'],
columns=['pipelines']
)
fn = OUTPUT_DIR.joinpath('5_6_npipelines_xgbrf.csv')
result.to_csv(fn)
return result
| 22,357
|
def get_evaluate_SLA(SLA_terms, topology, evaluate_individual):
"""Generate a function to evaluate if the flow reliability and latency requirements are met
Args:
SLA_terms {SLA} -- an SLA object containing latency and bandwidth requirements
topology {Topology} -- the reference topology object for the flow
evaluate_individual {function}: a cost function, which returns the metric for a given individual
individual {DEAP individual (list)} -- the individual
Returns:
evaluate_SLA {Function}: a function returning True if the requirements are met, False otherwise
"""
def evaluate_SLA(individual):
evaluation = evaluate_individual(individual)
if evaluation[3] > SLA_terms.latency or evaluation[1] > 1:
return False
return True
return evaluate_SLA
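# Hedged usage sketch (names are illustrative, not from the original source): a stub
# cost function stands in for `evaluate_individual`; index 1 of its result is treated
# as a bandwidth-violation indicator and index 3 as the flow latency, matching the
# checks in the closure above.
from types import SimpleNamespace

def _stub_cost(individual):
    return (0.0, 0, 0.0, 5.0)  # latency 5.0, no bandwidth violation

sla = SimpleNamespace(latency=10.0, bandwidth=100.0)
evaluate_SLA = get_evaluate_SLA(sla, topology=None, evaluate_individual=_stub_cost)
assert evaluate_SLA([0, 1, 2]) is True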
| 22,358
|
def extras(config: DictConfig) -> None:
"""Control flow by main config file.
Args:
        config (DictConfig): Main configuration object for the run.
"""
log = get_logger(__name__)
# make it possible to add new keys to config
OmegaConf.set_struct(config, False)
# disable python warnings if <config.ignore_warnings=True>
if config.get("ignore_warnings"):
log.info("Disabling python warnings! <config.ignore_warnings=True>")
warnings.filterwarnings("ignore")
# force debugger friendly configuration if <config.trainer.fast_dev_run=True>
if config.trainer.get("fast_dev_run"):
log.info("Forcing debugger friendly configuration! <config.trainer.fast_dev_run=True>")
# Debuggers don't like GPUs or multiprocessing
if config.trainer.get("gpus"):
config.trainer.gpus = 0
if config.datamodule.get("pin_memory"):
config.datamodule.pin_memory = False
if config.datamodule.get("num_workers"):
config.datamodule.num_workers = 0
if config.trainer.get("sync_batchnorm"):
config.trainer.sync_batchnorm = False
if config.get("callbacks"):
for _, cb_conf in config["callbacks"].items():
if "_target_" in cb_conf and (cb_conf.get("apply_on_epoch") or cb_conf.get("stop_after_epoch")):
log.info("Change <%s> parameters to default", cb_conf._target_)
cb_conf.apply_on_epoch = 0
cb_conf.stop_after_epoch = -1
if cb_conf.get("probability"):
cb_conf.probability = 1.0
# disable adding new keys to config
OmegaConf.set_struct(config, True)
| 22,359
|
def main():
"""Brute force test every network."""
dataset = 'cifar10_cnn'
all_possible_genes = {
'nb_neurons': [16, 32, 64, 128],
'nb_layers': [1, 2, 3, 4, 5],
'activation': ['relu', 'elu', 'tanh', 'sigmoid', 'hard_sigmoid','softplus','linear'],
'optimizer': ['rmsprop', 'adam', 'sgd', 'adagrad', 'adadelta', 'adamax', 'nadam'],
}
logging.info("***Brute forcing networks***")
genomes = generate_genome_list(all_possible_genes)
train_genomes(genomes, dataset)
| 22,360
|
def cloud_watch(tasks, name, log_name):
"""Real time speedometer in AWS CloudWatch."""
state['operators'][name] = CloudWatchOperator(log_name=log_name,
name=name,
verbose=state['verbose'])
for task in tasks:
handle_task_skip(task, name)
if not task['skip']:
state['operators'][name](task['log'])
yield task
| 22,361
|
def latest_consent(user, research_study_id):
"""Lookup latest valid consent for user
:param user: subject of query
:param research_study_id: limit query to respective value
If latest consent for user is 'suspended' or 'deleted', this function
will return None. See ``consent_withdrawal_dates()`` for that need.
:returns: the most recent consent based on given criteria, or None
if no match is located
"""
# consents are ordered desc(acceptance_date)
for consent in user.valid_consents:
if consent.research_study_id != research_study_id:
continue
if consent.status == 'consented':
return consent
return None
| 22,362
|
def microarray():
""" Fake microarray dataframe
"""
data = np.arange(9).reshape(3, 3)
cols = pd.Series(range(3), name='sample_id')
ind = pd.Series([1058685, 1058684, 1058683], name='probe_id')
return pd.DataFrame(data, columns=cols, index=ind)
| 22,363
|
def find_next_tag(template: str, pointer: int, left_delimiter: str) -> Tuple[str, int]:
"""Find the next tag, and the literal between current pointer and that tag"""
split_index = template.find(left_delimiter, pointer)
if split_index == -1:
return (template[pointer:], len(template))
return (template[pointer:split_index], split_index)
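# Worked example (added for illustration): scanning "Hello {{name}}!" from position 0
# with the delimiter "{{" returns the literal before the tag and the tag's index; when
# no delimiter remains, the rest of the template and its length are returned.
assert find_next_tag("Hello {{name}}!", 0, "{{") == ("Hello ", 6)
assert find_next_tag("plain text", 0, "{{") == ("plain text", 10)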
| 22,364
|
def hasNLines(N,filestr):
"""returns true if the filestr has at least N lines and N periods (~sentences)"""
lines = 0
periods = 0
for line in filestr:
lines = lines+1
periods = periods + len(line.split('.'))-1
if lines >= N and periods >= N:
            return True
    return False
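# Hedged usage sketch: the check counts lines and '.' occurrences across an iterable
# of lines, such as a list of strings or an open file object.
sample_lines = ["First sentence.\n", "Second sentence.\n", "Third sentence.\n"]
assert hasNLines(3, sample_lines) is True
assert hasNLines(4, sample_lines) is False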
| 22,365
|
def test_contains_true():
"""Contain a banana."""
from trie import Trie
t = Trie()
t.insert('Banana')
assert t.contains('Banana')
| 22,366
|
def CAMNS_LP(xs, N, lptol=1e-8, exttol=1e-8, verbose=True):
"""
Solve CAMNS problem via reduction to Linear Programming
Arguments:
----------
xs : np.ndarray of shape (M, L)
Observation matrix consisting of M observations
N : int
        Number of source signals to extract
lptol : float
Tolerance for Linear Programming problem
exttol : float
Tolerance for extreme point check
verbose : bool
Whether to print information about progress
Returns:
--------
np.ndarray of shape (N, L)
Estimated source matrix
"""
M, L = xs.shape # Extract dimensions
xs = xs.T
d = np.mean(xs, axis=1, keepdims=True)
C, _, _ = np.linalg.svd(xs - d, full_matrices=False)
C = C[:, :(N - 1)] # Truncate the redundant one
# Step 1. Preparing variables
B = np.diag(np.ones(L))
l = 0 # Number of extracted sources
S = np.zeros((0, L)) # Source matrix
epoch = 1
while l < N:
if verbose:
print("Epoch {}:".format(epoch))
print("=" * 58)
epoch += 1
# Step 2. Choosing random vector and generating direction r
w = np.random.randn(L)
r = B @ w
# Step 3. Solving linear programming problems using CVXPY
alpha1_star = cp.Variable(C.shape[1])
alpha2_star = cp.Variable(C.shape[1])
problem1 = cp.Problem(cp.Minimize(
r.T @ (C @ alpha1_star)), [C @ alpha1_star + d.flatten() >= 0])
problem2 = cp.Problem(cp.Maximize(
r.T @ (C @ alpha2_star)), [C @ alpha2_star + d.flatten() >= 0])
if verbose:
print("\tLaunching LP solver 1")
p_star = problem1.solve()
if verbose:
print("\tLaunching LP solver 2")
q_star = problem2.solve()
if verbose:
print("\tLP solvers have finished, checking results")
alpha1_star = np.expand_dims(alpha1_star.value, axis=1)
alpha2_star = np.expand_dims(alpha2_star.value, axis=1)
s1 = C @ alpha1_star + d
s2 = C @ alpha2_star + d
# Step 4. Checking results (with augmentations from MATLAB implementation)
if l == 0:
if is_extreme_point(C, alpha1_star, d, exttol):
S = np.append(S, [s1.squeeze()], axis=0)
if is_extreme_point(C, alpha2_star, d, exttol):
S = np.append(S, [s2.squeeze()], axis=0)
else:
if np.abs(p_star) / (np.linalg.norm(r) * np.linalg.norm(s1)) >= lptol:
if is_extreme_point(C, alpha1_star, d, exttol):
S = np.append(S, [s1.squeeze()], axis=0)
if np.abs(q_star) / (np.linalg.norm(r) * np.linalg.norm(s2)) >= lptol:
if is_extreme_point(C, alpha2_star, d, exttol):
S = np.append(S, [s2.squeeze()], axis=0)
# Step 5. Updating l
l = S.shape[0]
if verbose:
print("\tRetrieved {}/{} sources\n".format(l, N))
# Step 6. Updating B
Q1, R1 = np.linalg.qr(S.T)
B = np.diag(np.ones(L)) - Q1 @ Q1.T
# Step 7 is kinda implicit, as it is hidden in the loop condition
# Yay, we're done!
return S
| 22,367
|
def hamiltonian_c(n_max, in_w, e, d):
"""apply tridiagonal real Hamiltonian matrix to a complex vector
Parameters
----------
n_max : int
maximum n for cutoff
in_w : np.array(complex)
state in
    e : np.array(complex)
        off diagonal elements of Hamiltonian
    d : np.array(complex)
        diagonal elements of Hamiltonian
Returns
-------
out_w : np.array(complex)
application of Hamiltonian to vector
"""
n_max = int(n_max)
out_w = in_w[:n_max]*d[:n_max]
out_w[:(n_max-1)] += e[:(n_max-1)]*in_w[1:n_max]
out_w[1:n_max] += e[:n_max-1] * in_w[:n_max-1]
return out_w
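# Worked check (added for illustration): the tridiagonal application above should agree
# with an explicit dense symmetric tridiagonal matrix built from d (diagonal) and e
# (off-diagonal elements).
import numpy as np

n = 4
d = np.array([1.0, 2.0, 3.0, 4.0], dtype=complex)
e = np.array([0.5, 0.25, 0.125], dtype=complex)
psi = np.array([1.0, 1j, -1.0, -1j], dtype=complex)

H = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
assert np.allclose(hamiltonian_c(n, psi, e, d), H @ psi)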
| 22,368
|
def unique_badge():
""" keep trying until a new random badge number has been found to return """
rando = str(randint(1000000000, 9999999999))
badge = User.query.filter_by(badge=rando).first()
print("rando badge query = {}".format(badge))
    if badge:
        return unique_badge()
    return rando
| 22,369
|
def mid_price(high, low, timeperiod: int = 14):
"""Midpoint Price over period 期间中点价格
:param high:
:param low:
:param timeperiod:
:return:
"""
return MIDPRICE(high, low, timeperiod)
| 22,370
|
def load_pyger_pickle(filename):
""" Load pyger data from pickle file back into object compatible with pyger plotting methods
:param filename: File name of pickled output from calc_constraints()
This is only meant to be used to read in the initial constraints object produced by
    calc_constraints(), not the cooldown data produced by calc_constraints2(). The data produced
by calc_constraints2() should be able to be read in with a simple pickle.load() function.
"""
class saved_pyger_data(object):
def __init__(self, pickled_constraint):
for key in pickled_constraint:
self.__dict__.update({key:pickled_constraint[key]})
rawdata = pickle.load(open(filename,'rb'))
pyger_compatible_data = {}
for name in list(rawdata.keys()):
constraint = saved_pyger_data(rawdata[name])
pyger_compatible_data.update({name:constraint})
return pyger_compatible_data
| 22,371
|
def absPath(myPath):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath)
| 22,372
|
def plot_mean_MQ_vs_derr(ax, xmv_mat):
"""Plot mean_MQ (y-axis) against d_err (x-axis) for given range of variant sizes
:param xmv_mat:
:param fig_prefix:
:param plot_bin_size:
:return:
"""
mq_mat = xmv_mat.sum(axis=2)
data_cnt = mq_mat.sum(axis=1)
mq_vector = np.arange(mq_mat.shape[1])
mean_mq = np.dot(mq_vector, mq_mat.T) / np.clip(data_cnt, 1, data_cnt.max() + 1).astype(float)
max_derr = int((mean_mq.shape[0] - 3) / 2)
x1 = max_derr * 1.3
xt = [-max_derr, -int(max_derr/2), 0, int(max_derr/2), max_derr]
xlim = [-max_derr * 1.1, x1 * 1.1]
ax.plot(range(-max_derr, max_derr + 1), mean_mq[:2 * max_derr + 1], 'k.')
# ax.scatter(range(-max_derr, max_derr + 1), mean_mq[:2 * max_derr + 1],
# 400 * data_cnt[:2 * max_derr + 1] / data_cnt[:2 * max_derr + 2].max(),
# color='none', edgecolors='k')
ax.plot([x1], mean_mq[2 * max_derr + 1:2 * max_derr + 2], 'ko')
# ax.scatter([x1], mean_mq[2 * max_derr + 1],
# 400 * mean_mq[2 * max_derr + 1] / mean_mq[:2 * max_derr + 2].max(),
# color='none', edgecolors='k')
ax.set_xticks(xt + [x1])
ax.set_xticklabels(xt + ['WC'])
ax.set_xlim(xlim)
# ax.set_xlim((-50, 50))
ax.set_yticks([0, 20, 40, 60])
ax.set_ylim([-5, 70])
ax.axvline(x=0, color='k', linestyle=':')
plt.xlabel(r'$d_{err}$')
plt.ylabel('Mean MQ')
# plt.imshow(xmv_mat.sum(axis=2).T, cmap=plt.cm.gray_r, norm=LogNorm(vmin=0.01, vmax=1e6))
| 22,373
|
def cart_item_pre_save_receiver(sender, instance, *args, **kwargs ):
"""
https://docs.djangoproject.com/en/1.9/ref/signals/#pre-save
    This works because when the cart item is created, the quantity defaults to 1.
:param sender:
:param instance:
:param args:
:param kwargs:
:return:
"""
qty = instance.quantity
if int(qty) >= 1:
price = instance.item.get_price() # check if instance method or property
line_item_total = Decimal(qty) * Decimal(price)
instance.line_item_total = line_item_total
| 22,374
|
def createitemdict(index, tf2info):
"""Take a TF2 item and return a custom dict with a limited number of
keys that are used for search"""
item = tf2info.items[index]
name = item['item_name']
classes = tf2api.getitemclasses(item)
attributes = tf2api.getitemattributes(item,
tf2info.attributes, tf2info.effects)
storeprice = tf2api.getstoreprice(item, tf2info.storeprices)
backpackprice = tf2api.getmarketprice(item, tf2info.backpackprices)
tags = tf2api.getitemtags(item)
# Sort blueprints by crafting chance
blueprint = sorted(tf2info.blueprints[index],
key=lambda k: k['chance'], reverse=True)
description = ''
if 'bundle' in tags and storeprice:
descriptions = tf2info.bundles[index]['descriptions']
text = []
items = []
for i in range(len(descriptions)):
key = str(i)
value = descriptions[key]['value']
if value in tf2info.itemsbyname:
items.append(value)
else:
text.append(value)
description = '{}---{}'.format('\n'.join(text), '\n'.join(items))
elif 'item_description' in item:
description = item['item_description']
if 'bundle' in tags and name in tf2info.itemsets:
description += '---' + '\n'.join(tf2info.itemsets[name]['items'])
levels = OrderedDict.fromkeys(
str(item[i]) for i in ('min_ilevel', 'max_ilevel'))
level = 'Level {} {}'.format('-'.join(levels), item['item_type_name'])
image, image_large = (url and url.replace(
'http://media.steampowered.com', 'https://steamcdn-a.akamaihd.net'
) for url in (item['image_url'], item['image_url_large']))
itemdict = {'index': index,
'name': name,
'image': image,
'image_large': image_large,
'description': description,
'level': level,
'attributes': attributes,
'classes': classes,
'tags': tags,
'storeprice': storeprice,
'marketprice': {'backpack.tf': backpackprice},
'blueprints': blueprint}
if 'paint' in tags:
paintvalue = item['attributes'][0]['value']
# Ignore Paint Tool
if paintvalue != 0:
itemdict['image'] = itemdict['image_large'] = (
'/images/paints/Paint_Can_{}.png'.format(paintvalue))
return itemdict
| 22,375
|
def port_list(request, board_id):
"""Get ports attached to a board."""
return iotronicclient(request).port.list()
| 22,376
|
def main():
"""
main entrypoint
"""
parent_dir = sys.argv[1]
parent_dir_ext = os.path.normpath(parent_dir).split(os.sep)[-2]
## strip trailing slashes and then grab second to last folder name;
seed_list = get_dir_list(parent_dir)
## list of all seeds
inner_dirs = []
## get all the dir_rsls
string_list = []
only_print = False
for seed_dir in seed_list:
inner_dirs.extend(get_dir_list(seed_dir))
for _, dir_rsl in enumerate(inner_dirs):
current_string_list = [dir_rsl + "\n"]
mode = 'audio_avg'
# list of all csv files
num_csvs = None
if num_csvs is None:
lst_csv = glob.glob(dir_rsl + '/*.csv', recursive=False)
dirs_read = [dir_rsl]
else:
lst_csv = []
dirs_read = []
directories = [os.path.join(dir_rsl, d) for d in os.listdir(dir_rsl)]
directories = [d for d in directories if os.path.isdir(d)]
for directory in directories:
current_lst = glob.glob(directory + '/*.csv', recursive=False)
if len(current_lst) == int(num_csvs):
lst_csv.extend(current_lst)
dirs_read.append(directory)
lst_lbl, lst_scr = [], []
mtr_all = defaultdict(list)
if lst_csv == [] or len(lst_csv) != 5:
continue
print(f"{len(lst_csv)} csvs found;")
fn_metrics = {}
for fn in lst_csv:
fn_base = os.path.basename(fn)
if not fn_base.startswith('audio'):
continue
# read from csv
df = pd.read_csv(fn)
# get scores and labels
if mode == 'chunk':
lbl = df.label.to_numpy()
scr = df.score.to_numpy()
elif mode == 'audio_avg':
tmp = df.groupby('audio_fn').mean().to_numpy()
                lbl = tmp[:,0].astype(int)
scr = tmp[:,-1]
mtr = calc_performance_metrics(scr, lbl)
for k, mtr_val in mtr.items():
if k == 'mat':
continue
mtr_all[k].append(mtr_val)
fn_metrics[fn] = {mk: mv for mk, mv in mtr.items() if mk != 'mat'}
lst_lbl.append(lbl)
lst_scr.append(scr)
for filename, fn_mtr in fn_metrics.items():
collect_output(filename, current_string_list, only_print=only_print)
for metric, metric_val in fn_mtr.items():
collect_output("\t{}, {:.3f}".format(metric, metric_val), current_string_list,
only_print=only_print)
collect_output('avg_performance:', current_string_list, only_print=only_print)
for k, v in mtr_all.items():
collect_output('{}: {:.3f}, {:.3f}'.format(k, np.mean(v), np.std(v)),
current_string_list, only_print=only_print)
curr_hmp_roc = get_roc_info(lst_lbl, lst_scr)
curr_hmp_pr = get_pr_info(lst_lbl, lst_scr)
legend_dict = {0: ('magenta', 'CNN')}
fig_name = f'{dir_rsl}/individual_roc.png'
plot_individual_curve(curr_hmp_roc, legend_dict, 'roc', fig_name)
fig_name = f'{dir_rsl}/individual_pr.png'
plot_individual_curve(curr_hmp_pr, legend_dict, 'pr', fig_name)
string_list.extend(current_string_list)
now = str(datetime.now()).replace(' ', '_').replace(':', '_')
txt_out = os.path.join('txt', str(datetime.now()).split(' ', maxsplit=1)[0], parent_dir_ext)
if not os.path.isdir(txt_out):
os.makedirs(txt_out)
txt_out = os.path.join(txt_out, f'{now}_output.txt')
write_txt(string_list, txt_out)
print(txt_out)
| 22,377
|
def _get_archive(url, mode='r', opts=None):
"""Get archive plugin for given URL."""
if opts is None:
opts = {}
logger.debug('readdata._get_archive: url %s' % url)
url_tuple = urllib.parse.urlsplit(url, scheme="file")
if os.name == 'nt' and \
url_tuple.scheme == 'file' and \
fnmatch.fnmatch(url_tuple.netloc, '[A-Za-z]:\\*'):
# Windows: Parse without /x:, then re-attach drive letter
_path = url_tuple.netloc
else:
_path = url_tuple.path
# url_tuple = urllib.parse.urlsplit(url, scheme='file')
mimetype = mimetypes.guess_type(_path)[0]
archive = imagedata.archives.find_mimetype_plugin(
mimetype,
url,
mode,
opts=opts)
logger.debug('readdata._get_archive: _mimetypes %s' % mimetype)
logger.debug('readdata._get_archive: archive %s' % archive.name)
return archive
| 22,378
|
def pure_python_npairs_per_object_3d(sample1, sample2, rbins, period=None):
"""
"""
if period is None:
xperiod, yperiod, zperiod = np.inf, np.inf, np.inf
else:
xperiod, yperiod, zperiod = period, period, period
npts1, npts2, num_rbins = len(sample1), len(sample2), len(rbins)
counts = np.zeros((npts1, num_rbins), dtype=int)
for i in range(npts1):
for j in range(npts2):
dx = sample1[i, 0] - sample2[j, 0]
dy = sample1[i, 1] - sample2[j, 1]
dz = sample1[i, 2] - sample2[j, 2]
if dx > xperiod/2.:
dx = xperiod - dx
elif dx < -xperiod/2.:
dx = -(xperiod + dx)
if dy > yperiod/2.:
dy = yperiod - dy
elif dy < -yperiod/2.:
dy = -(yperiod + dy)
if dz > zperiod/2.:
dz = zperiod - dz
elif dz < -zperiod/2.:
dz = -(zperiod + dz)
d = np.sqrt(dx*dx + dy*dy + dz*dz)
for irbin, r in enumerate(rbins):
if d < r:
counts[i, irbin] += 1
return counts
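# Hedged usage sketch: brute-force per-object pair counts for a few points in a periodic
# unit box; row i gives, for each entry of rbins, how many sample2 points lie within that
# distance of sample1[i].
import numpy as np

sample1 = np.array([[0.1, 0.1, 0.1], [0.9, 0.9, 0.9]])
sample2 = np.array([[0.15, 0.1, 0.1], [0.5, 0.5, 0.5]])
rbins = np.array([0.1, 0.8])
counts = pure_python_npairs_per_object_3d(sample1, sample2, rbins, period=1.0)
assert np.array_equal(counts, np.array([[1, 2], [0, 2]]))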
| 22,379
|
def cal_aic(X, y_pred, centers, weight=None):
"""Ref: https://en.wikipedia.org/wiki/Akaike_information_criterion
"""
if weight is None:
weight = np.ones(X.shape[0], dtype=X.dtype)
para_num = centers.shape[0] * (X.shape[1] + 1)
return cal_log_likelihood(X, y_pred, centers, weight) - para_num
| 22,380
|
def get_gid(cfg, groupname):
"""
[description]
gets and returns the GID for a given groupname
[parameter info]
required:
cfg: the config object. useful everywhere
groupname: the name of the group we want to find the GID for
[return value]
returns an integer representing the GID of the group if successful
returns False if unsuccessful
"""
# validate/construct/get the realm.site_id.domain data
fqgn = mothership.validate.v_get_fqn(cfg, name=groupname)
groupname, realm, site_id, domain = mothership.validate.v_split_fqn(cfg, fqgn)
# gather group data
g = cfg.dbsess.query(Groups).\
filter(Groups.groupname==groupname).\
filter(Groups.site_id==site_id).\
filter(Groups.realm==realm).first()
if g:
return g.gid
else:
return False
| 22,381
|
def test_get_level_nonexistent_file(init_statick):
"""
Test searching for a level which doesn't have a corresponding file.
Expected result: None is returned
"""
args = Args("Statick tool")
args.parser.add_argument(
"--profile", dest="profile", type=str, default="nonexistent.yaml"
)
level = init_statick.get_level("some_package", args.get_args([]))
assert level is None
| 22,382
|
def _elements_from_data(
edge_length: float,
edge_width: float,
layers: Set[TemperatureName],
logger: Logger,
portion_covered: float,
pvt_data: Dict[Any, Any],
x_resolution: int,
y_resolution: int,
) -> Any:
"""
Returns mapping from element coordinate to element based on the input data.
:param edge_length:
The maximum length of an edge element along the top and bottom edges of the
panel, measured in meters.
:param edge_width:
The maximum width of an edge element along the side edges of the panel, measured
in meters.
:param layers:
The `set` of layers to include in the system.
:param logger:
The :class:`logging.Logger` logger instance used for the run.
:param portion_covered:
The portion of the PVT absorber that is covered with PV cells. The uncovered
section is mapped as solar absorber only with glazing as appropriate.
:param pvt_data:
The raw PVT data, extracted from the data file.
:param x_resolution:
The x resolution for the run.
:param y_resolution:
The y resolution for the run.
:return:
A mapping between the element coordinates and the element for all elements
within the panel.
"""
    # * If 1x1, warn that 1x1 resolution is deprecated and should not really be used.
    if x_resolution == 1 and y_resolution == 1:
        logger.warning(
            "Running the system at a 1x1 resolution is deprecated. Consider running "
            "at a higher resolution."
        )
return {
element.ElementCoordinates(0, 0): element.Element(
TemperatureName.absorber in layers,
TemperatureName.glass in layers,
pvt_data["pvt_collector"]["length"],
True,
TemperatureName.pv in layers,
TemperatureName.upper_glass in layers,
pvt_data["pvt_collector"]["width"],
0,
0,
0,
)
}
# Extract the necessary parameters from the system data.
try:
number_of_pipes = pvt_data["absorber"]["number_of_pipes"]
except KeyError as e:
raise MissingParametersError(
"Element", "The number of pipes attached to the absorber must be supplied."
) from None
try:
panel_length = pvt_data["pvt_collector"]["length"]
except KeyError as e:
raise MissingParametersError(
"Element", "PVT panel length must be supplied."
) from None
try:
panel_width = pvt_data["pvt_collector"]["width"]
except KeyError as e:
raise MissingParametersError(
"Element", "PVT panel width must be supplied."
) from None
try:
bond_width = pvt_data["bond"]["width"]
except KeyError as e:
raise MissingParametersError(
"Element", "Collector-to-pipe bond width must be supplied."
) from None
# * Determine the spacing between the pipes.
pipe_spacing = (x_resolution - number_of_pipes) / (number_of_pipes + 1)
if int(pipe_spacing) != pipe_spacing:
raise InvalidParametersError(
"The resolution supplied results in an uneven pipe distribution.",
"pipe_spcaing",
)
# * Determine the indicies of elements that have pipes attached.
pipe_positions = list(
range(int(pipe_spacing), x_resolution - 2, int(pipe_spacing) + 1)
)
# Determine whether the width of the elements is greater than or less than the edge
# width and adjust accordingly.
nominal_element_width: float = (
panel_width - number_of_pipes * bond_width - 2 * edge_width
) / (x_resolution - number_of_pipes - 2)
if nominal_element_width < edge_width:
nominal_element_width = (panel_width - number_of_pipes * bond_width) / (
x_resolution - number_of_pipes
)
edge_width = nominal_element_width
# Likewise, determine whether the nominal element height is greater than the edge
# height and adjust accordingly.
nominal_element_length: float = (panel_length - 2 * edge_length) / (
y_resolution - 2
)
if nominal_element_length < edge_length:
nominal_element_length = panel_length / y_resolution
edge_length = nominal_element_length
# * Instantiate the array of elements.
# Construct the elemented array based on the arguments.
pv_coordinate_cutoff = int(y_resolution * portion_covered)
try:
elements = {
element.ElementCoordinates(
x_coordinate(element_number, x_resolution),
y_coordinate(element_number, x_resolution),
): element.Element(
absorber=TemperatureName.absorber in layers,
glass=TemperatureName.glass in layers,
length=edge_length
if y_coordinate(element_number, x_resolution) in {0, y_resolution - 1}
else nominal_element_length,
pipe=x_coordinate(element_number, x_resolution) in pipe_positions
if TemperatureName.pipe in layers
else False,
pv=y_coordinate(element_number, x_resolution) <= pv_coordinate_cutoff
if TemperatureName.pv in layers
else False,
upper_glass=TemperatureName.upper_glass in layers,
# Use the edge with if the element is an edge element.
width=edge_width
if x_coordinate(element_number, x_resolution) in {0, x_resolution - 1}
# Otherwise, use the bond width if the element is a pipe element.
else bond_width
if x_coordinate(element_number, x_resolution) in pipe_positions
# Otherwise, use the nominal element width.
else nominal_element_width,
x_index=x_coordinate(element_number, x_resolution),
y_index=y_coordinate(element_number, x_resolution),
pipe_index=pipe_positions.index(
x_coordinate(element_number, x_resolution)
)
if x_coordinate(element_number, x_resolution) in pipe_positions
else None,
)
for element_number in range(x_resolution * y_resolution)
}
except KeyError as e:
raise MissingParametersError(
"PVT", f"Missing parameters when instantiating the PV-T system: {str(e)}"
) from None
return elements
| 22,383
|
def GetControllers(wing_serial):
"""Returns control gain matrices for any kite serial number."""
if wing_serial == m.kWingSerial01:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.209, -0.209, 0.0, 0.0, 0.009, 0.009, -0.005, 0.017]
)
longitudinal_gains_min_airspeed = (
[[0.005, 0.034, -0.716, -0.333, 0.311],
[-9.239, -68.061, 1361.046, 641.777, -589.016]]
)
longitudinal_gains_nominal_airspeed = (
[[0.014, 0.013, -0.509, -0.168, 0.316],
[-6.676, -6.529, 234.939, 80.993, -147.915]]
)
longitudinal_gains_max_airspeed = (
[[0.009, 0.007, -0.401, -0.136, 0.316],
[-1.965, -1.585, 79.966, 28.908, -65.259]]
)
lateral_gains_min_airspeed = (
[[1.477, -1.589, -0.434, 0.296, -0.75, 0.329],
[0.224, 1.045, 0.065, -0.554, -0.429, -0.282],
[-18215.48, -42217.142, -2192.239, 28689.136, 25162.461, 12500.22]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.48, -0.234, 0.195, -0.772, 0.317],
[0.38, 1.123, 0.036, -0.386, -0.609, -0.376],
[-6604.64, -11507.484, -340.275, 5156.255, 9047.472, 4427.592]]
)
lateral_gains_max_airspeed = (
[[0.982, -1.395, -0.198, 0.149, -0.786, 0.309],
[0.27, 1.107, 0.027, -0.287, -0.613, -0.391],
[-2275.783, -4917.11, -119.56, 1730.983, 4062.059, 2033.279]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.587, 0.004, 0.11], [-0.03, -6.079, -0.026], [0.243, 0.006, -1.06]]
)
elif wing_serial == m.kWingSerial04Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.211, -0.211, 0.0, 0.0, 0.011, 0.011, 0.005, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.002, 0.033, -0.73, -0.357, 0.311],
[4.546, -64.827, 1390.055, 688.515, -589.338]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.548, -0.182, 0.316],
[-5.284, -6.728, 254.985, 87.909, -148.241]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.441, -0.144, 0.316],
[-1.676, -1.659, 89.199, 30.631, -65.417]]
)
lateral_gains_min_airspeed = (
[[1.44, -1.617, -0.409, 0.296, -0.743, 0.333],
[0.254, 1.044, 0.06, -0.551, -0.432, -0.277],
[-19794.459, -43094.943, -1997.427, 28857.76, 25564.594, 12475.401]]
)
lateral_gains_nominal_airspeed = (
[[1.036, -1.502, -0.224, 0.194, -0.768, 0.32],
[0.433, 1.136, 0.033, -0.39, -0.614, -0.374],
[-7324.836, -11932.75, -305.45, 5272.765, 9170.382, 4461.79]]
)
lateral_gains_max_airspeed = (
[[0.956, -1.415, -0.192, 0.148, -0.783, 0.311],
[0.323, 1.123, 0.025, -0.291, -0.617, -0.389],
[-2588.374, -5128.587, -109.03, 1771.109, 4110.266, 2055.664]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.92, -0.002, 0.128],
[-0.027, -5.506, -0.024],
[0.252, 0.005, -1.083]]
)
elif wing_serial == m.kWingSerial04Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.206, -0.206, 0.0, 0.0, 0.006, 0.006, 0.004, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[6.879, -65.952, 1416.656, 711.871, -589.814]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.562, -0.187, 0.316],
[-5.048, -6.949, 262.084, 90.532, -148.346]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.451, -0.146, 0.316],
[-1.652, -1.714, 91.319, 31.157, -65.442]]
)
lateral_gains_min_airspeed = (
[[1.473, -1.597, -0.421, 0.294, -0.746, 0.331],
[0.237, 1.043, 0.061, -0.559, -0.431, -0.28],
[-19140.086, -42177.964, -1957.253, 29074.862, 25405.542, 12435.884]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.486, -0.228, 0.193, -0.769, 0.319],
[0.403, 1.124, 0.033, -0.391, -0.613, -0.374],
[-6972.675, -11547.427, -303.445, 5257.572, 9146.686, 4416.334]]
)
lateral_gains_max_airspeed = (
[[0.983, -1.401, -0.195, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.291, -0.617, -0.389],
[-2439.793, -4940.323, -108.619, 1761.451, 4108.271, 2029.644]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.721, -0.001, 0.125],
[-0.028, -5.332, -0.022],
[0.243, 0.005, -1.048]]
)
elif wing_serial == m.kWingSerial05Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.204, -0.204, 0.0, 0.0, 0.004, 0.004, 0.004, 0.007]
)
longitudinal_gains_min_airspeed = (
[[-0.003, 0.033, -0.732, -0.353, 0.311],
[5.756, -65.225, 1393.028, 681.0, -589.458]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.552, -0.181, 0.316],
[-5.157, -6.823, 257.066, 87.46, -148.262]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.445, -0.143, 0.316],
[-1.655, -1.692, 90.117, 30.558, -65.423]]
)
lateral_gains_min_airspeed = (
[[1.438, -1.611, -0.401, 0.289, -0.743, 0.333],
[0.257, 1.041, 0.058, -0.542, -0.432, -0.278],
[-20011.52, -42735.847, -1914.014, 28358.023, 25584.42, 12448.614]]
)
lateral_gains_nominal_airspeed = (
[[1.043, -1.502, -0.221, 0.191, -0.767, 0.32],
[0.428, 1.13, 0.032, -0.383, -0.614, -0.374],
[-7288.823, -11800.514, -296.679, 5172.453, 9185.489, 4445.84]]
)
lateral_gains_max_airspeed = (
[[0.965, -1.415, -0.191, 0.146, -0.782, 0.311],
[0.318, 1.117, 0.024, -0.286, -0.617, -0.389],
[-2567.285, -5064.437, -106.454, 1742.745, 4117.291, 2047.19]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-6.043, -0.014, 0.134],
[-0.027, -5.618, -0.024],
[0.257, 0.005, -1.105]]
)
elif wing_serial == m.kWingSerial05Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.198, -0.198, 0.0, 0.0, -0.002, -0.002, 0.003, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.744, -0.364, 0.311],
[7.876, -66.499, 1418.317, 702.426, -589.905]]
)
longitudinal_gains_nominal_airspeed = (
[[0.01, 0.014, -0.565, -0.186, 0.316],
[-4.942, -7.054, 263.793, 89.867, -148.357]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.454, -0.145, 0.316],
[-1.635, -1.75, 92.08, 31.033, -65.443]]
)
lateral_gains_min_airspeed = (
[[1.47, -1.591, -0.412, 0.286, -0.746, 0.331],
[0.24, 1.039, 0.059, -0.549, -0.431, -0.281],
[-19344.869, -41752.487, -1867.667, 28478.098, 25425.604, 12404.153]]
)
lateral_gains_nominal_airspeed = (
[[1.07, -1.485, -0.226, 0.189, -0.768, 0.32],
[0.397, 1.117, 0.033, -0.383, -0.613, -0.374],
[-6919.209, -11394.187, -294.167, 5138.956, 9160.95, 4397.605]]
)
lateral_gains_max_airspeed = (
[[0.993, -1.401, -0.193, 0.145, -0.782, 0.312],
[0.287, 1.101, 0.025, -0.285, -0.618, -0.389],
[-2410.981, -4866.463, -105.87, 1728.008, 4114.679, 2019.74]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.857, -0.012, 0.131],
[-0.03, -5.457, -0.022],
[0.249, 0.005, -1.072]]
)
elif wing_serial == m.kWingSerial06Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.208, -0.208, 0.0, 0.0, 0.008, 0.008, 0.006, 0.007]
)
longitudinal_gains_min_airspeed = (
[[-0.003, 0.032, -0.731, -0.358, 0.311],
[6.453, -64.539, 1392.121, 689.765, -589.371]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.553, -0.183, 0.316],
[-5.088, -6.779, 257.684, 88.435, -148.279]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.447, -0.144, 0.316],
[-1.637, -1.678, 90.467, 30.782, -65.435]]
)
lateral_gains_min_airspeed = (
[[1.438, -1.616, -0.408, 0.296, -0.742, 0.333],
[0.255, 1.044, 0.059, -0.552, -0.432, -0.277],
[-19907.663, -43108.523, -1968.711, 28927.246, 25591.178, 12468.239]]
)
lateral_gains_nominal_airspeed = (
[[1.038, -1.503, -0.224, 0.194, -0.768, 0.32],
[0.435, 1.136, 0.033, -0.391, -0.614, -0.374],
[-7364.944, -11935.606, -300.999, 5287.24, 9178.769, 4462.368]]
)
lateral_gains_max_airspeed = (
[[0.958, -1.416, -0.192, 0.148, -0.783, 0.311],
[0.325, 1.123, 0.024, -0.291, -0.617, -0.389],
[-2605.535, -5129.038, -107.775, 1775.087, 4114.053, 2056.295]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.912, -0.009, 0.13],
[-0.025, -5.494, -0.024],
[0.252, 0.005, -1.081]]
)
elif wing_serial == m.kWingSerial06Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.203, -0.203, 0.0, 0.0, 0.003, 0.003, 0.004, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[8.412, -65.841, 1417.073, 711.786, -589.819]]
)
longitudinal_gains_nominal_airspeed = (
[[0.01, 0.014, -0.566, -0.188, 0.316],
[-4.888, -7.008, 264.204, 90.884, -148.372]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.455, -0.146, 0.316],
[-1.62, -1.735, 92.313, 31.262, -65.454]]
)
lateral_gains_min_airspeed = (
[[1.471, -1.596, -0.42, 0.293, -0.746, 0.331],
[0.239, 1.043, 0.06, -0.559, -0.431, -0.28],
[-19231.343, -42149.313, -1926.317, 29079.197, 25426.856, 12427.585]]
)
lateral_gains_nominal_airspeed = (
[[1.065, -1.487, -0.228, 0.193, -0.769, 0.319],
[0.404, 1.123, 0.033, -0.391, -0.613, -0.374],
[-6992.628, -11534.142, -299.093, 5258.12, 9152.573, 4415.616]]
)
lateral_gains_max_airspeed = (
[[0.986, -1.402, -0.194, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.29, -0.617, -0.389],
[-2447.327, -4933.324, -107.393, 1761.417, 4110.821, 2029.552]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.725, -0.008, 0.128],
[-0.027, -5.331, -0.022],
[0.243, 0.005, -1.048]]
)
elif wing_serial == m.kWingSerial07Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.211, -0.211, 0.0, 0.0, 0.011, 0.011, 0.005, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.002, 0.033, -0.73, -0.357, 0.311],
[4.546, -64.827, 1390.055, 688.515, -589.338]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.548, -0.182, 0.316],
[-5.284, -6.728, 254.985, 87.909, -148.241]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.441, -0.144, 0.316],
[-1.676, -1.659, 89.199, 30.631, -65.417]]
)
lateral_gains_min_airspeed = (
[[1.44, -1.617, -0.409, 0.296, -0.743, 0.333],
[0.254, 1.044, 0.06, -0.551, -0.432, -0.277],
[-19794.459, -43094.943, -1997.427, 28857.76, 25564.594, 12475.401]]
)
lateral_gains_nominal_airspeed = (
[[1.036, -1.502, -0.224, 0.194, -0.768, 0.32],
[0.433, 1.136, 0.033, -0.39, -0.614, -0.374],
[-7324.836, -11932.75, -305.45, 5272.765, 9170.382, 4461.79]]
)
lateral_gains_max_airspeed = (
[[0.956, -1.415, -0.192, 0.148, -0.783, 0.311],
[0.323, 1.123, 0.025, -0.291, -0.617, -0.389],
[-2588.374, -5128.587, -109.03, 1771.109, 4110.266, 2055.664]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.92, -0.002, 0.128],
[-0.027, -5.506, -0.024],
[0.252, 0.005, -1.083]]
)
elif wing_serial == m.kWingSerial07Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.206, -0.206, 0.0, 0.0, 0.006, 0.006, 0.004, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[6.879, -65.952, 1416.656, 711.871, -589.814]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.562, -0.187, 0.316],
[-5.048, -6.949, 262.084, 90.532, -148.346]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.451, -0.146, 0.316],
[-1.652, -1.714, 91.319, 31.157, -65.442]]
)
lateral_gains_min_airspeed = (
[[1.473, -1.597, -0.421, 0.294, -0.746, 0.331],
[0.237, 1.043, 0.061, -0.559, -0.431, -0.28],
[-19140.086, -42177.964, -1957.253, 29074.862, 25405.542, 12435.884]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.486, -0.228, 0.193, -0.769, 0.319],
[0.403, 1.124, 0.033, -0.391, -0.613, -0.374],
[-6972.675, -11547.427, -303.445, 5257.572, 9146.686, 4416.334]]
)
lateral_gains_max_airspeed = (
[[0.983, -1.401, -0.195, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.291, -0.617, -0.389],
[-2439.793, -4940.323, -108.619, 1761.451, 4108.271, 2029.644]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.721, -0.001, 0.125],
[-0.028, -5.332, -0.022],
[0.243, 0.005, -1.048]]
)
else:
assert False, 'wing_serial %d was not recognized' % wing_serial
return {
'airspeed_table': (
airspeed_table),
'flap_offsets': (
flap_offsets),
'longitudinal_gains_min_airspeed': (
longitudinal_gains_min_airspeed),
'longitudinal_gains_nominal_airspeed': (
longitudinal_gains_nominal_airspeed),
'longitudinal_gains_max_airspeed': (
longitudinal_gains_max_airspeed),
'lateral_gains_min_airspeed': (
lateral_gains_min_airspeed),
'lateral_gains_nominal_airspeed': (
lateral_gains_nominal_airspeed),
'lateral_gains_max_airspeed': (
lateral_gains_max_airspeed),
'B_flaps_to_pqr_min_airspeed': (
B_flaps_to_pqr_min_airspeed),
}
| 22,384
|
def rotate(mat, degrees):
"""
Rotates the input image by a given number of degrees about its center.
Border pixels are extrapolated by replication.
:param mat: input image
:param degrees: number of degrees to rotate (positive is counter-clockwise)
:return: rotated image
"""
rot_mat = cv2.getRotationMatrix2D((mat.shape[1] / 2, mat.shape[0] / 2), degrees, 1)
return cv2.warpAffine(mat, rot_mat, (mat.shape[1], mat.shape[0]),
borderMode=cv2.BORDER_REPLICATE)
| 22,385
|
def copy_budget(budget):
"""
    Function to copy a budget along with its resources and tasks
    Created: Feb 10, 2019
    By: Carlos Maldonado
"""
pass
| 22,386
|
def spec_defaults():
"""
Return a mapping with spec attribute defaults to ensure that the
returned results are the same on RubyGems 1.8 and RubyGems 2.0
"""
return {
'base_dir': None,
'bin_dir': None,
'cache_dir': None,
'doc_dir': None,
'gem_dir': None,
'gems_dir': None,
'ri_dir': None,
'spec_dir': None,
'spec_file': None,
'cache_file': None,
'full_gem_path': None,
'full_name': None,
'metadata': {},
'homepage': '',
'licenses': [],
'loaded_from': None,
}
| 22,387
|
def rdict(x):
"""
recursive conversion to dictionary
converts objects in list members to dictionary recursively
"""
if isinstance(x, list):
l = [rdict(_) for _ in x]
return l
elif isinstance(x, dict):
x2 = {}
for k, v in x.items():
x2[k] = rdict(v)
return x2
else:
if hasattr(x, '__dict__'):
d = x.__dict__
toremove = []
for k, v in d.items():
if v is None:
toremove.append(k)
else:
d[k] = rdict(v)
for k in toremove:
del(d[k])
return d
else:
return x
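# Hedged usage sketch: nested objects exposing __dict__ are converted recursively, and
# attributes whose value is None are dropped from the result.
class _Point:
    def __init__(self, x, y, tag=None):
        self.x, self.y, self.tag = x, y, tag

converted = rdict([_Point(1, 2), _Point(3, 4, tag="offset")])
assert converted == [{'x': 1, 'y': 2}, {'x': 3, 'y': 4, 'tag': 'offset'}]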
| 22,388
|
def get_pid(part_no):
"""Extract the PID from the part number page"""
url = 'https://product.tdk.com/en/search/capacitor/ceramic/mlcc/info?part_no=' + part_no
page = requests.get(url)
if (page.status_code != 200):
print('Error getting page({}): {}'.format(page.status_code, url))
return None
soup = BeautifulSoup(page.text, 'html.parser')
pid_input = soup.find(id='pid')
if pid_input is None:
return None
return pid_input['value']
| 22,389
|
def verbatim_det_lcs_all(plags, psr, susp_text, src_text, susp_offsets, src_offsets, th_shortest):
"""
    DESCRIPTION: Uses the longest common substring algorithm to classify a pair of compared documents as a verbatim plagiarism candidate, removing the non-verbatim cases if positive
INPUT: plags <list of list of two tuples [(int, int), (int, int)]> - Have the plagiarism cases represented by min and max sentence index in suspicious and source document respectively
psr <list of list of tuples (int, int)> - Contains the clusters
           susp_text <string> - Suspicious document text
src_text <string> - Source document text
susp_offsets <list of tuples (int, int)> - Contain the char offset and length of each suspicious document sentence
src_offsets <list of tuples (int, int)> - Contain the char offset and length of each source document sentence
th_shortest <int> - Threshold in characters of shortest common substring allowed
OUTPUT: res_plags <list of list of two tuples [(int, int), (int, int)]> - Contains the plagiarism cases as common substrings or the same as the arguments depending on type_plag
res_psr <list of list of tuples (int, int)> - Contains the clusters with seeds present in the common substrings, or the same as the arguments depending on type_plag
type_plag <0 or 1> - 1: verbatim plagiarism case 0: Other plagiarism case
res_long_frag <list> - Contains the lengths of common substrings
"""
#plags [[(susp_ini, susp_end), (src_ini, src_end)], ...]
res_plags = []
res_psr = []
res_long_frag = []
i = 0
type_plag = 0 #0: Unknown, 1: no-obfuscation
#print 'Plags:', len(plags)
while i < len(plags): #For each plagiarism case
#print 'Case',i
#print 'Plag case', plags[i]
#print 'Seeds', psr[i]
#sentences in seeds an those not in seeds
res2 = common_substring_pro_all(susp_text[susp_offsets[plags[i][0][0]][0] : susp_offsets[plags[i][0][1]][0] + susp_offsets[plags[i][0][1]][1]], src_text[src_offsets[plags[i][1][0]][0] : src_offsets[plags[i][1][1]][0] + src_offsets[plags[i][1][1]][1]], th_shortest)
res = []
#Remove overlapping
for tup_i in res2:
flag = 0
for tup_j in res2:
if tup_i != tup_j and tup_i[2] >= tup_j[2] and tup_i[3] <= tup_j[3]:
flag = 1
break
if flag == 0:
res.append(tup_i)
#print 'Res2', res2
#print 'Res', res
#max_len = max([res[1] - res[0], res[3] - res[2]])
#max_len = [(x[1] - x[0], x[3] - x[2]) for x in res]
if len(res) > 0:
if type_plag == 1:
#print max_len, True, 'Removing seeds with lcs shorter than', th_shortest
for sub_case in res:
res_plags.append([(susp_offsets[plags[i][0][0]][0] + sub_case[0], susp_offsets[plags[i][0][0]][0] + sub_case[1]), (src_offsets[plags[i][1][0]][0] + sub_case[2], src_offsets[plags[i][1][0]][0] + sub_case[3])])
res_psr.append(psr[i])
res_long_frag.append(max([sub_case[1] - sub_case[0], sub_case[3] - sub_case[2]]))
else:
#print max_len, 'Type 02-no-obfuscation detected. Starting over!'
#print max_len, 'Type 02-no-obfuscation detected. Removing previously added cases!'
type_plag = 1
res_plags = []
res_psr = []
res_long_frag = []
for sub_case in res:
res_plags.append([(susp_offsets[plags[i][0][0]][0] + sub_case[0], susp_offsets[plags[i][0][0]][0] + sub_case[1]), (src_offsets[plags[i][1][0]][0] + sub_case[2], src_offsets[plags[i][1][0]][0] + sub_case[3])])
res_psr.append(psr[i])
res_long_frag.append(max([sub_case[1] - sub_case[0], sub_case[3] - sub_case[2]]))
#i = -1
else:
if type_plag != 1:
#print max_len, False, 'Adding'
res_plags.append(plags[i])
res_psr.append(psr[i])
res_long_frag.append(-1)
#else:
#print max_len, False, 'Removing case because 02-no-obfuscation was detected'
i += 1
return res_plags, res_psr, type_plag, res_long_frag
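
# Usage sketch (illustrative, not part of the original source): shows the data
# shapes verbatim_det_lcs_all expects, using single-sentence toy documents. The
# call is guarded because it relies on common_substring_pro_all, which is
# defined elsewhere in the original module.
if __name__ == '__main__':
    susp_text = "the quick brown fox jumps over the lazy dog"
    src_text = "a quick brown fox jumped over the lazy dog"
    susp_offsets = [(0, len(susp_text))]  # (char offset, length) per sentence
    src_offsets = [(0, len(src_text))]
    plags = [[(0, 0), (0, 0)]]  # suspicious sentence 0 aligned with source sentence 0
    psr = [[(0, 0)]]  # one seed cluster for the case
    try:
        cases, seeds, type_plag, lengths = verbatim_det_lcs_all(
            plags, psr, susp_text, src_text, susp_offsets, src_offsets, 20)
        print(type_plag, lengths)
    except NameError:
        pass  # common_substring_pro_all is not included in this snippet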
| 22,390
|
def autopooler(n,
it,
*a,
chunksize=1,
dummy=False,
return_iter=False,
unordered=False,
**ka):
"""Uses multiprocessing.Pool or multiprocessing.dummy.Pool to run iterator in parallel.
Parameters
------------
n: int
Number of parallel processes. Set to 0 to use auto detected CPU count.
it: iterator of (function,tuple,dict)
Each iteration computes **function**\ (\*\ **tuple**\ ,\*\*\ **dict**\ ). **function** must be picklable, i.e. a base level function in a module or file.
a: tuple
Arguments passed to Pool.
chunksize: int
Number of iterations passed to each process each time.
    dummy: bool
        Whether to use multiprocessing.dummy (thread-based pool) instead of multiprocessing.
    return_iter: bool
        Whether to return an iterator of results instead of a list. Only honored when n == 1; the parallel path raises NotImplementedError.
    unordered: bool
        Whether results may be returned out of input order (uses Pool.imap_unordered).
    ka: dict
        Keyword arguments passed to Pool.
Returns
----------
list (or iterator if return_iter) of any
Results returned by function(\*tuple,\*\*dict), in same order of the iterator if not unordered.
"""
    import logging
if dummy:
import multiprocessing.dummy as m
else:
import multiprocessing as m
if n == 0:
n = autocount()
logging.info('Using {} threads'.format(n))
if n == 1:
ans = map(autopooler_caller, it)
if not return_iter:
ans = list(ans)
assert len(ans) > 0
else:
import itertools
# Catches iterator errors (only if occurs at the first), and emptiness
it = itertools.chain([next(it)], it)
with m.Pool(n, *a, **ka) as p:
if unordered:
ans = p.imap_unordered(autopooler_caller, it, chunksize)
else:
ans = p.imap(autopooler_caller, it, chunksize)
if not return_iter:
ans = list(ans)
else:
raise NotImplementedError
return ans
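
# Usage sketch (not part of the original source): square numbers in parallel.
# The worker must be a module-level function so it can be pickled; the
# autopooler_caller / autocount helpers referenced above are assumed to come
# from the same module (roughly: call f(*args, **kwargs) for each task, and
# detect the CPU count).
def _square(x):
    return x * x

if __name__ == '__main__':
    tasks = ((_square, (i,), {}) for i in range(100))
    results = autopooler(4, tasks, chunksize=10)
    print(results[:5])  # [0, 1, 4, 9, 16]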
| 22,391
|
def grow_population(initial, days_to_grow):
"""
Track the fish population growth from an initial population, growing over days_to_grow number of days.
To make this efficient two optimizations have been made:
1. Instead of tracking individual fish (which doubles every approx. 8 days which will result O(10^9)
fish over 256 days), we instead compute the sum of fish with the same due date and use the due date
as the offset into the current popluation list. For example, if 5 fish have a timer of 1 and 2 fish
have a timer of 4 the population would be tracked as: [0, 5, 0, 0, 2, 0, 0, 0, 0]
2. Modulo arithmetic is used instead of fully iterating through the entire list to decrement the due
date of each fish every day. Using modula arithmetic provides a projection into the fish data that
looks like its changing each day without needing O(n) operations and instead we can update the list
in constant time regardless of the number of different ages for fish.
"""
current = list(initial)
if days_to_grow == 0:
return current
for day in range(0, days_to_grow):
due_index = day % 9
due_count = current[due_index]
current[(day+7)%9] += due_count
current[(day+9)%9] += due_count
current[due_index] = max(0, current[due_index] - due_count)
return current
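
# Usage sketch (not part of the original source): bucket the well-known
# Advent of Code sample timers [3, 4, 3, 1, 2] into a 9-slot histogram,
# grow, then sum to get the total population.
initial_timers = [3, 4, 3, 1, 2]
population = [0] * 9
for t in initial_timers:
    population[t] += 1
print(sum(grow_population(population, 80)))   # 5934
print(sum(grow_population(population, 256)))  # 26984457539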
| 22,392
|
def get_QBrush():
"""QBrush getter."""
try:
import PySide.QtGui as QtGui
return QtGui.QBrush
except ImportError:
import PyQt5.QtGui as QtGui
return QtGui.QBrush
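
# Usage sketch (not part of the original source): resolve QBrush from whichever
# binding is installed and build a default brush; fails cleanly if neither
# PySide nor PyQt5 is available.
try:
    QBrush = get_QBrush()
    brush = QBrush()
except ImportError:
    brush = None  # no Qt binding installed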
| 22,393
|
def apply_odata_query(query: ClauseElement, odata_query: str) -> ClauseElement:
"""
Shorthand for applying an OData query to a SQLAlchemy query.
Args:
query: SQLAlchemy query to apply the OData query to.
odata_query: OData query string.
Returns:
ClauseElement: The modified query
"""
lexer = ODataLexer()
parser = ODataParser()
model = query.column_descriptions[0]["entity"]
ast = parser.parse(lexer.tokenize(odata_query))
transformer = AstToSqlAlchemyClauseVisitor(model)
where_clause = transformer.visit(ast)
for j in transformer.join_relationships:
if str(j) not in _get_joined_attrs(query):
query = query.join(j)
return query.filter(where_clause)
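
# Usage sketch (hypothetical model, not part of the original source): filter an
# ORM query with an OData expression. Author is a made-up mapped class; the
# ODataLexer/ODataParser/visitor names come from the odata-query package
# imported elsewhere in the original module, so the call is guarded.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Author(Base):
    __tablename__ = "author"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    query = session.query(Author)
    try:
        filtered = apply_odata_query(query, "name eq 'Hemingway'")
        print(filtered.all())
    except NameError:
        pass  # odata-query imports not included in this snippet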
| 22,394
|
def get_sql(conn, data, did, tid, exid=None, template_path=None):
"""
This function will generate sql from model data.
:param conn: Connection Object
:param data: data
:param did: Database ID
:param tid: Table id
:param exid: Exclusion Constraint ID
:param template_path: Template Path
    :return: SQL statement and constraint name
"""
name = data['name'] if 'name' in data else None
if exid is not None:
sql = render_template("/".join([template_path, 'properties.sql']),
did=did, tid=tid, cid=exid)
status, res = conn.execute_dict(sql)
if not status:
raise Exception(res)
if len(res['rows']) == 0:
raise ObjectGone(
_('Could not find the exclusion constraint in the table.'))
old_data = res['rows'][0]
if 'name' not in data:
name = data['name'] = old_data['name']
sql = render_template("/".join([template_path, 'update.sql']),
data=data, o_data=old_data)
else:
if 'columns' not in data:
return _('-- definition incomplete'), name
elif isinstance(data['columns'], list) and len(data['columns']) < 1:
return _('-- definition incomplete'), name
sql = render_template("/".join([template_path, 'create.sql']),
data=data, conn=conn)
return sql, name
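
# Usage sketch (hypothetical field names, not part of the original source):
# the rough shape of the 'data' dict when creating a new exclusion constraint
# (exid is None takes the create.sql branch); conn, did, tid and template_path
# come from pgAdmin's node infrastructure, so the call is left commented.
example_data = {
    'name': 'ex_room_overlap',
    'columns': [{'column': 'room', 'operator': '='}],
}
# sql, name = get_sql(conn, example_data, did, tid, template_path=template_path)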
| 22,395
|
def bytes_base64(x):
# type: (AnyStr) -> bytes
"""Turn bytes into base64"""
if six.PY2:
return base64.encodestring(x).replace('\n', '') # type: ignore
return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'')
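
# Usage sketch (not part of the original source): encode raw bytes without the
# trailing newline added by encodebytes/encodestring. six and bytes_encode come
# from the surrounding module, so the check is guarded.
try:
    assert bytes_base64(b"scapy") == b"c2NhcHk="
except NameError:
    pass  # six / bytes_encode not included in this snippet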
| 22,396
|
def fetcher(station, sts, ets):
"""Do fetching"""
dbconn = get_dbconn('postgis')
cursor = dbconn.cursor('raobstreamer')
stations = [station, ]
if station.startswith("_"):
nt = NetworkTable("RAOB")
stations = nt.sts[station]['name'].split("--")[1].strip().split(",")
cursor.execute("""
SELECT f.valid at time zone 'UTC', p.levelcode, p.pressure, p.height,
p.tmpc, p.dwpc, p.drct, round((p.smps * 1.94384)::numeric,0),
p.bearing, p.range_miles, f.station from
raob_profile p JOIN raob_flights f on
(f.fid = p.fid) WHERE f.station in %s and valid >= %s and valid < %s
""", (tuple(stations), sts, ets))
ssw(("station,validUTC,levelcode,pressure_mb,height_m,tmpc,"
"dwpc,drct,speed_kts,bearing,range_sm\n"))
for row in cursor:
ssw(("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
) % (row[10], m(row[0]),
m(row[1]), m(row[2]), m(row[3]), m(row[4]),
m(row[5]), m(row[6]), m(row[7]),
m(row[8]), m(row[9])))
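
# Usage sketch (hypothetical station ID and time window, not part of the
# original source): the call needs the IEM 'postgis' database plus the
# ssw/m/NetworkTable helpers from this module, so it is left commented.
from datetime import datetime, timezone

sts = datetime(2024, 1, 1, tzinfo=timezone.utc)
ets = datetime(2024, 1, 2, tzinfo=timezone.utc)
# fetcher("OAX", sts, ets)  # streams CSV rows via ssw()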
| 22,397
|
def is_planar_enforced(gdf):
"""Test if a geodataframe has any planar enforcement violations
Parameters
----------
Returns
-------
boolean
"""
if is_overlapping(gdf):
return False
if non_planar_edges(gdf):
return False
_holes = holes(gdf)
if _holes.shape[0] > 0:
return False
return True
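
# Usage sketch (not part of the original source): two disjoint unit squares,
# which should pass planar enforcement. The is_overlapping / non_planar_edges /
# holes predicates come from the same module, so the call is guarded.
import geopandas as gpd
from shapely.geometry import box

gdf = gpd.GeoDataFrame(geometry=[box(0, 0, 1, 1), box(2, 0, 3, 1)])
try:
    print(is_planar_enforced(gdf))  # expected: True
except NameError:
    pass  # helper predicates not included in this snippet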
| 22,398
|
def bson2uuid(bval: bytes) -> UUID:
"""Decode BSON Binary UUID as UUID."""
return UUID(bytes=bval)
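
# Usage sketch (not part of the original source): round-trip a UUID through its
# 16-byte representation, as stored in a BSON Binary (UUID) field.
from uuid import UUID, uuid4

u = uuid4()
assert bson2uuid(u.bytes) == u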
| 22,399
|