content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def remove_app(app_name, app_path):
    """Remove an installed application.

    Runs ``mbl-app-manager -v remove <app_name> <app_path>`` and returns
    the command's exit code. ``check=False`` means failures are reported
    via the return code rather than an exception.
    """
    # usage: mbl-app-manager remove [-h] app_name app_path
    print(f"Remove {app_name} from {app_path}")
    cmd = [MBL_APP_MANAGER, "-v", "remove", app_name, app_path]
    print(f"Executing command: {cmd}")
    completed = subprocess.run(cmd, check=False)
    return completed.returncode
def lcp_iscsi_vnic_add(handle, name, parent_dn, addr="derived",
                       admin_host_port="ANY",
                       admin_vcon="any", stats_policy_name="global-default",
                       admin_cdn_name=None, cdn_source="vnic-name",
                       switch_id="A", pin_to_group_name=None, vnic_name=None,
                       qos_policy_name=None,
                       adaptor_profile_name="global-default",
                       ident_pool_name=None, order="unspecified",
                       nw_templ_name=None, vlan_name="default",
                       **kwargs):
    """Add an iSCSI vNIC to an existing LAN Connectivity Policy.

    Args:
        handle (UcscHandle): connection handle
        name (string): name of the iSCSI vNIC
        parent_dn (string): Dn of the LAN connectivity policy
        addr (string): address of the vnic
        admin_host_port (string): admin host port placement for the vnic
        admin_vcon (string): admin vcon for the vnic
        stats_policy_name (string): stats policy name
        admin_cdn_name (string): CDN name (blanked when cdn_source is
            'vnic-name')
        cdn_source (string): CDN source, one of ['vnic-name', 'user-defined']
        switch_id (string): switch id
        pin_to_group_name (string): pinning group name
        vnic_name (string): overlay vnic name
        qos_policy_name (string): QoS policy name
        adaptor_profile_name (string): adaptor profile name
        ident_pool_name (string): identity pool name
        order (string): order of the vnic
        nw_templ_name (string): network template name
        vlan_name (string): name of the vlan
        **kwargs: any additional managed-object property/value pairs not
            part of the regular args; kept for future version compatibility

    Returns:
        VnicIScsiLCP: the committed managed object

    Raises:
        UcscOperationError: if the parent policy does not exist or
            cdn_source is not a recognized value

    Example:
        lcp_iscsi_vnic_add(handle, "test_iscsi",
                           "org-root/lan-conn-pol-samppol2",
                           switch_id="A", vnic_name="vnic1",
                           adaptor_profile_name="global-SRIOV")
    """
    from ucscsdk.mometa.vnic.VnicIScsiLCP import VnicIScsiLCP
    from ucscsdk.mometa.vnic.VnicVlan import VnicVlan

    lcp = handle.query_dn(parent_dn)
    if not lcp:
        raise UcscOperationError("lcp_iscsi_vnic_add",
                                 "LAN connectivity policy '%s' does not exist"
                                 % parent_dn)
    if cdn_source not in ('vnic-name', 'user-defined'):
        raise UcscOperationError("lcp_iscsi_vnic_add",
                                 "Invalid CDN source name")

    # The CDN name only applies when the source is user-defined.
    effective_cdn_name = "" if cdn_source == "vnic-name" else admin_cdn_name

    iscsi_vnic = VnicIScsiLCP(parent_mo_or_dn=lcp,
                              name=name,
                              addr=addr,
                              admin_host_port=admin_host_port,
                              admin_vcon=admin_vcon,
                              stats_policy_name=stats_policy_name,
                              cdn_source=cdn_source,
                              admin_cdn_name=effective_cdn_name,
                              switch_id=switch_id,
                              pin_to_group_name=pin_to_group_name,
                              vnic_name=vnic_name,
                              qos_policy_name=qos_policy_name,
                              adaptor_profile_name=adaptor_profile_name,
                              ident_pool_name=ident_pool_name,
                              order=order,
                              nw_templ_name=nw_templ_name)
    iscsi_vnic.set_prop_multiple(**kwargs)
    # Attach the VLAN child object before committing the parent.
    VnicVlan(parent_mo_or_dn=iscsi_vnic, name="", vlan_name=vlan_name)
    handle.add_mo(iscsi_vnic)
    handle.commit()
    return iscsi_vnic
def fixture_ecomax_with_data(ecomax: EcoMAX) -> EcoMAX:
    """Return ecoMAX instance with test data.

    Populates the passed-in instance in place: sets a stub product info,
    then loads the module-level test data and test parameters.
    """
    ecomax.product = ProductInfo(model="test_model")
    ecomax.set_data(_test_data)
    ecomax.set_parameters(_test_parameters)
    return ecomax
def _get_cached_setup(setup_id):
    """Load a setup from the local cache.

    Parameters
    ----------
    setup_id : int
        Id of the setup whose cached ``description.xml`` should be read.

    Returns
    -------
    The setup object built from the cached XML description.

    Raises
    ------
    openml.exceptions.OpenMLCacheException
        If the description file is missing or unreadable.
    """
    cache_dir = config.get_cache_directory()
    setup_cache_dir = os.path.join(cache_dir, "setups", str(setup_id))
    setup_file = os.path.join(setup_cache_dir, "description.xml")
    try:
        # io.open and IOError are Python-2 relics: open() is the same
        # function and IOError is an alias of OSError (PEP 3151).
        with open(setup_file, encoding='utf8') as fh:
            setup_xml = xmltodict.parse(fh.read())
            setup = _create_setup_from_xml(setup_xml)
        return setup
    except OSError as err:
        # Chain the original error so the cache miss is debuggable.
        raise openml.exceptions.OpenMLCacheException(
            "Setup file for setup id %d not cached" % setup_id) from err
async def request_get_stub(url: str, stub_for: str, status_code: int = 200):
    """Returns an object with stub response.

    Args:
        url (str): A request URL (not used by the stub; kept so the
            signature matches the real request function).
        stub_for (str): Type of stub required.
        status_code (int): HTTP status code for the stub response.
            Defaults to 200.

    Returns:
        StubResponse: A StubResponse object.
    """
    return StubResponse(stub_for=stub_for, status_code=status_code)
def single_gpu_test(model, data_loader, rescale=True, show=False, out_dir=None):
    """Test with single GPU.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        rescale (bool): Whether to pass ``rescale`` to the model's forward
            call. Default: True.
        show (bool): Whether show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped
            into the directory to save output results.

    Returns:
        list: The prediction results. If any batch carried
        'gt_semantic_seg', a two-element list ``[results, seg_targets]``
        is returned instead.
    """
    model.eval()
    results = []
    seg_targets = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        # Pop ground-truth masks (if present) so they are not fed to the
        # model; collect them for the caller.
        if 'gt_semantic_seg' in data:
            target = data.pop('gt_semantic_seg')
            for gt in target:
                gt = gt.cpu().numpy()[0]  # 1*h*w ==> h*w
                seg_targets.append(gt)
        with torch.no_grad():
            result = model(return_loss=False, rescale=rescale, **data)
        if isinstance(result, list):
            results.extend(result)
        else:
            results.append(result)
        if show or out_dir:
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            # Undo input normalization so images can be shown/saved.
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)
            for img, img_meta in zip(imgs, img_metas):
                # Crop padding, then resize back to the original shape.
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]
                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))
                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None
                model.module.show_result(
                    img_show,
                    result,
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file)
        # Advance the progress bar once per sample in the batch.
        batch_size = data['img'][0].size(0)
        for _ in range(batch_size):
            prog_bar.update()
    if seg_targets:
        return [results, seg_targets]
    return results
def get_dump_time(f):
    """Write the date and time of the environment dump to stream *f*.

    Emits a header line followed by the current timestamp, each
    newline-terminated.
    """
    timestamp = datetime.datetime.now()
    f.write('Date of environment dump: \n')
    f.write('%s\n' % timestamp)
def readAllCarts():
    """
    Return every cart entry, ordered by cart key.

    :return: list of values from the module-level CART mapping, sorted
        by their keys
    """
    ordered_keys = sorted(CART)
    return [CART[k] for k in ordered_keys]
def build_encapsulated_packet(select_test_interface, ptfadapter, tor, tunnel_traffic_monitor):
    """Build the encapsulated packet sent from T1 to ToR.

    Crafts an IPv4-in-IPv4 packet: the outer header goes from the peer
    ToR's address to this ToR's Loopback0 address, and the inner frame
    targets the selected server. The outer DSCP copies the (randomized)
    inner DSCP.

    Raises:
        ValueError: if no peer ToR address is found in CONFIG_DB.
    """
    _, server_ipv4 = select_test_interface
    config_facts = tor.get_running_config_facts()
    try:
        peer_ipv4_address = [_["address_ipv4"] for _ in config_facts["PEER_SWITCH"].values()][0]
    except IndexError:
        raise ValueError("Failed to get peer ToR address from CONFIG_DB")
    # Pick the IPv4 prefix from Loopback0 (the list may mix v4/v6) and
    # strip the prefix length.
    tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]
                        if is_ipv4_address(_.split("/")[0])][0]
    tor_ipv4_address = tor_ipv4_address.split("/")[0]
    # Randomize inner DSCP (0..32) and TTL (3..64) -- bounds appear to be
    # the test plan's choice; confirm the intended inclusive ranges.
    inner_dscp = random.choice(range(0, 33))
    inner_ttl = random.choice(range(3, 65))
    inner_packet = testutils.simple_ip_packet(
        ip_src="1.1.1.1",
        ip_dst=server_ipv4,
        ip_dscp=inner_dscp,
        ip_ttl=inner_ttl
    )[IP]
    # Outer header reuses the inner DSCP and a fresh TTL of 255.
    packet = testutils.simple_ipv4ip_packet(
        eth_dst=tor.facts["router_mac"],
        eth_src=ptfadapter.dataplane.get_mac(0, 0),
        ip_src=peer_ipv4_address,
        ip_dst=tor_ipv4_address,
        ip_dscp=inner_dscp,
        ip_ttl=255,
        inner_frame=inner_packet
    )
    logging.info("the encapsulated packet to send:\n%s", tunnel_traffic_monitor._dump_show_str(packet))
    return packet
def test_ternary_auto_po2():
    """Test ternary auto_po2 scale quantizer.

    The auto_po2 scale must equal the plain auto scale rounded to the
    nearest power of two, across several input magnitudes.
    """
    np.random.seed(42)
    n_samples = 1000000
    for magnitude in [1.0, 0.1, 0.01, 0.001]:
        samples = np.random.uniform(
            -magnitude, magnitude, (n_samples, 10)).astype(K.floatx())
        tensor = K.constant(samples)
        q_auto = ternary(alpha="auto")
        q_po2 = ternary(alpha="auto_po2")
        out_auto = K.eval(q_auto(tensor))
        out_po2 = K.eval(q_po2(tensor))
        scale_auto = get_weight_scale(q_auto, out_auto)
        expected = np.power(2.0, np.round(np.log2(scale_auto)))
        scale_po2 = get_weight_scale(q_po2, out_po2)
        assert_allclose(scale_po2, expected, rtol=0.0001)
def index():
    """
    Renders the index page.

    :return: the rendered ``index.html`` template
    """
    return render_template("index.html")
def get_send_idp_location():
    """Queries GPS NMEA strings from the modem and submits to a send/processing routine.

    Retries the modem query up to MAX_ATTEMPTS times, parses any
    retrieved sentences into a Location and sends it; logs a warning
    when all attempts fail.
    """
    global log
    global modem
    global tracking_interval
    MAX_ATTEMPTS = 3
    location = Location()
    log.debug("Requesting location to send")
    retrieved = False
    sentences = []
    for _attempt in range(MAX_ATTEMPTS):
        retrieved, sentences = modem.at_get_nmea(refresh=tracking_interval)
        # Pace the retries (the delay also follows a successful query,
        # matching the historical behavior).
        time.sleep(3)
        if retrieved:
            break
    if retrieved:
        for sentence in sentences:
            ok, parse_err = parse_nmea_to_location(sentence, location)
            if not ok:
                log.warning("NMEA sentence parsing failed (%s)" % parse_err)
        send_idp_location(location)
        if tracking_interval > 0:
            log.debug("Next location report in ~%d seconds" % tracking_interval)
    else:
        log.warning("Timed out %d attempts to query GNSS" % MAX_ATTEMPTS)
    return
def condense_colors(svg):
    """Condense colors by using hexadecimal abbreviations where possible.

    Generalizes the previous hard-coded substitution table: any 6-digit
    lowercase hex color whose three channel bytes each repeat a single
    digit (e.g. ``#aabbcc``) is collapsed to the 3-digit form (``#abc``),
    and ``#808080`` is replaced by the SVG keyword ``grey``. All colors
    handled by the old table are still condensed, plus every other
    condensable color.

    :param svg: SVG document text
    :return: SVG text with condensed color notation
    """
    # #808080 has no doubled digits, so map it to its keyword first.
    svg = svg.replace('#808080', 'grey')
    # Collapse #rrggbb -> #rgb whenever each channel doubles one digit.
    return re.sub(r'#([0-9a-f])\1([0-9a-f])\2([0-9a-f])\3', r'#\1\2\3', svg)
def install_pytest_confirmation():
    """Build the colored prompt asking whether pytest should be installed."""
    prompt = ' Do you want to install pytest? '
    return '{}{}{}'.format(fg(2), prompt, attr(0))
def dense_nopack(cfg, data, weight, bias=None, out_dtype=None):
    """Compute dense without packing, offloaded to the PYNQ extern kernel.

    Parameters
    ----------
    cfg : autotvm config
        Unused by the extern path; kept for the compute signature.
    data : te.Tensor
        Input of shape (M, K).
    weight : te.Tensor
        Weight of shape (N, K).
    bias : te.Tensor, optional
        Bias of shape (N,); added to the matmul result when given.
    out_dtype : str, optional
        Output dtype; defaults to ``data.dtype``.

    Returns
    -------
    te.Tensor
        Result of shape (M, N).
    """
    debug = True
    if debug:
        print("bias", bias)
        print("data_dtype", data.dtype)
        print("weight_dtype", weight.dtype)
        print("out_dtype", out_dtype)
    if out_dtype is None:
        out_dtype = data.dtype
    M, K = get_const_tuple(data.shape)
    N, _ = get_const_tuple(weight.shape)
    if debug:
        print("data", M, K)
        print("weight", N, _)
    # Offload the (M,K) x (N,K)^T matmul to the external PYNQ kernel.
    CC = te.extern(
        (M, N),
        [data, weight],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.xilinx_matmul_pynq", ins[0], ins[1], outs[0]),
        dtype=out_dtype,
        name="matmul_pynq",
    )
    # BUG FIX: the original fell through to `return C` even when bias was
    # None (NameError, since C was only bound inside the if) and left an
    # unreachable `return CC` after it. The bias add is now applied only
    # when a bias tensor is supplied. Also removed an unused `out`
    # placeholder and the dead commented-out tuning-space code.
    if bias is not None:
        return te.compute(
            (M, N), lambda i, j: CC[i, j] + bias[j].astype(out_dtype))
    return CC
def rgb(r=0, g=0, b=0, mode='RGB'):
    """
    Format red/green/blue components as an SVG color string.

    :param r: red part
    :param g: green part
    :param b: blue part
    :param string mode: ``'RGB'`` yields ``'rgb(r, g, b)'`` with each
        component masked to 0..255; ``'%'`` yields ``'rgb(r%, g%, b%)'``
        with each component clamped to the integer range 0..100.
    :rtype: string
    :raises ValueError: for any other *mode*
    """
    def clamp_percent(component):
        # see http://www.w3.org/TR/SVG11/types.html#DataTypeColor
        # percentage is an 'integer' value
        return min(100, max(0, int(component)))

    if mode.upper() == 'RGB':
        channels = (int(r) & 255, int(g) & 255, int(b) & 255)
        return "rgb(%d,%d,%d)" % channels
    if mode == "%":
        return "rgb(%d%%,%d%%,%d%%)" % (clamp_percent(r),
                                        clamp_percent(g),
                                        clamp_percent(b))
    raise ValueError("Invalid mode '%s'" % mode)
def plot_dist_weights_pseudomonas(
    LV_id, LV_matrix, shared_genes, num_genes, gene_id_mapping, out_filename
):
    """
    This function creates a distribution of weights for selected
    `LV_id`. This allows us to explore the contribution of genes
    to this LV.
    Here we are looking at only those HW (high-weight) genes identified
    using 2.5 standard deviations from the mean weight at the `LV_id`.
    The figure is saved as SVG to `out_filename`.
    Arguments
    ----------
    LV_id: str
        identifier for LV
    LV_matrix: df
        gene x LV matrix with weight values
    shared_genes: list
        list of genes that are shared by the multiPLIER or eADAGE analysis
        (so they have LV weight information) and SOPHIE analysis (so they have
        generic label). NOTE(review): currently unused in the body --
        confirm whether filtering by it was intended.
    num_genes: int
        Number of genes to display
    gene_id_mapping: df
        dataframe containing mapping between genes and "generic" or "other"
        label
    out_filename: str
        file to save plot to
    """
    # Get weight for LV_id
    LV_id_weight = LV_matrix[LV_id]
    # Calculate thresholds: mean +/- 2.5 standard deviations
    eADAGE_std_cutoff = 2.5
    mean_weight = LV_id_weight.mean()
    std_weight = LV_id_weight.std() * eADAGE_std_cutoff
    upper_threshold = mean_weight + std_weight
    lower_threshold = mean_weight - std_weight
    # Get high weight genes on either tail
    HW_pos_genes = list(LV_id_weight[(LV_id_weight > upper_threshold).values].index)
    HW_neg_genes = list(LV_id_weight[(LV_id_weight < lower_threshold).values].index)
    HW_genes = HW_pos_genes + HW_neg_genes
    # Sort HW genes by abs weight and keep the top num_genes
    sorted_HW_genes = list(
        LV_id_weight[HW_genes].abs().sort_values(ascending=False).index
    )[0:num_genes]
    # Get gene with num_gene top weights
    LV_matrix.index.rename("geneID", inplace=True)
    weight_df = LV_matrix.loc[sorted_HW_genes, LV_id].reset_index()
    # Debug output of the genes being plotted
    print(weight_df)
    # Add label for if generic or not
    gene_ids = list(weight_df["geneID"].values)
    weight_df["gene type"] = list(gene_id_mapping.loc[gene_ids, "gene type"].values)
    # Horizontal bar plot; generic genes highlighted in blue
    fig = sns.barplot(
        data=weight_df,
        x=LV_id,
        y="geneID",
        hue="gene type",
        hue_order=["generic", "other"],
        dodge=False,
        palette=["#2c7fb8", "lightgrey"],
    )
    fig.set_xlabel("Weight", fontsize=14, fontname="Verdana")
    fig.set_ylabel("Gene", fontsize=14, fontname="Verdana")
    fig.set_title(f"Weight distribution for {LV_id}", fontsize=14, fontname="Verdana")
    fig.figure.savefig(
        out_filename,
        format="svg",
        bbox_inches="tight",
        transparent=True,
        pad_inches=0,
        dpi=300,
    )
def load_csv_translations(fname, pfx=''):
    """
    Load translations from a tab-delimited file. Add prefix
    to the keys. Return a dictionary.

    Lines shorter than 3 characters or without exactly one tab are
    skipped.
    """
    result = {}
    with open(fname, 'r', encoding='utf-8-sig') as src:
        for raw_line in src:
            entry = raw_line.strip('\r\n ')
            if len(entry) > 2 and entry.count('\t') == 1:
                key, value = entry.split('\t')
                result[pfx + key] = value
    return result
def compute_bleu_rouge(pred_dict, ref_dict, bleu_order=4):
    """
    Compute bleu and rouge scores, plus F1, exact-match and the score
    produced by ``compute_meter_score``.

    Args:
        pred_dict: mapping from sample key to a list of prediction strings
            (only the first entry per key is used for f1/exact/meter).
        ref_dict: mapping from the same keys to a list of references.
        bleu_order (int): maximum n-gram order for BLEU. Default: 4.

    Returns:
        dict with keys 'Bleu-1'..'Bleu-N', 'Rouge-L', 'f1', 'exact',
        'meter'.
    """
    # Both dicts must cover exactly the same sample keys.
    assert set(pred_dict.keys()) == set(ref_dict.keys()), \
        "missing keys: {}".format(set(ref_dict.keys()) - set(pred_dict.keys()))
    scores = {}
    bleu_scores, _ = Bleu(bleu_order).compute_score(ref_dict, pred_dict)
    for i, bleu_score in enumerate(bleu_scores):
        scores['Bleu-%d' % (i + 1)] = bleu_score
    rouge_score, _ = Rouge().compute_score(ref_dict, pred_dict)
    scores['Rouge-L'] = rouge_score
    f1_exact = f1_exact_eval()
    # Flatten to parallel lists, taking the first hypothesis/reference
    # per key.
    pred_list, ref_list = [], []
    for k in pred_dict.keys():
        pred_list.append(pred_dict[k][0])
        ref_list.append(ref_dict[k][0])
    f1_score, exact_score = f1_exact.compute_scores(pred_list, ref_list)
    meter_score = compute_meter_score(pred_list, ref_list)
    scores['f1'] = f1_score
    scores['exact'] = exact_score
    scores['meter'] = meter_score
    return scores
def create_simulated_data(mf_simulator, data_dir, n_sample=1):
    """
    Create FITS-files with simulated data.

    :param mf_simulator: simulator exposing ``freqs``, ``simulate()`` and
        ``save_fits(mapping)``.
    :param data_dir: directory to change into before writing the files.
    :param n_sample: number of samples to create (default 1). Coerced to
        int, so the historical float default no longer breaks ``range``.
    """
    os.chdir(data_dir)
    # Base mapping from frequency to FITS file name.
    fnames_dict = {freq: '{}_sim.uvf'.format(freq)
                   for freq in mf_simulator.freqs}
    # BUG FIX: converted the Python-2 `print` statement to the print
    # function and made n_sample an int (range() rejects floats).
    for i in range(int(n_sample)):
        print("Creating sample {}-th of {}".format(i + 1, n_sample))
        # Suffix each base name with the zero-padded sample index.
        fnames_dict_i = {freq: '{}_{}'.format(name, str(i + 1).zfill(3))
                         for freq, name in fnames_dict.items()}
        mf_simulator.simulate()
        mf_simulator.save_fits(fnames_dict_i)
def check_upload_details(study_id=None, patient_id=None):
    """ Get patient data upload details.

    Validates that the patient exists and belongs to the given study,
    builds the list of calendar dates from the first upload to today,
    and renders the upload-details template.
    """
    participant_set = Participant.objects.filter(patient_id=patient_id)
    if not participant_set.exists() or str(participant_set.values_list('study', flat=True).get()) != study_id:
        # BUG FIX: the error Response was previously constructed but never
        # returned, so execution fell through and crashed further down.
        return Response('Error: failed to get upload details for Patient %s'%patient_id, mimetype='text/plain')
    user = participant_set.get()
    upinfo = user.get_upload_info()
    sorted_dates = sorted(upinfo.keys())
    # Default to just today's date; expand to the full range when the
    # participant has upload history.
    dates = [str(datetime.now())[:10]]
    if sorted_dates:
        first_date = datetime.strptime(sorted_dates[0], '%Y-%m-%d')
        today_date = datetime.strptime(dates[0], '%Y-%m-%d')
        day = first_date
        dates = []
        while day <= today_date:
            dates += [str(day)[:10]]
            day += timedelta(days=1)
    dev_settings = user.study.device_settings.as_dict()
    # Color each checkable file type: red/black when enabled in the device
    # settings (black when empty files are allowed), lightgray otherwise.
    checkable_states = [[f, ('black' if f in ALLOW_EMPTY_FILES else 'red') if dst else 'lightgray']
                        for f in CHECKABLE_FILES for dst in [dev_settings.get(UPLOAD_FILE_TYPE_MAPPING[f], False)]]
    return render_template(
        'upload_details.html',
        dates=dates,
        upinfo=upinfo,
        checkables=checkable_states,
        patient=user
    )
def write(project_id, content):
    """Write project info.

    Serializes *content* as JSON into ``<files_path>/<project_id>.json``.
    Uses a context manager so the file handle is closed and buffers are
    flushed even if serialization fails (the original leaked the handle).
    """
    with open(f'{files_path}/{project_id}.json', 'w') as fh:
        fh.write(json.dumps(content))
def test_run_external_command_success(tmp_path):
    """
    A successful external process exposes its captured stdout/stderr.
    """
    script_path = tmp_path / 'script.ps1'
    script_path.write_text(_cmd_success)
    command = ('powershell', '-executionpolicy', 'Bypass', '-File', str(script_path))
    proc = run_external_command(command)
    assert proc.returncode == 0
    assert proc.stdout.strip() == 'output'
    assert proc.stderr.strip() == ''
def crc32c_rev(name):
    """Compute the reversed CRC32C of the given function name.

    Bitwise LSB-first CRC over the name's character codes, using the
    module-level reversed polynomial CRC32_REV_POLYNOM.
    """
    crc = 0
    for ch in name:
        crc ^= ord(ch)
        for _ in range(8):
            lsb_was_set = crc & 1
            crc >>= 1
            if lsb_was_set:
                crc ^= CRC32_REV_POLYNOM
    return crc
def sils_cut(T,f,c,d,h):
    """solve_sils -- solve the lot sizing problem with cutting planes
       - start with a relaxed model
       - add cuts until there are no fractional setup variables
    Parameters:
        - T: number of periods
        - f[t]: set-up costs (on period t)
        - c[t]: variable costs
        - d[t]: demand values
        - h[t]: holding costs
    Returns the final model solved, with all necessary cuts added.
    """
    Ts = range(1,T+1)
    model = sils(T,f,c,d,h)
    y,x,I = model.data
    # relax integer variables
    for t in Ts:
        y[t].vtype = "C"
    # compute D[i,j] = sum_{t=i}^j d[t] (cumulative demand per interval)
    D = {}
    for t in Ts:
        s = 0
        for j in range(t,T+1):
            s += d[j]
            D[t,j] = s
    EPS = 1.e-6  # tolerance; currently unused in the cut test below
    cuts = True
    # Cutting-plane loop: re-optimize, then scan each period ell for a
    # violated valid inequality; stop when no cut was added.
    while cuts:
        model.optimize()
        cuts = False
        for ell in Ts:
            lhs = 0
            # S collects periods where the setup term is the smaller side,
            # L the rest; lhs accumulates the minimum of the two terms.
            S,L = [],[]
            for t in range(1,ell+1):
                yt = model.getVal(y[t])
                xt = model.getVal(x[t])
                if D[t,ell]*yt < xt:
                    S.append(t)
                    lhs += D[t,ell]*yt
                else:
                    L.append(t)
                    lhs += xt
            if lhs < D[1,ell]:
                # add cutting plane constraint
                model.addCons(quicksum([x[t] for t in L]) +\
                              quicksum(D[t,ell] * y[t] for t in S)
                              >= D[1,ell])
                cuts = True
    model.data = y,x,I
    return model
def progressive_fixed_point(func, start, init_disc, final_disc, ratio=2):
    """Progressive fixed point calculation.

    Solves the fixed point at successively finer discretizations (each
    multiplied by *ratio*), seeding every pass with the previous result,
    until the discretization exceeds ``final_disc * ratio``.
    """
    disc = init_disc
    estimate = start
    while disc <= final_disc * ratio:
        estimate = fixedpoint.fixed_point(func, estimate, disc=disc)
        disc *= ratio
    return estimate
async def create_new_game(redis: Redis = Depends(redis.wrapper.get)):
    """Create a new game, store it under a fresh random id and return its state."""
    game = get_new_game()
    handle_score(game)
    state = game_to_dict(game)
    new_id = token_urlsafe(32)
    await redis.set(new_id, json.dumps(state))
    return GameState(gameId=new_id, **handle_hidden_cards(state))
def _write_reaction_lines(reactions, species_delimiter, reaction_delimiter,
                          include_TS, stoich_format, act_method_name,
                          ads_act_method, act_unit, float_format,
                          column_delimiter, sden_operation,
                          **kwargs):
    """Write the reaction lines in the Chemkin format
    Parameters
    ----------
    reactions : list of :class:`~pmutt.reaction.ChemkinReaction` objects
        Chemkin reactions to write in surf.inp file
    species_delimiter : str
        Delimiter to separate species when writing reactions
    reaction_delimiter : str
        Delimiter to separate reaction sides
    include_TS : bool
        Whether transition-state species are included in the reaction
        strings
    stoich_format : str
        String format to print stoichiometric coefficients
    act_method_name : str
        Name of method to use to calculate activation function
    ads_act_method : str
        Name of the activation-energy method used for adsorption
        reactions
    act_unit : str
        Units to calculate activation energy
    float_format : str
        String format to print floating numbers
    column_delimiter : str
        Delimiter to separate columns
    sden_operation : str
        Site-density operation forwarded to ``reaction.get_A``
    kwargs : keyword arguments
        Parameters needed to calculate activation energy and preexponential
        factor
    Returns
    -------
    reaction_lines : list of str
        Reactions represented in Chemkin format, one entry per reaction
    """
    # Pad every reaction string to the widest one so columns align.
    max_reaction_len = _get_max_reaction_len(
        reactions=reactions,
        species_delimiter=species_delimiter,
        reaction_delimiter=reaction_delimiter,
        stoich_format=stoich_format,
        include_TS=include_TS)
    float_field = '{:%s}' % float_format
    reaction_lines = []
    for reaction in reactions:
        # Get reaction string
        reaction_str = reaction.to_string(
            species_delimiter=species_delimiter,
            reaction_delimiter=reaction_delimiter,
            stoich_format=stoich_format,
            include_TS=include_TS).ljust(max_reaction_len)
        # Calculate preexponential factor and determine activation energy method
        if reaction.is_adsorption:
            # Adsorption reactions use the sticking coefficient as A and
            # their dedicated activation-energy method.
            A = reaction.sticking_coeff
            Ea_method = getattr(reaction, ads_act_method)
        else:
            # If using delta_G, take out entropic contribution in A
            if act_method_name in ('get_GoRT_act', 'get_G_act',
                                   'get_delta_GoRT', 'get_delta_G'):
                include_entropy = False
            else:
                include_entropy = True
            A = reaction.get_A(include_entropy=include_entropy,
                               sden_operation=sden_operation,
                               **kwargs)
            Ea_method = getattr(reaction, act_method_name)
            # NOTE(review): kwargs is mutated in place here (and for
            # 'units' below); the flags persist across loop iterations --
            # confirm this carry-over is intended.
            if act_method_name != 'get_EoRT_act' and \
               act_method_name != 'get_E_act':
                kwargs['activation'] = True
        A_str = float_field.format(A)
        # Format beta value
        beta_str = float_field.format(reaction.beta)
        # Calculate activation energy
        kwargs['units'] = act_unit
        try:
            Ea = _force_pass_arguments(Ea_method, **kwargs)
        except AttributeError:
            # Method cannot produce an activation energy; fall back to 0.
            Ea = 0.
        Ea_str = float_field.format(Ea)
        reaction_line = '{0}{4}{1}{4}{2}{4}{3}'.format(reaction_str, A_str,
                                                       beta_str, Ea_str,
                                                       column_delimiter)
        if reaction.is_adsorption:
            # Chemkin marks sticking-coefficient reactions with STICK.
            reaction_line = '{}\nSTICK'.format(reaction_line)
        reaction_lines.append(reaction_line)
    return reaction_lines
def test_apijson_get():
"""
>>> application = make_simple_application(project_dir='.')
>>> handler = application.handler()
>>> #bad json
>>> data ='''{
... ,,,
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': 'not json data in the request'}
>>> #query self user
>>> data ='''{
... "user":{
... "@role":"OWNER"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'is_superuser': True, 'last_login': None, 'date_join': '2018-01-01 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 1}}
>>> #query with id
>>> data ='''{
... "user":{
... "@role":"ADMIN",
... "id": 2
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'user': {'username': 'usera', 'nickname': 'User A', 'email': 'usera@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-02-02 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 2}}
>>> #query with @column
>>> data ='''{
... "user":{
... "@role":"OWNER",
... "@column": "id,username,email,nickname,is_superuser"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'is_superuser': True, 'id': 1}}
>>> #query with @column which have a non existing column name
>>> data ='''{
... "user":{
... "@role":"OWNER",
... "@column": "id,username,email,nickname,is_superuser,nonexisting"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'is_superuser': True, 'id': 1}}
>>> #query with a non existing column property
>>> data ='''{
... "user":{
... "nonexisting": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'user' have no attribute 'nonexisting'"}
>>> #query one with a non existing model
>>> data ='''{
... "nonexist":{
... "id": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "model 'nonexist' not found"}
>>> #query one with a non expose model
>>> data ='''{
... "role":{
... "id": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'role' not accessible"}
>>> #query one with UNKNOWN role (expected ok)
>>> data ='''{
... "moment":{
... "id": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}
>>> #query one with UNKNOWN role (expected fail)
>>> data ='''{
... "privacy":{
... "id": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'privacy' not accessible by role 'UNKNOWN'"}
>>> #query one without user but use a non-UNKNOWN role
>>> data ='''{
... "publicnotice":{
... "@role":"OWNER",
... "id": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "no login user for role 'OWNER'"}
>>> #query one with OWNER but cannot filter with OWNER
>>> data ='''{
... "publicnotice":{
... "@role":"OWNER",
... "id": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'publicnotice' cannot filter with owner"}
>>> #query one with OWNER which will use owner_condition() to filter
>>> data ='''{
... "moment":{
... "@role":"OWNER"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
Moment: owner_condition
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}
>>> #query one with UNKNOWN
>>> data ='''{
... "publicnotice":{
... "id": 1
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'publicnotice': {'date': '2018-12-09 00:00:00', 'content': 'notice: a', 'id': 1}}
>>> #query array with a non expose model
>>> data ='''{
... "[]":{
... "role": {"@role":"ADMIN"}
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'role' not accessible by apijson"}
>>> #query array with a non existing model
>>> data ='''{
... "[]":{
... "nonexisting": {"@role":"ADMIN"}
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "model 'nonexisting' not found"}
>>> #query array with a non existing role
>>> data ='''{
... "[]":{
... "user": {"@role":"NONEXISTING"}
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'user' not accessible by role 'NONEXISTING'"}
>>> #query array with a role user don't have
>>> data ='''{
... "[]":{
... "user": {"@role":"ADMIN"}
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "user doesn't have role 'ADMIN'"}
>>> #query array with no permission
>>> data ='''{
... "[]":{
... "user": {"@role":"superuser"}
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'user' not accessible by role 'superuser'"}
>>> #query array
>>> data ='''{
... "[]":{
... "user": {"@role":"ADMIN"}
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'is_superuser': True, 'last_login': None, 'date_join': '2018-01-01 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 1}}, {'user': {'username': 'usera', 'nickname': 'User A', 'email': 'usera@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-02-02 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 2}}, {'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-03-03 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 3}}, {'user': {'username': 'userc', 'nickname': 'User C', 'email': 'userc@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-04-04 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 4}}]}
>>> #query array
>>> data ='''{
... "[]":{
... "user": {
... "@role":"ADMIN",
... "@column":"id,username,nickname,email"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'id': 1}}, {'user': {'username': 'usera', 'nickname': 'User A', 'email': 'usera@localhost', 'id': 2}}, {'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'id': 3}}, {'user': {'username': 'userc', 'nickname': 'User C', 'email': 'userc@localhost', 'id': 4}}]}
>>> #query array with non existing role
>>> data ='''{
... "[]":{
... "user": {
... "@role":"NONEXISTING",
... "@column":"id,username,nickname,email"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'user' not accessible by role 'NONEXISTING'"}
>>> #query array with UNKNOWN
>>> data ='''{
... "[]":{
... "user": {
... "@column":"id,username,nickname,email"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'user' not accessible by role 'UNKNOWN'"}
>>> #query array without login user
>>> data ='''{
... "[]":{
... "user": {
... "@role":"ADMIN",
... "@column":"id,username,nickname,email"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "no login user for role 'ADMIN'"}
>>> #query array with a role which the user doesn't really have
>>> data ='''{
... "[]":{
... "user": {
... "@role":"ADMIN",
... "@column":"id,username,nickname,email"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "user doesn't have role 'ADMIN'"}
>>> #query array with @count
>>> data ='''{
... "[]":{
... "@count":3,
... "user": {
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'is_superuser': True, 'last_login': None, 'date_join': '2018-01-01 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 1}}, {'user': {'username': 'usera', 'nickname': 'User A', 'email': 'usera@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-02-02 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 2}}, {'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-03-03 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 3}}]}
>>> #query array ,@count is bad param
>>> data ='''{
... "[]":{
... "@count":"bad",
... "user": {
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "@count should be an int, but get 'bad'"}
>>> #query array with @count and @page
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":1,
... "user": {
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-03-03 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 3}}, {'user': {'username': 'userc', 'nickname': 'User C', 'email': 'userc@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-04-04 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 4}}]}
>>> #query array with @count and @page, @page bad param
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":"bad",
... "user": {
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "@page should be an int, but get 'bad'"}
>>> #query array with @count and @page, @page <0
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":-2,
... "user": {
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "page should >0, but get '-2'"}
>>> #query array with @count/@page/@query, @query bad param
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":1,
... "@query":3,
... "user": {
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "bad param 'query': 3"}
>>> #query array with @count/@page/@query, @query = 0
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":1,
... "@query":0,
... "user": {
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-03-03 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 3}}, {'user': {'username': 'userc', 'nickname': 'User C', 'email': 'userc@localhost', 'is_superuser': False, 'last_login': None, 'date_join': '2018-04-04 00:00:00', 'image': '', 'active': False, 'locked': False, 'deleted': False, 'auth_type': 'default', 'timezone': '', 'id': 4}}]}
>>> #query array with OWNER but cannot filter with OWNER
>>> data ='''{
... "[]":{
... "publicnotice": {
... "@role":"OWNER"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'publicnotice' cannot filter with owner"}
>>> #query array with OWNER
>>> data ='''{
... "[]":{
... "comment": {
... "@role":"OWNER"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'comment': {'user_id': 1, 'to_id': 3, 'moment_id': 1, 'date': '2018-11-01 00:00:00', 'content': 'comment from admin', 'id': 1}}]}
>>> #query array with OWNER, the model using owner_condition
>>> data ='''{
... "[]":{
... "moment": {
... "@role":"OWNER"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("usera"), middlewares=[])
Moment: owner_condition
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}]}
>>> #query array with some filter column
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "username":"admin"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'id': 1}}]}
>>> #query array with reference, @query = 1
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":0,
... "@query":1,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN"
... }
... },
... "total@":"/[]/total"
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'total': 4}
>>> #query array with reference, @query = 2
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":0,
... "@query":2,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN"
... }
... },
... "total@":"/[]/total"
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'userc', 'nickname': 'User C', 'email': 'userc@localhost', 'id': 4}}, {'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'id': 3}}], 'total': 4}
>>> #query array with @order +
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":0,
... "@query":2,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id+",
... "@role":"ADMIN"
... }
... },
... "total@":"/[]/total"
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'email': 'admin@localhost', 'id': 1}}, {'user': {'username': 'usera', 'nickname': 'User A', 'email': 'usera@localhost', 'id': 2}}], 'total': 4}
>>> #query array with @order having a non existing column
>>> data ='''{
... "[]":{
... "@count":2,
... "@page":0,
... "@query":2,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"nonexist+",
... "@role":"ADMIN"
... }
... },
... "total@":"/[]/total"
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'user' doesn't have column 'nonexist'"}
>>> #query array with @expr
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":["username$","|","nickname$"],
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'userc', 'nickname': 'User C', 'email': 'userc@localhost', 'id': 4}}, {'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'id': 3}}]}
>>> #query array with @expr, bad param which is not list
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":{},
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "only accept array in @expr, but get 'OrderedDict()'"}
>>> #query array with @expr, bad param which is an empty list
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":[],
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "only accept 2 or 3 items in @expr, but get '[]'"}
>>> #query array with @expr, bad param which is >3 items list
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":["username$","|","username$","|","nickname$"],
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "only accept 2 or 3 items in @expr, but get '['username$', '|', 'username$', '|', 'nickname$']'"}
>>> #query array with @expr, bad param which have bad operator
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":["username$","*","nickname$"],
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "unknown operator: '*'"}
>>> #query array with @expr, bad expr: & only 1 parameter
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":["&","nickname$"],
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'&'(and) expression need 3 items, but get '['&', 'nickname$']'"}
>>> #query array with @expr, bad expr: | only 1 parameter
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":["|","nickname$"],
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'|'(or) expression need 3 items, but get '['|', 'nickname$']'"}
>>> #query array with @expr, bad expr: | only 1 parameter
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "@expr":["username$","!","nickname$"],
... "username$":"%b%",
... "nickname$":"%c%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'!'(not) expression need 2 items, but get '['username$', '!', 'nickname$']'"}
>>> #query array with like
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "username$":"%b%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'userb', 'nickname': 'User B', 'email': 'userb@localhost', 'id': 3}}]}
>>> #query array with like, but gave a nonexist column
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "nonexist$":"%b%"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "model does not have column: 'nonexist'"}
>>> #query array with a nonexist column
>>> data ='''{
... "[]":{
... "@count":4,
... "@page":0,
... "user":{
... "@column":"id,username,nickname,email",
... "@order":"id-",
... "@role":"ADMIN",
... "nonexist":1
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "non-existent column or not support item: 'nonexist'"}
>>> #query array, {} with list
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": [1, 2]
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}, {'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}]}
>>> #query array, !{} with list
>>> data ='''{
... "[]":{
... "moment": {
... "id!{}": [1, 2]
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}]}
>>> #query array, {} with a non-exist column name
>>> data ='''{
... "[]":{
... "moment": {
... "nonexist{}": [1, 2]
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "model does not have column: 'nonexist'"}
>>> #query array, {} >=
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": ">=2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}, {'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}]}
>>> #query array, {} =
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": "=2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}]}
>>> #query array, {} >
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": ">2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}]}
>>> #query array, {} <=
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": "<=2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}, {'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}]}
>>> #query array, {} <
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": "<2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}]}
>>> #query array, {} <= with datetime
>>> data ='''{
... "[]":{
... "moment": {
... "date{}": "<='2018-11-02 00:00'"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}, {'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}]}
>>> #query array, {} >= with datetime
>>> data ='''{
... "[]":{
... "moment": {
... "date{}": ">='2018-11-02 00:00'"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}, {'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}]}
>>> #query array, {} >= with a invalid datetime
>>> data ='''{
... "[]":{
... "moment": {
... "date{}": ">='2018-11-42 00:00'"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "''2018-11-42 00:00'' cannot convert to datetime"}
>>> #query array, !{} <
>>> data ='''{
... "[]":{
... "moment": {
... "id!{}": "<2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}, {'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}]}
>>> #query array, {} !=
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": "!=2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}, {'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}]}
>>> #query array, {} with wrong operator
>>> data ='''{
... "[]":{
... "moment": {
... "id{}": "%=2"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "not support '%=2'"}
>>> #query array, {} condition list
>>> data ='''{
... "[]":{
... "user": {
... "@role": "ADMIN",
... "id{}": "<=2,>3",
... "@column": "username,nickname,id"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'id': 1}}, {'user': {'username': 'usera', 'nickname': 'User A', 'id': 2}}, {'user': {'username': 'userc', 'nickname': 'User C', 'id': 4}}]}
>>> #query array, |{} condition list
>>> data ='''{
... "[]":{
... "user": {
... "@role": "ADMIN",
... "id|{}": "<=2,>3",
... "@column": "username,nickname,id"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'id': 1}}, {'user': {'username': 'usera', 'nickname': 'User A', 'id': 2}}, {'user': {'username': 'userc', 'nickname': 'User C', 'id': 4}}]}
>>> #query array, &{} condition list
>>> data ='''{
... "[]":{
... "user": {
... "@role": "ADMIN",
... "id&{}": ">2,<=4",
... "@column": "username,nickname,id"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'userb', 'nickname': 'User B', 'id': 3}}, {'user': {'username': 'userc', 'nickname': 'User C', 'id': 4}}]}
>>> #query array, &{} condition list
>>> data ='''{
... "[]":{
... "user": {
... "@role": "ADMIN",
... "date_join&{}": ">='2018-1-1 00:00',<='2018-2-2 00:00'",
... "@column": "username,nickname,id,date_join"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'nickname': 'Administrator', 'date_join': '2018-01-01 00:00:00', 'id': 1}}, {'user': {'username': 'usera', 'nickname': 'User A', 'date_join': '2018-02-02 00:00:00', 'id': 2}}]}
>>> #query array, {} multiple condition to a same field
>>> data ='''{
... "[]":{
... "user": {
... "@role": "ADMIN",
... "id&{}": ">2,<=4",
... "id{}": "!=3",
... "@column": "username,nickname,id"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'userc', 'nickname': 'User C', 'id': 4}}]}
>>> #query array, !{} condition list
>>> data ='''{
... "[]":{
... "user": {
... "@role": "ADMIN",
... "id!{}": ">2,<=4",
... "@column": "username,nickname,id"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 400, 'msg': "'!' not supported in condition list"}
>>> #query array, |{} condition list, item more than 2
>>> data ='''{
... "[]":{
... "user": {
... "@role": "ADMIN",
... "id|{}": "=1,=2,>=4",
... "@column": "username,id"
... }
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', '[]': [{'user': {'username': 'admin', 'id': 1}}, {'user': {'username': 'usera', 'id': 2}}, {'user': {'username': 'userc', 'id': 4}}]}
>>> #Association query: Two tables, one to one,ref path is absolute path
>>> data ='''{
... "moment":{},
... "user":{
... "@column": "id,username,email",
... "id@": "moment/user_id"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}, 'user': {'username': 'usera', 'email': 'usera@localhost', 'id': 2}}
>>> #Association query: Two tables, one is array, one is single, there is a abs reference to array
>>> data ='''{
... "moment[]":{"moment":{"@count":3}},
... "user":{
... "@column": "id,username,email",
... "id@": "moment[]/1/moment/user_id"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'moment[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}, {'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}, {'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}], 'user': {'username': 'userb', 'email': 'userb@localhost', 'id': 3}}
>>> #Association query: Two tables, one is array, one is single, there is a rel reference to array
>>> data ='''{
... "moment[]":{"moment":{"@count":3}},
... "user":{
... "@column": "id,username,email",
... "id@": "/moment[]/1/moment/user_id"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'moment[]': [{'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}}, {'moment': {'user_id': 3, 'date': '2018-11-02 00:00:00', 'content': 'test moment from b', 'picture_list': '[]', 'id': 2}}, {'moment': {'user_id': 4, 'date': '2018-11-06 00:00:00', 'content': 'test moment from c', 'picture_list': '[]', 'id': 3}}], 'user': {'username': 'userb', 'email': 'userb@localhost', 'id': 3}}
>>> #Association query: Two tables, one to one,ref path is relative path
>>> data ='''{
... "moment":{},
... "user":{
... "@column": "id,username,email",
... "id@": "/moment/user_id"
... }
... }'''
>>> r = handler.post('/apijson/get', data=data, pre_call=pre_call_as("admin"), middlewares=[])
>>> d = json_loads(r.data)
>>> print(d)
{'code': 200, 'msg': 'success', 'moment': {'user_id': 2, 'date': '2018-11-01 00:00:00', 'content': 'test moment', 'picture_list': '[]', 'id': 1}, 'user': {'username': 'usera', 'email': 'usera@localhost', 'id': 2}}
""" | 5,333,728 |
def post_input_to_user_feedback(context, is_valid, endpoint, token):
    """POST generated feedback data to the user feedback endpoint.

    The HTTP response is stored on ``context.response`` for later steps.

    :param context: behave-style context carrying ``coreapi_url``
    :param is_valid: validity flag forwarded to the feedback payload builder
    :param endpoint: endpoint path joined onto ``context.coreapi_url``
    :param token: token clause controlling whether an auth header is sent
    """
    with_auth = parse_token_clause(token)
    url = urljoin(context.coreapi_url, endpoint)
    payload = generate_data_for_user_feedback(is_valid)
    request_kwargs = {"json": payload}
    if with_auth:
        # Attach the authorization header only when the token clause asks for it.
        request_kwargs["headers"] = authorization(context)
    context.response = requests.post(url, **request_kwargs)
def refines_constraints(storage, constraints):
    """
    Check whether the storage, taken as the basis for the substitution map,
    permits a substitution on any of the given constraints — i.e. whether
    applying it would refine the constraint list.
    :param storage: The storage basis for the substitution map
    :param constraints: The constraint list containing the expressions to be substituted.
    :return: True if the substitution would change the constraint list.
    """
    # Slot labels the substitution map could replace, e.g. "storage[3]".
    slot_labels = ["storage[{}]".format(key) for key in storage]
    return any(
        label in constraint.slot_names
        for constraint in constraints
        for label in slot_labels
    )
def dot_fp(x, y):
    """Dispatch a dot product over consistent scalars, vectors, and matrices.

    Supported combinations for (x, y):
        scal/scal, scal/vec, scal/mat, vec/scal, mat/scal,
        vec/vec (same length), mat/vec (n_column of mat == length of vec)

    Warning: No broadcasting! There are special functions for that!

    Args:
        x: scalar, vector, or matrix (fixed point or float)
        y: scalar, vector or matrix (fixed point or float)
    """
    # Pure numpy operands take the fast native path.
    if _np_instance(x, y):
        return np.dot(x, y)
    optype = _operation_type(x, y)
    # Map each recognized operand combination onto its specialized helper;
    # scalar-first combinations swap the operands for the scalar helper.
    dispatch = {
        'scal_scal': lambda a, b: a * b,
        'vec_vec': _vec_vec_dot_fp,
        'mat_mat_dot': _mat_mat_dot_fp,
        'mat_mat_all': _mat_mat_dot_fp,
        'mat_vec_dot': _mat_vec_dot_fp,
        'mat_vec_all': _mat_vec_dot_fp,
        'vec_scal': _scal_dot_fp,
        'mat_scal': _scal_dot_fp,
        'scal_vec': lambda a, b: _scal_dot_fp(b, a),
        'scal_mat': lambda a, b: _scal_dot_fp(b, a),
    }
    if optype not in dispatch:
        raise ValueError("Dot not possible for {}".format(optype))
    return dispatch[optype](x, y)
def add_one(num: int) -> int:
    """Return *num* incremented by one."""
    incremented = num + 1
    return incremented
def normalize(data, train_split):
    """Standardize *data* using statistics of the first *train_split* rows.

    Only the training portion contributes to the mean/std so that test
    samples do not leak into the normalization statistics.

    :param data: data set (array-like supporting ``.mean``/``.std``)
    :param train_split: number of training samples
    :return: normalized data, mean, std
    """
    train_slice = data[:train_split]
    mu = train_slice.mean(axis=0)
    sigma = train_slice.std(axis=0)
    return (data - mu) / sigma, mu, sigma
def test_log_file():
    """
    Test the log content written to the log file.

    Configures GLOG environment variables so mindspore logs to a file,
    emits one warning, then reads the produced log file back and checks
    its content against the expected WARNING line format.
    """
    _rm_env_config()
    file_path = '/tmp/log/mindspore_test'
    os.environ['GLOG_v'] = '2'
    os.environ['GLOG_logtostderr'] = '0'
    os.environ['GLOG_log_dir'] = file_path
    if os.path.exists(file_path):
        shutil.rmtree(file_path)
    filename = ''
    os.makedirs(file_path, exist_ok=True)
    from mindspore import log as logger
    logger.warning("test log message warning")
    f_list = os.listdir(file_path)
    # Locate the log file produced by the logger.
    for file_name in f_list:
        if file_name.startswith('mindspore.log'):
            filename = f'{file_path}/{file_name}'
    # BUG FIX: the command previously had no placeholder ("cat (unknown)"),
    # so the located log file was never actually read.
    cmd = f'cat {filename}'
    result = os.popen(cmd).read()
    # Raw strings keep the regex backslashes literal (no pylint disable needed).
    pattern = r"\[WARNING\] ME\(.*[0-9]:.*[0-9]\,.*[a-zA-Z0-9]\):.* " \
              r"\[.*:.*[0-9]\] test log message warning"
    match_obj = re.match(pattern, result)
    # Clear test file
    if os.path.exists(file_path):
        shutil.rmtree(file_path)
    assert match_obj
    # Clean up _global_logger to avoid affecting for next usecase
    _clear_logger(logger)
def test_process_one_book_and_return(csv_one_book_plus_empty_line, json_empty, results_path):
    """Process and generate data for one book."""
    # GIVEN a CSV file with exactly one book plus a trailing empty line,
    # and an empty complement file.
    # WHEN we process that file (fixture names must stay intact — pytest
    # injects them by parameter name).
    process(csv_one_book_plus_empty_line, json_empty, results_path,
            {"verbose": True})
    # THEN all parameters of the single book should be loaded.
    result_data = load_result("books_read_2022.json", results_path)
    assert len(result_data['books']) == 1
def lemma(name_synsets):
    """
    Look up a WordNet lemma object by its fully qualified name.

    .. note::
        Support only English language (*eng*).

    :param str name_synsets: fully qualified lemma name
        (``"<synset>.<pos>.<nn>.<lemma>"``)
    :return: lemma object with the given name
    :rtype: :class:`Lemma`

    :Example:

        >>> from pythainlp.corpus.wordnet import lemma
        >>>
        >>> lemma('practice.v.01.exercise')
        Lemma('practice.v.01.exercise')
        >>>
        >>> lemma('drill.v.03.exercise')
        Lemma('drill.v.03.exercise')
        >>>
        >>> lemma('exercise.n.01.exercise')
        Lemma('exercise.n.01.exercise')
    """
    # Thin delegation to NLTK's wordnet corpus reader.
    return wordnet.lemma(name_synsets)
def export_cookies(domain, cookies, savelist=None, sp_domain=None):
    """
    Export cookies used for remembered device/other non-session use
    as a list of Cookie objects. Only looks in the jar entries matching
    the given host name(s).

    Args:
        domain (str) - Domain to select cookies from
        cookies (requests.cookies.RequestsCookieJar) - Cookie jar object
        savelist (list(str)) - (Optional) List of cookie names to export;
            defaults to DEFAULT_COOKIE_SAVELIST
        sp_domain (str) - (Optional) SP domain whose cookies are merged on
            top of the IdP domain's cookies
    Returns:
        list(Cookie) - restorable using set_device_cookies() function
    """
    if savelist is None:
        savelist = DEFAULT_COOKIE_SAVELIST
    # Pulling directly from the internal data structure as there is
    # no get_cookies method.
    cookies_dict = cookies._cookies.get(domain, {}).get('/', None)
    if cookies_dict is None:
        cookies_dict = {}
    else:
        # Copy so the merge below does not mutate the jar's internal dict
        # (the previous implementation updated it in place).
        cookies_dict = dict(cookies_dict)
    # If they exist, merge SP cookies into the IdP cookies.  Fixed: the old
    # `'sp_domain' in locals()` test was always True (it's a parameter), and
    # a missing jar entry led to `.update(None)` / `None.update(...)` crashes.
    if sp_domain is not None:
        sp_cookies = cookies._cookies.get(sp_domain, {}).get('/', None)
        if sp_cookies:
            cookies_dict.update(sp_cookies)
    if not cookies_dict:
        return []
    return [c for c in [cookies_dict.get(si) for si in savelist] if c is not None]
def test_subscribe(class_2_test):
    """
    Test subscribe method : original test was written by Zhen Wang
    Source : https://github.com/nehz/pubsub/blob/master/tests.py

    :param class_2_test: fixture providing the PubSub-like class under test
    """
    # Get an instance of the class under test
    communicator = class_2_test()
    channel = "test"
    # The listener subscribes to the channel; the returned queue is named after it
    message_queue = communicator.subscribe(channel)
    assert message_queue.name == channel
    # The publisher puts the message 'Hello World'
    # in the list of messages for this channel
    communicator.publish(channel, 'Hello World')
    # Test if listener has received the string 'Hello World'
    assert next(message_queue.listen())['data'] == 'Hello World'
def adaptive_crossover(parents: Tuple[AbstractSolution, AbstractSolution],
                       variables_number: int,
                       crossover_pattern: int) -> ChildrenValuesTyping:
    """
    Adaptive crossover function.

    Walks the crossover pattern bit by bit: bit i selects which parent donates
    gene (decision variable) i to the first child, while the other parent
    donates the same gene to the second child.

    :param parents: Pair of parent solutions that provide genes for a new pair of children.
    :param variables_number: Number of decision variables (genes).
    :param crossover_pattern: Pattern of crossover to be used.

    :return: Pair of children data sets.
    """
    genes = (list(parents[0].decision_variables_values.items()),
             list(parents[1].decision_variables_values.items()))
    first_child: OrderedDict = OrderedDict()
    second_child: OrderedDict = OrderedDict()
    for gene_index in range(variables_number):
        donor = (crossover_pattern >> gene_index) & 1
        first_child.update([genes[donor][gene_index]])
        second_child.update([genes[donor ^ 1][gene_index]])
    return first_child, second_child
def get_type_dict(kb_path, dstc2=False):
    """
    Build a mapping from KB entity types to the entity values seen in a KB file.

    Specifically, we augment the vocabulary with some special words, one for
    each of the KB entity types.  For each type, the corresponding type word is
    added to the candidate representation if a word is found that appears
    as a KB entity of that type.

    :param kb_path: path to the knowledge-base file
    :param dstc2: True for the DSTC2 space-separated layout, False for the
        tab-separated layout
    :return: dict mapping entity type -> list of unique values; the special
        key 'R_restaurant' collects the restaurant names themselves
    """
    type_dict = {'R_restaurant': []}
    # 'with' guarantees the file handle is closed (previously it leaked).
    with open(kb_path, 'r') as fd:
        for line in fd:
            if dstc2:
                x = line.replace('\n', '').split(' ')
                rest_name = x[1]
                entity = x[2]
                entity_value = x[3]
            else:
                x = line.split('\t')[0].split(' ')
                rest_name = x[1]
                entity = x[2]
                entity_value = line.split('\t')[1].replace('\n', '')
            if rest_name not in type_dict['R_restaurant']:
                type_dict['R_restaurant'].append(rest_name)
            if entity not in type_dict:
                type_dict[entity] = []
            if entity_value not in type_dict[entity]:
                type_dict[entity].append(entity_value)
    return type_dict
def check_students(students):
    """ Make sure we have requisite fields for each student. """
    for student in students:
        if 'github_repo' not in student:
            print(' missing github_repo for %s' % str(student))
        # Normalize every field: strip whitespace, map falsy values to None.
        for field, value in student.items():
            student[field] = value.strip() if value else None
def validate_export(error_writer,project, sm):
    """
    Save error report, project properties, composites, and donors

    :param error_writer: csv writer receiving one row per validation error
    :param project: project path; its basename labels rows in the report
    :param sm: scenario model
    """
    errorList = sm.validate()
    name = os.path.basename(project)
    graph_rules.processProjectProperties(sm)
    sm.save()
    # One report row per error: (project, severity name, then error details).
    for err in errorList:
        error_writer.writerow((name, err[0].name, err[1],err[2],err[3]))
def parse_time(duration: str, minimum: int = None, maximum: int = None, error_on_exceeded: bool = True) -> int:
    """Parse a duration given in NhNmNs format into seconds.

    Supports weeks, days, hours, minutes and seconds, with positive and
    negative amounts.  Optional minimum and maximum bounds (in seconds) can be
    supplied; when a bound is exceeded either a ValueError is raised or the
    bound itself is returned, depending on ``error_on_exceeded``.
    """
    unit_seconds = {"w": 604800, "d": 86400, "h": 3600, "m": 60, "s": 1}
    valid_digits = ("+", "-", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9")
    total = 0
    start = 0
    for position, symbol in enumerate(duration):  # Single pass over the string.
        unit = unit_seconds.get(symbol.lower())
        if unit is not None:
            amount = duration[start:position]
            if amount != "":
                total += int(amount) * unit
            start = position + 1
        elif symbol not in valid_digits:  # Anything else is invalid.
            raise ValueError("Invalid character encountered during time parsing.")
    if minimum and total < minimum:  # Below the requested minimum.
        if error_on_exceeded:
            raise ValueError("Time too short.")
        total = minimum
    if maximum and total > maximum:  # Above the requested maximum.
        if error_on_exceeded:
            raise ValueError("Time too long.")
        total = maximum
    return total
def injectRawKeyboardInput(isPress, code, isExtended):
    """Inject raw input from a system keyboard that is not handled natively by Windows.
    For example, this might be used for input from a QWERTY keyboard on a braille display.
    NVDA will treat the key as if it had been pressed on a normal system keyboard.
    If it is not handled by NVDA, it will be sent to the operating system.
    @param isPress: Whether the key is being pressed.
    @type isPress: bool
    @param code: The scan code (PC set 1) of the key.
    @type code: int
    @param isExtended: Whether this is an extended key.
    @type isExtended: bool
    """
    mapScan = code
    if isExtended:
        # Change what we pass to MapVirtualKeyEx, but don't change what NVDA gets.
        mapScan |= 0xE000
    # Translate the scan code to a virtual-key code for the current keyboard layout.
    vkCode = winUser.user32.MapVirtualKeyExW(mapScan, winUser.MAPVK_VSC_TO_VK_EX, getInputHkl())
    flags = 0
    if not isPress:
        flags |= 2  # KEYEVENTF_KEYUP
    if isExtended:
        flags |= 1  # KEYEVENTF_EXTENDEDKEY
    winUser.keybd_event(vkCode, code, flags, None)
def correl_align(s_orig, align_phases=False,tol=1e-4,indirect_dim='indirect',
        fig_title='correlation alignment',signal_pathway = {'ph1':0,'ph2':1},
        shift_bounds=False, avg_dim = None, max_shift = 100., sigma=20.,direct='t2',fl=None):
    """
    Align transients collected with chunked phase cycling dimensions along an indirect
    dimension based on maximizing the correlation across all the transients and repeat
    alignment until the calculated signal energy remains constant to within a given
    tolerance level.

    Parameters
    ==========
    s_orig: nddata
        A nddata object which contains phase cycle dimensions and an
        indirect dimension.
    align_phases: boolean
        When True, the zeroth-order phase of each transient is divided
        out before the correlation is computed.
    tol: float
        Sets the tolerance limit for the alignment procedure.
    indirect_dim: str
        Name of the indirect dimension along which you seek to align
        the transients.
    fig_title: str
        Title for the figures generated.
    signal_pathway: dict
        Dictionary containing the signal pathway.
    shift_bounds: boolean
        Keeps f_shift to be within a specified
        limit (upper and lower bounds given by max_shift)
        which should be around the location of the expected
        signal.
    avg_dim: str
        Dimension along which the data is being averaged.
    max_shift: float
        Specifies the upper and lower bounds to the range over
        which f_shift will be taken from the correlation function.
        Shift_bounds must be True.
    sigma: int
        Sigma value for the Gaussian mask. Related to the line width
        of the given data.
    fl: boolean
        fl=fl to show the plots and figures produced by this function
        otherwise, fl=None.

    Returns
    =======
    f_shift: array
        The optimized frequency shifts for each transient which will
        maximize their correlation amongst each other, thereby aligning
        them.
    sigma: float
        The width of the Gaussian function used to frequency filter
        the data in the calculation of the correlation function.
    """
    # NOTE(review): signal_pathway is a mutable default argument shared across
    # calls -- confirm this routine never mutates it.
    logging.debug(strm("Applying the correlation routine"))
    if avg_dim:
        # Fold all non-phase-cycle, non-direct dimensions into one so the
        # loop below only has to deal with a single indirect dimension.
        phcycdims = [j for j in s_orig.dimlabels if j.startswith('ph')]
        indirect = set(s_orig.dimlabels)-set(phcycdims)-set([direct])
        indirect = [j for j in s_orig.dimlabels if j in indirect]
        avg_dim_len = len(s_orig.getaxis(avg_dim))
        s_orig.smoosh(indirect)
    for j in signal_pathway.keys():
        assert not s_orig.get_ft_prop(j), str(j)+" must not be in the coherence domain"
    signal_keys = list(signal_pathway)
    signal_values = list(signal_pathway.values())
    ph_len = {j:ndshape(s_orig)[j] for j in signal_pathway.keys()}
    N = ndshape(s_orig)[indirect_dim]
    # Per-transient signal energy; used to normalize the convergence metric.
    sig_energy = (abs(s_orig)**2).data.sum().item() / N
    if fl:
        fl.push_marker()
        fig_forlist, ax_list = plt.subplots(1, 5, figsize=(7,7))
        fl.next("Correlation Diagnostics")
        fig_forlist.suptitle(" ".join(["Correlation Diagnostic"] + [j for j in [fl.basename] if j is not None]))
        fl.image(s_orig.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),ax=ax_list[0],human_units=False)
        ax_list[0].set_title('before correlation\nsig. energy=%g'%sig_energy)
    energy_diff = 1.
    i = 0
    energy_vals = []
    this_E = (abs(s_orig.C.sum(indirect_dim))**2).data.sum().item() / N**2
    energy_vals.append(this_E / sig_energy)
    last_E = None
    # Estimate the signal's center frequency from the coherence-selected data.
    for_nu_center =s_orig.C
    for_nu_center.ft(list(signal_pathway))
    for x in range(len(signal_keys)):
        for_nu_center = for_nu_center[signal_keys[x],signal_values[x]]
    nu_center = for_nu_center.mean(indirect_dim).C.argmax(direct)
    logging.debug(strm("Center frequency", nu_center))
    # Iterate alignment until the averaged-signal energy is stable to within
    # `tol` (requiring at least 5 iterations), capped at 100 iterations.
    for my_iter in range(100):
        i += 1
        logging.debug(strm("*** *** ***"))
        logging.debug(strm("CORRELATION ALIGNMENT ITERATION NO. ",i))
        logging.debug(strm("*** *** ***"))
        s_orig.ift(direct)
        s_copy = s_orig.C
        if align_phases:
            ph0 = s_orig.C.sum(direct)
            ph0 /= abs(ph0)
            s_copy /= ph0
        s_copy.ft(direct)
        # Apodize with a Gaussian mask centered on the estimated signal frequency.
        this_mask = exp(-(s_copy.fromaxis(direct)-nu_center)**2/(2*sigma**2))
        s_copy *= this_mask
        s_copy.ift(direct)
        s_copy2 = s_orig.C
        for k,v in ph_len.items():
            ph = ones(v)
            s_copy *= nddata(ph,'Delta'+k.capitalize())
            s_copy.setaxis('Delta'+k.capitalize(),'#')
        correl = s_copy * 0
        for k,v in ph_len.items():
            for ph_index in range(v):
                s_copy['Delta%s'%k.capitalize(),ph_index] = s_copy['Delta%s'%k.capitalize(),
                        ph_index].run(lambda x, axis=None: roll(x, ph_index, axis=axis), k)
        # Accumulate the correlation against every cyclic shift of the transients.
        for j in range(1,N):
            correl += s_copy2 * s_copy.C.run(lambda x, axis=None: roll(x,j,axis=axis),
                    indirect_dim).run(conj)
        correl.reorder([indirect_dim,direct],first=False)
        if my_iter ==0:
            logging.debug(strm("holder"))
            if fl:
                fl.image(correl.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),
                        ax=ax_list[1])
                ax_list[1].set_title('correlation function (t), \nafter apod')
        correl.ft_clear_startpoints(direct)
        correl.ft(direct, shift=True, pad=2**14)
        for k,v in signal_pathway.items():
            correl.ft(['Delta%s'%k.capitalize()])
            correl = correl['Delta'+k.capitalize(),v]+correl['Delta'+k.capitalize(),0]
        if my_iter ==0:
            logging.debug(strm("holder"))
            if fl:
                fl.image(correl.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),
                        ax=ax_list[2],human_units=False)
                ax_list[2].set_title('correlation function (v), \nafter apod')
        if shift_bounds:
            f_shift = correl[direct:(-max_shift,max_shift)].run(real).argmax(direct)
        else:
            f_shift = correl.run(real).argmax(direct)
        s_copy = s_orig.C
        # Apply the frequency shift as a linear phase ramp in the time domain.
        s_copy *= exp(-1j*2*pi*f_shift*s_copy.fromaxis(direct))
        s_orig.ft(direct)
        s_copy.ft(direct)
        if my_iter == 0:
            logging.debug(strm("holder"))
            if fl:
                fl.image(s_copy.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),
                        ax=ax_list[3],human_units=False)
                ax_list[3].set_title('after correlation\nbefore ph0 restore')
        logging.debug(strm('signal energy per transient (recalc to check that it stays the same):',(abs(s_copy**2).data.sum().item() / N)))
        this_E = (abs(s_copy.C.sum(indirect_dim))**2).data.sum().item() / N**2
        energy_vals.append(this_E / sig_energy)
        logging.debug(strm('averaged signal energy (per transient):', this_E))
        if last_E is not None:
            energy_diff = (this_E - last_E)/sig_energy
            logging.debug(strm(energy_diff))
            if abs(energy_diff) < tol and my_iter > 4:
                break
        last_E = this_E
    if fl is not None:
        fl.next('correlation convergence')
        fl.plot(array(energy_vals),'x')
        gca().yaxis.set_major_formatter(to_percent)
    if fl is not None:
        fl.image(s_copy.C.setaxis(indirect_dim,'#').set_units(indirect_dim,'scan #'),ax=ax_list[4])
        ax_list[4].set_title('after correlation\nph0 restored \nsig. energy=%g'%sig_energy)
        fl.pop_marker()
    if avg_dim:
        # Undo the smoosh above, restoring the averaged dimension layout.
        s_orig.chunk(avg_dim,[avg_dim,'power'],[avg_dim_len,-1])
        s_orig.reorder(['ph1',avg_dim,'power',direct])
    return f_shift, sigma, this_mask
def get_fragment_mz_dict(pep, fragments, mod=None):
    """
    Compute the m/z value for each requested fragment of a peptide.

    :param pep: peptide (forwarded unchanged to calc_fragment_mz)
    :param fragments: iterable of fragment name strings; each name is split
        into type, number and charge via rapid_kit.split_fragment_name
    :param mod: optional modification info forwarded to calc_fragment_mz
    :return: dict mapping fragment name -> calculated m/z
    """
    mz_dict = dict()
    for each_fragment in fragments:
        frag_type, frag_num, frag_charge = rapid_kit.split_fragment_name(each_fragment)
        mz_dict[each_fragment] = calc_fragment_mz(
            pep, frag_type, frag_num, frag_charge, mod)
    return mz_dict
def remove_filters_from_files(
    catfile,
    physgrid=None,
    obsgrid=None,
    outbase=None,
    physgrid_outfile=None,
    rm_filters=None,
    beast_filt=None,
    ):
    """
    Remove filters from catalog, physics grid, and/or obsmodel grid. This has
    two primary use cases:

    1. When making simulated observations, you want to test how your fit quality
    changes with different combinations of filters. In that case, put in
    files for both `physgrid` and `obsgrid`. Set `rm_filters` to the
    filter(s) you wish to remove, and they will be removed both from those
    and from the catalog file. The three new files will be output with the
    name prefix set in `outbase`.

    2. When running the BEAST, you have a master physics model grid with all
    filters present in the survey, but some fields don't have observations in
    all of those filters. In that case, put the master grid in `physgrid`
    and set `rm_filters` to None. The catalog will be used to determine the
    filters to remove (if any). `obsgrid` should be left as None, because in
    this use case, the obsmodel grid has not yet been generated. The output
    physics model grid will be named using the filename in `physgrid_outfile`
    (if given) or with the prefix in `outbase`.

    Parameters
    ----------
    catfile : string
        file name of photometry catalog

    physgrid : string (default=None)
        If set, remove filters from this physics model grid

    obsgrid : string (default=None)
        If set, remove filters from this obsmodel grid

    outbase : string (default=None)
        Path+file to prepend to all output file names.  Useful for case 1 above.

    physgrid_outfile : string (default=None)
        Path+name of the output physics model grid.  Useful for case 2 above.

    rm_filters : string or list of strings (default=None)
        If set, these are the filters to remove from all of the files.  If not
        set, only the filters present in catfile will be retained in physgrid
        and/or obsgrid.

    beast_filt : list of strings
        Sometimes there is ambiguity in the filter name (e.g., the grid has
        both HST_ACS_WFC_F475W and HST_WFC3_F475W, and the filter name is
        F475W).  Set this to the BEAST filter name to resolve any
        ambiguities.  For example, ['HST_WFC3_F475W', 'HST_WFC3_F814W'] ensures
        that these are the names used for F475W and F814W.
    """
    # read in the photometry catalog
    cat = Table.read(catfile)
    # if rm_filters set, remove the requested filters from the catalog
    if rm_filters is not None:
        for cfilter in np.atleast_1d(rm_filters):
            colname = "{}_rate".format(cfilter)
            if colname.upper() in cat.colnames:
                cat.remove_column(colname.upper())
            elif colname.lower() in cat.colnames:
                cat.remove_column(colname.lower())
            else:
                print("{} not in catalog file".format(colname))
        cat.write("{}_cat.fits".format(outbase), overwrite=True)
    # if rm_filters not set, extract the filter names that are present
    if rm_filters is None:
        cat_filters = [f[:-5].upper() for f in cat.colnames if f[-4:].lower() == "rate"]
    # if beast_filt is set, make a list of the short versions
    if beast_filt is not None:
        beast_filt_short = [(f.split("_"))[-1].upper() for f in beast_filt]
    # if physgrid set, process the SED grid
    if physgrid is not None:
        # read in the sed grid
        g0 = SEDGrid(physgrid, backend="cache")
        # extract info
        filters = g0.filters
        shortfilters = [(cfilter.split("_"))[-1].upper() for cfilter in filters]
        rindxs = []
        rgridcols = []
        # loop through filters and determine what needs deleting
        for csfilter, cfilter in zip(shortfilters, filters):
            # --------------------------
            # if the user chose the filters to remove
            if rm_filters is not None:
                # if the current filter is in the list of filters to remove
                if csfilter in np.atleast_1d(rm_filters):
                    # if there's a list of BEAST instrument+filter references
                    if beast_filt is not None:
                        # if the current filter is in the list of BEAST references
                        if csfilter in beast_filt_short:
                            # if it's the same instrument, delete it
                            # (if it's not the same instrument, keep it)
                            if beast_filt[beast_filt_short.index(csfilter)] == cfilter:
                                rindxs.append(filters.index(cfilter))
                                for grid_col in g0.grid.colnames:
                                    if cfilter in grid_col:
                                        rgridcols.append(grid_col)
                        # if the current filter isn't in the BEAST ref list, delete it
                        else:
                            rindxs.append(filters.index(cfilter))
                            for grid_col in g0.grid.colnames:
                                if cfilter in grid_col:
                                    rgridcols.append(grid_col)
                    # if there isn't a list of BEAST refs, delete it
                    else:
                        rindxs.append(filters.index(cfilter))
                        for grid_col in g0.grid.colnames:
                            if cfilter in grid_col:
                                rgridcols.append(grid_col)
            # --------------------------
            # if the removed filters are determined from the catalog file
            if rm_filters is None:
                # if the current filter is present in the catalog filters
                if csfilter in cat_filters:
                    # if there's a list of BEAST instrument+filter references
                    # (if there isn't a list of BEAST refs, keep it)
                    if beast_filt is not None:
                        # if the current filter is in the list of BEAST references
                        # (if the current filter isn't in the BEAST ref list, keep it)
                        if csfilter in beast_filt_short:
                            # if it's not the same instrument, delete it
                            # (if it's the same instrument, keep it)
                            if beast_filt[beast_filt_short.index(csfilter)] != cfilter:
                                rindxs.append(filters.index(cfilter))
                                for grid_col in g0.grid.colnames:
                                    if cfilter in grid_col:
                                        rgridcols.append(grid_col)
                # if the current filter isn't in the catalog filters, delete it
                else:
                    rindxs.append(filters.index(cfilter))
                    for grid_col in g0.grid.colnames:
                        if cfilter in grid_col:
                            rgridcols.append(grid_col)
        # delete column(s)
        nseds = np.delete(g0.seds, rindxs, 1)
        nlamb = np.delete(g0.lamb, rindxs, 0)
        nfilters = np.delete(filters, rindxs, 0)
        for rcol in rgridcols:
            g0.grid.remove_column(rcol)
        print("orig filters: {}".format(" ".join(filters)))
        print(" new filters: {}".format(" ".join(nfilters)))
        # save the modified grid
        g = SEDGrid(np.array(nlamb), seds=nseds, grid=g0.grid, backend="memory")
        g.header["filters"] = " ".join(nfilters)
        if physgrid_outfile is not None:
            g.write(physgrid_outfile)
        elif outbase is not None:
            g.write("{}_seds.grid.hd5".format(outbase))
        else:
            raise ValueError("Need to set either outbase or physgrid_outfile")
    # if obsgrid set, process the observation model
    # NOTE(review): 'rindxs' is only defined in the physgrid branch above, so
    # calling with obsgrid set but physgrid=None raises NameError -- confirm
    # callers always pass physgrid together with obsgrid.
    if obsgrid is not None:
        obsgrid = noisemodel.get_noisemodelcat(obsgrid)
        with tables.open_file("{}_noisemodel.grid.hd5".format(outbase), "w") as outfile:
            outfile.create_array(
                outfile.root, "bias", np.delete(obsgrid["bias"], rindxs, 1)
            )
            outfile.create_array(
                outfile.root, "error", np.delete(obsgrid["error"], rindxs, 1)
            )
            outfile.create_array(
                outfile.root,
                "completeness",
                np.delete(obsgrid["completeness"], rindxs, 1),
            )
def merge_dicts(iphonecontrollers, ipadcontrollers):
    """Add ipad controllers to the iphone controllers dict, but never overwrite a custom controller with None!"""
    combined = iphonecontrollers.copy()
    for identifier, customclass in ipadcontrollers.items():
        existing = combined.get(identifier)
        if existing is None:
            combined[identifier] = customclass
    return combined
def SaveResampled(FileName):
    """Save Resampled results in csv

    Input:
     - FileName: Name of output File

    Save the resampled source as csv
    Save all time steps
    """
    resampled = FindSource('Resampled')
    # save data: all time steps, 8-digit precision, scientific notation
    SaveData(FileName, proxy=resampled, WriteTimeSteps=1,Precision=8,UseScientificNotation=1)
def p_while(p):
    """
    while : WHILE ciclo_uno OP expresion CP ciclo_dos bloque ciclo_tres
    """
    # PLY action: collect every symbol of the production into a flat list node.
    # (The docstring above is the grammar rule itself -- do not edit it.)
    p[0] = [p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]]
def test_pre_tag_newlines():
    """http://code.pediapress.com/wiki/ticket/79"""
    # Regression test for the ticket above: text with leading/doubled
    # newlines fed through the pre-tag helper.
    _check_text_in_pretag("\ntext1\ntext2\n\ntext3")
def perimRect(length,width):
    """
    Compute perimeter of a rectangle.

    >>> perimRect(2,3)
    10
    >>> perimRect(4, 2.5)
    13.0
    >>> perimRect(3, 3)
    12
    >>>
    """
    return (length + width) * 2
def rename_events_in_subjects(subjects, events):
    """
    Goes through all subject events and renames the datetime to the event name
    in the corresponding index of the events list.  Only as many records as
    there are both subject entries and events get updated (the pairing stops
    at the shorter of the two), and any subject records beyond the number of
    events are dropped -- otherwise redcap reports a strange parse error on
    the upload.
    """
    max_events = len(events)
    for key in subjects:
        records = subjects[key]
        paired_count = min(len(records), max_events)
        for index in range(paired_count):
            log_subject_events(subjects, key, index, events)
            records[index]['redcap_event_name'] = events[index]['unique_event_name']
        # Trim trailing records that have no corresponding event.
        del records[max_events:]
def main():
    """Run the app."""
    # Binds on all interfaces; Flask's debug server is for development only.
    app.run(host='0.0.0.0', port=8000, debug=True)
def h_search(endpoint, query, sort, order, per_page, page):
    """
    - Executes search.search and returns a dictionary
      of results

    `return {headers,status_code,items}`
    """
    current_search_params = dict(
        query=query,
        sort=sort,
        order=order,
        per_page=per_page,
        page=page,
    )
    logger.debug(f"current_search_params = {current_search_params}")
    status_code, items, headers = search.search(
        endpoint,
        query,
        sort=sort,
        order=order,
        page=page,
        per_page=per_page,
        strict=True,
    )
    logger.debug(f"status_code = {status_code} num_items = {len(items)}")
    return {"headers": headers, "status_code": status_code, "items": items}
def async_wrapper(fn):
    """
    Wraps an async function or generator with a function which runs that generator on the thread's
    event loop. The wrapped function requires an 'xloil_thread_context' argument which provides a
    callback object to return a result. xlOil will pass this object automatically to functions
    declared async.

    This function is used by the `func` decorator and generally should not be invoked
    directly.
    """
    import asyncio
    import traceback
    @functools.wraps(fn)
    def synchronised(xloil_thread_context, *args, **kwargs):
        ctx = xloil_thread_context
        async def run_async():
            _async_caller.set(ctx.caller)
            try:
                # TODO: is inspect.isasyncgenfunction expensive?
                if inspect.isasyncgenfunction(fn):
                    # Async generator: push every yielded value back as a result.
                    async for result in fn(*args, **kwargs):
                        ctx.set_result(result)
                else:
                    result = await fn(*args, **kwargs)
                    ctx.set_result(result)
            except (asyncio.CancelledError, StopAsyncIteration):
                # Mark done but re-raise so the task is recorded as cancelled.
                ctx.set_done()
                raise
            except Exception as e:
                # Surface the error text (with traceback) as the result value.
                ctx.set_result(str(e) + ": " + traceback.format_exc())
            ctx.set_done()
        # Schedule the coroutine on the context's event loop (another thread).
        ctx.set_task(asyncio.run_coroutine_threadsafe(run_async(), ctx.loop))
    return synchronised
def add_deprecated_species_alias(registry, ftype, alias_species, species, suffix):
    """
    Add a deprecated species alias field.

    Registers (ftype, "<alias_species>_<suffix>") as an alias for
    (ftype, "<species>_<suffix>") that emits a deprecation warning when
    the field is actually evaluated on data.
    """
    unit_system = registry.ds.unit_system
    # Fractions are dimensionless; everything else uses the unit system's
    # unit for the given suffix.
    if suffix == "fraction":
        my_units = ""
    else:
        my_units = unit_system[suffix]
    def _dep_field(field, data):
        # Only warn on real data access, not during field detection.
        if not isinstance(data, FieldDetector):
            issue_deprecation_warning(
                ('The "%s_%s" field is deprecated. ' + 'Please use "%s_%s" instead.')
                % (alias_species, suffix, species, suffix)
            )
        return data[ftype, "%s_%s" % (species, suffix)]
    registry.add_field(
        (ftype, "%s_%s" % (alias_species, suffix)),
        sampling_type="local",
        function=_dep_field,
        units=my_units,
    )
def docker_client():
    """
    Return the current docker client in a manner that works with both the
    docker-py and docker modules.
    """
    try:
        api_client = docker.from_env(version='auto', timeout=3600)
    except TypeError:
        # Older docker-py (such as 1.9) has no 'version' parameter, so
        # retry without it.
        api_client = docker.from_env()
    # Newer clients expose the low-level API on .api; fall back to the
    # client itself otherwise.
    return getattr(api_client, 'api', api_client)
def fatal(msg, *args, **kwargs):
    """Logs at level FATAL; it logs only FATAL.

    Args:
        msg: the message to log
        *args: positional arguments forwarded to the underlying logger
        **kwargs: keyword arguments forwarded to the underlying logger
    """
    _logger.fatal(_log_prefix() + msg, *args, **kwargs)
def bulk_generate_metadata(html_page: str,
                           description: dict=None,
                           enable_two_ravens_profiler=False
                           ) -> typing.List[typing.List[dict]]:
    """
    Generate metadata for every usable <a href> link found in an HTML page.

    :param html_page: raw HTML text to scan for anchor tags
    :param description: optional base description dict, deep-copied for each link
    :param enable_two_ravens_profiler: forwarded to generate_metadata
    :return: list of metadata results, one entry per link that succeeded
    """
    successed = []
    hp = HTMLProcesser(html_page)
    html_meta = hp.extract_description_from_meta()
    for text, href in hp.generate_a_tags_from_html():
        try:
            cur_description = copy.deepcopy(description) or {}
            if not Utils.validate_url(href):
                continue
            if not cur_description.get('title'):
                # Only derive a title from the link text when it contains no
                # blacklisted words.
                black_list = set(text.lower().split()).intersection(TITLE_BLACK_LIST)
                if not black_list:
                    cur_description['title'] = text.strip()
            if not cur_description.get('description'):
                cur_description['description'] = html_meta
            cur_description['materialization_arguments'] = {'url': href}
            # Not to extract html tables, otherwise there will be too many FPs:
            cur_metadata = generate_metadata(cur_description, ignore_html=True,
                                             enable_two_ravens_profiler=enable_two_ravens_profiler)
            if cur_metadata:
                successed.append(cur_metadata)
        except Exception as e:
            # Best-effort: a failing link is reported but doesn't abort the batch.
            print(' - FAILED GENERATE METADATA ON \n\ttext = %s, \n\thref = %s \n%s' % (text, href, str(e)))
    return successed
def add_cals():
    """
    Add nutrients from products for the logged-in user.

    Reads form fields 'menu' (food name) and 'keyword' (weight).
    """
    if 'username' in session:
        user_obj = users_db.get(escape(session['username']))
        calc = Calculator(user_obj.weight, user_obj.height,
                          user_obj.age, user_obj.gender, user_obj.activity)
        food = request.form.get("menu")
        weight = request.form.get('keyword')
        pr = Product(food)
        try:
            weig = float(weight)
            nutr = pr.choose_product(food, weig)
            # NOTE(review): 'a' is a module-level list shared across requests
            # and users -- confirm this accumulation is intended.
            a.append(nutr)
            return render_template('home.html', username=escape(session['username']), normas=[calc.calories_need(), calc.proteins_need(), calc.fats_need(), calc.carbohydrates_need()], vars=nutr)
        except TypeError:
            # Presumably guards float(None) when no weight was submitted;
            # note float('abc') raises ValueError, which is NOT caught here.
            return "Wrong weight"
    else:
        return "You are not logged in"
def parse(q):
    """Parse query string *q* into postfix (RPN) token order.

    Implements the shunting-yard algorithm:
    http://en.wikipedia.org/wiki/Shunting-yard_algorithm

    :param q: query string to parse
    :return: list of (token, token_type, position) tuples in postfix order
    :raises ParseError: on lexing failure, unknown operator, or
        mismatched parentheses
    """
    def _merge(output, scache, pos):
        # Flush the accumulated value words as one space-joined value token.
        if scache:
            s = " ".join(scache)
            output.append((s, TOKEN_VALUE, pos - len(s)))
            del scache[:]
    try:
        tokens = lex(q)
    except Exception as e:
        # Fixed: BaseException.message was removed in Python 3; str(e) gives
        # the same text portably.
        raise ParseError(str(e))
    tokens.reverse()
    scache, stack, output = list(), list(), list()
    while tokens:
        tup = tokens.pop()
        token, token_type, pos = tup[0], tup[1], tup[2]
        utoken = token.upper()
        if token_type in (TOKEN_OPER, TOKEN_LOGIC):
            _merge(output, scache, pos)
            if stack and not (stack[-1][1] == TOKEN_PARENTHESIS
                              and stack[-1][0] == "("):
                # compare with old token on the top of stack
                top = stack[-1]
                if utoken not in OPERS:
                    raise ParseError(
                        "invalid operator `%s' at position %s" % (token, pos))
                p = (OPERS[utoken], OPERS[top[0]])
                # Pop by precedence, honoring left/right associativity.
                if ((p[0].assoc == "L" and p[0].p <= p[1].p) or
                        (p[0].assoc == "R" and p[0].p < p[1].p)):
                    output.append(stack.pop())
            # push new token onto stack
            if token_type == TOKEN_LOGIC:
                stack.append((utoken, TOKEN_LOGIC, pos))
            else:
                stack.append((utoken, TOKEN_OPER, pos))
        elif token_type == TOKEN_PARENTHESIS and token == "(":
            _merge(output, scache, pos)
            stack.append((token, TOKEN_PARENTHESIS, pos))
        elif token_type == TOKEN_PARENTHESIS and token == ")":
            _merge(output, scache, pos)
            del scache[:]
            try:
                # Pop everything down to the matching opening parenthesis.
                while not (stack[-1][1] == TOKEN_PARENTHESIS
                           and stack[-1][0] == "("):
                    output.append(stack.pop())
            except IndexError:
                raise ParseError(
                    "parenthesis mismatch at position %s" % (pos))
            stack.pop()
        else:
            scache.append(token)
    _merge(output, scache, pos)
    if stack and stack[-1][0] == "(":
        # NOTE(review): output[2] is the third output token, not a position --
        # this likely wants stack[-1][2]; confirm before changing behavior.
        raise ParseError(
            "parenthesis mismatch at position %s" % output[2])
    while stack:
        output.append(stack.pop())
    return output
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit checks for the change on upload.

    The following are the presubmit checks:
    * Check change has one and only one EOL.

    Args:
        input_api: presubmit InputApi object.
        output_api: presubmit OutputApi object.
    Returns:
        list of presubmit result objects.
    """
    results = []
    results.extend(_CommonChecks(input_api, output_api))
    # Run on upload, not commit, since the presubmit bot apparently doesn't have
    # coverage or Go installed.
    results.extend(_InfraTests(input_api, output_api))
    results.extend(_CheckGNFormatted(input_api, output_api))
    return results
def compute_compression_rate(file: str, zip_archive) -> float:
    """Compute the compression rate of two files.

    More info: https://en.m.wikipedia.org/wiki/Data_compression_ratio

    :param file: the uncompressed file.
    :param zip_archive: the same file but compressed.
    :returns: the compression rate (uncompressed size / compressed size).
    """
    uncompressed = Helper.Helper.get_file_size(file)
    # NOTE(review): raises ZeroDivisionError if the compressed size is 0 --
    # confirm archive() can never produce an empty file.
    compressed = Helper.Helper.get_file_size(archive(file, zip_archive))
    return uncompressed / compressed
def set_idc_func_ex(name, fp=None, args=(), flags=0):
    """
    Extends the IDC language by exposing a new IDC function that is backed up by a Python function
    This function also unregisters the IDC function if 'fp' was passed as None
    @param name: IDC function name to expose
    @param fp: Python callable that will receive the arguments and return a tuple.
               If this argument is None then the IDC function is unregistered
    @param args: Arguments. A tuple of idaapi.VT_XXX constants
    @param flags: IDC function flags. A combination of EXTFUN_XXX constants
    @return: Boolean success indicator.
    """
    global __IDC_FUNC_CTXS
    # Get the context
    f = __IDC_FUNC_CTXS.get(name, None)
    # Unregistering?
    if fp is None:
        # Not registered?
        if f is None:
            return False
        # Break circular reference
        del f.cb
        # Delete the name from the dictionary
        del __IDC_FUNC_CTXS[name]
        # Delete the context and unregister the function
        return _idaapi.pyw_unregister_idc_func(f.ctxptr)
    # Registering a function that is already registered?
    if f is not None:
        # Unregister it first
        set_idc_func_ex(name, None)
    # Convert the tuple argument info to a string (one type char per argument)
    args = "".join([chr(x) for x in args])
    # Create a context
    ctxptr = _idaapi.pyw_register_idc_func(name, args, fp)
    if ctxptr == 0:
        return False
    # Bind the context with the IdcFunc object
    f = _IdcFunction(ctxptr)
    # Remember the Python context so it isn't garbage-collected
    __IDC_FUNC_CTXS[name] = f
    # Register IDC function with a callback
    return _idaapi.py_set_idc_func_ex(
        name,
        f.fp_ptr,
        args,
        flags)
def get_hidden_plugins() -> Dict[str, str]:
    """
    Get the dictionary of hidden plugins and versions.

    :return: dict of hidden plugins and their versions
    """
    # Fall back to an empty dict when the cache is missing or empty.
    return get_cache('cache/hidden-plugins.json') or {}
def wait_for_re_doc(coll, key, timeout=180):
    """Fetch a doc with the RE API, polling once per second until it becomes
    available or *timeout* seconds have elapsed."""
    start_time = time.time()
    while True:
        print(f'Waiting for doc {coll}/{key}')
        results = re_client.get_doc(coll, key)
        if results['count'] > 0:
            return results['results'][0]
        if int(time.time() - start_time) > timeout:
            raise RuntimeError('Timed out trying to fetch', key)
        time.sleep(1)
def split_year_from_week(data: pd.DataFrame) -> pd.DataFrame:
    """
    Because the partition key was used as the NFL year, the combined
    "year/week" value has to be split into the proper year and week columns.
    """
    year_week = data[Stats.YEAR].str.split("/", expand=True)
    data[[Stats.YEAR, Stats.NFL_WEEK]] = year_week
    # Convert e.g. "week3" into the integer 3.
    data[Stats.NFL_WEEK] = data[Stats.NFL_WEEK].map(lambda w: int(w.lstrip("week")))
    return data
def normalize_index(idx, shape):
    """Normalize slicing indexes
    1. Replaces ellipses with many full slices
    2. Adds full slices to end of index
    3. Checks bounding conditions
    4. Replace multidimensional numpy arrays with dask arrays
    5. Replaces numpy arrays with lists
    6. Posify's integers and lists
    7. Normalizes slices to canonical form
    Examples
    --------
    >>> normalize_index(1, (10,))
    (1,)
    >>> normalize_index(-1, (10,))
    (9,)
    >>> normalize_index([-1], (10,))
    (array([9]),)
    >>> normalize_index(slice(-3, 10, 1), (10,))
    (slice(7, None, None),)
    >>> normalize_index((Ellipsis, None), (10,))
    (slice(None, None, None), None)
    >>> normalize_index(np.array([[True, False], [False, True], [True, True]]), (3, 2))
    (dask.array<array, shape=(3, 2), dtype=bool, chunksize=(3, 2), chunktype=numpy.ndarray>,)
    """
    # Local import to avoid a circular import at module load time —
    # presumably .core imports this module; TODO confirm.
    from .core import Array, from_array
    # Always work with a tuple of per-axis indexers.
    if not isinstance(idx, tuple):
        idx = (idx,)
    # if a > 1D numpy.array is provided, cast it to a dask array
    if len(idx) > 0 and len(shape) > 1:
        i = idx[0]
        if is_arraylike(i) and not isinstance(i, Array) and i.shape == shape:
            idx = (from_array(i), *idx[1:])
    # Expand Ellipsis into the right number of full slices.
    idx = replace_ellipsis(len(shape), idx)
    # Count how many of the array's dimensions the index already consumes;
    # a boolean/fancy array indexer consumes as many dims as its ndim, and
    # None (newaxis) consumes none.
    n_sliced_dims = 0
    for i in idx:
        if hasattr(i, "ndim") and i.ndim >= 1:
            n_sliced_dims += i.ndim
        elif i is None:
            continue
        else:
            n_sliced_dims += 1
    # Pad the index with full slices for any trailing unindexed dimensions.
    idx = idx + (slice(None),) * (len(shape) - n_sliced_dims)
    if len([i for i in idx if i is not None]) > len(shape):
        raise IndexError("Too many indices for array")
    # Build a shape aligned element-for-element with idx, inserting None
    # where new axes (None entries) appear so later steps can zip them.
    none_shape = []
    i = 0
    for ind in idx:
        if ind is not None:
            none_shape.append(shape[i])
            i += 1
        else:
            none_shape.append(None)
    # Bounds-check every real (non-newaxis) indexer against its dimension.
    for axis, (i, d) in enumerate(zip(idx, none_shape)):
        if d is not None:
            check_index(axis, i, d)
    # Canonicalize: sanitize types, normalize slices, make indices positive.
    idx = tuple(map(sanitize_index, idx))
    idx = tuple(map(normalize_slice, idx, none_shape))
    idx = posify_index(none_shape, idx)
    return idx
def data2results(data, rr, nin=None, nout=None, ngoodrank=None, fullcalccurrent=None, fullcalc=None, selectionmaxn=None):
    """Convert analysis data to results structure.

    .. deprecated:: This function is unimplemented and must not be called.

    :raises NotImplementedError: always.
    """
    print('do not use this function!!!!')
    # BUG FIX: a bare ``raise`` outside an ``except`` block raises
    # ``RuntimeError: No active exception to re-raise``; raise an explicit,
    # descriptive exception instead.
    raise NotImplementedError('data2results is not implemented; do not use this function')
def relu(data):
    """Rectified linear unit.
    .. math::
       out = max(x, 0)
    Parameters
    ----------
    data : relay.Expr
        The input data
    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    # Thin wrapper: delegates to the generated FFI op constructor.
    return _make.relu(data)
def levsim(args):
    """Returns the Levenshtein similarity between two terms.

    `args` is a (term_i, term_j, j) triple; the result is
    (alpha * similarity**beta, term_j, j) where similarity is the
    length-normalized Levenshtein similarity of the two terms.
    """
    term_i, term_j, j = args
    longest = max(len(term_i), len(term_j))
    similarity = 1 - Levenshtein.distance(term_i, term_j) / longest
    return (MLEV_ALPHA * similarity ** MLEV_BETA, term_j, j)
def some_payloaded_data(length=1000000, size=32, var=0):
    """Generate random array with named tuples, containing random string as payload"""
    # Wrap each simple datum in a DataWithPayload carrying a fresh payload.
    yield from (DataWithPayload(item, some_payload(size, var))
                for item in some_simple_data(length))
def retrieve_job_logs(job_id):
    """Retrieve job's logs.

    :param job_id: UUID which identifies the job.
    :returns: Job's logs, or None when the job record has no 'log' entry.
    :raises KeyError: if `job_id` is unknown.
    """
    job_record = JOB_DB[job_id]
    return job_record.get('log')
def parse_argv():
    """Parses arguments for use with the test launcher.

    Arguments are:
    1. Working directory.
    2. Test runner, `pytest` or `nose`
    3. debugSecret
    4. debugPort
    5. Debugger search path
    6. Mixed-mode debugging (non-empty string to enable, empty string to disable)
    7. Enable code coverage and specify filename
    8. TestFile, with a list of testIds to run
    9. Rest of the arguments are passed into the test runner.
    """
    argv = sys.argv
    cwd, runner, secret = argv[1], argv[2], argv[3]
    port = int(argv[4])
    search_path, mixed_mode, coverage_file, test_file = argv[5], argv[6], argv[7], argv[8]
    return (cwd, runner, secret, port, search_path, mixed_mode,
            coverage_file, test_file, argv[9:])
def play_collection(playlist_id, start_album_id, shuffle_albums: bool, device_id=None):
    """This function attempts to mimic the ability to play songs
    within the context of a playlist.

    Playback will start from the first track of `start_album_id`.
    If shuffle_albums is set to true then the list order will be randomized.
    A playlist is created just for playback and then immediately torn down
    after playback starts.

    NOTE: start_album_id must be in the collection or undefined behavior will occur

    :param playlist_id: id of the playlist that defines the album collection
    :param start_album_id: id of the album to start playback from, or None to
        start from the first album of the (possibly shuffled) collection
    :param shuffle_albums: when True, randomize the album order before playback
    :param device_id: Spotify device to play on (None = currently active device)
    """
    # Get the spotify user account instance
    sp = _get_sp_instance()
    # Get a client credentials spotify interface
    sp_cc_iface = Spotify()
    # Turn off user's shuffle (so the albums are actually in order!)
    # NOTE: On some devices this may fail (ex: New web player tab before it has played music)
    # As seen thusfar, when this command fails, the playback would have failed anyways, except silently.
    # So this at least gives some indication that something went wrong (although it would be nice
    # if there was a more clear explanation for the user...I'm just not really the underlying reasoning
    # so I can't accurately add an error message at this time.)
    sp.shuffle(False, device_id)
    # Get the collection (making sure to reload albums if necessary!)
    collection = sp_cc_iface.get_collection_from_playlist_id(playlist_id, reload_albums=True)
    # Get a copy of the collection albums (deep copy: we may shuffle them)
    collection_albums = copy.deepcopy(collection.albums)
    # Shuffle the albums if specified
    if shuffle_albums:
        random.shuffle(collection_albums)
    # Create temporary playlist to hold playback tracks
    playback_playlist = create_playlist(f"Album Collection: {collection.name}")
    # BUG FIX: the previous ``except Exception as e: raise e`` clause was a
    # no-op that only disturbed the traceback; try/finally alone guarantees
    # the temporary playlist is torn down on both success and failure.
    try:
        # If there is a 'start album' then add tracks from the start album to temporary playlist
        if start_album_id is not None:
            add_tracks_to_playlist(
                playback_playlist.id,
                next(album.track_ids for album in collection_albums if album.id == start_album_id))
        # Otherwise just add tracks from the first album in the collection
        else:
            add_tracks_to_playlist(
                playback_playlist.id,
                collection_albums[0].track_ids)
        # Begin playback of the playlist (only one album so far, but that's fine)
        sp.start_playback(device_id=device_id, context_uri=playback_playlist.uri)
        # Get list of tracks from list of albums and mark which track should be played first
        playback_track_ids = []
        start_album_offset = None
        start_album_num_tracks = None
        for album in collection_albums:
            # Record start album information
            if start_album_offset is None and \
                    (start_album_id is None or album.id == start_album_id):
                start_album_offset = len(playback_track_ids)
                start_album_num_tracks = album.total_tracks
            # Add tracks from album to list
            playback_track_ids.extend(album.track_ids)
        # Add tracks from before the start album to the temporary playlist
        add_tracks_to_playlist(playback_playlist.id, playback_track_ids[0:start_album_offset])
        # Put start album in it's correct position at the end of the playlist as it's been built thusfar
        sp.playlist_reorder_items(
            playback_playlist.id,
            0,
            start_album_num_tracks + start_album_offset,
            range_length=start_album_num_tracks)
        # Add remaining tracks to the temporary playlist
        add_tracks_to_playlist(playback_playlist.id, playback_track_ids[start_album_offset+start_album_num_tracks:])
    finally:
        # Remove the temporary playlist
        sp.current_user_unfollow_playlist(playback_playlist.id)
def _normalize_format(fmt):
    """Return normalized format string, is_compound format."""
    if _is_string_or_bytes(fmt):
        # Single format string: lowercase, factor, sort, recombine.
        factors = sorted(_factor_format(fmt.lower()))
        return _compound_format(factors), ',' in fmt
    # Iterable of formats: normalize each element, then combine sorted.
    normalized = sorted(_normalize_format(member)[0] for member in fmt)
    return _compound_format(normalized), True
def get_cevioai_version() -> str:
    """
    Get the version of CeVIO AI.

    Returns
    -------
    str
        The CeVIO AI version string.
    """
    # Verifies the CeVIO AI service is available before querying it.
    _check_cevioai_status()
    return _service_control.host_version
def get_all_revisions_available(link):
    """List all the revisions available for a particular SVN link.

    :param link: SVN URL to query; the ``dllohsr222`` hostname is rewritten
        to its IP address before querying.
    :returns: list of ``"<revision>-><message>"`` strings sorted newest-first,
        with the newest entry suffixed ``"(Latest)"``.
    """
    import subprocess
    import operator
    from bs4 import BeautifulSoup

    svn_username = current_user.svn_username
    svn_password = current_user.svn_password
    # NOTE(review): hostname appears not resolvable from this host, so the
    # raw IP is substituted — confirm this mapping is still current.
    link = link.replace("dllohsr222", "10.133.0.222")
    args = ["svn", "log",
            link, "--xml",
            "--username", svn_username,
            "--password", svn_password,
            "--non-interactive", "--no-auth-cache"]
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    # Parse the XML log output into (revision number, commit message) pairs.
    soup = BeautifulSoup(out, "xml")
    rev_list = []
    for rev in soup.find_all('logentry'):
        rev_list.append((int(rev["revision"]), rev.msg.getText()))
    # Newest revision first.
    rev_list.sort(key=operator.itemgetter(0), reverse=True)
    updated_rev_list = []
    for index, (revision, message) in enumerate(rev_list):
        if index == 0:
            updated_rev_list.append("{}->{}(Latest)".format(revision, message))
        else:
            updated_rev_list.append("{}->{}".format(revision, message))
    return updated_rev_list
def get_explicit_kwargs_OD(f_params, bound_args, kwargs) -> OrderedDict:
    """For some call to a function f, args *arg and **kwargs,
    :param f_params: inspect.signature(f).parameters
    :param bound_args: inspect.signature(f).bind(*args, **kwargs)
    :return: OrderedDict of the (kwd, kwargs[kwd])
    for keyword parameters kwd of f that ARE explicitly passed.
    Another ad-hoc little function, needed in 2 different places.
    TODO (doc)tests?
    """
    bound = bound_args.arguments
    explicit = OrderedDict()
    # A parameter counts as explicitly passed when it was both bound by
    # the call and supplied via **kwargs.
    for param in f_params:
        if param in bound and param in kwargs:
            explicit[param] = kwargs[param]
    return explicit
def generate_variable_formants_point_function(corpus_context, min_formants, max_formants):
    """Generates a function used to call Praat to measure formants and bandwidths with variable num_formants.
    Parameters
    ----------
    corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
        The CorpusContext object of the corpus.
    min_formants : int
        The minimum number of formants to measure with on subsequent passes.
    max_formants : int
        The maximum number of formants to measure with on subsequent passes.
    Returns
    -------
    formant_function : Partial function object
        The function used to call Praat.
    """
    # Ceiling frequency passed to Praat's formant tracker (Hz).
    max_freq = 5500
    # The Praat script lives next to this module.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    script = os.path.join(script_dir, 'multiple_num_formants.praat')
    # Script arguments: time step, window length, formant range, ceiling.
    formant_function = PraatAnalysisFunction(script, praat_path=corpus_context.config.praat_path,
                                             arguments=[0.01, 0.025, min_formants, max_formants, max_freq])
    # Use the custom parser that handles output for multiple formant counts.
    formant_function._function._output_parse_function = parse_multiple_formant_output
    return formant_function
def handle_player_dead_keys(key):
    """
    The set of keys for a dead player.
    Can only see the inventory and toggle fullscreen.
    """
    pressed_char = chr(key.c) if key.vk == libtcod.KEY_CHAR else ""
    # Inventory is still viewable after death.
    if pressed_char == 'i':
        return {'show_inventory': True}
    # Alt+Enter toggles full screen.
    if key.vk == libtcod.KEY_ENTER and key.lalt:
        return {'fullscreen': True}
    # Escape exits the menu.
    if key.vk == libtcod.KEY_ESCAPE:
        return {'exit': True}
    return {}
def safe_std(values):
    """Replace zero std values with ones.

    :param values: sequence or array of standard deviations.
    :return: numpy array where every exactly-zero entry is replaced by 1.0,
        so the result is safe to divide by.
    """
    arr = np.asarray(values)
    # Vectorized replacement instead of a per-element Python loop; the mixed
    # int/float case upcasts to float64 exactly like the original list build.
    return np.where(arr == 0.0, 1.0, arr)
# Documentation-only stub generated from the Maya command reference; the real
# command is provided by the Maya runtime. Calling this returns None.
def skinPercent(q=1,ib="float",nrm=1,prw="float",r=1,rtd=1,t="string",tmw="string",tv="[string, float]",v=1,zri=1):
    """
    http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/skinPercent.html
    -----------------------------------------
    skinPercent is undoable, queryable, and NOT editable.
    This command edits and queries the weight values on members of a skinCluster
    node, given as the first argument. If no object components are explicitly
    mentioned in the command line, the current selection list is used.
    Note that setting multiple weights in a single invocation of this command is
    far more efficient than calling it once per weighted vertex.
    -----------------------------------------
    Return Value:
    None
    In query mode, return type is based on queried flag.
    -----------------------------------------
    Flags:
    -----------------------------------------
    ib : ignoreBelow                 [float] ['query']
        Limits the output of the -value and -transform queries to the entries whose weight values are over the specified limit. This flag has to be used before the -query flag. In query mode, this flag needs a value.
    -----------------------------------------
    nrm : normalize                  [boolean] []
        If set, the weights not assigned by the -transformValue flag are normalized so that the sum of the all weights for the selected object component add up to 1. The default is on. NOTE: The skinCluster has a normalizeWeights attribute which when set to OFF overrides this attribute! If the skinCluster.normalizeWeights attribute is OFF, you must set it to Interactive in order to normalize weights using the skinPercent command.
    -----------------------------------------
    prw : pruneWeights               [float] []
        Sets to zero any weight smaller than the given value for all the selected components. To use this command to set all the weights to zero, you must turn the -normalize flag "off" or the skinCluster node will normalize the weights to sum to one after pruning them. Weights for influences with a true value on their "Hold Weights" attribute will not be pruned.
    -----------------------------------------
    r : relative                     [boolean] []
        Used with -transformValue to specify a relative setting of values. If -relative is true, the value passed to -tv is added to the previous value. Otherwise, it replaces the previous value.
    -----------------------------------------
    rtd : resetToDefault             [boolean] []
        Sets the weights of the selected components to their default values, overwriting any custom weights.
    -----------------------------------------
    t : transform                    [string] ['query']
        In Mel, when used after the -query flag (without an argument) the command returns an array of strings corresponding to the names of the transforms influencing the selected object components. If used before the -query flag (with a transform name), the command returns the weight of the selected object component corresponding to the given transform. The command will return an average weight if several components have been selected. In Python, when used with None instead of the name of the transform, the command returns an array of strings corresponding to the names of the transforms influencing the selected object components. If used with a transform name, the command returns the weight of the selected object. The command will return an average weight if several components have been selected. In query mode, this flag can accept a value.
    -----------------------------------------
    tmw : transformMoveWeights       [string] []
        This flag is used to transfer weights from a source influence to one or more target influences. It acts on the selected vertices. The flag must be used at least twice to generate a valid command. The first flag usage indicates the source influence from which the weights will be copied. Subsequent flag usages indicate the target influences.
    -----------------------------------------
    tv : transformValue              [[string, float]] []
        Accepts a pair consisting of a transform name and a value and assigns that value as the weight of the selected object components corresponding to the given transform.
    -----------------------------------------
    v : value                        [boolean] ['query']
        Returns an array of doubles corresponding to the joint weights for the selected object component.
    -----------------------------------------
    zri : zeroRemainingInfluences    [boolean]
        If set, the weights not assigned by the -transformValue flag are set to 0. The default is off.
    """
def _munge_source_data(data_source=settings.NETDEVICES_SOURCE):
    """
    Read the source data in the specified format, parse it, and return a
    :param data_source:
        Absolute path to source data file
    """
    log.msg('LOADING FROM: ', data_source)
    # parse_url yields keyword args for the loader; 'path' is positional.
    url_info = parse_url(data_source)
    path = url_info.pop('path')
    return loader.load_metadata(path, **url_info)
def copy_global_to_local_blacklist(excluded_testcase=None):
  """Copies contents of global blacklist into local blacklist file, excluding
  a particular testcase (if any).

  :param excluded_testcase: testcase whose leak function is left out of the
      local blacklist; None copies every entry.
  """
  lsan_suppressions_path = get_local_blacklist_file_path()
  excluded_function_name = (
      get_leak_function_for_blacklist(excluded_testcase)
      if excluded_testcase else None)
  # BUG FIX: the file was opened in binary mode ("wb") but written with str
  # data, which raises TypeError on Python 3; open in text mode instead.
  # All writes also happen inside the `with` so the handle is always open.
  with open(lsan_suppressions_path, "w") as local_blacklist:
    # The local suppressions file should always have a comment on top
    # to prevent parsing errors.
    local_blacklist.write("# This is a LSAN suppressions file.\n")
    # Copy global blacklist into local blacklist.
    global_blacklists = data_types.Blacklist.query(
        data_types.Blacklist.tool_name == LSAN_TOOL_NAME)
    # Set for O(1) duplicate detection.
    blacklisted_functions = set()
    for blacklist in global_blacklists:
      if blacklist.function_name in blacklisted_functions:
        continue
      if blacklist.function_name == excluded_function_name:
        continue
      local_blacklist.write(
          LSAN_SUPPRESSION_LINE.format(function=blacklist.function_name))
      blacklisted_functions.add(blacklist.function_name)
def create_app(settings_override=None):
    """
    Create a flask application using the app factory pattern
    :param settings_override: optional mapping of settings applied on top of
        the values loaded from the config files (used mainly by tests)
    :return: Flask app
    """
    app = Flask(__name__, instance_relative_config=True)
    # Defaults first, then the optional instance-local settings file.
    app.config.from_object('config.settings')
    app.config.from_pyfile('settings.py', silent=True)
    if settings_override:
        app.config.update(settings_override)
    # Wire up blueprints and extensions after configuration is final.
    app.register_blueprint(user)
    extensions(app)
    return app
def convex_hull(ps: Polygon) -> Polygon:
    """Andrew's algorithm"""
    # Andrew's monotone-chain convex hull: build the lower hull over the
    # lexicographically sorted points, then the upper hull over the same
    # points traversed in reverse.
    def construct(limit, start, stop, step=1):
        # Append points, popping while the last turn is clockwise
        # (negative cross product), never shrinking below `limit`
        # already-fixed hull points.
        for i in range(start, stop, step):
            while len(res) > limit and cross(res[-1] - res[-2], s_ps[i] - res[-1]) < 0:
                res.pop()
            res.append(s_ps[i])
    # NOTE(review): `assert` is stripped under `python -O`; consider raising
    # ValueError for input validation instead.
    assert len(ps) >= 3
    s_ps = sorted(ps)
    N = len(s_ps)
    res: Polygon = []
    construct(1, 0, N)  # lower hull
    construct(len(res), N - 2, -1, -1)  # upper hull (last point already added)
    return res[:-1]  # final point duplicates the first; drop it
def get_api_file_url(file_id):
    """Get BaseSpace API file URL.

    :param file_id: BaseSpace file identifier appended to the API base URL.
    :return: full ``<api_url>/files/<file_id>`` endpoint URL.
    """
    base_url = get_api_url()
    return "{0}/files/{1}".format(base_url, file_id)
def Tr(*content, **attrs):
    """
    Wrapper for tr tag
    >>> Tr().render()
    '<tr></tr>'
    """
    # Delegates element construction to the generic keyword-element factory.
    return KWElement('tr', *content, **attrs)
def _read_storm_locations_one_time(
        top_tracking_dir_name, valid_time_unix_sec, desired_full_id_strings):
    """Reads storm locations at one time.
    K = number of storm objects desired
    :param top_tracking_dir_name: See documentation at top of file.
    :param valid_time_unix_sec: Valid time.
    :param desired_full_id_strings: length-K list of full storm IDs.  Locations
        will be read for these storms only.
    :return: desired_latitudes_deg: length-K numpy array of latitudes (deg N).
    :return: desired_longitudes_deg: length-K numpy array of longitudes (deg E).
    """
    spc_date_string = time_conversion.time_to_spc_date_string(
        valid_time_unix_sec)
    # All desired storm objects share the same valid time.
    desired_times_unix_sec = numpy.full(
        len(desired_full_id_strings), valid_time_unix_sec, dtype=int
    )
    tracking_file_name = tracking_io.find_file(
        top_tracking_dir_name=top_tracking_dir_name,
        tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,
        source_name=tracking_utils.SEGMOTION_NAME,
        valid_time_unix_sec=valid_time_unix_sec,
        spc_date_string=spc_date_string, raise_error_if_missing=True)
    print('Reading storm locations from: "{0:s}"...'.format(tracking_file_name))
    storm_object_table = tracking_io.read_file(tracking_file_name)
    # Match desired IDs/times against everything in the file; raises if a
    # desired storm is missing (allow_missing=False).
    desired_indices = tracking_utils.find_storm_objects(
        all_id_strings=storm_object_table[
            tracking_utils.FULL_ID_COLUMN].values.tolist(),
        all_times_unix_sec=storm_object_table[
            tracking_utils.VALID_TIME_COLUMN].values,
        id_strings_to_keep=desired_full_id_strings,
        times_to_keep_unix_sec=desired_times_unix_sec, allow_missing=False)
    # Pull centroid coordinates for the matched rows only.
    desired_latitudes_deg = storm_object_table[
        tracking_utils.CENTROID_LATITUDE_COLUMN].values[desired_indices]
    desired_longitudes_deg = storm_object_table[
        tracking_utils.CENTROID_LONGITUDE_COLUMN].values[desired_indices]
    return desired_latitudes_deg, desired_longitudes_deg
def ja_il(il, instr):
    """
    Build an LLIL ``goto`` targeting the jump destination of an instruction.

    :param il: llil function to generate the expression with
    :param instr: instruction whose ``ja_target`` is the jump destination
    :return: llil ``goto`` expression for the target of `instr`
    """
    target_label = valid_label(il, instr.ja_target)
    return il.goto(target_label)
def create_plot_durations_v_nrows(source, x_axis_type='log', x_range=(1, 10**5),
                                  y_axis_type='log', y_range=(0.001, 10**3)):
    """
    Create a Bokeh plot (Figure) of do_query_dur and stream_to_file_dur versus num_rows.
    num_rows is the number of result rows from the query.
    Parameters
    ----------
    source : ColumnDataSource
        Bokeh data source containing the navostats data
    x_axis_type : str
        auto, linear, log, datetime, or mercator
    x_range : tuple (min, max)
        The range of values to display on the x axis.  When x_axis_type is 'log',
        it helps if the endpoints are exact powers of 10.
    y_axis_type : str
        auto, linear, log, datetime, or mercator
    y_range : tuple (min, max)
        The range of values to display on the y axis.  When y_axis_type is 'log',
        it helps if the endpoints are exact powers of 10.
    Returns
    -------
    plotting.figure
        A Bokeh plot that can be shown.
    """
    # create a new plot with a datetime axis type
    p = plotting.figure(plot_width=500, plot_height=500,
                        x_axis_type=x_axis_type, x_range=x_range,
                        y_axis_type=y_axis_type, y_range=y_range)
    hover = create_hover()
    p.add_tools(hover)
    # add renderers: semi-transparent points so dense regions read as darker
    qt_rend = p.circle(x="num_rows", y="do_query_dur", source=source, size=4, color='red', alpha=0.2)
    dt_rend = p.circle(x="num_rows", y="stream_to_file_dur", source=source, size=4, color='green', alpha=0.2)
    # Clickable legend below the plot; clicking an entry hides its renderer.
    legend = Legend(items=[
        ("Query Duration", [qt_rend]),
        ("Download Duration", [dt_rend])
    ], location=(0, 40), click_policy='hide')
    p.add_layout(legend, 'below')
    p.title.text = 'Query and Download Durations v. # of Rows'
    p.xaxis.axis_label = '# of Rows'
    p.yaxis.axis_label = 'Durations (s)'
    return p
def listen_and_transcribe(length=0, silence_len=0.5):
    """
    Listen with the given parameters, but simultaneously stream the audio to the
    Aurora API, transcribe, and return a Text object. This reduces latency if
    you already know you want to convert the speech to text.
    :param length the length of time (seconds) to record for. If 0, it will record indefinitely, until the specified amount of silence
    :type length float
    :param silence_len the amount of silence (seconds) to allow before stoping (ignored if length != 0)
    :type silence_len float
    """
    # Local import — presumably avoids a circular import with the text
    # module; TODO confirm.
    from auroraapi.text import Text
    # Stream audio lazily into the STT call and wrap the transcript.
    return Text(get_stt(functools.partial(stream, length, silence_len), stream=True)["transcript"])
def load_test_dataframes(feature_folder, **kwargs):
    """
    Convenience function for loading unlabeled test dataframes. Does not add a 'Preictal' column.
    :param feature_folder: The folder to load the feature data from.
    :param kwargs: keyword arguments to use for loading the features.
    :return: A DataFrame of unlabeled test data without a 'Preictal' column.
    """
    test = load_feature_files(feature_folder,
                              class_name="test",
                              # Never use sliding frames for the test-data
                              sliding_frames=False,
                              **kwargs)
    # DataFrame.sortlevel was deprecated in pandas 0.20 and removed in 0.25;
    # sort_index(level=...) is the supported equivalent.
    test.sort_index(level='segment', inplace=True)
    if isinstance(test.columns, pd.MultiIndex):
        # Sort the column MultiIndex (all levels) for stable column order.
        test.sort_index(axis=1, inplace=True)
    return test
def _match_grid(grid):
    """
    given a grid, create the other side to obey:
    one p1 black must be a p2 green, tan, and black; and vice versa
    """
    # Flat 25-cell grid for player 2; "" marks an unassigned cell.
    l = [""] * 25
    # Indices of player 1's cells grouped by color (b=black, t=tan, g=green).
    color_dict = {
        "b": [i for i in range(25) if grid[i] == "b"],
        "t": [i for i in range(25) if grid[i] == "t"],
        "g": [i for i in range(25) if grid[i] == "g"]
    }
    # Map p1's blacks (random order) onto one p2 black, green and tan.
    # NOTE(review): assumes the grid has exactly 3 "b" cells — confirm.
    random.shuffle(color_dict["b"])
    l[color_dict["b"][0]] = "b"
    l[color_dict["b"][1]] = "g"
    l[color_dict["b"][2]] = "t"
    # One of p1's greens and one of p1's tans become p2 blacks.
    x = random.choice(color_dict["g"])
    color_dict["g"].remove(x)
    l[x] = "b"
    l[random.choice(color_dict["t"])] = "b"
    # one green is the other player's black
    # three are the other players greens
    # Fill remaining cells: _NUM_GREENS - 4 greens drawn from cells that are
    # not p1 greens, plus 3 of p1's remaining greens; everything else is tan.
    remain = {i for i in range(25) if l[i] == ""}
    s = random.sample(remain.difference(set(color_dict["g"])), _NUM_GREENS - 4)
    s += random.sample(color_dict["g"], 3)
    for i in remain:
        if i in s: l[i] = "g"
        else: l[i] = "t"
    return _Player_Grid(l)
def extract_frames(subject_action):
    """
    This function is to save video frames and save their paths along with
    2D/3D keypoints and bounding box coordinates in a .npz file.
    Can be used for PEBRT.
    """
    tabs = {}
    # One entry per subject/action pair, keyed "subject/action".
    for subject, actions in subject_action.items():
        for action in actions:
            video = Video(subject, action, 54138969)
            tabs[subject + "/" + action] = video.save(save_img=False, save_npz=True)
    print("Merging all npz files.")
    filename = "./h36m/data_h36m_frame_all"
    np.savez_compressed(filename, **tabs)
    print("saved {}.npz".format(filename))
    print("Done.")
def decorate(subplot=None, **params):
    """Decorate graph using parameters

    Args:
        subplot (matplotlib.axes._subplots.AxesSubplot): target axes;
            defaults to the current axes (``plt.gca()``).
        **params: <deco_param_descr>
    """
    subplot = plt.gca() if subplot is None else subplot
    # Split styling params from per-element font params.
    params, font_params = splitparams_(params)
    # BUG FIX: ``params`` is subscripted everywhere else in this function;
    # ``params.xlabel`` would raise AttributeError on a plain mapping.
    if params['xlabel']:
        subplot.set_xlabel(params['xlabel'])
        subplot.xaxis.label.set_fontproperties(
            FontProperties(**font_params['xlabel'])
        )
    if params['ylabel']:
        subplot.set_ylabel(params['ylabel'])
        subplot.yaxis.label.set_fontproperties(
            FontProperties(**font_params['ylabel'])
        )
    if params['title']:
        plt.title(params['title'])
        subplot.title.set_fontproperties(
            FontProperties(**font_params['title'])
        )
    if params['w_grid']:
        plt.grid(True)
    # Apply tick-label fonts on both axes.
    for label in subplot.get_xticklabels():
        label.set_fontproperties(
            FontProperties(**font_params['xticks'])
        )
    for label in subplot.get_yticklabels():
        label.set_fontproperties(
            FontProperties(**font_params['yticks'])
        )
    # Only (re)draw the legend when there is something to show.
    if subplot.get_legend_handles_labels()[0]:
        prop = FontProperties(**font_params['legend'])
        subplot.legend(**params['legend'], prop=prop)
def add_vary_callback_if_cookie(*varies):
    """Build a callback that merges the given values into a response's Vary set.

    Prevents downstream web servers from accidentally caching session
    set-cookie responses, which could result in session leakage.
    """
    def inner(request, response):
        existing = response.vary
        # Start from the response's current Vary entries, if any.
        combined = set(existing) if existing is not None else set()
        combined.update(varies)
        response.vary = combined
    return inner
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.