content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def substitute_variables(model_params_variables, model_data_raw):
    """Substitute known variables into raw, comma-separated model data.

    :param model_params_variables: mapping of variable name -> replacement value
    :param model_data_raw: iterable of comma-separated argument strings
    :return: list of comma-joined argument strings after substitution
    """
    rows = []
    for raw_line in model_data_raw:
        row = []
        for raw_token in raw_line.split(","):
            if raw_token == "":
                # tokens that are empty *before* stripping are dropped
                continue
            token = raw_token.strip()
            if token in model_params_variables:
                row.append(model_params_variables[token])
            elif "inst" in token:
                # an "inst" token becomes the previous row's time field plus one
                row.append(str(float(rows[-1][1]) + 1))
            elif token.startswith("-e") and "_" in token:
                # keep only the flag part before the first underscore
                row.append(token.split("_")[0])
            else:
                row.append(token)
        rows.append(row)
    # generate clean model data: re-join every row into comma-separated form
    return [",".join(row) for row in rows]
def getFirstDay(curDate, curWeek):
    """Return the Monday (first day) of week 1, given a date in week ``curWeek``."""
    assert(curWeek >= 1)
    # Step back to the corresponding day in week 1, then to that week's Monday.
    week_one_date = curDate - timedelta(weeks=curWeek - 1)
    return week_one_date - timedelta(days=week_one_date.weekday())
from typing import Callable, List, Optional, TypeVar

import click

# Fix: the original did ``from re import T``, which imports the unrelated
# ``re.TEMPLATE`` flag constant; a proper TypeVar is what the signature needs.
T = TypeVar("T")


def select_from_list(data: List[T], name: Callable[[T], str], prompt: str) -> Optional[T]:
    """Interactively selects named entity from given list.

    :param data: candidate entities to choose from
    :param name: callable producing the display name of an entity
    :param prompt: prompt text shown to the user
    :return: the chosen entity, or ``None`` when the user enters 0
    """
    names: List[str] = list(map(name, data))
    prompt = f'{prompt}: [0-{len(names)}]'
    while True:
        print_selection_list(names)
        # Fix: bare ``INT`` was an undefined name; use click's INT param type.
        pos: int = click.prompt(prompt, type=click.INT)
        if pos < 0 or pos > len(names):
            print('Invalid number.')
            continue
        if pos == 0:
            return None
        # Entries are displayed 1-based; index back into 0-based data.
        return data[pos - 1]
def gram_matrix(features, normalize=True):
    """
    Compute the Gram matrix from features.
    Inputs:
    - features: Tensor of shape (1, H, W, C) giving features for
      a single image.
    - normalize: optional, whether to normalize the Gram matrix
        If True, divide the Gram matrix by the number of neurons (H * W * C)
    Returns:
    - gram: Tensor of shape (C, C) giving the (optionally normalized)
      Gram matrices for the input image.
    """
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    n, h, w, c = tf.shape(features)
    # Flatten spatial dims to (C, H*W), then transpose to (H*W, C).
    f = tf.reshape(tf.transpose(features[0], [2, 0, 1]), (c, h*w))
    f = tf.transpose(f, [1, 0])
    # G = f^T f has shape (C, C).
    G = tf.linalg.matmul(tf.transpose(f, [1, 0]), f)
    if normalize:
        G = G / tf.cast(h * w * c, tf.float32)
    return G
    # Fix: removed a large block of unreachable leftover NumPy/TF scratch
    # code that followed the return statement in the original version.
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
import os


def project_path(*relative_paths):
    """Full path corresponding to 'relative_paths' components."""
    # Anchor every relative component at the project directory.
    path_parts = (MetaDefs.project_dir,) + relative_paths
    return os.path.join(*path_parts)
import tempfile
import subprocess


def get_html_docker(url: str) -> str:
    """Returns the rendered HTML at *url* as a string.

    Runs headless Chrome inside a docker container and captures the DOM dump.
    Raises OSError when the container fails or produces no output.
    """
    command = [
        'docker',
        'container',
        'run',
        '--rm',
        'zenika/alpine-chrome',
        '--no-sandbox',
        '--dump-dom',
        str(url),
    ]
    with tempfile.NamedTemporaryFile(suffix='.html') as tmp:
        # Stream both stdout and stderr of the container into the temp file.
        result = subprocess.run(command,
                                stdout=tmp,
                                stderr=subprocess.STDOUT,
                                )
        if result.returncode != 0:
            raise OSError(f"Command failed [{result.returncode}]:\n{' '.join(command)}")
        with open(tmp.name, 'rb') as dumped:
            html_doc = dumped.read().decode('utf8')
        # Clean up the cmd's previous print statements
        # html_doc = html_doc[html_doc.find('<html>'):].strip()
        if not html_doc:
            raise OSError(f"No HTML could be obtained for {url}")
        return html_doc
def convert_from_pj_fat_choice(py_json):
    """
    Convert the given py_json into a PlayerFeedVegetarian
    :param py_json: the PyJSON to convert
    :type py_json: PyJSON
    :return: the equivalent PlayerFeedVegetarian
    :rtype: PlayerFeedVegetarian
    """
    species_index, fat_to_store = py_json
    if is_natural_plus(fat_to_store):
        return PlayerStoreFat(species_index, fat_to_store)
    # Reject anything that is not a natural number of fat tokens.
    raise ConvertPyJSONError("Error Converting Fat Tissue Choice")
# Fix: OrderedDict must come from collections; the typing alias is deprecated
# and only intended for annotations.
from collections import OrderedDict


def marshal(data, fields, envelope=None):
    """Takes raw data (in the form of a dict, list, object) and a dict of
    fields to output and filters the data based on those fields.
    :param data: the actual object(s) from which the fields are taken from
    :param fields: a dict of whose keys will make up the final serialized
                   response output
    :param envelope: optional key that will be used to envelop the serialized
                     response
    >>> from sanic_restful_api import fields, marshal
    >>> data = { 'a': 100, 'b': 'foo' }
    >>> mfields = { 'a': fields.Raw }
    >>> marshal(data, mfields)
    OrderedDict([('a', 100)])
    >>> marshal(data, mfields, envelope='data')
    OrderedDict([('data', OrderedDict([('a', 100)]))])
    """
    def make(cls):
        # Accept either a field class (instantiate it) or an instance.
        if isinstance(cls, type):
            return cls()
        return cls
    # Lists/tuples are marshalled element-wise, optionally enveloped.
    if isinstance(data, (list, tuple)):
        return (OrderedDict([(envelope, [marshal(d, fields) for d in data])])
                if envelope else [marshal(d, fields) for d in data])
    # Nested dicts of fields recurse; leaf fields extract via .output().
    items = ((k, marshal(data, v)
              if isinstance(v, dict) else make(v).output(k, data))
             for k, v in fields.items())
    return OrderedDict(
        [(envelope, OrderedDict(items))]) if envelope else OrderedDict(items)
def _generator2(path):
    """
    Generator over (image-sequence, GHI-target) training examples.

    Args:
        path: path of the dataframe
    Returns:
        yield outputs of X and Y pairs
    """
    # NOTE(review): relies on module-level globals `avg_x` / `std_x`
    # (normalization statistics) and on helpers init_args / load_catalog /
    # load_numpy / shuffle defined elsewhere in this module -- confirm.
    args = init_args()
    catalog = load_catalog(path)
    def preprocess(x):
        # Standardize one image; also report whether it was entirely zero
        # (i.e. a blank/missing satellite frame).
        zero = False
        if not np.any(x):
            zero = True
        img = (x - avg_x) / std_x
        return img, zero
    print("starting generator again...")
    unique_paths = pd.unique(catalog['hdf5_8bit_path'].values.ravel())
    # print(unique_paths,type(unique_paths))
    epochs = args.epochs
    zero_img_count = 0
    k_sequences = 0
    GHI_sequence_steps = [4, 8, 12]  # in the future, in addition to T0
    GHI_sequence_steps_reverse = [24, 20, 12, 0]
    img_sequence_step = 2
    for i in range(1):
        np.random.shuffle(unique_paths)
        # print(shuffled)
        for path in unique_paths:
            # samples = fetch_all_samples_hdf5(args,path)
            try:
                samples = load_numpy(path)
            except Exception as e:
                # Skip HDF5 files that fail to load.
                continue
            X = []
            Y = []
            grouped = catalog[path == catalog.hdf5_8bit_path]
            for station in args.station_data.keys():
                # print("I am here")
                df = grouped[grouped.station == station]
                argsort = np.argsort(df['hdf5_8bit_offset'].values)
                offsets_0 = df['hdf5_8bit_offset'].values[argsort]
                matching_offsets_imgs = offsets_0
                # Keep only offsets whose preceding image frames also exist
                # (a no-op while k_sequences == 0).
                for i in range(k_sequences):
                    matching_offsets_imgs = np.intersect1d(
                        matching_offsets_imgs, matching_offsets_imgs + img_sequence_step)
                # print("matching offsets",matching_offsets_imgs)
                # For GHIs
                matching_offsets_GHIs = matching_offsets_imgs
                for GHI_sequence_step in GHI_sequence_steps:
                    matching_offsets_GHIs = np.intersect1d(
                        matching_offsets_GHIs, matching_offsets_GHIs + GHI_sequence_step)
                # print("matching offsets_GHIS",matching_offsets_GHIs)
                GHI_pairs_list = []
                CS_GHI_pairs_list = []
                y_pairs_list = []
                # Collect measured GHI, clear-sky GHI and the target
                # (clear-sky minus measured) at each backward-looking step.
                for i, GHI_sequence_step in enumerate(
                        GHI_sequence_steps_reverse):
                    GHI_vals = df[df.hdf5_8bit_offset.isin(
                        matching_offsets_GHIs - GHI_sequence_step)].GHI.values
                    CS_GHI_vals = df[df.hdf5_8bit_offset.isin(
                        matching_offsets_GHIs - GHI_sequence_step)].CLEARSKY_GHI.values
                    GHI_pairs_list.append(GHI_vals)
                    CS_GHI_pairs_list.append(CS_GHI_vals)
                    y = CS_GHI_vals - GHI_vals
                    y_pairs_list.append(y)
                GHI_pairs = zip(*GHI_pairs_list)
                CS_GHI_pairs = zip(*CS_GHI_pairs_list)
                y_pairs = zip(*y_pairs_list)
                # iso_dt = df[df.hdf5_8bit_offset.isin(matching_offsets_imgs)]['iso-datetime'].tolist()
                # date_time_attrs = [get_datetime_attrs(dt) for dt in iso_dt]
                offsets_pairs_list = []
                for i in range(k_sequences + 1):
                    offsets_pairs_list.append(
                        matching_offsets_imgs - (k_sequences + i) * img_sequence_step)
                offsets_pairs_list.append(matching_offsets_imgs)
                offset_pairs = zip(*offsets_pairs_list)
                GHIs_0 = df[df.hdf5_8bit_offset.isin(offsets_0)].GHI.values
                CS_GHI_0 = df[df.hdf5_8bit_offset.isin(
                    offsets_0)].CLEARSKY_GHI.values
                y_0 = CS_GHI_0 - GHIs_0
                # example_pair = zip(offset_pairs, date_time_attrs, CS_GHI_pairs, GHI_pairs)
                # print(list(offset_pairs), list(CS_GHI_pairs), list(GHI_pairs), list(y_pairs))
                example_pair = zip(
                    offset_pairs, CS_GHI_pairs, GHI_pairs, y_pairs)
                # if not (len(offset_pairs) == len(CS_GHI_pairs) == len(GHI_pairs) == len(y_pairs)):
                    # print("golmaal hai bhai sab golmaal hai")
                # print(list(example_pair))
                sample = samples[station]
                for offsets, CS_GHIs, GHIs, ys in example_pair:
                    imgs = []
                    for offset in offsets:
                        # Reorder to channels-last layout (H, W, C).
                        img = sample[offset].swapaxes(0, 1).swapaxes(1, 2)
                        img, status = preprocess(img)
                        imgs.append(img)
                    # img_1 = sample[offset_1].swapaxes(0,1).swapaxes(1,2)
                    # img_0 = sample[offset_0].swapaxes(0,1).swapaxes(1,2)
                    if k_sequences == 0:
                        imgs = imgs[0]
                    if False:
                        # NOTE(review): dead branch -- `date_time_pair` is
                        # undefined, enabling this would raise NameError.
                        a = (imgs, date_time_pair, CS_GHIs)
                        yield (imgs, date_time_pair, CS_GHIs), (GHIs)
                    else:
                        # print("yielding")
                        X.append(imgs)
                        Y.append(ys)
                        # yield imgs, ys
            # np.random.shuffle(X)
            # np.random.shuffle(Y)
            # Deterministic joint shuffle keeps X/Y aligned across epochs.
            X, Y = shuffle(X, Y, random_state=0)
            for i, j in zip(X, Y):
                yield i, j
    print("Zero img count:", zero_img_count)
import requests
from bs4 import BeautifulSoup
from datetime import datetime
# Fix: Counter must come from collections; the typing alias is deprecated and
# only intended for annotations.
from collections import Counter


def main():
    """
    Get the urls from receita website (to see structure of dict -- see tests)
    :return: dict with urls from files as well as last modified date and size in bytes
    """
    # get page content
    page = requests.get(CORE_URL_FILES)
    # BeautifulSoup object
    soup = BeautifulSoup(page.text, 'html.parser')
    table = soup.find('table')
    rows = table.find_all('tr')
    list_last_modified_at = []
    print('creating dict files url')
    for row in rows:
        # Hoisted: the original re-ran row.find_all('td') three times per row.
        cells = row.find_all('td')
        if cells and cells[1].find('a')['href'].endswith('.zip'):
            # get last modified time and parse to date (ex: '2021-07-19')
            list_last_modified_at.append(
                datetime.strptime(cells[2].text.strip(), '%Y-%m-%d %H:%M').strftime(
                    '%Y-%m-%d'))
    # get the most common on 'last_modified' from source
    ref_date, occurences = Counter(list_last_modified_at).most_common(1)[0]
    print(
        f"ref date will be: '{ref_date}' with {occurences} out of {len(list_last_modified_at)} ({occurences / len(list_last_modified_at):.1%}) ")
    return ref_date
def dropDuplicatedResponses(dfResponses, dupIdColumns=None, returnDups=False):
    """
    Take a parsed response data frame, returns a data frame after dropping duplications
    :param dfResponses: data frame with responses
    :param dupIdColumns: a list of column names to keep track when we identify duplications.
    :param returnDups: optionally return duplicated records instead of non-duplicated; default to False
    :return: df with duplications eliminated, unless returnDups == True, in which case return only dups
    """
    assert (isinstance(dfResponses, pd.DataFrame))
    if dupIdColumns is None:
        dupIdColumns = ['BookletNumber', 'AccessionNumber']
    # Fix: the original asserted a bare generator expression, which is always
    # truthy; wrap in all() so a missing column actually trips the assertion.
    assert all(label in dfResponses.columns for label in dupIdColumns)
    # looking for duplicated responses
    if returnDups:
        # keep=False marks *every* member of a duplicate group
        return dfResponses.loc[dfResponses.duplicated(dupIdColumns, keep=False)].sort_values(dupIdColumns)
    else:
        return dfResponses.drop_duplicates(dupIdColumns)
def load(model_name):
    """Loads and returns pickle File
    """
    # Thin wrapper: delegate the actual deserialization to load_file().
    model = load_file(model_name)
    return model
def get_policy_acc(graph, values):
    """ compute the accuracy of policy predictions (per graph averaged manner)
    :param graph: dgl.graph; possibly batched
    :param values: (predicted) state values
    :return: (mean accuracy, std of accuracies, per-graph accuracy tensor)
    """
    predicted_policy = get_policy(graph, values)
    with graph.local_scope():
        # Flag each node whose stored policy matches the prediction, then
        # average the flags per graph in the batch.
        graph.ndata['correct'] = (graph.ndata['policy'] == predicted_policy).float()
        per_graph_acc = dgl.readout_nodes(graph, 'correct', op='mean')  # [batch size x 1]
        acc_mean = per_graph_acc.mean().item()
        acc_std = per_graph_acc.std().item()
    return acc_mean, acc_std, per_graph_acc
def domain_domain_pair_association(domain_type_dict, opposite_type_dict=None):
    """
    Compute domain domain association.
    domain_type_dict is a {domain_name:{T:[gene_ids], AT:[gene_ids]} ... }

    :param domain_type_dict: mapping domain -> {type: [gene_ids]}
    :param opposite_type_dict: mapping of each type to its opposite type;
        defaults to {'T': 'AT', 'AT': 'T'}
    :return: symmetric mapping domain -> {other_domain: [shared gene_ids]}
    """
    # Fix: avoid a mutable dict as a default argument value.
    if opposite_type_dict is None:
        opposite_type_dict = {'T': 'AT', 'AT': 'T'}
    domain_domain_dict = {}
    for domain, type2genes in domain_type_dict.items():
        domain_dict = domain_domain_dict.setdefault(domain, {})
        for domain_next, type2genes_next in domain_type_dict.items():
            if domain_next in domain_dict:
                # Already filled in from the symmetric direction.
                continue
            domain_dict_next = domain_domain_dict.setdefault(domain_next, {})
            pairs = []
            # Genes shared between a type and its opposite type count as pairs.
            # ("gene_type" renamed from "type" to avoid shadowing the builtin.)
            for gene_type, opposite_type in opposite_type_dict.items():
                genes = type2genes.setdefault(gene_type, [])
                genes_next = type2genes_next.setdefault(opposite_type, [])
                pairs += list(set(genes_next) & set(genes))
            if len(pairs) > 0:
                # Record the association symmetrically.
                domain_dict[domain_next] = pairs
                domain_dict_next[domain] = pairs
    return domain_domain_dict
def pull_jhu_data(base_url: str, metric: str, pop_df: pd.DataFrame) -> pd.DataFrame:
    """Pulls the latest Johns Hopkins CSSE data, and conforms it into a dataset
    The output dataset has:
    - Each row corresponds to (County, Date), denoted (FIPS, timestamp)
    - Each row additionally has a column `new_counts` corresponding to the new
      new_counts (either `confirmed` cases or `deaths`), and a column
      `cumulative_counts`, correspond to the aggregate metric from January 22nd
      (as of April 27th) until the latest date.
    Note that the raw dataset gives the `cumulative_counts` metric, from which
    we compute `new_counts` by taking first differences. Hence, `new_counts`
    may be negative. This is wholly dependent on the quality of the raw
    dataset.
    We filter the data such that we only keep rows with valid FIPS, or "FIPS"
    codes defined under the exceptions of the README. The current exceptions
    include:
    - 70002: Dukes County and Nantucket County in Massachusetts, which are
      reported together
    - 70003: Kansas City, Missouri, which reports counts separately from the
      four counties it intesects (Platte, Cass, Clay, Jackson Counties)
    Parameters
    ----------
    base_url: str
        Base URL for pulling the JHU CSSE data
    metric: str
        One of 'confirmed' or 'deaths'.
    pop_df: pd.DataFrame
        Read from static file "fips_population.csv".
    Returns
    -------
    pd.DataFrame
        Dataframe as described above.
    """
    # Read data
    df = pd.read_csv(base_url.format(metric=metric))
    # FIPS are missing for some nonstandard FIPS
    date_cols = [col_name for col_name in df.columns if detect_date_col(col_name)]
    keep_cols = date_cols + ['UID']
    df = df[keep_cols]
    # Wide -> long: one row per (UID, date)
    df = df.melt(
        id_vars=["UID"],
        var_name="timestamp",
        value_name="cumulative_counts",
    )
    df["timestamp"] = pd.to_datetime(df["timestamp"])
    gmpr = GeoMapper()
    df = gmpr.replace_geocode(df, "jhu_uid", "fips", from_col="UID", date_col="timestamp")
    # Merge in population LOWERCASE, consistent across confirmed and deaths
    # Set population as NAN for fake fips
    # Fix: rename on a copy so the caller's pop_df is not mutated in place.
    pop_df = pop_df.rename(columns={'FIPS': 'fips'})
    pop_df['fips'] = pop_df['fips'].astype(int).\
        astype(str).str.zfill(5)
    df = pd.merge(df, pop_df, on="fips", how='left')
    # Add a dummy first row here on day before first day
    # code below could be cleaned with groupby.diff
    min_ts = min(df["timestamp"])
    df_dummy = df.loc[df["timestamp"] == min_ts].copy()
    df_dummy.loc[:, "timestamp"] = min_ts - pd.Timedelta(days=1)
    df_dummy.loc[:, "cumulative_counts"] = 0
    df = pd.concat([df_dummy, df])
    # Obtain new_counts
    df.sort_values(["fips", "timestamp"], inplace=True)
    df["new_counts"] = df["cumulative_counts"].diff()  # 1st discrete difference
    # Handle edge cases where we diffed across fips
    mask = df["fips"] != df["fips"].shift(1)
    df.loc[mask, "new_counts"] = np.nan
    df.reset_index(inplace=True, drop=True)
    # Final sanity checks
    days_by_fips = df.groupby("fips").count()["cumulative_counts"].unique()
    unique_days = df["timestamp"].unique()
    # each FIPS has same number of rows
    if (len(days_by_fips) > 1) or (days_by_fips[0] != len(unique_days)):
        raise ValueError("Differing number of days by fips")
    min_timestamp = min(unique_days)
    max_timestamp = max(unique_days)
    n_days = (max_timestamp - min_timestamp) / np.timedelta64(1, "D") + 1
    if n_days != len(unique_days):
        # Fix: the second fragment was missing its f-prefix, so
        # "{max_timestamp}" was printed literally in the error message.
        raise ValueError(
            f"Not every day between {min_timestamp} and "
            f"{max_timestamp} is represented."
        )
    return df.loc[
        df["timestamp"] >= min_ts,
        [  # Reorder
            "fips",
            "timestamp",
            "population",
            "new_counts",
            "cumulative_counts",
        ],
    ]
def dict_from_graph_attr(graph, attr, array_values=False):
    """
    Parameters
    ----------
    graph : networkx.Graph
    attr : str, iterable, or dict
        If str, then it specifies the an attribute of the graph's nodes.
        If iterable of strings, then multiple attributes of the graph's nodes
        are specified.
        If dict, then each key is a node and each value the corresponding
        attribute value. (This format is also this function's return format.)
    array_values : bool, default: False
        If True, then each value is transformed into a :class:`numpy.ndarray`.
    Returns
    -------
    result_dict : dict
        Each key is a node in the graph.
        If `array_values` is False, then each value is a list of attribute
        values corresponding to the key node.
        If `array_values` is True, then each value this list of attribute
        values is turned into a :class:`numpy.ndarray`. That requires the
        values to be shape-compatible for stacking.
    Examples
    --------
    >>> import networkx as nx
    >>> edges = [(0, 1), (1, 2),        # 0 | 1 | 2
    ...          (0, 3), (1, 4), (2, 5),  # ---------
    ...          (3, 4), (4,5)]          # 3 | 4 | 5
    >>> graph = nx.Graph(edges)
    >>> data_dict = {node: 10*node for node in graph}
    >>> nx.set_node_attributes(graph, data_dict, "test_data")
    >>> desired = {key: [value] for key, value in data_dict.items()}
    >>> dict_from_graph_attr(graph, "test_data") == desired
    True
    >>> dict_from_graph_attr(graph, ["test_data"]) == desired
    True
    """
    # Dict input is already in the return format: pass it straight through.
    if isinstance(attr, dict):
        return attr
    attr_names = [attr] if isinstance(attr, str) else attr
    result_dict = {node: [] for node in graph.nodes()}
    for attr_name in attr_names:
        for node, value in nx.get_node_attributes(graph, attr_name).items():
            result_dict[node].append(value)
    if array_values:
        # Stack each node's value list into a single ndarray.
        for node in result_dict:
            result_dict[node] = np.array(result_dict[node])
    return result_dict
from scipy.signal import freqz
def _filter_attenuation(h, frequencies, gains):
""" Compute minimum attenuation at stop frequency.
Args:
h (array): Filter coefficients.
frequencies (list): Transition frequencies normalized.
gains (array): Filter gain at frequency sampling points.
Returns:
att_db: Minimum attenuation per frequency
att_freq: Frequencies
Notes:
Adapted from mne.filters
"""
frequencies = np.array(frequencies)
_, filt_resp = freqz(h.ravel(), worN=np.pi * frequencies)
filt_resp = np.abs(filt_resp) # use amplitude response
filt_resp[np.where(gains == 1)] = 0
idx = np.argmax(filt_resp)
att_db = -20 * np.log10(np.maximum(filt_resp[idx], 1e-20))
att_freq = frequencies[idx]
return att_db, att_freq | e412d987ae83e5f6ae4bae5f550d1cb22590bd41 | 3,630,816 |
def about1():
    """
    Name : about1 function
    Module : routes
    Description : This function loads about1.html page.
    Parameters: None
    Returns : This function returns the About-us tab of the Web app.
    Written By : Abhishek Mestry ,Ninad Kadam ,Viresh Dhuri
    Version : 1.0.0
    Revision : None
    """
    # Log the visit, then render the About-us template.
    logger.info("[Process 3 : About us page sucessfully!]")
    page = render_template("about1.html")
    return page
import os
def execute_barrbap(organism, dna_file, con):
    """determines the 16sRNA sequences using barrnap tool

    :param organism: organism identifier, used to name the output file
    :param dna_file: path to the input DNA FASTA file
    :param con: open connection (closed before exiting on interrupt), may be None
    :return: path of the barrnap output file (empty file if barrnap produced none)
    """
    # NOTE(review): `cwd` is a module-level global (working directory) --
    # confirm where it is set.
    # barrnap output file name e.g. barrnap.NC_000913
    barrnap_out = cwd + "/barrnap." + organism
    # > /dev/null 2>&1 is to disable stdout from displaying on terminal
    # NOTE(review): shell string built by concatenation -- command injection
    # risk if organism/dna_file ever come from untrusted input.
    barrnap_cmd = "barrnap " + str(dna_file) + " --quiet --outseq " +\
        barrnap_out + " > /dev/null 2>&1"
    try:
        # print('Barrnap is RUNNING for ', organism, ' !!!!!')
        # Only run barrnap when the input file is non-empty.
        if os.stat(dna_file).st_size !=0:
            os.system(barrnap_cmd)
        # Guarantee the output file exists even if barrnap wrote nothing.
        if not os.path.isfile(barrnap_out):
            file_cmd = "touch " + barrnap_out
            try:
                os.system(file_cmd)
                # subprocess.run([file_cmd], check = True)
            except:
                # NOTE(review): bare except -- apparently intended to catch
                # KeyboardInterrupt (Ctrl-C); it also swallows any other error.
                # print('Ctrl C pressed, program safely exited !!!!!!!!?')
                os.system('rm ' + str(organism) + '*')
                # barrnap_out is not yet created, no removal required !!!
                # os.system('rm -r ' + barrnap_out)
                if con:
                    con.close()
                # os._exit skips cleanup handlers and exits the process hard.
                os._exit(0)
        return barrnap_out
    except:
        # NOTE(review): second bare except, same Ctrl-C handling pattern.
        print('Ctrl C pressed, program safely exited !!!!!!!!! ###')
        if con:
            con.close()
        os._exit(0)
from operator import and_
from operator import lt
from operator import ge
from operator import eq
# NOTE(review): `cast` below is called as cast(value, dtype) -- that is the
# tensor-library cast, not typing.cast(type, value); this import looks wrong.
from typing import cast
def local_adv_sub1_adv_inc_sub1(fgraph, node):
    """Optimize the possible AdvSub1(AdvSetSub1(...), ...).
    AdvancedSubtensor1(AdvancedSetSubtensor1(x, y, idx), idx) -> y
    Notes
    -----
    This opt add AssertOp. Otherwise, it would remove shape and
    index error. If you want to get rid of them, see the
    :ref:`unsafe_optimization` section.
    WARNING:
    A previous version of this optimization also matched
    AdvancedSubtensor1(AdvancedIncSubtensor1(0s, y, idx), idx) -> y
    This is incorrect when there are duplicate indices.
    The current version warns the user about potential past issues.
    """
    # Only rewrite AdvancedSubtensor1 nodes whose input is produced by an
    # AdvancedIncSubtensor1 (set/inc-subtensor) op.
    if not isinstance(node.op, AdvancedSubtensor1):
        return
    inp = node.inputs[0]
    if not inp.owner or not isinstance(inp.owner.op, AdvancedIncSubtensor1):
        return
    idx = node.inputs[1]
    idx2 = inp.owner.inputs[2]
    x = inp.owner.inputs[0]
    y = inp.owner.inputs[1]
    # Both ops must use the *same* index variable (identity, not equality).
    if idx is not idx2:
        return
    if (
        not inp.owner.op.set_instead_of_inc
        and
        # Don't use only_process_constants=True. We need to
        # investigate Alloc of 0s but with non constant shape.
        extract_constant(x, elemwise=False) != 0
    ):
        return
    # NOTE(review): this unconditional return makes the increment branch above
    # effectively dead -- any non-set op bails out here regardless of x,
    # consistent with the WARNING in the docstring.
    if not inp.owner.op.set_instead_of_inc:
        return
    # Runtime guard: all indices must be within bounds of x's first dimension.
    cond = [at_all(and_(lt(idx, x.shape[0]), ge(idx, -x.shape[0])))]
    # If static shape inference can't prove idx and y agree on dim 0,
    # also assert it at runtime.
    if not fgraph.shape_feature.same_shape(idx, y, 0, 0):
        cond.append(eq(idx.shape[0], y.shape[0]))
    r = Assert(
        "Bad indexing or shapes in a AdvancedIncSubtensor1 " "that was optimized away"
    )(y, *cond)
    copy_stack_trace(y, r)
    if r.dtype == node.outputs[0].dtype:
        return [r]
    # It is possible that y is upcast or downcast to x.dtype.
    # In all case, as we set or add with 0, we can just cast y.
    r2 = cast(r, node.outputs[0].dtype)
    # Copy over stacktrace from before casting, since
    # we don't expect problems in the casting operation,
    # and any problems in the indexing would have been spotted above.
    copy_stack_trace(r, r2)
    return [r2]
import asyncio


def mock_coro(return_value=None, exception=None):
    """Return a coro that returns a value or raise an exception."""
    future = asyncio.Future()
    # Pre-resolve the future so awaiting it completes immediately.
    if exception is None:
        future.set_result(return_value)
    else:
        future.set_exception(exception)
    return future
from typing import Tuple


def convert_to_classes(data: MoleculeDataset, num_bins: int = 20) -> Tuple[MoleculeDataset,
                                                                           np.ndarray,
                                                                           MoleculeDataset]:
    """
    Converts regression data to classification data by binning.
    :param data: Regression data as a list of molecule datapoints.
    :param num_bins: The number of bins to use when doing regression_with_binning.
    :return: A tuple with the new classification data, a numpy array with the bin centers,
    and the original regression data.
    """
    print('Num bins for binning: {}'.format(num_bins))
    # Snapshot the untouched regression targets before binning in place.
    original_data = deepcopy(data)
    quantile_points = [float(i)/float(num_bins) for i in range(num_bins+1)]
    for task in range(data.num_tasks):
        task_values = np.array([targets[task] for targets in data.targets])
        # Equal-population bin edges from the empirical quantiles.
        bin_edges = np.quantile(task_values, quantile_points)
        for index in range(len(data)):
            # Highest edge <= value picks the bin; clamp into the last bin.
            bin_number = min((bin_edges <= task_values[index]).sum() - 1, num_bins-1)
            data[index].targets[task] = bin_number
    bin_centers = np.array([(bin_edges[i] + bin_edges[i+1])/2 for i in range(num_bins)])
    return data, bin_centers, original_data
def construct_lambda_schedule(num_windows):
    """Generate a length-num_windows list of lambda values from 0.0 up to 1.0
    Notes
    -----
    manually optimized by YTZ
    """
    # Empirically, we see the largest variance in std <du/dl> near the
    # endpoints in the nonbonded terms. Bonded terms are roughly linear,
    # so extra windows are concentrated near lambda = 0 and lambda = 1.
    n_head = int(.35 * num_windows)
    n_mid = int(.30 * num_windows)
    n_tail = num_windows - n_head - n_mid
    schedule = np.concatenate([
        np.linspace(0.0, 0.25, n_head, endpoint=False),
        np.linspace(0.25, 0.75, n_mid, endpoint=False),
        np.linspace(0.75, 1.0, n_tail, endpoint=True),
    ])
    assert len(schedule) == num_windows
    return schedule
import torch


def compute_face_normals_and_areas(vertices: torch.Tensor, faces: torch.Tensor):
    """
    Compute per-face unit normals and areas of a triangle mesh.
    :params
        vertices (B,N,3) -- or (N,3) for a single unbatched mesh
        faces (B,F,3) -- or (F,3); integer indices into vertices
    :return
        face_normals (B,F,3) -- squeezed to (F,3) for unbatched input
        face_areas (B,F) -- squeezed to (F,) for unbatched input
    """
    ndim = vertices.ndimension()
    if vertices.ndimension() == 2 and faces.ndimension() == 2:
        # Fix: add the batch dim without mutating the caller's tensors
        # (the original used in-place unsqueeze_/squeeze_ on the inputs).
        vertices = vertices.unsqueeze(0)
        faces = faces.unsqueeze(0)
    B, N, D = vertices.shape
    F = faces.shape[1]
    # Gather the three corner vertices of every face: (B,F,3,D)
    face_vertices = torch.gather(
        vertices, 1, faces.view(B, -1, 1).expand(-1, -1, D)).view(B, F, 3, D)
    # Normal direction = cross product of two edge vectors.
    face_normals = torch.cross(face_vertices[:, :, 1, :] - face_vertices[:, :, 0, :],
                               face_vertices[:, :, 2, :] - face_vertices[:, :, 1, :], dim=-1)
    # Triangle area is half the cross-product magnitude (drop the pointless
    # clone the original made before reducing).
    face_areas = torch.sqrt((face_normals ** 2).sum(dim=-1)) / 2
    # Fix: use the fully-qualified functional normalize instead of relying on
    # a bare `normalize` name imported elsewhere in the module.
    face_normals = torch.nn.functional.normalize(face_normals, dim=-1)
    if ndim == 2:
        face_normals = face_normals.squeeze(0)
        face_areas = face_areas.squeeze(0)
    # assert (not np.any(face_areas.unsqueeze(-1) == 0)), 'has zero area face: %s' % mesh.filename
    return face_normals, face_areas
def der_Cquat_by_v(q,v):
    """
    Being C=C(quat) the rotational matrix depending on the quaternion q and
    defined as C=quat2rotation(q), the function returns the derivative, w.r.t. the
    quanternion components, of the vector dot(C,v), where v is a constant
    vector.
    The elements of the resulting derivative matrix D are ordered such that:
    .. math:: d(C*v) = D*d(q)
    where :math:`d(.)` is a delta operator.
    """
    vx, vy, vz = v
    q0, q1, q2, q3 = q
    # The 12 derivative entries reduce to five shared sub-expressions.
    a = q0*vx + q2*vz - q3*vy
    b = q1*vx + q2*vy + q3*vz
    c = q0*vz + q1*vy - q2*vx
    d = -q0*vy + q1*vz - q3*vx
    e = q0*vy - q1*vz + q3*vx
    return 2. * np.array([[a, b, c, d],
                          [e, -c, b, a],
                          [c, e, -a, b]])
def calc_BMDs(Tcpl, BMR=None,
              ret='dict', add_info=False,
              dbg=False):
    """
    Calculate benchmark doses corresponding to bmrs:-
    E: fractional efficacy (top)
    Z: number of standard deviations (assumes response is in units of Z)

    :param Tcpl: fit record containing 'best_fit' plus curve metadata
    :param BMR: dict of benchmark response levels under keys 'E' and 'Z';
        defaults to dict(E=[10, 20, 30], Z=[1, 2, 3])
    :param ret: 'df' for a pd.DataFrame, anything else for a list of dicts
    :param add_info: if True, copy identifying columns from Tcpl (forces 'df')
    :param dbg: if True, report each benchmark level that fails to solve
    :return: benchmark doses (log10 and micromolar); empty DataFrame when the
        best fit is the constant model or nothing could be solved
    """
    if BMR is None:
        # Fix: avoid a mutable dict as a default argument value.
        BMR = dict(E=[10, 20, 30], Z=[1, 2, 3])
    BF = Tcpl['best_fit']
    if BF['model'] == 'cnst':
        return pd.DataFrame()
    C, R, model, kw = calc_Resp(Tcpl)
    ci, cf = C.min(), C.max()

    def _solve(target):
        # Smallest root of model(c) == target strictly inside (ci, cf);
        # raises when fsolve fails or no root lies in range.
        def bmdf(c):
            return model(c, **kw) - target
        soln = optz.fsolve(bmdf, [ci, cf])
        soln = soln[np.logical_and(soln > ci, soln < cf)]
        return np.min(soln)

    BMD = []
    # Fix: bare `except:` narrowed to Exception so KeyboardInterrupt/SystemExit
    # are no longer swallowed; the duplicated E/Z solve blocks share _solve().
    for e0 in BMR.get('E', []):
        try:
            bmd0 = _solve(e0 * kw['tp'] / 100.0)
        except Exception:
            if dbg: print("Failed E %0.2f" % e0)
        else:
            BMD.append(dict(bmr_type='E', bmr=e0, bmd=bmd0, bmd_um=10**bmd0))
    for z0 in BMR.get('Z', []):
        try:
            bmd0 = _solve(z0)
        except Exception:
            if dbg: print("Failed Z %0.2f" % z0)
        else:
            BMD.append(dict(bmr_type='Z', bmr=z0, bmd=bmd0, bmd_um=10**bmd0))
    if len(BMD) == 0:
        return pd.DataFrame()
    if add_info:
        ret = 'df'
    if ret != 'df':
        return BMD
    DF = pd.DataFrame(BMD)
    if add_info:
        for i in ['name', 'timeh', 'ft', 'ft_ch', 'ch', 'ft_type']:
            DF.loc[:, i] = Tcpl[i]
    return DF
import requests
import json


def request_records(request_params):
    """
    Download utility rate records from USURDB given a set of request
    parameters.
    :param request_params: dictionary with request parameter names as
        keys and the parameter values
    :return: decoded JSON records
    """
    response = requests.get(
        "https://api.openei.org/utility_rates?", params=request_params
    )
    # strict=False prevents an error (control characters are allowed inside
    # strings)
    return json.loads(response.content, strict=False)
def _handle_sort_key(model_name, sort_key=None):
"""Generate sort keys according to the passed in sort key from user.
:param model_name: Database model name be query.(alarm, meter, etc.)
:param sort_key: sort key passed from user.
return: sort keys list
"""
sort_keys_extra = {'alarm': ['name', 'user_id', 'project_id'],
'meter': ['user_id', 'project_id'],
'resource': ['user_id', 'project_id', 'timestamp'],
}
sort_keys = sort_keys_extra[model_name]
if not sort_key:
return sort_keys
# NOTE(Fengqian): We need to put the sort key from user
# in the first place of sort keys list.
try:
sort_keys.remove(sort_key)
except ValueError:
pass
finally:
sort_keys.insert(0, sort_key)
return sort_keys | aef2d996d9d18593ec129c4a37bf8150b3e9c0fe | 3,630,827 |
def view_clear_pages_cache(self, request, form):
    """ Clears the pages cache. """
    layout = DefaultLayout(self, request)
    if not form.submitted(request):
        # Form not yet submitted: render the confirmation page.
        return {
            'layout': layout,
            'form': form,
            'title': _("Clear cache"),
            'callout': _(
                "Elections and votes are cached for ${expiration} seconds. The "
                "cache is automatically cleared for new results and other "
                "updates. It is not normally necessary to clear the cache "
                "yourself.",
                mapping={'expiration': self.cache_expiration_time}
            ),
            'cancel': layout.manage_link
        }
    # Submitted: flush, notify, and return to the manage view.
    request.app.pages_cache.flush()
    request.message(_("Cache cleared."), 'success')
    return redirect(layout.manage_link)
def cyan_on_red(string, *funcs, **additional):
    """Text color - cyan on background color - red. (see sgr_combiner())."""
    # NOTE(review): **additional is accepted but never forwarded to
    # sgr_combiner -- confirm whether that is intentional.
    background = (ansi.BG_RED,)
    return sgr_combiner(string, ansi.CYAN, *funcs, attributes=background)
def calc_glass_constants(nd, nF, nC, *partials):
    """Given central, blue and red refractive indices, calculate Vd and PFd.
    Args:
        nd, nF, nC: refractive indices at central, short and long wavelengths
        partials (tuple): if present, 2 ref indxs, n4 and n5, wl4 < wl5
    Returns:
        V-number and relative partial dispersion from F to d
    If `partials` is present, the return values include the central wavelength
    index and the relative partial dispersion between the 2 refractive indices
    provided from `partials`.
    """
    delta_FC = nF - nC
    abbe_number = (nd - 1.0) / delta_FC
    partial_Fd = (nF - nd) / delta_FC
    if len(partials) != 2:
        return abbe_number, partial_Fd
    # With exactly two extra indices, also report P(4,5) and echo nd back.
    n4, n5 = partials
    return nd, abbe_number, partial_Fd, (n4 - n5) / delta_FC
import subprocess
import click


def git_status_check(cwd):
    """check whether there are uncommited changes in current dir
    Parameters
    ----------
    cwd : str
        current working directory to check git status
    Returns
    -------
    bool
        indicating whether there are uncommited changes
    """
    # --porcelain prints one line per changed file, nothing when clean.
    proc = subprocess.Popen(["git status --porcelain"],
                            stdout=subprocess.PIPE, shell=True, cwd=cwd)
    out, _ = proc.communicate()
    if out.decode() == "":
        return False
    click.echo("Uncommited changes exist on branch")
    return True
def light_rgb_schema(gateway, child, value_type_name):
    """Return a validation schema for V_RGB."""
    # RGB lights validate both the color value and the on/off status as strings.
    rgb_schema = {"V_RGB": cv.string, "V_STATUS": cv.string}
    return get_child_schema(gateway, child, value_type_name, rgb_schema)
from datetime import datetime
def macro_timedelta(start_date, years=0, months=0, days=0):
    """Return a timedelta spanning years/months/days relative to start_date.

    Since datetime doesn't provide timedeltas at the year or month level,
    this function generates timedeltas of the appropriate sizes.

    Args:
        start_date: anchor date (datetime.date or datetime.datetime).
        years, months, days: offsets to span; month overflow/underflow rolls
            into the year count.

    Returns:
        datetime.timedelta: span from start_date to the shifted date.
    """
    # Import the modules locally: a file-level `from datetime import datetime`
    # would shadow the module name and break `datetime.timedelta` below.
    import calendar
    import datetime

    delta = datetime.timedelta(days=days)
    new_month = start_date.month + months
    # Normalize the month into 1..12, carrying into the year count.
    while new_month > 12:
        new_month -= 12
        years += 1
    while new_month < 1:
        new_month += 12
        years -= 1
    new_year = start_date.year + years
    # Clamp the day to the target month's length so e.g. Jan 31 + 1 month
    # lands on the last day of February instead of raising ValueError.
    new_day = min(start_date.day, calendar.monthrange(new_year, new_month)[1])
    end_date = datetime.datetime(new_year, new_month, new_day)
    delta += end_date - start_date
    return delta
import re
def chomp_keep_single_spaces(string):
    """Collapse newlines and repeated spaces so only single spaces remain."""
    text = str(string)
    text = text.replace("\n", " ")   # newlines become spaces
    text = re.sub(" +", " ", text)   # collapse runs of spaces to one
    text = text.replace(" ", " ")    # normalize odd space characters
    text = text.replace("\xa0", " ")  # drop non-breaking spaces
    return text.strip(" ")           # trim leading/trailing spaces
def index():
    """Displays the main page"""
    user = get_user()
    # XXX
    return redirect('/login')
    # NOTE(review): everything below is unreachable while the redirect above
    # is in place — presumably a temporary development override; confirm
    # before removing either path.
    # Render template
    render = render_template('main.html', lang=lang, user=user)
    return make_response(render)
import os
from pathlib import Path
from datetime import datetime
def update_ssh_config(sshurl, user, dryrun=False):
    """
    Add a new entry to the SSH config file (``~/.ssh/config``).
    It sets the default user login to the SSH special remote.
    Parameters
    -----------
    sshurl : str
        SSH URL of the git-annex special remote in the form
        `ssh://server.example.org`
    user : str
        User login for authentication to the git-annex special remote
    dryrun : bool
        If `True`, only generates the commands and
        do not execute them
        (Default: `False`)
    Returns
    -------
    cmd : str or None
        Shell-equivalent command of the edit that was (or would be)
        performed, or `None` when the entry already existed.
    """
    # Return cmd to None is no operation is performed
    cmd = None
    # Remove "ssh://" prefix in SSH URL
    sshurl = sshurl.replace('ssh://', '')
    # Path to ssh config file
    ssh_config_path = os.path.join(
        str(Path.home()),
        '.ssh',
        'config'
    )
    print(f'\t* Add new entry in {ssh_config_path}')
    # Save the current content of an existing ssh config file
    content = None
    if os.path.exists(ssh_config_path):
        with open(ssh_config_path, 'r+') as ssh_config:
            content = ssh_config.read()
    # Add the entry if it does not exist in the existing ssh config file.
    # Note: 'w+' truncates the file; the saved content is re-appended below,
    # which moves the new entry to the top of the config.
    with open(ssh_config_path, 'w+') as ssh_config:
        if (content and (f'Host {sshurl}' not in content))\
                or content is None:
            hdr = [
                '## Added by NeuroDataPub ',
                f'({datetime.strftime(datetime.now(), "%d. %B %Y %I:%M%p")}) ##\n',
            ]
            lines = [
                f'Host {sshurl} \n',
                f'\tHostName {sshurl} \n',
                f'\tUser {user} \n\n'
            ]
            try:
                if not dryrun:
                    ssh_config.writelines(hdr + lines)
                print(f'\t - Entry:\n\n{"".join(lines)}')
                # NOTE(review): {hdr} interpolates the Python *list repr*
                # into the generated shell command — confirm this is the
                # intended heredoc content.
                cmd = f"""cat << EOF >> {ssh_config_path}
{hdr}
Host {sshurl}
HostName {sshurl}
User {user}
EOF
"""
            except Exception as e:
                print(f'\t - ERROR:\n\n{e}')
        else:
            print(f'\t - INFO: Entry for `Host {sshurl}` already existing!\n\n')
    # Append the previous content of the existing ssh config file
    if content and not dryrun:
        with open(ssh_config_path, 'a') as ssh_config:
            ssh_config.write(content)
    return cmd
import re
def _unhumanize(human_time_interval):
    """Converts human_time_interval (e.g. 'an hour ago') into a
    datetime.timedelta.

    Returns None when the string does not match any known interval form.
    """
    munged = human_time_interval.strip()
    # Replace singular words with an explicit count of 1.
    # NOTE(review): relies on module globals _SINGULARS (iterable of words)
    # and _DELTAS (unit name -> timedelta) — confirm against the module.
    for needle in _SINGULARS:
        munged = munged.replace(needle, '1 ')
    interval_re = '|'.join(_DELTAS.keys())
    sre = re.match(r'[. ]*([0-9]*)[ ]*(' + interval_re + r')s?( ago)?', munged)
    if sre:
        # groups(1) substitutes 1 for unmatched optional groups, so the
        # missing '( ago)?' compares unequal to ' ago' and yields False.
        ago = sre.groups(1)[2] == ' ago'
        mul = int(sre.groups(1)[0])
        if ago:
            # Past intervals become negative deltas.
            mul = mul * -1
        delta = _DELTAS[sre.groups(1)[1]]
        return delta * mul
    else:
        return None
import re
def parase_pbs_script(filename="emtojob.pbs"):
    """
    Parse the exe part of pbs file.

    Reads ``#PBS`` directives, ``module load`` lines and the remaining shell
    commands from a PBS job script.

    Parameter
        filename: str (filename-like)
            The filename of the pbs script
    Return
        param_dict: dict
            The dict of parameters. Keys include (when present): 'queue',
            'account', 'job_name', 'env', 'group_name', 'walltime', 'node',
            'core', 'pmem', plus always 'module' (list of loaded modules)
            and 'cmds' (list of command lines, newline-terminated).
    """
    # #PBS flag -> output key for single-value directives.
    s = {"-q": "queue", "-A": "account", "-N": "job_name", "-V": "env",
         "-G": "group_name"}
    # resource-list key -> output key for "#PBS -l" entries.
    submit_s = {"nodes": "node", "ppn": "core", "pmem": "pmem"}
    param_dict = {"module": [], "cmds": []}
    with open(filename, "r") as fid:
        for eachline in fid:
            eachline = eachline.strip()
            if eachline.startswith("#PBS"):
                # Raw string: "\s" in a plain literal is an invalid escape.
                line_list = re.split(r"\s+", eachline)
                if line_list[1] == "-l":
                    if line_list[2].startswith("walltime"):
                        # walltime
                        param_dict["walltime"] = line_list[2].split("=")[1]
                    else:
                        for item in line_list[2].split(":"):
                            key = item.split("=")[0]
                            # nodes, ppn, pmem
                            value = item.split("=")[1]
                            if key in submit_s:
                                param_dict[submit_s[key]] = value
                else:
                    if line_list[1] in s:
                        param_dict[s[line_list[1]]] = line_list[2]
            elif eachline.startswith("module"):
                # e.g. "module load intel gcc" -> ["intel", "gcc"]
                modules = eachline.split()[2:]
                for module in modules:
                    param_dict["module"].append(module)
            elif eachline.startswith(("cd $", "#")) or (not eachline):
                # The cd $PBS_O_WORKDIR, comments (#) or empty lines
                pass
            else:
                param_dict["cmds"].append(eachline + "\n")
    return param_dict
async def handle_errors(request: Request, exception: Exception):
    """
    Handles exceptions raised by the API.

    Parameters
    ----------
    request : Request
        The incoming request (unused; required by the handler signature).
    exception : Exception
        NOTE(review): assumed to carry ``code`` and ``message`` attributes
        (i.e. a custom API exception type) — a plain Exception would raise
        AttributeError here; confirm the handler is only registered for
        such types.

    Returns
    -------
    JSONResponse
        Response carrying the exception's status code and message.
    """
    return JSONResponse(
        status_code=exception.code,
        content={"message": exception.message},
    )
def immortal():
    """ Make target (if 400+) or self (if 399-) immortal. """
    # High-access invokers (>= 400) toggle their current target; everyone
    # else toggles themselves.
    av = spellbook.getTarget() if spellbook.getInvokerAccess() >= 400 else spellbook.getInvoker()
    av.setImmortalMode(not av.immortalMode)
    # NOTE(review): reading av.immortalMode after the setter assumes the
    # setter updates the attribute synchronously — confirm.
    return 'Toggled immortal mode %s for %s' % ('ON' if av.immortalMode else 'OFF', av.getName())
import math
def read_command_line_branch(input_path=None, output_path=None):
    """
    Read arguments from commandline and return all values in a dictionary.
    If input_path and output_path are not None, then do not parse command line, but
    only return default values.

    Args:
        input_path (str): Input file path, positional argument with default None.
        output_path (str): Output file path, positional argument with default None.

    Returns:
        dict: keyword arguments for the branch-manipulation routine (angles
        converted to radians, flags, paths and smoothing options).

    Raises:
        ArgumentTypeError: if the azimuth angle is outside [0, 360] or the
            polar angle is outside [-180, 180] degrees.
        ValueError: if the branch number is < 1 or the no-smooth points are
            not given as multiples of 3.
    """
    # Description of the script
    description = "Moves a selected part of a tubular geometry, " + \
                  "in two (horizontal, vertical) geometry-specific directions. " + \
                  "Magnitude of movement is defined by the parameters alpha and beta" + \
                  "Primary script used for application in blood vessels."
    parser = ArgumentParser(description=description, formatter_class=RawDescriptionHelpFormatter)
    # Add common arguments
    required = not (input_path is not None and output_path is not None)
    add_common_arguments(parser, required=required)
    parser.add_argument("-tm", "--translation-method", type=str, default="manual",
                        choices=["manual", "commandline", "no_translation"],
                        help="Defines the method of translation of the branch to be manipulated." +
                             " The parameter provides three options: 'manual', 'commandline' and 'no_translation'. In" +
                             " 'manual' the user will be provided with a visualization of the input surface, and " +
                             "asked to provide the new position of the branch on the surface model." +
                             " If 'commandline' is provided, then '--branch-location'" +
                             " is expected to be provided. Selecting 'no_translation' will " +
                             "result in no translation; any manipulation performed on the " +
                             "branch will happen at the branch's current position. ")
    parser.add_argument('-bl', "--branch-location", nargs="+", type=float, default=None, metavar="branch_location",
                        help="If this parameter is provided, the branch to be manipulated will be moved to the point "
                             "on the surface closest to this point. Example providing the point (1, 5, -1):" +
                             " --branch-loc 1 5 -1")
    # Arguments for rotation
    parser.add_argument('-aa', '--azimuth-angle', type=float, default=0,
                        help="The manipulated branch is rotated an angle 'aa' around the old or new" +
                             " surface normal vector. 'aa' is assumed to be in degrees," +
                             " and not radians. Default is no rotation.", metavar="surface_normal_axis_angle")
    parser.add_argument('-pa', '--polar-angle', type=float, default=0,
                        help="The manipulated branch is rotated an angle 'pa' around the" +
                             " surface tangent vector, constructed by the cross product of the surface normal vector" +
                             " and the Frenet normal vector. 'pa' is assumed to be in degrees," +
                             " and not radians. Default is no rotation.", metavar="surface_tangent_axis_angle")
    # Argument for selecting branch
    parser.add_argument('-bn', '--branch-number', type=int, default=None,
                        help="The number corresponding the branch to manipulate. " +
                             "The branches are ordered from 1 to N, " +
                             "from upstream to downstream, relative to the inlet. " +
                             "If not selected, the user must manually select the branch "
                             "to manipulate. ", metavar="branch_number")
    # Argument for selecting branch
    parser.add_argument('-rb', '--remove-branch', type=str2bool, default=False,
                        help="If True, will remove selected branch and perform no manipulation")
    # Argument for clamping branch when translating
    parser.add_argument('-cb', '--clamp-branch', type=str2bool, default=False,
                        help="If True, will clamp selected branch to branch endpoint")
    # Parse paths to get default values
    if required:
        args = parser.parse_args()
    else:
        args = parser.parse_args(["-i" + input_path, "-o" + output_path])
    if not 0 <= args.azimuth_angle <= 360:
        raise ArgumentTypeError("The azimuth angle is limited to be within [0, 360] degrees, cannot have value" +
                                " {}".format(args.azimuth_angle))
    if not -180 <= args.polar_angle <= 180:
        raise ArgumentTypeError("The polar angle is limited to be within [-180, 180] degrees, cannot have value" +
                                " {}".format(args.polar_angle))
    # Convert from deg to rad and invert rotation if exceeding 180 degrees
    polar_angle_to_radians = args.polar_angle * math.pi / 180
    azimuth_angle_to_radians = args.azimuth_angle * math.pi / 180
    if azimuth_angle_to_radians > np.pi:
        azimuth_angle_to_radians -= 2 * np.pi
    if args.branch_number is not None:
        if args.branch_number < 1:
            raise ValueError("ERROR: Branch number cannot be 0 or negative. Please select a positive number")
    if args.no_smooth_point is not None and len(args.no_smooth_point):
        # Points are flat (x, y, z) triples, so the list length must be 3k.
        if len(args.no_smooth_point) % 3 != 0:
            raise ValueError("ERROR: Please provide the no smooth point(s) as a multiple of 3")
    return dict(input_filepath=args.ifile, smooth=args.smooth, smooth_factor=args.smooth_factor,
                output_filepath=args.ofile, poly_ball_size=args.poly_ball_size, no_smooth=args.no_smooth,
                no_smooth_point=args.no_smooth_point, resampling_step=args.resampling_step,
                polar_angle=polar_angle_to_radians, azimuth_angle=azimuth_angle_to_radians,
                clamp_branch=args.clamp_branch, remove_branch=args.remove_branch,
                branch_to_manipulate_number=args.branch_number, branch_location=args.branch_location,
                translation_method=args.translation_method)
def predict_fr(dst_path, ref_path):
    """
    Full-reference (FR) quality prediction for an image or a video.

    :param dst_path: path to the distorted image/video
    :param ref_path: path to the matching reference image/video (required)
    :return: list of predicted MOS scores (one per image / per video frame
        batch element); implicitly None when the path is neither image
        nor video.
    """
    assert ref_path is not None
    if utils.is_img(dst_path):
        img_dst = cv2.imread(dst_path)
        img_dst = utils.transform(img_dst, config.input_process)[np.newaxis, ...]
        img_ref = cv2.imread(ref_path)
        img_ref = utils.transform(img_ref, config.input_process)[np.newaxis, ...]
        # Distorted and reference stacked along axis 1 — presumably the
        # channel axis after transform; confirm against utils.transform.
        img_input = np.concatenate([img_dst, img_ref], 1)
        mos_pred = sess.run('test_output:0', feed_dict={'test_input:0': img_input})
        if config.output_process.use_sigmoid:
            mos_pred = utils.sigmoid(mos_pred)
        mos_pred = mos_pred * config.output_process.scale
        return mos_pred.reshape(-1).tolist()
    elif utils.is_video(dst_path):
        # Both generators are advanced in lockstep; iteration stops as soon
        # as either video is exhausted.
        vcap_dst = utils.read_video_gen(dst_path, 0, -1, 10, 4)
        vcap_ref = utils.read_video_gen(ref_path, 0, -1, 10, 4)
        mos_preds = []
        while True:
            imgs_dst = next(vcap_dst, None)
            imgs_ref = next(vcap_ref, None)
            if imgs_dst is None or len(imgs_dst) == 0 or imgs_ref is None or len(imgs_ref) == 0:
                break
            imgs_dst = [utils.transform(img, config.input_process) for img in imgs_dst]
            imgs_dst = np.stack(imgs_dst)
            imgs_ref = [utils.transform(img, config.input_process) for img in imgs_ref]
            imgs_ref = np.stack(imgs_ref)
            imgs_input = np.concatenate([imgs_dst, imgs_ref], 1)
            mos_pred = sess.run('test_output:0', feed_dict={'test_input:0': imgs_input})
            if config.output_process.use_sigmoid:
                mos_pred = utils.sigmoid(mos_pred)
            mos_pred = mos_pred.reshape(-1) * config.output_process.scale
            mos_preds.append(mos_pred)
        return np.concatenate(mos_preds).tolist() if mos_preds else []
def disk_example():
    """Create an example of disk element.

    This function returns an instance of a simple disk. The purpose is to make
    available a simple model so that doctest can be written using it.

    Returns
    -------
    disk : ross.DiskElement
        An instance of a disk object.

    Examples
    --------
    >>> disk = disk_example()
    >>> disk.Ip
    0.32956362
    """
    return DiskElement(0, 32.589_727_65, 0.178_089_28, 0.329_563_62)
def build_optim(
    model,
    optim="adam",
    lr=0.002,
    max_grad_norm=0,
    beta1=0.9,
    beta2=0.999,
    decay_method="noam",
    warmup_steps=8000,
):
    """Construct an Optimizer for *model* and register its parameters."""
    # Use a distinct local name so the `optim` method string is not shadowed.
    optimizer = Optimizer(
        optim,
        lr,
        max_grad_norm,
        beta1=beta1,
        beta2=beta2,
        decay_method=decay_method,
        warmup_steps=warmup_steps,
    )
    optimizer.set_parameters(list(model.named_parameters()))
    return optimizer
def bst(height=4):
    """Generate a random binary search tree and return its root.

    :param height: the height of the tree (default: 4)
    :return: the root of the generated binary search tree
    """
    # NOTE(review): relies on module helpers _generate_values, _new_node and
    # _bst_insert (the latter apparently returns the insertion depth).
    values = _generate_values(height)
    root = _new_node(values[0])
    for index in range(1, len(values)):
        depth = _bst_insert(root, values[index])
        # Stop once the requested height is reached.
        if depth == height:
            break
    return root
from typing import Optional
def get_model_info(model: str, repo: str = "onnx/models:master", opset: Optional[int] = None) -> ModelInfo:
    """
    Get the model info matching the given name and opset.

    @param model: The name of the onnx model in the manifest. This field is case-sensitive
    @param repo: The location of the model repo in format "user/repo[:branch]".
        If no branch is found will default to "master"
    @param opset: The opset of the model to get. The default of `None` will return the model with largest opset.
    """
    candidates = list_models(repo, model)
    if not candidates:
        raise AssertionError("No models found with name {}".format(model))
    if opset is None:
        # Largest opset wins; ties resolve to the earliest-listed model,
        # matching a stable descending sort.
        return max(candidates, key=lambda m: m.opset)
    for candidate in candidates:
        if candidate.opset == opset:
            return candidate
    valid_opsets = [m.opset for m in candidates]
    raise AssertionError("{} has no version with opset {}. Valid opsets: {}".format(model, opset, valid_opsets))
def byprotocolobj(protocolobj):
    """Return the Session registered for the type of *protocolobj*.

    Keys match when *protocolobj* is an instance of the respective key class.
    Raises KeyError when no registered key class matches.
    """
    for key_class, session in sessions.items():
        if isinstance(protocolobj, key_class):
            return session
    raise KeyError('No Session defined for protocol object {}'.format(protocolobj))
from typing import Callable
from typing import Dict
def trace_numpy_function(
    function_to_trace: Callable, function_parameters: Dict[str, BaseValue]
) -> OPGraph:
    """Trace a numpy function.

    Args:
        function_to_trace (Callable): The function you want to trace
        function_parameters (Dict[str, BaseValue]): A dictionary indicating what each input of the
            function is e.g. an EncryptedScalar holding a 7bits unsigned Integer

    Returns:
        OPGraph: The graph containing the ir nodes representing the computation done in the input
            function
    """
    function_parameters = prepare_function_parameters(function_to_trace, function_parameters)
    input_tracers = make_input_tracers(NPTracer, function_parameters)
    # We could easily create a graph of NPTracer, but we may end up with dead nodes starting from
    # the inputs that's why we create the graph starting from the outputs
    with tracing_context([NPTracer]):
        output_tracers = function_to_trace(**input_tracers)
    # A single-output function yields one tracer; normalize to a tuple.
    if isinstance(output_tracers, NPTracer):
        output_tracers = (output_tracers,)
    op_graph = OPGraph.from_output_tracers(output_tracers)
    return op_graph
def random_crop(arr, new_h=224, new_w=224):
    """Randomly crop an image of shape (H, W, C) to (new_h, new_w, C).

    Fixes two defects of the naive version: the offset is now sampled from
    the full valid range [0, H - new_h] (the bottom/right-most crop was
    previously unreachable), and a dimension that already matches the target
    no longer crashes with ``np.random.randint(0)``.

    Args:
        arr: array-like image of shape (H, W, C), H >= new_h and W >= new_w.
        new_h: target height (default 224).
        new_w: target width (default 224).

    Returns:
        np.ndarray of shape (new_h, new_w, C).
    """
    arr = np.asarray(arr)
    height, width = arr.shape[0], arr.shape[1]
    assert height >= new_h
    assert width >= new_w
    # randint's upper bound is exclusive: +1 makes the last valid offset
    # reachable and yields offset 0 when the dimension already matches.
    top = np.random.randint(height - new_h + 1)
    left = np.random.randint(width - new_w + 1)
    return arr[top:top + new_h, left:left + new_w, :]
def bilingual(obj, field, attr=None):
    """ This is a quick and dirty way to define bilingual content in a single field.

    Looks up ``<field>_<current language>`` on obj, falling back to
    ``<field>_en``; when *attr* is given, returns that attribute of the
    resolved value instead. Always returns '' rather than None/falsy.
    """
    field_locale = '%s_%s' % (field, get_language())
    value = None
    try:
        value = getattr(obj, field_locale)
    except AttributeError:
        pass
    # Fall back to the English field when the localized one is missing/empty.
    if not value:
        try:
            # NOTE(review): getattr with a default never raises
            # AttributeError — this except clause looks redundant; confirm.
            value = getattr(obj, '%s_en' % field, None)
        except AttributeError:
            pass
    if value and attr:
        try:
            return getattr(value, attr, '')
        except ValueError:
            pass
    return value or ''
def get_available_quantity(variant: "ProductVariant", country_code: str) -> int:
    """Return available quantity for given product in given country."""
    try:
        stock = Stock.objects.get_variant_stock_for_country(country_code, variant)
    except Stock.DoesNotExist:
        # No stock record for this variant/country means nothing available.
        return 0
    return stock.quantity_available
def in_range(x, a1, a2):
    """Check if (modulo 360) x is in the angular range a1...a2.

    All three angles are reduced modulo 360 first. When the normalized
    a1 <= a2 the test is the plain interval a1 <= x <= a2; otherwise the
    range is taken to wrap through 0 (e.g. 350...20 contains 10).

    Returns:
        bool: True when x (mod 360) lies in the range.
    """
    # The docstring promises modulo-360 semantics for x as well; the
    # original only normalized a1/a2, so e.g. x=370 missed range 0..20.
    x %= 360.
    a1 %= 360.
    a2 %= 360.
    if a1 <= a2:  # "normal" range (not including 0)
        return a1 <= x <= a2
    # "jumping" range (around 0)
    return a1 <= x or x <= a2
def _dark_parse_accept_lang_header(accept):
    """
    The use of 'zh-cn' for 'Simplified Chinese' and 'zh-tw' for 'Traditional Chinese'
    are now deprecated, as discussed here: https://code.djangoproject.com/ticket/18419.
    The new language codes 'zh-hans' and 'zh-hant' are now used since django 1.7.
    Although majority of browsers still use the old language codes, some new browsers
    such as IE11 in Windows 8.1 start to use the new ones, which makes the current
    chinese translations of edX don't work properly under these browsers.
    This function can keep compatibility between the old and new language codes. If one
    day edX uses django 1.7 or higher, this function can be modified to support the old
    language codes until there are no browsers use them.
    """
    # Map each deprecated Chinese code onto its modern replacement, keeping
    # every other language code (and all priorities) untouched.
    return [
        (CHINESE_LANGUAGE_CODE_MAP.get(lang.lower(), lang), priority)
        for lang, priority in parse_accept_lang_header(accept)
    ]
def trip_direction(trip_original_stops, direction_stops):
    """
    Guess the trip direction_id based on trip_original_stops, and
    a direction_stops which should be a dictionary with 2 keys: "0" and "1" -
    corresponding values should be sets of stops encountered in given dir.

    Returns "0" when at least as many of the trip's stops are unique to
    direction 0 as to direction 1, otherwise "1".
    """
    # Only stops unique to one direction are informative.
    dir0_only = direction_stops["0"] - direction_stops["1"]
    dir1_only = direction_stops["1"] - direction_stops["0"]
    hits_dir0 = len(trip_original_stops & dir0_only)
    hits_dir1 = len(trip_original_stops & dir1_only)
    # Ties go to "0", matching the original >= comparison; the original's
    # trailing "how did we get here" RuntimeError branch was unreachable
    # (>= / < is exhaustive for ints) and has been removed.
    return "0" if hits_dir0 >= hits_dir1 else "1"
def unf_pb_Valko_MPaa(rsb_m3m3, gamma_oil=0.86, gamma_gas=0.6, t_K=350):
    """
    bubble point pressure calculation according to Valko McCain (2002) correlation

    :param rsb_m3m3: solution ration at bubble point, must be given, m3/m3
    :param gamma_oil: specific oil density (by water)
    :param gamma_gas: specific gas density (by air)
    :param t_K: temperature, K
    :return: bubble point pressure abs in MPa

    ref SPE "Reservoir oil bubblepoint pressures revisited; solution gas–oil ratios and surface gas specific gravities"
    W. D. McCain Jr.,P.P. Valko,
    """
    # The correlation is fitted for Rsb in [1.8, 800] m3/m3; clamp the input
    # and extrapolate outside that interval (see note near the end).
    min_rsb = 1.8
    max_rsb = 800
    rsb_old = rsb_m3m3
    if rsb_m3m3 < min_rsb:
        rsb_m3m3 = min_rsb
    if rsb_m3m3 > max_rsb:
        rsb_m3m3 = max_rsb
    # Four additive z-terms of the Valko-McCain correlation.
    z1 = -4.81413889469569 + 0.748104504934282 * np.log(rsb_m3m3) \
         + 0.174372295950536 * np.log(rsb_m3m3) ** 2 - 0.0206 * np.log(rsb_m3m3) ** 3
    z2 = 25.537681965 - 57.519938195 / gamma_oil + 46.327882495 / gamma_oil ** 2 - 13.485786265 / gamma_oil ** 3
    z3 = 4.51 - 10.84 * gamma_gas + 8.39 * gamma_gas ** 2 - 2.34 * gamma_gas ** 3
    z4 = 6.00696e-8 * t_K ** 3 - 8.554832172e-5 * t_K ** 2 + 0.043155018225018 * t_K - 7.22546617091445
    z = z1 + z2 + z3 + z4
    pb_atma = 119.992765886175 * np.exp(0.0075 * z ** 2 + 0.713 * z)
    pb_MPaa = pb_atma / 10.1325
    # (Translated from the original Russian note:) For low gas content force
    # the asymptote Pb = 1 atma at Rsb = 0. The Valko correlation was fitted
    # with the non-parametric GRACE regression (SPE 35412), whose behaviour
    # outside the fitted interval is not meaningful, so the asymptotes are
    # imposed manually here; for high gas content the linear trend of the
    # correlation is extended instead.
    if rsb_old < min_rsb:
        pb_MPaa = (pb_MPaa - 0.1013) * rsb_old / min_rsb + 0.1013
    if rsb_old > max_rsb:
        pb_MPaa = (pb_MPaa - 0.1013) * rsb_old / max_rsb + 0.1013
    return pb_MPaa
def disasm(file, objdump_or_gdb, symbol, start, finish):
    """
    Disassemble binary file.

    Uses objdump when objdump_or_gdb is truthy; otherwise gdb disassembles
    either the named symbol or the start..finish address range.
    """
    if objdump_or_gdb:
        return _run(['objdump', '-d', file])
    if symbol is not None:
        return _run(['gdb', '-batch', '-ex', f'disassemble {symbol}', file])
    return _run(['gdb', '-batch', '-ex', f'disassemble {start},{finish}', file])
def gaussian_on_simplex(mu, sigma, npoints):
    """
    Draw npoints i.i.d. samples normally distributed around :math:`mu`.

    Parameters
    ----------
    mu : 1D-numpy-array
        expectation value on the simplex
    sigma : 2D-numpy-array
        covariance matrix on the simplex
    npoints : int
        number of random points

    Raises
    ------
    NameError: raised in the case of inconsistent data

    Returns
    -------
    points : 2D-numpy-array
        random points on the standard simplex normally distributed
        around :math:`mu` with covarianve matrix sigma
    """
    # Dimensions of the mean and the covariance columns must agree.
    if mu.shape[0] != sigma.shape[1]:
        raise NameError('Dimensions of mu and sigma differ.')
    return np.random.multivariate_normal(mu, sigma, npoints)
def add_common_arguments(parser):
    """Populate the given argparse.ArgumentParser with arguments.

    This function can be used to make the definition these argparse arguments
    reusable in other modules and avoid the duplication of these definitions
    among the executable scripts.

    The following arguments are added to the parser:

    - **...** (...): ...

    Parameters
    ----------
    parser : argparse.ArgumentParser
        The parser to populate.

    Returns
    -------
    argparse.ArgumentParser
        Return the populated ArgumentParser object.
    """
    # NOTE(review): currently a stub — no arguments are actually added; the
    # parser is returned unchanged.
    return parser
import os
def is_valid_path_and_ext(fname, wanted_ext=None):
    """
    Validate that the path exists and its extension is one of those wanted.

    Parameters
    ----------
    fname : str
        Input file name.
    wanted_ext : List of str, optional
        Extensions to check (including the dot, e.g. ".txt").

    Return
    ------
    bool
    """
    if not os.path.exists(fname):
        print(f"Error: No file '{fname}' exists.")
        return False
    # Without a wanted-extension list, existence alone is enough.
    if wanted_ext is None:
        return True
    _, ext = os.path.splitext(fname)
    return ext in wanted_ext
def circuit(params, a, m1, m2, cutoff):
    """Runs the constrained variational circuit with specified parameters,
    returning the output fidelity to the requested ON state, as well as
    the post-selection probability.

    Args:
        params (list): list of gate parameters for the constrained
            variational quantum circuit. This should contain the following 15 values
            in the following order:

            * ``sq_r0, sq_r1, sq_r2``: the squeezing magnitudes applied to the first three modes
            * ``sq_phi0, sq_phi1, sq_phi2``: the squeezing phase applied to the first three modes
            * ``d_r0, d_r1, d_r2``: the displacement magnitudes applied to the first three modes
            * ``bs_theta1, bs_theta2, bs_theta3``: the 3-mode interferometer beamsplitter angles theta
            * ``bs_phi1, bs_phi2, bs_phi3``: the 3-mode interferometer beamsplitter phases phi
        a (float): the ON state parameter
        m1 (int): the Fock state measurement of mode 0 to be post-selected
        m2 (int): the Fock state measurement of mode 1 to be post-selected
        cutoff (int): the Fock basis truncation

    Returns:
        tuple: a tuple containing the output fidelity to the target ON state,
        the probability of post-selection, the state norm before entering the beamsplitter,
        the state norm after exiting the beamsplitter, and the density matrix of the output state.
    """
    # define target state
    ONdm = on_state(a, cutoff)
    # unpack circuit parameters
    # squeezing magnitudes
    sq_r = params[:3]
    # squeezing phase
    sq_phi = params[3:6]
    # displacement magnitudes (assume displacement is real for now)
    d_r = params[6:9]
    # beamsplitter theta
    bs_theta1, bs_theta2, bs_theta3 = params[9:12]
    # beamsplitter phi
    bs_phi1, bs_phi2, bs_phi3 = params[12:]
    # quantum circuit prior to entering the beamsplitter
    prog = sf.Program(3)
    with prog.context as q:
        for k in range(3):
            Sgate(sq_r[k], sq_phi[k]) | q[k]
            Dgate(d_r[k]) | q[k]
    eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff})
    stateIn = eng.run(prog).state
    normIn = np.abs(stateIn.trace())
    # norm of output state and probability
    # NOTE(review): running prog_BS on the same engine presumably continues
    # from the state prepared above — confirm against the SF engine docs.
    prog_BS = sf.Program(3)
    with prog_BS.context as q:
        BSgate(bs_theta1, bs_phi1) | (q[0], q[1])
        BSgate(bs_theta2, bs_phi2) | (q[1], q[2])
        BSgate(bs_theta3, bs_phi3) | (q[0], q[1])
    stateOut = eng.run(prog_BS).state
    normOut = np.abs(stateOut.trace())
    rho = stateOut.dm()
    # probability of meausring m1 and m2
    prob = np.abs(np.trace(rho[m1, m1, m2, m2]))
    # output state (conditional density matrix, normalized by prob)
    rhoC = rho[m1, m1, m2, m2]/prob
    # fidelity with the target
    fidelity = np.abs(np.trace(np.einsum('ij,jk->ik', rhoC, ONdm)))
    return (fidelity, prob, normIn, normOut, rhoC)
def attr_names(obj):
    """
    Determine the names of user-defined attributes of the given SimpleNamespace object.

    Source: https://stackoverflow.com/a/27532110

    :return: A list of strings.
    """
    # vars(obj) is the object's __dict__; sorted() yields its keys in order.
    return sorted(vars(obj))
from typing import Optional
from typing import List
from datetime import datetime
from textwrap import dedent
def get_scada_range(
    network: Optional[NetworkSchema] = None,
    networks: Optional[List[NetworkSchema]] = None,
    network_region: Optional[str] = None,
    facilities: Optional[List[str]] = None,
    energy: bool = False,
) -> Optional[ScadaDateRange]:
    """Get the start and end dates for a network query. This is more efficient
    than providing or querying the range at query time.

    Returns None when the query yields no min/max timestamps; raises a 404
    HTTPException when the query returns no rows at all.
    """
    engine = get_database_engine()
    __query = """
    select
        min(f.data_first_seen) at time zone '{timezone}',
        max(fs.trading_interval) at time zone '{timezone}'
    from facility_scada fs
    left join facility f on fs.facility_code = f.code
    where
        fs.trading_interval >= '{date_min}' and
        {facility_query}
        {network_query}
        {network_region_query}
        f.fueltech_id not in ('solar_rooftop', 'imports', 'exports')
        and f.interconnector is FALSE
        and fs.{field} is not null;
    """
    network_query = ""
    timezone = network.timezone_database if network else "UTC"
    field_name = "generated"
    # if energy is False and network and network.code in ["WEM"]:
    #     energy = True
    if energy is True:
        field_name = "eoi_quantity"
    # Only look back 7 days because the query is more optimized
    date_min = datetime.now() - timedelta(days=7)
    if network:
        network_query = f"f.network_id = '{network.code}' and"
    if networks:
        # NOTE(review): a `networks` list overrides the single-network filter.
        net_case = networks_to_in(networks)
        network_query = "f.network_id IN ({}) and ".format(net_case)
    network_region_query = ""
    if network_region:
        network_region_query = f"f.network_region = '{network_region}' and"
    facility_query = ""
    if facilities:
        fac_case = duid_in_case(facilities)
        facility_query = "f.code IN ({}) and ".format(fac_case)
    scada_range_query = dedent(
        __query.format(
            field=field_name,
            date_min=date_min,
            facility_query=facility_query,
            network_query=network_query,
            network_region_query=network_region_query,
            timezone=timezone,
        )
    )
    logger.debug(scada_range_query)
    with engine.connect() as c:
        logger.debug(scada_range_query)
        scada_range_result = list(c.execute(scada_range_query))
        if len(scada_range_result) < 1:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="No results",
            )
        scada_min = scada_range_result[0][0]
        scada_max = scada_range_result[0][1]
    if not scada_min or not scada_max:
        return None
    # set network timezone since that is what we're querying
    scada_min = scada_min.replace(tzinfo=network.get_fixed_offset())
    scada_max = scada_max.replace(tzinfo=network.get_fixed_offset())
    scada_range = ScadaDateRange(start=scada_min, end=scada_max, network=network)
    return scada_range
def treetable(childreds, parents, arg3=None, nodename_colname=None):
    """Build a tree-ordering table from child-id and parent-id lists.

    Input: `childreds` (child node ids) and `parents` (parent node ids);
    the two lists must have equal length.
    Docs: http://note.youdao.com/noteshare?id=126200f45d301fcb4364d06a0cae8376

    Two call forms are supported:
    >> treetable(childreds, parents) --> DataFrame (a new df is built)
    >> treetable(df, child_colname, parent_colname) --> DataFrame (df is modified)

    The function may append virtual nodes at the end of `childreds`/`parents`
    (an artificial root), then computes for every row the tree-sorted order
    number (`tree_order`) and each node's depth (`depth`).

    >> ls1 = [6, 2, 4, 5, 3], ls2 = [7, 1, 2, 2, 1], treetable(ls1, ls2)
        child_id   parent_id  depth  tree_order  tree_struct
    5          7        root      1           1  = = 7
    0          6           7      2           2  = = = = 6
    6          1        root      1           3  = = 1
    1          2           1      2           4  = = = = 2
    2          4           2      3           5  = = = = = = 4
    3          5           2      3           6  = = = = = = 5
    4          3           1      2           7  = = = = 3
    """
    # 0 Parameter preprocessing: support the DataFrame call form.
    if isinstance(childreds, pd.DataFrame):
        df = childreds
        child_colname = parents
        parent_colname = arg3
        if not arg3: raise TypeError
        childreds = df[child_colname].tolist()
        parents = df[parent_colname].tolist()
    else:
        df = None
    # 1 Create a root node so that every node except root has a row.
    lefts = set(parents) - set(childreds)  # parent ids never seen as children
    cs, ps = list(childreds), list(parents)
    if len(lefts) == 0:
        # Empty lefts implies a cycle; non-empty lefts is still not
        # necessarily a well-formed tree.
        raise ValueError('有环,不是树结构')
    elif len(lefts) == 1:  # a single unseen node: it is the root
        root = list(lefts)[0]
    else:  # several parents without rows: attach them all to a new root
        root = 'root'
        allnode = set(parents) | set(childreds)  # all node ids
        while root in allnode: root += '-'  # append '-' until the name is unused
        # Append the virtual rows (left, root).
        lefts = list(lefts)
        lefts.sort(key=lambda x: parents.index(x))
        for t in lefts:
            cs.append(t)
            ps.append(root)
    n = len(cs)
    depth, tree_order, len_childs = [-1] * n, [-1] * n, [0] * n
    # 2 Build the parent -> child-row-indices mapping dd.
    dd = defaultdict(list)
    for i in range(n): dd[ps[i]] += [i]
    # 3 Depth-first traversal assigning order and depth.
    cnt = 1
    def dfs(node, d):
        """Visit all children of node."""
        nonlocal cnt
        for i in dd.get(node, []):
            tree_order[i], depth[i], len_childs[i] = cnt, d, len(dd[cs[i]])
            cnt += 1
            dfs(cs[i], d + 1)
    dfs(root, 1)
    # 4 Output formatting.
    tree_struct = list(map(lambda i: f"{'_ _ ' * depth[i]}{cs[i]}" + (f'[{len_childs[i]}]' if len_childs[i] else ''),
                           range(n)))
    if df is None:
        ls = list(zip(cs, ps, depth, tree_order, len_childs, tree_struct))
        df = pd.DataFrame.from_records(ls, columns=('child_id', 'parent_id',
                                                    'depth', 'tree_order', 'len_childs', 'tree_struct'))
    else:
        k = len(df)
        df = df.append(pd.DataFrame({child_colname: cs[k:], parent_colname: ps[k:]}), sort=False, ignore_index=True)
        if nodename_colname:
            tree_struct = list(
                map(lambda i: f"{'_ _ ' * depth[i]}{cs[i]} {df.iloc[i][nodename_colname]}"
                    + (f'[{len_childs[i]}]' if len_childs[i] else ''), range(n)))
        df['depth'], df['tree_order'], df['len_childs'], df['tree_struct'] = depth, tree_order, len_childs, tree_struct
    # Note: callers may rely on the input order; sorting could be optional.
    df.sort_values('tree_order', inplace=True)
    return df
def plot_drawdown_periods(returns, top=10, k=None, ax=None, **kwargs):
    """
    Plots cumulative returns highlighting top drawdown periods.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet.
    top : int, optional
        Amount of top drawdowns periods to plot (default 10).
    k : float, optional
        Constant offset added to the cumulative-return curve before
        plotting (e.g. an initial portfolio value).  Default None.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.  Defaults to the current axes.
    **kwargs, optional
        Passed to plotting function.
    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    if ax is None:
        ax = plt.gca()
    # y_axis_formatter = FuncFormatter(utils.one_dec_places)
    # ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
    # Identity test for None (PEP 8): `!= None` may misbehave with objects
    # that override __ne__ (e.g. numpy/pandas scalars).
    if k is not None:
        df_cum_rets = returns.cumsum() + k
    else:
        df_cum_rets = returns.cumsum()
    df_drawdowns = gen_drawdown_table(returns, top=top)
    df_cum_rets.plot(ax=ax, **kwargs)
    lim = ax.get_ylim()
    colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
    # Shade each drawdown window from its peak date to its recovery date.
    for i, (peak, recovery) in df_drawdowns[
            ['peak date', 'recovery date']].iterrows():
        if pd.isnull(recovery):
            # Drawdown has not recovered yet; shade through the final date.
            recovery = returns.index[-1]
        ax.fill_between((peak, recovery),
                        lim[0],
                        lim[1],
                        alpha=.4,
                        color=colors[i])
    ax.set_title('Top %i Drawdown Periods' % top)
    ax.set_ylabel('Cumulative returns')
    ax.legend(['Portfolio'], loc='upper left')
    ax.set_xlabel('')
    return ax
def redirect_back(endpoint='index', **values):
    """Redirect to the URL given in the ``next`` query parameter when it is
    safe, otherwise to *endpoint* built with *values*.

    :param endpoint: fallback endpoint name
    :param values: keyword arguments forwarded to url_for() for the fallback
    :return: a redirect response
    """
    candidate = request.args.get('next')
    if candidate and is_safe_url(candidate):
        return redirect(candidate)
    return redirect(url_for(endpoint, **values))
import joblib
def load(
    tag: t.Union[str, Tag],
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> t.Union["BaseEstimator", "Pipeline"]:
    """Load a saved scikit-learn model from the local BentoML model store.
    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
    Returns:
        :obj:`Union[BaseEstimator, Pipeline]`: an instance of :obj:`sklearn` model from BentoML modelstore.
    Examples:
    .. code-block:: python
        import bentoml
        sklearn = bentoml.sklearn.load('my_model:latest')
    """  # noqa
    # _get_model_info returns (info, path-to-serialized-model); only the
    # file path is needed to deserialize with joblib.
    model_info = _get_model_info(tag, model_store)
    model_file = model_info[1]
    return joblib.load(model_file)
def tag_to_python(tag):
    """Convert a GNU Radio stream tag into a PythonTag holding plain
    Python values (key/value/srcid converted out of PMT form)."""
    result = PythonTag()
    result.offset = tag.offset
    # key, value and srcid are PMTs; unwrap each into a native Python object.
    for attr in ('key', 'value', 'srcid'):
        setattr(result, attr, pmt.to_python(getattr(tag, attr)))
    return result
def corrections(word_in, dictionary, keyboard_cm, ed=2):
    """Return dictionary words reachable from *word_in* within edit
    distance *ed*, using keyboard-aware candidate generation.

    @input: word_in - input word
            dictionary - dictionary/lexicon used to validate candidates
            keyboard_cm - confusion matrix for keyboard in question
            ed - maximum edit distance (default 2)
    """
    assert isinstance(dictionary, Dictionary)
    generated = oridam_generate_patterns(word_in, keyboard_cm, ed)
    # TBD: score candidates by n-gram probability of language model occurrence
    # etc. or edit distance from source word etc.
    return [candidate for candidate in generated if dictionary.isWord(candidate)]
def read_polyglot_embeddings(filename):
    """
    Read vocabulary and embeddings from a polyglot pickle file.

    The first four entries are UNK, <s>, </s> and padding; <s> and </s>
    are discarded and replaced by the nlpnet rare/padding markers.

    :param filename: path to the polyglot pickle (words, embedding matrix)
    :return: (matrix, vocabulary) after clusterization, with a right-padding
        token appended to the vocabulary.
    """
    with open(filename, 'rb') as f:
        data = cPickle.load(f)
    # first four words are UNK, <s>, </s> and padding
    # we discard <s> and </s>
    words = data[0]
    # Use builtin float: `np.float` was a deprecated alias removed in
    # modern numpy; the result dtype is identical (float64).
    matrix = data[1].astype(float)
    matrix = np.delete(matrix, [1, 2], 0)
    WD = nlpnet.word_dictionary.WordDictionary
    words = [WD.rare, WD.padding_left] + list(words[4:])
    model = dict(zip(words, matrix))
    clusters = clusterize_words(model)
    # Materialize the dict views: on Python 3, .keys() has no append() and
    # np.array() over a values view does not build the intended 2-D array.
    vocabulary = list(clusters.keys())
    vocabulary.append(WD.padding_right)
    matrix = np.array(list(clusters.values()))
    return matrix, vocabulary
def exp_by_squaring(x, n):
    """Compute x**n using O(log n) multiplications.

    Assumes n >= 0.
    See: https://en.wikipedia.org/wiki/Exponentiation_by_squaring

    :param x: base; any value supporting multiplication
    :param n: non-negative integer exponent
    :return: x raised to the power n (returns 1 when n == 0)
    """
    if n == 0:
        return 1
    if n % 2 == 0:
        return exp_by_squaring(x * x, n // 2)
    else:
        # Floor division keeps the exponent an int; `(n - 1) / 2` would
        # produce a float under Python 3 and leak into the recursion.
        return x * exp_by_squaring(x * x, (n - 1) // 2)
from dagster.config.field import resolve_to_config_type
def dagster_type_materializer(config_schema, required_resource_keys=None):
    """Create an output materialization hydration config that configurably
    materializes a runtime value.

    The decorated function should take the execution context and the parsed
    config value, materialize the runtime value, and return an appropriate
    :py:class:`AssetMaterialization`.

    Args:
        config_schema (Any): The type of the config data expected by the decorated function.
    Examples:
        .. code-block:: python
            # Takes a list of dicts such as might be read in using csv.DictReader, as well as a config
            value, and writes
            @dagster_type_materializer(str)
            def materialize_df(_context, path, value):
                with open(path, 'w') as fd:
                    writer = csv.DictWriter(fd, fieldnames=value[0].keys())
                    writer.writeheader()
                    writer.writerows(rowdicts=value)
                return AssetMaterialization.file(path)
    """
    resolved_type = resolve_to_config_type(config_schema)

    def _decorator(fn):
        return _create_output_materializer_for_decorator(
            resolved_type, fn, required_resource_keys
        )

    return _decorator
from typing import Union
from typing import List
def get_models(deploy_cfg: Union[str, mmcv.Config],
               model_cfg: Union[str, mmcv.Config], work_dir: str) -> List:
    """Build the per-backend model description for deploy.json.

    Args:
        deploy_cfg (mmcv.Config): Deploy config dict.
        model_cfg (mmcv.Config): The model config dict.
        work_dir (str): Work dir to save json files.
    Return:
        list[dict]: One dict holding the model name, net, weights, backend,
            precision, batch size and dynamic-shape flag.
    """
    name, _ = get_model_name_customs(deploy_cfg, model_cfg, work_dir)
    onnx_name = get_onnx_config(deploy_cfg)['save_file']
    precision = 'FP32'
    net = onnx_name
    weights = ''
    backend = get_backend(deploy_cfg=deploy_cfg)
    # Per-backend file naming conventions and precision selection.
    if backend == Backend.TENSORRT:
        net = onnx_name.replace('.onnx', '.engine')
        common_cfg = get_common_config(deploy_cfg)
        if common_cfg.get('fp16_mode', False):
            precision = 'FP16'
        # int8 takes priority over fp16 when both are enabled.
        if common_cfg.get('int8_mode', False):
            precision = 'INT8'
    elif backend == Backend.PPLNN:
        precision = 'FP16'
        weights = onnx_name.replace('.onnx', '.json')
    elif backend == Backend.OPENVINO:
        net = onnx_name.replace('.onnx', '.xml')
        weights = onnx_name.replace('.onnx', '.bin')
    elif backend == Backend.NCNN:
        net = onnx_name.replace('.onnx', '.param')
        weights = onnx_name.replace('.onnx', '.bin')
    elif backend != Backend.ONNXRUNTIME:
        raise NotImplementedError(f'Not supported backend: {backend.value}.')
    return [
        dict(
            name=name,
            net=net,
            weights=weights,
            backend=backend.value,
            precision=precision,
            batch_size=1,
            dynamic_shape=is_dynamic_batch(deploy_cfg, input_name='input'))
    ]
import random
def weighted_choice(choices):
    """
    Pick a value at random, weighted by each choice's weight.

    :param list choices: Each item is a tuple of (choice, weight); weights
        must be non-negative and at least one must be positive.
    :return: one of the choice values
    :raises ValueError: if no choice has a positive weight
    """
    total = sum(weight for choice, weight in choices)
    selection = random.uniform(0, total)
    counter = 0
    for choice, weight in choices:
        if counter + weight > selection:
            return choice
        counter += weight
    # random.uniform(0, total) can return exactly `total` (closed interval),
    # in which case no strict `>` test above fires.  Fall back to the last
    # positively-weighted choice instead of asserting.
    for choice, weight in reversed(choices):
        if weight > 0:
            return choice
    raise ValueError("choices must contain at least one positive weight")
def css_s(property, value):
    """Build a CSSString proto for *property*: *value* and return its
    stringified form."""
    style_proto = make_cssstyle_proto(property, value)
    return proto_to_str(style_proto)
import argparse
def parse_args(args):
    """
    Parse command-line arguments for the detection script.

    :param args: list of argument strings (e.g. ``sys.argv[1:]``)
    :return: :class:`argparse.Namespace` with the parsed options
    """
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--data-path', help='Data for prediction', type=str, required=True)
    parser.add_argument('--target-path', help='Target path', type=str, default='result.json')
    parser.add_argument('--split', help='Target path', type=str, default='val')
    parser.add_argument('--max-detections', help='Max detection', default=10)
    parser.add_argument('--ninedash-category-id', help='Ninedash category ID', default=1)
    parser.add_argument('--model-path', help='Model path of the network', type=str, required=True)
    parser.add_argument('--score-threshold', help='Minimum score threshold', type=float, default=0.3)
    parser.add_argument('--phi', help='Hyper parameter phi', default=0, type=int, choices=(0, 1, 2, 3, 4, 5, 6))
    parser.add_argument('--weighted-bifpn', help='Use weighted BiFPN', action='store_true')
    parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
    parser.add_argument('--num-classes', help='Number of classes', default=1, type=int)
    parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
    # Parse once and reuse the namespace: the original parsed twice, doing
    # duplicate work (and double-reporting on invalid input).
    parsed = parser.parse_args(args)
    print(vars(parsed))
    return parsed
import datetime
import json
async def get_expired_members(request):
    """
    Returns a list of all members that should have finished their degree,
    calculated by current semester and normed time for each member's
    associated study programme.

    :param request: aiohttp request (unused beyond routing)
    :return: 200 with a JSON list of expired members, or 500 with an error.
    """
    # Bind before the try so the finally block is safe even when
    # mysql_connect() itself raises (previously a NameError).
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        # `import datetime` (module) is required here: with
        # `from datetime import datetime` the expression
        # `datetime.date.today()` raises AttributeError.
        today = datetime.date.today()
        this_year = today.year
        semester_slutt = datetime.date(year=this_year, month=SEMESTER_SLUTT['month'], day=SEMESTER_SLUTT['day'])
        # Before semester end, compare against the previous admission year.
        if (semester_slutt - today).days > 0:
            this_year -= 1
        await cur.execute("SELECT u.user_id, u.first_name, u.last_name, u.student_email, s.name, "
                          "u.year_of_admission, s.length as 'normed_years' FROM user u join study_programme s on "
                          "u.study_programme_id = s.study_programme_id where (%s - u.year_of_admission) >= s.length "
                          "order by u.user_id ASC",
                          this_year)
        r = await cur.fetchall()
        return web.Response(status=200,
                            text=json.dumps(r, default=str),
                            content_type='application/json')
    except MySQLError as e:
        print(e)
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
def sim_hawkes(dims, adjacency, decays, baseline, kernel_support, max_points, run_time, phi_inv, plot=False, seed=None, track_intensity=False):
    """Simulate a multivariate point process via Ogata's thinning ("razor").

    Repeatedly draws candidate event times from an exponential with rate
    equal to an upper bound on the total intensity, then accepts/rejects
    each candidate against the true intensity and assigns it to a dimension.

    Input
    - dims: number of process dimensions
    - adjacency, decays, baseline, kernel_support, phi_inv: model parameters
      passed straight through to upper_bound()/linear_intensity()
    - max_points: hard cap on total simulated events (exceeding it raises)
    - run_time: simulation horizon
    - plot: whether to plot events and intensities
    - seed: Seed for randomization.
      NOTE(review): `seed` is accepted but never used in this body — confirm
      whether callers expect it to seed np.random.
    - track_intensity: if True, also return the accepted-event intensities
    """
    # Initiate: current time and one (empty) event-time array per dimension
    t, ts = 0, [np.array([]) for d in range(dims)]
    # Return intensities (per-dimension, one entry per accepted event)
    tracked_intensity = [np.array([]) for d in range(dims)]
    # Ogata razor: Compute upper bound for intensity, simulate exponentially distributed waiting times and accept/reject sample points
    while sum(x.size for x in ts) < max_points:
        # Draw candidate event time
        M = upper_bound(t, ts, dims, adjacency, decays, baseline, kernel_support, phi_inv)
        M = M.sum()
        # e(1/M) presumably draws an exponential waiting time with rate M —
        # confirm against the helper's definition.
        t += e(1/M)
        if t > run_time: break
        # Accept/reject sample the point
        intn = linear_intensity(t, ts, dims, adjacency, decays, baseline, kernel_support, phi_inv)
        if unif(M) < intn.sum():
            # Accepted: choose the dimension proportionally to its intensity
            i = np.random.choice(dims, p=intn/intn.sum())
            ts[i] = np.append(ts[i], t)
            if track_intensity:
                tracked_intensity[i] = np.append(tracked_intensity[i], intn[i])
    # Raise exception for too many points. This causes likelihood problems.
    if sum(x.size for x in ts) >= max_points:
        raise Exception("Max points reached before run time")
    if plot:
        make_plot(dims, adjacency, decays, baseline, kernel_support, run_time, ts, phi_inv)
        plt.legend()
        plt.show()
    if track_intensity:
        return ts, tracked_intensity
    else: return ts
def ADD(sum, augend, addend):
    """Emit a three-address-code ADD instruction.

    args:
        sum: the register where the addition result is stored.
        augend: the left operand of the addition.
        addend: the right operand of the addition.
    function:
        Performs basic addition.
    """
    instruction = TacInstruction(instructions.ADD, sum, augend, addend)
    return instruction
import io
def __convert__(filename):
    """Convert a python script so that it can be called by slave.
    The converted file is named by appending '_converted' to the filename."""
    with open(filename, 'r') as src:
        script = src.read()
    if '#main' not in script:
        raise SlaveError('Could not find #main section in {0}'.format(filename))
    header, main = [part.strip() for part in script.split('#main', maxsplit=1)]
    add_pause = '#pause' not in main
    with io.StringIO() as buf:
        print('# Auto generated script file', file=buf)
        print('', file=buf)
        # Re-emit the header section verbatim.
        print(header, file=buf)
        print('', file=buf)
        # Wrap the #main section in a function the slave runner can call.
        print('# Main script function', file=buf)
        print('def __slave_script__(thread):', file=buf)
        for line in main.split('\n'):
            print("    ", __replace__(line, add_pause), file=buf)
        return buf.getvalue()
import json
def parse_value(str_val, type):
    """ Parse the string representation of a value (e.g., for use with an
    attribute of an XML object) into its Python form.

    Args:
        str_val (:obj:`str`): string representation of the value
        type (:obj:`ValueType`): type

    Returns:
        :obj:`object`: Python representation of the value
    """
    if type == ValueType.boolean:
        lowered = str_val.lower()
        return lowered == 'true' or lowered == '1'
    if type == ValueType.integer:
        return int(str_val)
    if type == ValueType.float:
        return float(str_val)
    if type == ValueType.string:
        return str_val
    if type == ValueType.kisao_id:
        # KiSAO terms resolve through the EBI Ontology Lookup Service.
        base_url = ('https://www.ebi.ac.uk/ols/ontologies/kisao/terms'
                    '?iri=http%3A%2F%2Fwww.biomodels.net%2Fkisao%2FKISAO%23')
        return OntologyTerm(
            namespace='KISAO',
            id=str_val,
            url=base_url + str_val,
        )
    if type in (ValueType.list, ValueType.object, ValueType.any):
        # Structured values are stored as JSON text.
        return json.loads(str_val)
    raise NotImplementedError('Type {} is not supported'.format(type))
def _ls_emr_step_logs(fs, log_dir_stream, step_id=None):
    """Return matching step logs sorted by timestamp, optionally filtering
    by *step_id* (entries with no timestamp sort last).

    Each item is a dict with the keys:
        path: path/URI of step file
        step_id: step_id in *path* (must match *step_id* if set)
    """
    matches = _ls_logs(fs, log_dir_stream, _match_emr_step_log_path,
                       step_id=step_id)

    # "recency" isn't useful here; order by timestamp with unstamped logs last
    def _sort_key(match):
        ts = match['timestamp']
        return (ts is None, ts or '')

    return sorted(matches, key=_sort_key)
def propeller_icon(icon, **kwargs):
    """
    Render an icon
    **Tag name**:
        propeller_icon
    **Parameters**:
        icon
            Icon name. See the `Propeller docs <http://propeller.in/style/icons.php>`_ for all icons.
        size
            Size of the icon. Must be one of 'xs', 'sm', 'md', or 'lg'. Default: 'sm'
        extra_classes
            Extra CSS classes to add to the icon HTML. Optional
        title
            A title for the icon (HTML title attrivute). Optional
    **Usage**::
        {% propeller_icon icon %}
    **Example**::
        {% propeller_icon "star" %}
    """
    # Thin wrapper: all rendering options are forwarded unchanged.
    rendered = render_icon(icon, **kwargs)
    return rendered
def sbasis(i, n):
    """Standard basis vector e_i of dimension n (float64)."""
    # Start from the zero vector and set the single unit component;
    # negative i follows normal numpy indexing semantics.
    vec = np.zeros(n)
    vec[i] = 1.0
    return vec
def get_users():
    """Render the admin user-list page."""
    # Debug-level trace only; the template pulls the user data itself.
    current_app.logger.debug(u'Get all users in admin.')
    return render_template('admin/users/list.html')
import struct
def decrypt(text):
    """
    Decode *text* by mapping every other byte through the ``decrypt_bets``
    lookup table and concatenating the results.

    :param str text: ciphertext; bytes at even offsets are decoded,
        bytes at odd offsets are skipped
    :rtype: str
    """
    # NOTE(review): unpacking text[i] with '!B' implies `text` is a byte
    # string (Python 2 `str`); under Python 3 this needs `bytes` input and
    # a different access pattern — confirm the runtime Python version.
    plain = []
    for i in range(0, len(text), 2):
        # Read one unsigned byte; decrypt_bets.get() returns None for
        # unknown values, which would break the final join — presumably the
        # table covers all possible ciphertext bytes. Verify.
        num, = struct.unpack('!B', text[i])
        plain.append(decrypt_bets.get(num))
    return ''.join(plain)
def LF_warning_signs_hypothetical(c):
    """
    Labeling function: check if candidate appears in a warning-signs list,
    e.g. preceded by 'Warning Signs:'.

    :param c: pain-anatomy candidate
    :return: -1 if the pattern is present, 0 otherwise
    """
    # Rebuild the sentence text from the words of every span's parent.
    pieces = []
    for span in get_sent_candidate_spans(c):
        span_words = span.get_parent()._asdict()['words']
        pieces.append(' '.join(span_words))
    sentence = ''.join(pieces)
    return -1 if 'warning signs :' in sentence.lower() else 0
def render_upcoming_events(event_amount=5, category=None):
    """Template tag to render a list of upcoming event occurrences,
    optionally restricted to one category."""
    occurrences = _get_upcoming_events(amount=event_amount, category=category)
    return {'occurrences': occurrences}
from typing import List
from typing import Dict
def aggregate_collate_fn(insts: List) -> Dict[str, List[str]]:
    """Collate a batch of samples into parallel sentence/gold lists.

    Args:
        insts: list of samples, each a dict with 'snt' and 'gold' keys
    Returns:
        dict with 'snts' and 'golds' lists in batch order
    """
    snts = [inst['snt'] for inst in insts]
    golds = [inst['gold'] for inst in insts]
    # Sanity check: one gold label per sentence.
    assert len(snts) == len(golds)
    return {'snts': snts, 'golds': golds}
def isBetween(p1, p2, p, epsilon=1e-5):
    """ test if p lies on the segment between p1 and p2
    Parameters
    ----------
    p1 : np.array
    p2 : np.array
    p : np.array
    epsilon : float
        collinearity tolerance, default 1e-5
    Returns
    -------
    boolean
    Examples
    --------
    >>> p1 = np.array([0,0])
    >>> p2 = np.array([2,0])
    >>> p = np.array([1,0])
    >>> assert(isBetween(p1,p2,p)),'error'
    """
    seg = p2 - p1
    rel = p - p1
    # Not collinear with the segment -> cannot be between the endpoints.
    if abs(np.cross(rel, seg)) > epsilon:
        return False
    # Projection of p onto the segment direction must fall within [0, |seg|^2].
    proj = np.dot(rel, seg)
    if proj < 0:
        return False
    return proj <= np.dot(seg, seg)
import functools
def load_val_data(batch_size, dataset_name):
    """Load the Patch Camelyon validation split as a batched, prefetching
    tf.data pipeline (no shuffling/augmentation)."""
    ds = tfds.load(name=dataset_name, split='validation', as_supervised=True)
    # Evaluation-mode preprocessing only.
    ds = ds.map(functools.partial(preprocess_data, is_training=False))
    # Keep the (possibly smaller) final batch for full-split evaluation.
    ds = ds.batch(batch_size, drop_remainder=False)
    return ds.prefetch(tf.data.experimental.AUTOTUNE)
def dictfetchall(cursor):
    """
    Return all rows from a cursor as a list of dicts keyed by column name.

    :param cursor: a database cursor that has executed a query.
    :return: the results from the cursor as a dictionary per row.
    """
    column_names = [description[0] for description in cursor.description]
    return [dict(zip(column_names, values)) for values in cursor.fetchall()]
def dev_login(request):
    """Authenticate a dev user from POSTed credentials.

    POST: attempt authentication and redirect to the dashboard on success;
    render fixed error responses for inactive accounts or bad credentials.
    GET (or any other method): render the login form.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(reverse('dashboard:index'))
            else:
                return HttpResponse("ACCOUNT NOT ACTIVE")
        else:
            # SECURITY: never log plaintext passwords; record only the
            # attempted username.
            print("SOMEONE TRIED TO LOGIN AND FAILED")
            print("Username: {}".format(username))
            return HttpResponse("invalid credentials")
    else:
        return render(request, "login.html", {})
def CheckBackwardsCompatibility(new_universe, old_universe):
  """Checks that non-abstract types are not removed or changed in new configs.
  Method expects types in passed universe to have inherited_fields_expanded.
  Method has the side effect of setting is_changed field on everything in this
  universe that has changes except folders at the entity type level.
  Args:
    new_universe: EntityTypeUniverse object for the new config
    old_universe: EntityTypeUniverse object for the old config
  Returns:
    A list of findings generated by the compatibility check.
  Raises:
    RuntimeError: if fields are not expanded for any types
  """
  # for every non-abstract type in the old universe, there should be a
  # corresponding type with the same fields in the new universe.
  old_ns_map = old_universe.type_namespaces_map
  new_ns_map = new_universe.type_namespaces_map.copy()
  findings = []
  for ns_name in old_ns_map:
    old_ns = old_ns_map[ns_name]
    if ns_name not in new_ns_map:
      # Whole namespace was removed: warn once (on the first non-abstract
      # type found) rather than per-type, then move on.
      old_types = list(old_ns.valid_types_map.keys())
      for old_type_name in old_types:
        if old_ns.valid_types_map[old_type_name].is_abstract:
          continue
        context = findings_lib.FileContext(
            old_universe.namespace_folder_map[ns_name].GetFolderpath())
        finding = findings_lib.RemovedNamespaceWarning(context, ns_name,
                                                       list(old_types))
        new_universe.AddFinding(finding)
        findings.append(finding)
        break
      continue
    # Remove namespace from new ns map so when we're done we'll only have newly
    # created namespaces left in it.
    new_ns = new_ns_map.pop(ns_name)
    new_ns_types = new_ns.valid_types_map.copy()
    for type_name in old_ns.valid_types_map:
      old_type = old_ns.valid_types_map[type_name]
      if old_type.uid:
        # UIDs take precedence over names: a type may have been renamed or
        # moved while keeping its UID.
        new_type_uid_entry = new_universe.type_ids_map.get(old_type.uid)
        if new_type_uid_entry:
          if (new_type_uid_entry.namespace == ns_name and
              new_type_uid_entry.typename == type_name):
            new_type = new_ns_types.pop(type_name)
          else:
            new_type = new_universe.GetEntityType(new_type_uid_entry.namespace,
                                                  new_type_uid_entry.typename)
        else:
          # type has been removed
          if not old_type.is_abstract:
            finding = findings_lib.RemovedTypeWarning(old_type)
            new_ns.AddFinding(finding)
            findings.append(finding)
          continue
      elif type_name not in new_ns_types:
        if not old_type.is_abstract:
          finding = findings_lib.RemovedTypeWarning(old_type)
          new_ns.AddFinding(finding)
          findings.append(finding)
        continue
      else:
        new_type = new_ns_types.pop(type_name)
      # Check to appease python type static analyzer
      if new_type is None:
        raise RuntimeError('new_type should never be None at this point.')
      old_fields = old_type.GetAllFields()
      new_fields = new_type.GetAllFields()
      if old_fields == new_fields:
        # Fields identical: only metadata may differ, which is a non-breaking
        # change — mark changed but emit no finding.
        if (new_type.description != old_type.description or
            new_type.typename != old_type.typename or
            new_type.is_abstract != old_type.is_abstract or
            new_type.is_canonical != old_type.is_canonical):
          new_type.SetChanged()
          new_ns.SetChanged()
        continue
      new_type.SetChanged()
      new_ns.SetChanged()
      new_universe.namespace_folder_map[new_ns.namespace].SetChanged()
      if old_type.is_abstract:
        continue
      # Check added fields
      for field in old_fields:
        if field in new_fields:
          new_fields.pop(field)
          continue
        finding = findings_lib.RemovedFieldWarning(new_type, field)
        new_type.AddFinding(finding)
        findings.append(finding)
      # Anything left in new_fields was added; only required additions break
      # compatibility.
      for field in new_fields:
        if new_fields[field].optional:
          continue
        finding = findings_lib.AddedFieldWarning(new_type, field)
        new_type.AddFinding(finding)
        findings.append(finding)
    # Types left over in the new namespace did not exist before.
    for new_type in new_ns_types.values():
      new_type.SetChanged()
  # Mark anything new as changed
  for ns_name in new_ns_map:
    new_ns = new_ns_map[ns_name]
    new_ns.SetChanged()
    new_universe.namespace_folder_map[new_ns.namespace].SetChanged()
    for new_type in new_ns.valid_types_map.values():
      new_type.SetChanged()
  return findings
def pgram(N, years, fname):
    """
    Calculate Lomb-Scargle periodograms of LSST light curves.

    For each of the first *N* simulated light curves in
    ``simulations/<fname>/``, restrict to the first *years* years of data,
    compute a periodogram on a fixed 2-100 day period grid, and record the
    period of the highest local peak (0 if no peak exists).  Results are
    appended to ``results/<fname>_<years>yr_results.txt``.

    :param N: number of light-curve files (ids 0..N-1, zero-padded to 4)
    :param years: length of the observing window in years
    :param fname: simulation set name used for input/output paths
    :return: array of best-fit periods, one per light curve
    """
    ps = np.linspace(2, 100, 1000)  # the period array (in days)
    print("Computing periodograms")
    # Now compute LS pgrams for a set of LSST light curves & save highest peak
    ids = np.arange(N)
    # Use a float array: np.zeros_like(np.arange(N)) is integer-typed and
    # would silently truncate fractional best-fit periods on assignment.
    periods = np.zeros(N)
    for i, lc_id in enumerate(ids):
        sid = str(int(lc_id)).zfill(4)
        x, y, yerr = np.genfromtxt("simulations/{0}/{1}.txt".format(fname,
                                                                    sid)).T
        m = x < years * 365.25
        # Apply the time mask once per column: `yerr[m][m]` re-applied the
        # full-length mask to an already-shortened array, which fails on
        # modern numpy whenever the mask excludes any point.
        xt, yt, yerrt = x[m], y[m], yerr[m]
        model = LombScargle().fit(xt, yt, yerrt)  # compute pgram
        pgram = model.periodogram(ps)
        # find local maxima (strictly higher than both neighbours)
        peaks = np.array([j for j in range(1, len(ps)-1) if pgram[j-1] <
                          pgram[j] and pgram[j+1] < pgram[j]])
        if len(peaks):
            period = ps[pgram == max(pgram[peaks])][0]
        else:
            period = 0
        periods[i] = period
    data = np.vstack((ids, periods))
    # Append so repeated runs accumulate in the same results file.
    with open("results/{0}_{1}yr_results.txt".format(fname, years), "a") as f:
        np.savetxt(f, data.T)
    return periods
import requests
def check_version(version, server_url, client='cli'):
    """
    Check if the current version of the client software is supported by the One Codex
    backend. Returns a tuple with two values:
        - True if the user *must* upgrade their software, otherwise False
        - An error message if the user should upgrade, otherwise None.
    """
    def version_inadequate(client_version, server_version):
        """
        Return True if client_version is older than server_version.

        Compares dotted versions component-by-component as integers: the
        previous string-tuple comparison mis-ordered versions such as
        '0.10' vs '0.9'.  Falls back to string comparison if any component
        is non-numeric (e.g. '1.0b1').
        """
        client_parts = client_version.split('.')
        server_parts = server_version.split('.')
        try:
            return tuple(map(int, client_parts)) < tuple(map(int, server_parts))
        except ValueError:
            return tuple(client_parts) < tuple(server_parts)

    if client == 'cli':
        data = requests.post(server_url + 'api/v0/check_for_cli_update', data={'version': version})
    elif client == 'gui':
        data = requests.post(server_url + 'api/v0/check_upload_app_version',
                             data={'version': version})
    else:
        raise Exception('Not a valid client descriptor')
    if data.status_code != 200:
        return False, 'Error connecting to server'
    data = data.json()
    latest_version = data['latest_version']
    if client == 'cli':
        uploader_text = ' from http://www.onecodex.com/uploader.html'
    else:
        uploader_text = (' from the '
                         '<a href="http://www.onecodex.com/uploader.html">One Codex website</a>')
    # TODO: once the cli route returns this, remove this outer check
    if 'min_supported_version' in data:
        min_version = data['min_supported_version']
        if version_inadequate(version, min_version):
            return True, ('Please upgrade your client to the latest version ' +
                          '(v{}){}; '.format(latest_version, uploader_text) +
                          'this version (v{}) is no longer supported.'.format(version))
    if version_inadequate(version, latest_version):
        return False, ('Please upgrade your client to the latest version ' +
                       '(v{}){}'.format(latest_version, uploader_text))
    return False, None
def get_layout(data, width_limit):
    """Compute (label_width, bar_width) for a horizontal bar chart row.

    A row of a chart has three regions:
    1. Label region ('label1'): fixed length (max label length + 1)
    2. Intermediate region (' | '): 3 characters
    3. Bar region ('▇ or '): variable length
    The total widths of the chart will not exceed width_limit-15 characters,
    just for an aesthetic reason.
    """
    longest_label = max(len(entry[0]) for entry in data)
    label_width = longest_label + 1
    intermediate_width = 3
    bar_width = (width_limit - 15) - (label_width + intermediate_width)
    return label_width, bar_width
import os
def get_wordlist(seed):
    """
    Takes a seed value from possible_seeds and generates a wordlist from the
    thesaurus files.
    Args:
        seed - a string, one of possible_seeds
    Returns:
        a list of words for flag values
    """
    thesaurus_path = os.path.join(wordlist_dir, seed + "_thesaurus")
    with open(thesaurus_path, "r") as fh:
        contents = fh.read()
    # The thesaurus file is a single comma-separated line; strip whitespace
    # around each entry.
    return [entry.strip() for entry in contents.split(",")]
def add_stats(all_data, yesterday_date_str, rows, key):
    """ Given the entire registration report, augment a particular slice
    of the data with male/female/total counts and a label for each row.

    Each input row is a dict with per-date [male, female] registration
    counts plus totals by age group.  On output each row gains:
    - label for spreadsheet row and whether or not to translate it
    - [total, male, female] counts for 'total', 'yesterday' and
      'penultimate_day'
    On input:
        all_data -- registration report
        yesterday_date_str -- "yesterday"
        rows -- a copy of a slice of all_data (e.g., by office, or by
            subconstituency), which will be mutated in place and returned
        key -- the key in all_data which yields the desired slice
    """
    dates = all_data['dates']
    penultimate_day = dates[-2] if len(dates) >= 2 else None

    def _mft(pair):
        # [male, female] -> [total, male, female]
        males, females = pair[0], pair[1]
        return [males + females, males, females]

    result = []
    for row in rows:
        row['label'], row['label_translated'] = label(all_data, row[key], key)
        males = sum(row.get(day, [0, 0])[0] for day in dates)
        females = sum(row.get(day, [0, 0])[1] for day in dates)
        row['total'] = [males + females, males, females]
        if dates[0] != dates[-1]:
            row['yesterday'] = (_mft(row[yesterday_date_str])
                                if yesterday_date_str in row else [0, 0, 0])
            row['penultimate_day'] = (_mft(row[penultimate_day])
                                      if penultimate_day in row else [0, 0, 0])
        else:
            # Single-day report: chained assignment shares one list, matching
            # the original behavior.
            row['yesterday'] = row['penultimate_day'] = [0, 0, 0]
        result.append(row)
    return result
import nose
import re
def assert_raises_regex(exception_class, expected_regexp,
                        callable_obj=None, *args, **kwargs):
    """
    Fail unless an exception of class exception_class and with message that
    matches expected_regexp is thrown by callable when invoked with arguments
    args and keyword arguments kwargs.
    Name of this function adheres to Python 3.2+ reference, but should work in
    all versions down to 2.6.

    The actual implementation is resolved once (from nose, falling back to a
    local shim for Python 2.6) and cached in the module-level global
    ``assert_raises_regex_impl``.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    global assert_raises_regex_impl
    if assert_raises_regex_impl is None:
        try:
            # Python 3.2+
            assert_raises_regex_impl = nose.tools.assert_raises_regex
        except AttributeError:
            try:
                # 2.7+
                assert_raises_regex_impl = nose.tools.assert_raises_regexp
            except AttributeError:
                # 2.6
                # This class is copied from Python2.7 stdlib almost verbatim
                class _AssertRaisesContext(object):
                    def __init__(self, expected, expected_regexp=None):
                        self.expected = expected
                        self.expected_regexp = expected_regexp
                    # Unusual but intentional: failureException is a factory
                    # returning an AssertionError instance, so call sites can
                    # `raise self.failureException(msg)`.
                    def failureException(self, msg):
                        return AssertionError(msg)
                    def __enter__(self):
                        return self
                    def __exit__(self, exc_type, exc_value, tb):
                        if exc_type is None:
                            # Nothing was raised inside the with-block.
                            try:
                                exc_name = self.expected.__name__
                            except AttributeError:
                                exc_name = str(self.expected)
                            raise self.failureException(
                                "{0} not raised".format(exc_name))
                        if not issubclass(exc_type, self.expected):
                            # let unexpected exceptions pass through
                            return False
                        self.exception = exc_value  # store for later retrieval
                        if self.expected_regexp is None:
                            return True
                        expected_regexp = self.expected_regexp
                        # NOTE(review): `basestring` exists only on Python 2;
                        # this fallback branch is 2.6-specific by design.
                        if isinstance(expected_regexp, basestring):
                            expected_regexp = re.compile(expected_regexp)
                        if not expected_regexp.search(str(exc_value)):
                            raise self.failureException(
                                '"%s" does not match "%s"' %
                                (expected_regexp.pattern, str(exc_value)))
                        return True
                def impl(cls, regex, callable_obj, *a, **kw):
                    # With no callable, return the context manager itself so
                    # callers can use the with-statement form.
                    mgr = _AssertRaisesContext(cls, regex)
                    if callable_obj is None:
                        return mgr
                    with mgr:
                        callable_obj(*a, **kw)
                assert_raises_regex_impl = impl
    return assert_raises_regex_impl(exception_class, expected_regexp,
                                    callable_obj, *args, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.