| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
import numpy as np
def mahalanobis(data, produce=None):
"""
Calculate mahalanobis distance on a matrix of column vectors.
Assumes that rows are observations and columns are features.
Parameters
----------
data : numpy array or pandas dataframe
The data to calculate distances on (columns are variables, rows are
observations).
produce : str, optional
        Variation of the output to produce, either `squared`, `leverage`,
or `sqrt` (None). The default is None.
Returns
-------
numpy array
Array containing the distances.
"""
arr = np.array(data).reshape(data.shape[0], -1)
cent = arr - arr.mean(axis=0)
covmat = np.cov(cent, rowvar=False)
invcov = None
if arr.shape[1] == 1:
invcov = 1/covmat
else:
try:
invcov = np.linalg.inv(covmat)
except np.linalg.LinAlgError:
invcov = np.linalg.pinv(covmat)
md2 = np.sum(cent.dot(invcov) * cent, axis=1)
if produce == "squared":
return md2
elif produce == "leverage":
n = data.shape[0]
return ((md2/(n - 1)) + (1/n))
else:
return np.sqrt(md2)
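# Hedged usage sketch (not part of the original snippet): exercises mahalanobis() on
# random data; the shapes and produce values follow the docstring above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 3))
    d = mahalanobis(X)                        # sqrt of squared distances, shape (100,)
    d2 = mahalanobis(X, produce="squared")    # squared Mahalanobis distances
    lev = mahalanobis(X, produce="leverage")  # leverage values
    assert d.shape == d2.shape == lev.shape == (100,)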
|
b6dff6cfe12b4c44b6a97a6bd1f51a2250b7b63f
| 3,647,000
|
def text(el):
"""
Helper to get the text content of a BeautifulSoup item
"""
return el.get_text().strip()
|
7b34c77c79677a73cc66532fe6305635b1bdac43
| 3,647,001
|
def collect_DAC_pow(dig, IF_freq):
"""TODO: Desciption what I, the function, do"""
return external_ATS9870_CS_VNA.collect_amp(dig, IF_freq)
|
42f649520b950357419c6c26d3d5426849855929
| 3,647,002
|
def get_sha512_manifest(zfile):
"""
Get MANIFEST.MF from a bar file.
:param zfile: Open (!!!) ZipFile instance.
:type zfile: zipfile.ZipFile
"""
names = zfile.namelist()
manifest = None
for name in names:
if name.endswith("MANIFEST.MF"):
manifest = name
break
if manifest is None:
raise SystemExit
return manifest
|
7ef150bb3e89f8723649ee983085a413ec8a31df
| 3,647,003
|
def plot_heatmap(filename, xdata, ydata, binx, biny, title = None, xlabel = None, ylabel = None, dpi = 150, figsize = (10,10), tfont = 17, lfont = 14):
"""
Present variables as a 2D heatmap
to correlate magnitude and direction.
"""
def get_bin_id(mybins, vv):
for ibin in range(len(mybins)-1):
if vv >= mybins[ibin] and vv < mybins[ibin+1]:
return ibin + 1
return 0
total = len(xdata)
if total == 0:
print('Not enough data to produce heatmap, exiting...')
return
nx, nxbins = np.histogram(xdata, bins = binx)
ny, nybins = np.histogram(ydata, bins = biny)
temp_x = np.zeros(total)
temp_y = np.zeros(total)
for ij in range(total):
temp_x[ij] = get_bin_id(nxbins, xdata[ij])
temp_y[ij] = get_bin_id(nybins, ydata[ij])
table2d = np.zeros((len(nybins)-1,len(nxbins)-1))
for ij in range(len(temp_x)):
table2d[int(temp_y[ij])-1, int(temp_x[ij])-1] += 1
x_labels = []
y_labels = []
for ij in range(len(nxbins)-1):
x_labels.append('{:.2f}'.format(0.5*(nxbins[ij] + nxbins[ij+1])))
for ij in range(len(nybins)-1):
y_labels.append('{:.1f}'.format(0.5*(nybins[ij] + nybins[ij+1])))
fig, ax = plt.subplots()
fig.set_size_inches(figsize[0], figsize[1])
im = ax.imshow(table2d)
# We want to show all ticks...
ax.set_xticks(np.arange(len(x_labels)))
ax.set_yticks(np.arange(len(y_labels)))
# ... and label them with the respective list entries
ax.set_xticklabels(x_labels)
ax.set_yticklabels(y_labels)
if title:
ax.set_title(title, fontsize = tfont)
if ylabel:
ax.set_ylabel(ylabel, fontsize = lfont)
if xlabel:
ax.set_xlabel(xlabel, fontsize = lfont)
ylims = ax.get_yticks()
rr = ylims[1] - ylims[0]
ax.set_ylim(ylims[0] - rr/2., ylims[-1] + rr/2.)
cfont = max([8, lfont-2])
ax.tick_params(axis = 'both', which = 'major', labelsize = cfont)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
for i in range(len(nxbins)-1):
for j in range(len(nybins)-1):
text = ax.text(i, j, int(100.0*table2d[j, i]/total), ha="center", va="center", color="w")
fig.tight_layout()
if isinstance(filename, list):
for item in filename:
fig.savefig(item, dpi = dpi)
else:
fig.savefig(filename, dpi = dpi)
plt.close()
return 0
|
3397bf2fc02932056411ef8addde264fa50b9ea5
| 3,647,004
|
def cmake_var_string(cmake_vars):
"""Converts a dictionary to an input suitable for expand_cmake_vars.
    Ideally we would just stringify in the expand_cmake_vars() rule, but select()
interacts badly with genrules.
TODO(phawkins): replace the genrule() with native rule and delete this rule.
Args:
      cmake_vars: a dictionary with string keys and values that are convertible to
strings.
"""
return " ".join([_quote("{}={}".format(k, str(v)))
for (k, v) in cmake_vars.items()])
|
3f0fb115c54f6ee1e0e923b67412e36ca56b2ea7
| 3,647,005
|
def scattering_transform1d(n_classes, sequence_length):
""" Scattering transform
"""
log_eps = 1e-6
    x_in = layers.Input(shape=(sequence_length,))  # shape should be a tuple
x = Scattering1D(8, 12)(x_in)
x = layers.Lambda(lambda x: x[..., 1:, :])(x)
x = layers.Lambda(lambda x: tf.math.log(tf.abs(x) + log_eps))(x)
x = layers.GlobalAveragePooling1D(data_format='channels_first')(x)
x = layers.BatchNormalization(axis=1)(x)
x_out = layers.Dense(n_classes, activation='softmax')(x)
model = tf.keras.models.Model(x_in, x_out)
return model
|
53547918c5a0efa5c0e3766c770903b146eff19e
| 3,647,006
|
import zlib
def addFileContent(session, filepath, source_file_name, content_hash,
encoding):
"""
Add the necessary file contents. If the file is already stored in the
    database then its ID is returned. If content_hash is None then this
    function calculates the content hash; if it is already available at the
    caller and provided, it will not be calculated again.
This function must not be called between addCheckerRun() and
finishCheckerRun() functions when SQLite database is used! addCheckerRun()
function opens a transaction which is closed by finishCheckerRun() and
since SQLite doesn't support parallel transactions, this API call will
wait until the other transactions finish. In the meantime the run adding
transaction times out.
"""
source_file_content = None
if not content_hash:
source_file_content = get_file_content(source_file_name, encoding)
hasher = sha256()
hasher.update(source_file_content)
content_hash = hasher.hexdigest()
file_content = session.query(FileContent).get(content_hash)
if not file_content:
if not source_file_content:
source_file_content = get_file_content(source_file_name, encoding)
try:
compressed_content = zlib.compress(source_file_content,
zlib.Z_BEST_COMPRESSION)
fc = FileContent(content_hash, compressed_content)
session.add(fc)
session.commit()
except sqlalchemy.exc.IntegrityError:
            # Other transaction might have added the same content in
# the meantime.
session.rollback()
file_record = session.query(File) \
.filter(File.content_hash == content_hash,
File.filepath == filepath) \
.one_or_none()
if not file_record:
try:
file_record = File(filepath, content_hash)
session.add(file_record)
session.commit()
except sqlalchemy.exc.IntegrityError as ex:
LOG.error(ex)
# Other transaction might have added the same file in the
# meantime.
session.rollback()
file_record = session.query(File) \
.filter(File.content_hash == content_hash,
File.filepath == filepath) \
.one_or_none()
return file_record.id
|
fdd77f23151ed9627c5d9bbfb157839810c9655a
| 3,647,007
|
def model_fn_builder(num_labels, learning_rate, num_train_steps,
num_warmup_steps):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)
# TRAIN and EVAL
if not is_predicting:
(loss, predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
train_op = bert.optimization.create_optimizer(
loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)
# Calculate evaluation metrics.
def metric_fn(label_ids, predicted_labels):
accuracy = tf.metrics.accuracy(label_ids, predicted_labels)
f1_score = tf.contrib.metrics.f1_score(
label_ids,
predicted_labels)
auc = tf.metrics.auc(
label_ids,
predicted_labels)
recall = tf.metrics.recall(
label_ids,
predicted_labels)
precision = tf.metrics.precision(
label_ids,
predicted_labels)
true_pos = tf.metrics.true_positives(
label_ids,
predicted_labels)
true_neg = tf.metrics.true_negatives(
label_ids,
predicted_labels)
false_pos = tf.metrics.false_positives(
label_ids,
predicted_labels)
false_neg = tf.metrics.false_negatives(
label_ids,
predicted_labels)
return {
"eval_accuracy": accuracy,
"f1_score": f1_score,
"auc": auc,
"precision": precision,
"recall": recall,
"true_positives": true_pos,
"true_negatives": true_neg,
"false_positives": false_pos,
"false_negatives": false_neg
}
eval_metrics = metric_fn(label_ids, predicted_labels)
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op)
else:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops=eval_metrics)
else:
(predicted_labels, log_probs) = create_model(
is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
predictions = {
'probabilities': log_probs,
'labels': predicted_labels
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Return the actual model function in the closure
return model_fn
|
570f5297fbcc57eaae1d08e9ee816207db707ffd
| 3,647,008
|
def to_numeric_df(kdf):
"""
Takes a dataframe and turns it into a dataframe containing a single numerical
vector of doubles. This dataframe has a single field called '_1'.
TODO: index is not preserved currently
    :param kdf: the input dataframe
:return: a pair of dataframe, list of strings (the name of the columns
that were converted to numerical types)
"""
# TODO, it should be more robust.
accepted_types = {np.dtype(dt) for dt in [np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64, np.bool_]}
numeric_fields = [fname for fname in kdf._metadata.column_fields
if kdf[fname].dtype in accepted_types]
numeric_df = kdf._sdf.select(*numeric_fields)
va = VectorAssembler(inputCols=numeric_fields, outputCol="_1")
v = va.transform(numeric_df).select("_1")
return v, numeric_fields
|
5eba4585ca55360bfff959b4f46580cc747e3f93
| 3,647,009
|
def FancyAnalyzer(expression=r"\s+", stoplist=STOP_WORDS, minsize=2,
maxsize=None, gaps=True, splitwords=True, splitnums=True,
mergewords=False, mergenums=False):
"""Composes a RegexTokenizer with an IntraWordFilter, LowercaseFilter, and
StopFilter.
>>> ana = FancyAnalyzer()
>>> [token.text for token in ana(u"Should I call getInt or get_real?")]
[u"should", u"call", u"getInt", u"get", u"int", u"get_real", u"get", u"real"]
:param expression: The regular expression pattern to use to extract tokens.
:param stoplist: A list of stop words. Set this to None to disable
the stop word filter.
:param minsize: Words smaller than this are removed from the stream.
    :param maxsize: Words longer than this are removed from the stream.
:param gaps: If True, the tokenizer *splits* on the expression, rather
than matching on the expression.
"""
ret = RegexTokenizer(expression=expression, gaps=gaps)
iwf = IntraWordFilter(splitwords=splitwords, splitnums=splitnums,
mergewords=mergewords, mergenums=mergenums)
lcf = LowercaseFilter()
swf = StopFilter(stoplist=stoplist, minsize=minsize)
return ret | iwf | lcf | swf
|
50fddbbdc22770b3a9b732bb328bf48c0407aafe
| 3,647,010
|
def find_res_shift(x_min, x_max, y_min, y_max, z_min, z_max, target_id, my_sites, res_two_three_dict, my_mols, color_list, button_list):
"""Function to find the relavant residue shifts"""
print "FINDING MAX SHIFTS"
max_shift = []
# Get the delta value
delta = 5.0
# Filter residues to the ones within 1.0 A of any molecule AND then sort by size
tot_res = Residue.objects.filter(target_id=target_id)
if x_max:
criterion1 = Q(x_max__gte=x_max + delta)
criterion2 = Q(x_max__gte=x_min + delta)
near_res = tot_res.exclude(criterion1 & criterion2)
criterion1 = Q(x_min__lte=x_max - delta)
criterion2 = Q(x_min__lte=x_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
criterion1 = Q(y_max__gte=y_max + delta)
criterion2 = Q(y_max__gte=y_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do y_min
criterion1 = Q(y_min__lte=y_max - delta)
criterion2 = Q(y_min__lte=y_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do Z
# First Z_max
criterion1 = Q(z_max__gte=z_max + delta)
criterion2 = Q(z_max__gte=z_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now Z min
criterion1 = Q(z_min__lte=z_max - delta)
criterion2 = Q(z_min__lte=z_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
near_res = set(near_res.filter().values_list("res_name", "res_num"))
else:
tot_near_res = []
tot_res_d = {}
for my_site in my_sites:
criterion1 = Q(x_max__gte=my_site.x_max + delta)
criterion2 = Q(x_max__gte=my_site.x_min + delta)
near_res = tot_res.exclude(criterion1 & criterion2)
criterion1 = Q(x_min__lte=my_site.x_max - delta)
criterion2 = Q(x_min__lte=my_site.x_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
criterion1 = Q(y_max__gte=my_site.y_max + delta)
criterion2 = Q(y_max__gte=my_site.y_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do y_min
criterion1 = Q(y_min__lte=my_site.y_max - delta)
criterion2 = Q(y_min__lte=my_site.y_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now do Z
# First Z_max
criterion1 = Q(z_max__gte=my_site.z_max + delta)
criterion2 = Q(z_max__gte=my_site.z_min + delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now Z min
criterion1 = Q(z_min__lte=my_site.z_max - delta)
criterion2 = Q(z_min__lte=my_site.z_min - delta)
near_res = near_res.exclude(criterion1 & criterion2)
# Now we get the near res for this site
near_res = set(near_res.filter().values_list("res_name", "res_num"))
for res in near_res:
if res in tot_res_d:
tot_res_d[res].append(my_site.pk)
else:
tot_res_d[res] = [my_site.pk]
tot_near_res.extend(list(near_res))
near_res = tot_near_res
print "Getting clusters"
my_res = ResShift.objects.filter(target_id=target_id, res_name__in=[x[0] for x in near_res], res_num__in=[x[1] for x in near_res])
# Only find those close to the BOX / main
out_res_d = {}
for i, val in enumerate(sorted(my_res.values_list("max_shift", "res_name", "pk", "res_num"),reverse=True)):
my_mol = Molecule()
# Define the site the residues are in
res_hash = (val[1], val[3])
if res_hash in tot_res_d:
my_mol.sites = " ".join(["SITE"+ str(x) for x in tot_res_d[res_hash]])
#my_mol.my_list = [(x[0]) for x in sorted(ResShift.objects.filter(target_id=target).values_list("max_shift"),reverse=True)[:5]]
if val[1] in res_two_three_dict:
this_res_name = res_two_three_dict[val[1]]
else:
this_res_name = "UNI"
my_mol.res = "^" + this_res_name + str(val[3])
out_res_d[my_mol.res] = {}
my_mol.my_name = val[1] + ": " + str(val[3])
my_mol.shift = val[0]
my_mol.button = button_list[i % len(button_list)]
my_mol.bg = color_list[i % len(color_list)]
my_mol.res_cl = {}
# Now get how the molecules rank on this residue move
        # instead we want to go through molecules
my_mol.my_list = []
# Now colour the clusters
for item in my_mols:
this_res = tot_res.filter(res_name=val[1], res_num=val[3],
prot_id__molecule=item)
if len(this_res) ==0:
new_mol = Molecule()
# Get the PK from here
new_mol.pk = item.pk
new_mol.shift = 0.0
new_mol.colour = ""
out_res_d[my_mol.res][item.prot_id.code] = ""
my_mol.my_list.append(new_mol)
elif len(this_res) == 1:
this_res = this_res[0]
new_mol = Molecule()
# Get the PK from here
new_mol.pk = item.pk
new_mol.shift = this_res.max_shift
new_mol.clus_id = "RESCL" + str(this_res.clust_id) + "_" + val[1] + "_" + str(val[3])
my_mol.res_cl["RESCL" + str(this_res.clust_id) + "_" + val[1] + "_" + str(val[3])] = [color_list[this_res.clust_id % len(color_list)], button_list[this_res.clust_id % len(button_list)]]
new_mol.colour = color_list[this_res.clust_id % len(color_list)]
out_res_d[my_mol.res][this_res.prot_id.code] = button_list[this_res.clust_id % len(button_list)]
my_mol.my_list.append(new_mol)
else:
print "ERROR MORE THAN ONE MOLS"
# Now append this guy to the list
max_shift.append(my_mol)
return json.dumps(out_res_d), max_shift
|
d46a146071f5cd48ab1382d03ac4678cc2c301fd
| 3,647,011
|
def lookup(*getters):
"""Find data by provided parameters and group by type respectively"""
getters = list(reversed(getters))
def wrap(struct):
while getters:
_type, getter = getters.pop()
if _type == G_TYPE_KEY:
struct = getter(struct)
continue
if _type == G_TYPE_ARR:
n_getters = list(reversed(getters))
return [lookup(*n_getters)(elem) for elem in getter(struct)]
return struct
return wrap
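# Hedged usage sketch (not part of the original snippet). G_TYPE_KEY and G_TYPE_ARR are
# module-level markers that this snippet does not show; the values below are assumptions
# made only so the example runs. Note that wrap() consumes the closed-over getter list,
# so each function returned by lookup() is effectively single-use.
if __name__ == "__main__":
    G_TYPE_KEY, G_TYPE_ARR = "key", "arr"  # assumed marker values
    data = {"users": [{"name": "ada"}, {"name": "bob"}]}
    get_names = lookup(
        (G_TYPE_KEY, lambda d: d["users"]),   # descend into the "users" key
        (G_TYPE_ARR, lambda d: d),            # map the remaining getters over the list
        (G_TYPE_KEY, lambda u: u["name"]),    # pick "name" from each element
    )
    print(get_names(data))  # ['ada', 'bob']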
|
937a44e8366016cb136f0b40a91448b97c52357d
| 3,647,012
|
def compute_one(t, lhs, rhs, **kwargs):
""" Join two pandas data frames on arbitrary columns
The approach taken here could probably be improved.
To join on two columns we force each column to be the index of the
dataframe, perform the join, and then reset the index back to the left
side's original index.
"""
result = pd.merge(lhs, rhs,
left_on=t.on_left, right_on=t.on_right,
how=t.how)
return result.reset_index()[t.columns]
|
c050fdeae2e354be3748984a32ad96b81593355b
| 3,647,013
|
def simulate(mat, det, e0=20.0, dose=defaultDose, withPoisson=True, nTraj=defaultNumTraj, sf=defaultCharFluor, bf=defaultBremFluor, xtraParams=defaultXtraParams):
"""simulate(mat,det,[e0=20.0],[withPoisson=True],[nTraj=defaultNumTraj],[dose=defaultDose],[sf=defaultCharFluor],[bf=defaultBremFluor],[xtraParams=defaultXtraParams])
Simulate a bulk spectrum for the material mat on the detector det at beam energy e0 (in keV). If \
sf then simulate characteristic secondary fluorescence. If bf then simulate bremsstrahlung secondary \
fluorescence. nTraj specifies the number of electron trajectories. dose is in nA*sec."""
mat = dtsa2.material(mat)
if not isinstance(mat, epq.Material):
print u"Please provide a material with a density - %s" % mat
tmp = u"MC simulation of bulk %s at %0.1f keV%s%s" % (mat, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
print tmp
res = base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildBulk, { "Material" : mat }, xtraParams)
res.getProperties().setCompositionProperty(epq.SpectrumProperties.StandardComposition, mat)
return res
|
5ffdf63038fa2ba4305001f1b1ec5da0c13ebf3d
| 3,647,014
|
def link_match_family(link, family_name):
"""Checks whether the a link can be used in a given family.
When this function is used with built-in family names, it tests whether the link name can be
used with the given built-in family. If the family name is not known, we return True because
the user is working with a custom ``Family`` object.
Which links can work with which families are taken from statsmodels.
"""
if family_name in FAMILY_LINKS:
return link in FAMILY_LINKS[family_name]
# Custom family, we don't know what link functions can be used
return True
|
7d95556b5ff6537bc994d7b017263ced13d4efc0
| 3,647,015
|
import tqdm
def auc(test_set, user_factors, subreddit_factors, subreddits, users):
"""
Returns the auc score on a test data set
"""
num_users = len(test_set)
total = 0
# treat the signal as 1 as per the implicit bpr paper
for subreddit, user, signal in tqdm.tqdm_notebook(test_set): # outer summation
# inner summation
# TODO: try to parallelize
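        # NOTE: users_index, subreddits_index, num_subreddits and E_u are assumed to be
        # module-level globals built elsewhere in the notebook; they are not defined in
        # this snippet.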
u = users_index[user]
i = subreddits_index[subreddit]
x_ui = user_factors[u].dot(subreddit_factors[i])
js = []
for j in range(0, num_subreddits):
if j != i and j not in E_u[u]:
js.append(j)
total += np.sum(np.heaviside(x_ui - user_factors[u].dot(subreddit_factors[js].T), 0)) / len(js)
# for j in range(0, subreddits):
# numel = 0
# total_user = 0
# if j != i and j not in E_u[u]:
# numel += 1
# x_uj = user_factors[u].dot(subreddit_factors[j])
# total_user += heaviside(x_ui - x_uj)
# total += (total_user * 1.0 / numel)
return total / num_users
|
93179ede0fb84e7f491f147d1a356036d2908a2f
| 3,647,016
|
def convert_AST_to_expr(ast):
"""Creates expression from the AST."""
converter = ASTToInstrBlockConverter()
instrs = converter.my_visit(ast)
return instrs[0]
|
b4dca77c48cd0001a2f55c71a077a6b195a181ce
| 3,647,017
|
import time
def add_data_from_api(service, repo, variable_type, keys):
"""Retrieves Github API data. Utilizes the function from github_api/github.py to do so.
This function adds the retrieved variables directly to the data dictionary.
Args:
service (Service): Service object with API connection and metadata vars
repo (Repo) : Repository variables bundled together
variable_type (string): which type of variable should be retrieved.
Supported are: contributors, languages, readmes
keys (list): A list of the keys for the retrieved data
Returns:
boolean: Whether the request was successful or not.
In case of unsuccessful request, skip repository
"""
# for nested data only, otherwise key can be directly used
if variable_type in ("contributors", "languages"):
data[variable_type] = []
retrieved_data = get_data_from_api(service, repo, variable_type, verbose=False)
if retrieved_data is not None:
if variable_type in ("contributors", "languages"):
for entry in retrieved_data:
data[variable_type].append(dict(zip(keys, entry[1:])))
elif variable_type == "readmes":
data[keys[0]] = retrieved_data[1]
else:
return False
time.sleep(2)
return True
|
32361d85fb92efd03b79f74f8db2e02a8fcd9866
| 3,647,018
|
def part1(data):
"""
>>> part1(read_input())
0
"""
return data
|
1482c41b112a3e74775e71c4aabbd588de2b6553
| 3,647,019
|
import torch
def get_rectanguloid_mask(y, fat=1):
"""Get a rectanguloid mask of the data"""
M = y.nonzero().max(0)[0].tolist()
m = y.nonzero().min(0)[0].tolist()
M = [min(M[i] + fat, y.shape[i] - 1) for i in range(3)]
m = [max(v - fat, 0) for v in m]
mask = torch.zeros_like(y)
mask[m[0] : M[0], m[1] : M[1], m[2] : M[2]] = 1
return mask
|
0ff3ab25f2ab109eb533c7e4fafd724718dbb986
| 3,647,020
|
import re
def colorize_output(output):
"""Add HTML colors to the output."""
# Task status
color_output = re.sub(r'(ok: [-\w\d\[\]]+)',
r'<font color="green">\g<1></font>',
output)
color_output = re.sub(r'(changed: [-\w\d\[\]]+)',
r'<font color="orange">\g<1></font>',
color_output)
if not re.search(r'failed: 0', color_output):
color_output = re.sub(r'(failed: [-\w\d\[\]]+)',
r'<font color="red">\g<1></font>',
color_output)
color_output = re.sub(r'(fatal: [-\w\d\[\]]+):',
r'<font color="red">\g<1></font>',
color_output)
# Play recap
color_output = re.sub(r'(ok=[\d]+)',
r'<font color="green">\g<1></font>',
color_output)
color_output = re.sub(r'(changed=[\d]+)',
r'<font color="orange">\g<1></font>',
color_output)
color_output = re.sub(r'(failed=[1-9][0-9]*)',
r'<font color="red">\g<1></font>',
color_output)
return color_output
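# Hedged usage sketch (not part of the original snippet): feeds a few lines of
# Ansible-style output through colorize_output() and prints the resulting HTML.
if __name__ == "__main__":
    sample = (
        "ok: [web01]\n"
        "changed: [web02]\n"
        "PLAY RECAP\n"
        "web01 : ok=3 changed=1 unreachable=0 failed=0\n"
    )
    print(colorize_output(sample))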
|
80759da16262d850b45278faede4b60b7aa4a7c6
| 3,647,021
|
import argparse
def parse_args():
"""Parse command-line arguments"""
parser = argparse.ArgumentParser(
description='Stop a subjective evaluation without ' +
'destroying resources')
parser.add_argument('--aws_api_key', help='The public API key for AWS')
parser.add_argument(
'--aws_api_secret_key',
help='The private API key for AWS')
parser.add_argument('--heroku_api_key', help='The API key for Heroku')
parser.add_argument(
'--mysql_local_user',
help='The username of the local MySQL database')
parser.add_argument(
'--mysql_local_password',
help='The corresponding password of the local MySQL database')
return parser.parse_args()
|
661a9bdec94b88c06f6d4080ef20cc31f81901ff
| 3,647,022
|
from datetime import date
def parse_user_date(usr_date: str) -> date:
"""
Parses a user's date input, prompts the user to input useful date data if user's date was
invalid
Args:
usr_date : str, user input of date info. Should be in <yyyy/mm/dd> format
Returns:
valid datetime.date() object
"""
expected_len = len("yyyy/mm/dd")
if usr_date is None:
return prompt_user_date()
try:
dt_list = usr_date[0:expected_len].split("/")
# Ensure right number of fields
if len(dt_list) >= 3:
try:
# Ensure year is long enough to be useful
if len(dt_list[0]) == 4:
year = int(dt_list[0])
else:
raise BreakoutError()
# set rest of info
month = int(dt_list[1])
day = int(dt_list[2])
# deal with bad user characters
except ValueError:
raise BreakoutError()
# create date if user isn't a dingus
calendar_date = date(year, month, day)
else:
raise BreakoutError()
except BreakoutError:
# Make user give us a useful date if they are a dingus
calendar_date = prompt_user_date()
return calendar_date
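# The snippet above relies on two names it does not define: BreakoutError and
# prompt_user_date(). The sketches below are assumptions added for illustration only,
# not the original implementations.
class BreakoutError(Exception):
    """Assumed control-flow exception used by parse_user_date() above."""

def prompt_user_date() -> date:
    """Assumed fallback: keep prompting until the input parses as yyyy/mm/dd."""
    while True:
        raw = input("Enter a date as yyyy/mm/dd: ")
        try:
            year, month, day = (int(part) for part in raw.split("/")[:3])
            return date(year, month, day)
        except (ValueError, TypeError):
            print("Invalid date, try again.")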
|
10becdce6ef4fdc5606ce110b09e102c186dfc04
| 3,647,023
|
def up_sampling_block(x, n_filter, kernel_size, name,
activation='relu', up_size=(2, 2)):
"""Xception block
x => sepconv block -> sepconv block -> sepconv block-> add(Act(x)) =>
"""
x = layers.UpSampling2D(size=up_size, name=name+'up')(x)
if activation:
x = layers.Activation('relu', name=name+'_act')(x)
x = sepconv_bn_relu(x, n_filter, kernel_size, padding='same', activation=None, name=name+'_sepconv1')
return x
|
001fdb6475da138bedfdb891af6e657e5ce6160c
| 3,647,024
|
def connected_components(graph):
"""
Connected components.
    @attention: Identification of connected components is meaningful only for non-directed graphs.
@type graph: graph
@param graph: Graph.
@rtype: dictionary
@return: Pairing that associates each node to its connected component.
"""
visited = {}
count = 1
# For 'each' node not found to belong to a connected component, find its connected component.
for each in graph:
if (each not in visited):
_dfs(graph, visited, count, each)
count = count + 1
return visited
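# The snippet above calls a _dfs helper that is not shown. A minimal sketch of what such
# a helper could look like (an assumption, not the original implementation), plus a tiny
# usage example:
def _dfs(graph, visited, count, node):
    """Label `node` with component id `count` and recurse into unvisited neighbours."""
    visited[node] = count
    for neighbour in graph[node]:
        if neighbour not in visited:
            _dfs(graph, visited, count, neighbour)

if __name__ == "__main__":
    # Two components expected: {'a': 1, 'b': 1, 'c': 2}
    print(connected_components({'a': ['b'], 'b': ['a'], 'c': []}))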
|
80c5bfc679c1dc274db6a3bf8f8becfa1fc99d4f
| 3,647,025
|
import typing
def format_keyvals(
entries: typing.Iterable[typing.Tuple[str, typing.Union[None, str, urwid.Widget]]],
key_format: str = "key",
value_format: str = "text",
indent: int = 0
) -> typing.List[urwid.Columns]:
"""
Format a list of (key, value) tuples.
Args:
entries: The list to format. keys must be strings, values can also be None or urwid widgets.
The latter makes it possible to use the result of format_keyvals() as a value.
key_format: The display attribute for the key.
value_format: The display attribute for the value.
indent: Additional indent to apply.
"""
max_key_len = max((len(k) for k, v in entries if k is not None), default=0)
max_key_len = min(max_key_len, KEY_MAX)
if indent > 2:
indent -= 2 # We use dividechars=2 below, which already adds two empty spaces
ret = []
for k, v in entries:
if v is None:
v = urwid.Text("")
elif not isinstance(v, urwid.Widget):
v = urwid.Text([(value_format, v)])
ret.append(
urwid.Columns(
[
("fixed", indent, urwid.Text("")),
(
"fixed",
max_key_len,
urwid.Text([(key_format, k)])
),
v
],
dividechars=2
)
)
return ret
|
eb1769a3d7b47b6b4f24f02dcffd3639592c8dc6
| 3,647,026
|
def get_item_workdays(scorecard):
""" Gets the number of days in this period"""
supplier = frappe.get_doc('Supplier', scorecard.supplier)
total_item_days = frappe.db.sql("""
SELECT
SUM(DATEDIFF( %(end_date)s, po_item.schedule_date) * (po_item.qty))
FROM
`tabPurchase Order Item` po_item,
`tabPurchase Order` po
WHERE
po.supplier = %(supplier)s
AND po_item.received_qty < po_item.qty
AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s
AND po_item.parent = po.name""",
{"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0)[0][0]
if not total_item_days:
total_item_days = 0
return total_item_days
|
cec620114ae784e5c272d41b6e1028175b466691
| 3,647,027
|
def load_data(ticker='SNAP', barSizeSetting='3 mins', what='TRADES'):
"""
loads historical tick data
"""
if what == 'TRADES':
folder = '/home/nate/Dropbox/data/ib_full_adj/data/'
elif what == 'ADJUSTED_LAST':
folder = '/home/nate/Dropbox/data/ib_split_adj_only/data/'
bss = barSizeSetting.replace(' ', '_')
trades = pd.read_hdf(folder + ticker + '_trades_' + bss + '.h5')
# fill 0 volume with 1
trades.at[trades['volume'] == 0, 'volume'] = 1
bid = pd.read_hdf(folder + ticker + '_bid_' + bss + '.h5')
ask = pd.read_hdf(folder + ticker + '_ask_' + bss + '.h5')
opt_vol = pd.read_hdf(folder + ticker + '_opt_vol_' + bss + '.h5')
# drop duplicates just in case...dupes throw off concat
trades.drop_duplicates(inplace=True)
bid.drop_duplicates(inplace=True)
ask.drop_duplicates(inplace=True)
opt_vol.drop_duplicates(inplace=True)
# sometimes with dupes, index is no longer sorted
trades.sort_index(inplace=True)
bid.sort_index(inplace=True)
ask.sort_index(inplace=True)
opt_vol.sort_index(inplace=True)
# TODO: find opt_vol and other files with problems
# e.g. found BOX opt_vol file had some price data in it
# look for outliers or matches within other DFs, then delete messed up DFs
# rename columns so can join to one big dataframe
bid.columns = ['bid_' + c for c in bid.columns]
ask.columns = ['ask_' + c for c in ask.columns]
opt_vol.columns = ['opt_vol_' + c for c in opt_vol.columns]
# inner join should drop na's but just to be safe
# opt_vol has missing values at the end of each day for some reason...
# so cant do inner join or dropna
full_df = pd.concat([trades, bid, ask, opt_vol], axis=1)#, join='inner').dropna()
full_df.index = full_df.index.tz_localize('America/New_York')
return full_df
|
8ad01227131322f7780e75e1e72f89b1da3fef0b
| 3,647,028
|
def time_human(x):
""" Gets time as human readable """
# Round time
x = round(x, 2)
for number, unit in [(60, "s"), (60, "min"), (24, "h"), (365, "days")]:
if abs(x) < number:
return f"{x:.2f} {unit}"
x /= number
return f"{x:.2f} years"
|
3f7f51ac7454e429fc30da64eed075aaf1f10b5b
| 3,647,029
|
from typing import Dict
def transaction_json_to_binary_codec_form(
dictionary: Dict[str, XRPL_VALUE_TYPE]
) -> Dict[str, XRPL_VALUE_TYPE]:
"""
Returns a new dictionary in which the keys have been formatted as CamelCase and
standardized to be serialized by the binary codec.
Args:
dictionary: The dictionary to be reformatted.
Returns:
A new dictionary object that has been reformatted.
"""
# This method should be made private when it is removed from `xrpl.transactions`
return {
_key_to_tx_json(key): _value_to_tx_json(value)
for (key, value) in dictionary.items()
}
|
94516b8418fc25d1966d6f5c969f9b4e411100ab
| 3,647,030
|
def conv3x3(in_planes, out_planes, stride=1, groups=1):
"""3x3 convolution with padding"""
return nn.Conv1d(in_planes, out_planes, kernel_size=7, stride=stride,
padding=3, bias=False, groups=groups)
|
90fa7549a2ba8722edab3712bac4d3af7fb5f2f2
| 3,647,031
|
def limit_sub_bbox(bbox, sub_bbox):
"""
>>> limit_sub_bbox((0, 1, 10, 11), (-1, -1, 9, 8))
(0, 1, 9, 8)
>>> limit_sub_bbox((0, 0, 10, 10), (5, 2, 18, 18))
(5, 2, 10, 10)
"""
minx = max(bbox[0], sub_bbox[0])
miny = max(bbox[1], sub_bbox[1])
maxx = min(bbox[2], sub_bbox[2])
maxy = min(bbox[3], sub_bbox[3])
return minx, miny, maxx, maxy
|
fa5b7763b30442fba137814ac7b0336528c4540b
| 3,647,032
|
def _load_taxa_incorp_list(inFile, config):
"""Loading list of taxa that incorporate isotope.
Parameters
----------
inFile : str
File name of taxon list
config : config object
Returns
-------
{library:[taxon1, ...]}
"""
taxa = {}
    with open(inFile, 'r') as inFH:
for line in inFH:
line = line.rstrip().split('\t')
# if 1 column, using config-defined libraries
if len(line) == 1:
line = [[x,line[0]] for x in config.keys()]
else:
line = [line]
for x in line:
try:
taxa[x[0]].append(x[1])
except KeyError:
taxa[x[0]] = [x[1]]
return taxa
|
d614f2be0c5ad4fa61d1d70915428324d7af97b4
| 3,647,033
|
def get_subsections(config: Config) -> t.List[t.Tuple[str, t.Dict]]:
"""Collect parameter subsections from main configuration.
If the `parameters` section contains subsections (e.g. '[parameters.1]',
'[parameters.2]'), collect the subsection key-value pairs. Otherwise,
return an empty dictionary (i.e. there are no subsections).
This is useful for specifying multiple API keys for your configuration.
For example:
```
[parameters.alice]
api_key=KKKKK1
api_url=UUUUU1
[parameters.bob]
api_key=KKKKK2
api_url=UUUUU2
[parameters.eve]
api_key=KKKKK3
api_url=UUUUU3
```
"""
return [(name, params) for name, params in config['parameters'].items()
if isinstance(params, dict)] or [('default', {})]
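# Hedged usage sketch (not part of the original snippet), written as comments because the
# Config type and the `t` (typing) import live elsewhere in the module. With a config
# behaving like a nested dict:
#
#   cfg = {'parameters': {'api_url': 'UUUUU0',
#                         'alice': {'api_key': 'KKKKK1'},
#                         'bob': {'api_key': 'KKKKK2'}}}
#   get_subsections(cfg)
#   # -> [('alice', {'api_key': 'KKKKK1'}), ('bob', {'api_key': 'KKKKK2'})]
#
#   get_subsections({'parameters': {'api_key': 'KKKKK1'}})
#   # -> [('default', {})]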
|
0cb022fb6ae192736186a519c6ffbcf9bcfdf541
| 3,647,034
|
def calc_psi(B, rev=False):
"""Calc Flux function (only valid in 2d)
Parameters:
B (VectorField): magnetic field, should only have two
spatial dimensions so we can infer the symmetry dimension
rev (bool): since this integration doesn't like going
through undefined regions (like within 1 earth radius of
the origin for openggcm), you can use this to start
integrating from the opposite corner.
Returns:
ScalarField: 2-D scalar flux function
Raises:
ValueError: If B has <> 2 spatial dimensions
"""
# TODO: if this is painfully slow, i bet just putting this exact
# code in a cython module would make it a bunch faster, the problem
# being that the loops are in python instead of some broadcasting
# numpy type thing
B = B.slice_reduce(":")
# try to guess if a dim of a 3D field is invariant
reduced_axes = []
if B.nr_sdims > 2:
slcs = [slice(None)] * B.nr_sdims
for i, nxi in enumerate(B.sshape):
if nxi <= 2:
slcs[i] = 0
reduced_axes.append(B.crds.axes[i])
slcs.insert(B.nr_comp, slice(None))
B = B[slcs]
# ok, so the above didn't work... just nip out the smallest dim?
if B.nr_sdims == 3:
slcs = [slice(None)] * B.nr_sdims
i = np.argmin(B.sshape)
slcs[i] = 0
reduced_axes.append(B.crds.axes[i])
logger.warning("Tried to get the flux function of a 3D field. "
"I can't do that, so I'm\njust ignoring the {0} "
"dimension".format(reduced_axes[-1]))
slcs.insert(B.nr_comp, slice(None))
B = B[slcs]
if B.nr_sdims != 2:
raise ValueError("flux function only implemented for 2D fields")
comps = ""
for comp in "xyz":
if comp in B.crds.axes:
comps += comp
# ex: comps = "yz", comp_inds = [1, 2]
comp_inds = [dict(x=0, y=1, z=2)[comp] for comp in comps]
# Note: what follows says y, z, but it has been generalized
# to any two directions, so hy isn't necessarily hy, but it's
# easier to see at a glance if it's correct using a specific
# example
ycc, zcc = B.get_crds(comps)
comp_views = B.component_views()
hy, hz = comp_views[comp_inds[0]], comp_views[comp_inds[1]]
dy = ycc[1:] - ycc[:-1]
dz = zcc[1:] - zcc[:-1]
ny, nz = len(ycc), len(zcc)
A = np.empty((ny, nz), dtype=B.dtype)
if rev:
A[-1, -1] = 0.0
for i in range(ny - 2, -1, -1):
A[i, -1] = A[i + 1, -1] - dy[i] * 0.5 * (hz[i, -1] + hz[i + 1, -1])
for j in range(nz - 2, -1, -1):
A[:, j] = A[:, j + 1] + dz[j] * 0.5 * (hy[:, j + 1] + hy[:, j])
else:
A[0, 0] = 0.0
for i in range(1, ny):
A[i, 0] = A[i - 1, 0] + dy[i - 1] * 0.5 * (hz[i, 0] + hz[i - 1, 0])
for j in range(1, nz):
A[:, j] = A[:, j - 1] - dz[j - 1] * 0.5 * (hy[:, j - 1] + hy[:, j])
psi = field.wrap_field(A, B.crds, name="psi", center=B.center,
pretty_name=r"$\psi$", parents=[B])
if reduced_axes:
slc = "..., " + ", ".join("{0}=None".format(ax) for ax in reduced_axes)
psi = psi[slc]
return psi
|
80beea86346fadd7e96b82c1da6eba56bde597fd
| 3,647,035
|
def infer_printed_type(t):
"""Infer the types that should be printed.
The algorithm is as follows:
1. Replace all constant types with None.
2. Apply type-inference on the resulting type.
3. For the first internal type variable that appears, find a constant
whose type contains that variable, set that constant to print_type.
4. Repeat until no internal type variables appear.
"""
def clear_const_type(t):
if t.is_const() and not hasattr(t, "print_type"):
t.backupT = t.T
t.T = None
elif t.is_comb():
clear_const_type(t.fun)
clear_const_type(t.arg)
elif t.is_abs():
if not hasattr(t, "print_type"):
t.backup_var_T = t.var_T
t.var_T = None
clear_const_type(t.body)
def recover_const_type(t):
if t.is_const():
t.T = t.backupT
elif t.is_comb():
recover_const_type(t.fun)
recover_const_type(t.arg)
elif t.is_abs():
t.var_T = t.backup_var_T
recover_const_type(t.body)
for i in range(100):
clear_const_type(t)
type_infer(t, forbid_internal=False)
def has_internalT(T):
return any(is_internal_type(subT) for subT in T.get_tsubs())
to_replace, to_replaceT = None, None
def find_to_replace(t):
nonlocal to_replace, to_replaceT
if (t.is_zero() or t.is_one() or \
(t.is_comb('of_nat', 1) and t.arg.is_binary() and t.arg.dest_binary() >= 2)) and \
has_internalT(t.get_type()):
replT = t.get_type()
if t.is_comb():
t = t.fun
if to_replace is None or replT.size() < to_replaceT.size():
to_replace = t
to_replaceT = replT
elif t.is_const() and has_internalT(t.T):
if to_replace is None or t.T.size() < to_replaceT.size():
to_replace = t
to_replaceT = t.T
elif t.is_abs():
if has_internalT(t.var_T):
if to_replace is None or t.var_T.size() < to_replaceT.size():
to_replace = t
to_replaceT = t.var_T
find_to_replace(t.body)
elif t.is_comb():
find_to_replace(t.fun)
find_to_replace(t.arg)
find_to_replace(t)
recover_const_type(t)
if to_replace is None:
break
to_replace.print_type = True
assert i != 99, "infer_printed_type: infinite loop."
return None
|
1ad880fc92db2e64ba6ea81f7481efa99b0bd044
| 3,647,036
|
def bias_variable(shape):
"""
    Return a bias variable of the given shape.
    :param shape: shape of the bias tensor
    :return: a tf.Variable initialized to zeros
"""
b = tf.Variable(tf.constant(0.0, shape=shape))
return b
|
ff2bb945414508d1dfc1db0b028cf1feeebeb6d8
| 3,647,037
|
def drag_eqn(times,g,r):
"""define scenario and integrate"""
param = np.array([ g, r])
hinit = np.array([0.0,0.0]) # initial values (position and velocity, respectively)
h = odeint(deriv, hinit, times, args = (param,))
return h[:,0], h[:,1]
|
d79150dd894244c11fa882d62da2f33b1173c144
| 3,647,038
|
def virtual_potential_temperature_monc(theta, thref, q_v, q_cl):
"""
Virtual potential temperature.
Derived variable name: th_v_monc
Approximate form as in MONC
Parameters
----------
theta : numpy array or xarray DataArray
Potential Temperature. (K)
thref : numpy array or xarray DataArray
Reference Potential Temperature (usually 1D). (K)
q_v : numpy array or xarray DataArray
specific humidity
q_cl : numpy array or xarray DataArray
specific cloud liquid water content.
Returns
-------
theta_v: numpy array or xarray DataArray
Virtual potential temperature (K)
"""
th_v = theta + thref * (tc.c_virtual * q_v - q_cl)
if type(th_v) is xr.core.dataarray.DataArray:
th_v.name = 'th_v_monc'
return th_v
|
d4c3da0a5f4f2826edce53f610f8ba384845ebb2
| 3,647,039
|
def promote_user(username):
"""Give admin privileges from a normal user."""
user = annotator.credentials.find_one({'username': username})
if user:
if user['admin']:
flash("User {0} is already an administrator".format(username), 'warning')
else:
annotator.credentials.update_one(user, {'$set': {'admin': True}})
flash("User {0} promoted to administrator successfully".format(username), 'info')
else:
flash("Cannot promote unknown user {0} to administrator".format(username), 'warning')
return redirect(url_for('manage_users'))
|
6a938c341f152991741d35dfd1c693743c07f805
| 3,647,040
|
def slide_number_from_xml_file(filename):
"""
Integer slide number from filename
Assumes /path/to/Slidefile/somekindofSlide36.something
"""
return int(filename[filename.rfind("Slide") + 5:filename.rfind(".")])
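# Hedged usage sketch (not part of the original snippet):
if __name__ == "__main__":
    assert slide_number_from_xml_file("/path/to/Slidefile/Slide36.xml") == 36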
|
dcfbc322b30a39041ab15b8496f097a5a5329865
| 3,647,041
|
import io
def massivescan(websites):
"""scan multiple websites / urls"""
# scan each website one by one
vulnerables = []
for website in websites:
io.stdout("scanning {}".format(website))
if scanner.scan(website):
io.stdout("SQL injection vulnerability found")
vulnerables.append(website)
if vulnerables:
return vulnerables
io.stdout("no vulnerable websites found")
return False
|
b2be56bf07d00c8839813d66acd337c75b9823ef
| 3,647,042
|
import re
def is_strong_pass(password):
"""
Verify the strength of 'password'
    Returns True if the password meets all of the criteria below, otherwise False.
A password is considered strong if:
8 characters length or more
1 digit or more
1 symbol or more
1 uppercase letter or more
1 lowercase letter or more
"""
# calculating the length
length_error = len(password) < 8
# searching for digits
digit_error = re.search(r"\d", password) is None
# searching for uppercase
uppercase_error = re.search(r"[A-Z]", password) is None
# searching for lowercase
lowercase_error = re.search(r"[a-z]", password) is None
# searching for symbols
symbol_error = re.search(r"[ !#$@%&'()*+,-./[\\\]^_`{|}~" + r'"]', password) is None
# overall result
password_ok = not (length_error or digit_error or uppercase_error or lowercase_error or symbol_error)
return password_ok
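# Hedged usage sketch (not part of the original snippet):
if __name__ == "__main__":
    assert is_strong_pass("Str0ng!Pass") is True
    assert is_strong_pass("weakpass") is False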
|
bfd1832951ba3059d8c542fa0b9d708a2416a4d4
| 3,647,043
|
def plot_config(config, settings=None):
"""
plot_config: obj -> obj
---------------------------------------------------------------
Sets the defaults for a custom experiment plot configuration object
from configobj.
The defaults are only set if the setting does not exist (thus,
it is implied that the user needs a default).
Required Parameters
-------------------
* config: obj
The configobj instance object to scan if defaults
have been customized.
Optional Parameters
-------------------
* settings: None
The global settings to use if it exists, otherwise
use the defaults.
Returns
-------
* config: obj
The configobj instance after defaults have been set
if applicable.
---------------------------------------------------------------
"""
config = _global_config(config, settings)
config['plot_style'] = 'whitegrid' if 'plot_style' not in config else config['plot_style']
config['plot_color'] = 'gray' if 'plot_color' not in config else config['plot_color']
config['plot_dpi'] = 300 if 'plot_dpi' not in config else config['plot_dpi']
config['plot_ext'] = '.png' if 'plot_ext' not in config else config['plot_ext']
return config
|
3b17e97c68bcec31856cb0dc4d7f3db4280a748f
| 3,647,044
|
import os
def load_spectrogram(spectrogram_path):
"""Load a cante100 dataset spectrogram file.
Args:
spectrogram_path (str): path to audio file
Returns:
np.array: spectrogram
"""
if not os.path.exists(spectrogram_path):
raise IOError("spectrogram_path {} does not exist".format(spectrogram_path))
parsed_spectrogram = np.genfromtxt(spectrogram_path, delimiter=' ')
    spectrogram = parsed_spectrogram.astype(float)  # np.float is removed in NumPy >= 1.24
return spectrogram
|
34c0db598558886ee48518a464dc90242b82d2f8
| 3,647,045
|
def evaluate_fN(model, NHI):
""" Evaluate an f(N,X) model at a set of NHI values
Parameters
----------
NHI : array
log NHI values
Returns
-------
log_fN : array
f(NHI,X) values
"""
# Evaluate without z dependence
log_fNX = model.__call__(NHI)
return log_fNX
|
e952a29fdf5864b26dc534140b2ccfb0b59fe24b
| 3,647,046
|
def generate_volume_data(img_data):
"""
Generate volume data from img_data.
    :param img_data: a NIfTI get_data() array; img_data[0][x][y][z] holds the value (e.g. confidence) of voxel (x, y, z)
:return: vtkImageData object which stores volume render object.
"""
dims = [148, 190, 160] # size of input data. Temporarily only support test file.
#TODO: Modify the code to handle more files.
image = vtk.vtkImageData()
image.SetDimensions(dims[0] - 2 , dims[1] - 2 , dims[2] - 2 )
image.SetSpacing(1, 1, 1) # set spacing
image.SetOrigin(0, 0, 0)
image.SetExtent(0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
image.AllocateScalars(vtk.VTK_UNSIGNED_SHORT, 1)
for z in range(0, dims[2]-1):
for y in range(0, dims[1]-1 ):
for x in range(0, dims[0]-1 ):
scalardata = img_data[0][x][y][z] # set confidence as each voxel's scalardata
image.SetScalarComponentFromFloat(x, y, z, 0, scalardata)
return image
|
b726d1484944a3f827cae836ca30cf8c7e81d493
| 3,647,047
|
def pipe(bill_texts_df):
"""
soup = bs(text, 'html.parser')
raw_text = extractRawText(soup)
clean_text = cleanRawText(raw_text)
metadata = extract_metadata(soup)
"""
bill_texts_df['soup'] = \
bill_texts_df['html'].apply(lambda x: bs(x, 'html.parser'))
bill_texts_df['content'] = \
bill_texts_df['soup'].apply(lambda x: extractRawText(x.body))
bill_texts_df['long_title'] = \
bill_texts_df['soup'].apply(lambda x: extractLongTitle(x.body))
bill_texts_df['table_info'] = \
bill_texts_df['soup'].apply(lambda x: extractTableContent(x.body))
return None
|
73a8a850fa15f8ad33f9f823f9b2b4d6f808826b
| 3,647,048
|
def _as_static(data, fs):
"""Get data into the Pyglet audio format."""
fs = int(fs)
if data.ndim not in (1, 2):
raise ValueError('Data must have one or two dimensions')
n_ch = data.shape[0] if data.ndim == 2 else 1
audio_format = AudioFormat(channels=n_ch, sample_size=16,
sample_rate=fs)
data = data.T.ravel('C')
data[data < -1] = -1
data[data > 1] = 1
    data = (data * (2 ** 15)).astype('int16').tobytes()  # tostring() is deprecated in NumPy
return StaticMemorySourceFixed(data, audio_format)
|
b76d4c49107f8b9679e975bd2ce114314289d181
| 3,647,049
|
def preprocess_data(cubes, time_slice: dict = None):
"""Regrid the data to the first cube and optional time-slicing."""
# Increase TEST_REVISION anytime you make changes to this function.
if time_slice:
cubes = [extract_time(cube, **time_slice) for cube in cubes]
first_cube = cubes[0]
# regrid to first cube
regrid_kwargs = {
'grid': first_cube,
'scheme': iris.analysis.Nearest(),
}
cubes = [cube.regrid(**regrid_kwargs) for cube in cubes]
return cubes
|
82e851bda39a4ab7716c7b9cd6038743961d9faf
| 3,647,050
|
import base64
from Crypto.Cipher import AES, DES3
def password_to_str(password):
"""
加密
:param password:
:return:
"""
def add_to_16(password):
while len(password) % 16 != 0:
password += '\0'
        return str.encode(password)  # return bytes
    key = 'saierwangluo'  # secret key
    aes = AES.new(add_to_16(key), AES.MODE_ECB)  # initialize the AES cipher
    des3 = DES3.new(add_to_16(key), DES3.MODE_ECB)  # initialize the 3DES cipher
    # AES encryption
encrypted_text = str(
base64.encodebytes(
aes.encrypt(add_to_16(password))), encoding='utf8'
).replace('\n', '')
des_encrypted_text = str(
base64.encodebytes(des3.encrypt(add_to_16(encrypted_text))), encoding='utf8'
    ).replace('\n', '')  # 3DES encryption
    # return the encrypted data
return des_encrypted_text
|
60a6d361d6de3c41d2a27cd24312006920ad1013
| 3,647,051
|
from src.Emails.checker.mailru import checker
import re
import requests
def email_checker_mailru(request: Request, email: str):
"""
This API check email from mail.ru<br>
<pre>
:return: JSON<br>
</pre>
Example:<br>
<br>
<code>
https://server1.majhcc.xyz/api/email/checker/mailru?email=oman4omani@mail.ru
"""
# regex mail.ru
    if re.match(r'^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@mail\.ru', email):  # '+' so the local part can be longer than one character
try:
result = checker(email)
            if result is None:
                # checker() gave no definite answer
                return {
                    'status': 'error please try again or contact us ==> instagram: @majhcc'
                }
            elif result:
                return {
                    'status': 'success',
                    'available': True
                }
            else:
                return {
                    'status': 'success',
                    'available': False
                }
except Exception as e:
data = {
'content': f'Check email from mail.ru api Error: ***{str(e)}***'
}
requests.post(WEBHOOKURL, data=data)
return {
'status': 'error please try again or contact us ==> instagram: @majhcc'}
else:
return {
'status': 'error',
'result': 'Invalid email'
}
|
2835439d3c7781efa0c244c881f42a404a8d3cad
| 3,647,052
|
from typing import Callable
def guild_only() -> Callable:
"""A :func:`.check` that indicates this command must only be used in a
guild context only. Basically, no private messages are allowed when
using the command.
This check raises a special exception, :exc:`.NoPrivateMessage`
that is inherited from :exc:`.CheckFailure`.
"""
def predicate(ctx: InteractionContext) -> bool:
if ctx.guild is None:
raise NoPrivateMessage()
return True
return check(predicate)
|
40307b2a8672180b2a3532380f11b2701bcf0dd8
| 3,647,053
|
from typing import Union
from typing import List
from typing import Callable
from typing import Any
from typing import Sequence
def make_lvis_metrics(
save_folder=None,
filename_prefix="model_output",
iou_types: Union[str, List[str]] = "bbox",
summarize_to_stdout: bool = True,
evaluator_factory: Callable[
[Any, List[str]], DetectionEvaluator
] = LvisEvaluator,
gt_api_def: Sequence[
SupportedDatasetApiDef
] = DEFAULT_SUPPROTED_DETECTION_DATASETS,
):
"""
Returns an instance of :class:`DetectionMetrics` initialized for the LVIS
dataset.
:param save_folder: path to the folder where to write model output
files. Defaults to None, which means that the model output of
test instances will not be stored.
:param filename_prefix: prefix common to all model outputs files.
Ignored if `save_folder` is None. Defaults to "model_output"
:param iou_types: list of (or a single string) strings describing
the iou types to use when computing metrics.
Defaults to "bbox". Valid values are "bbox" and "segm".
:param summarize_to_stdout: if True, a summary of evaluation metrics
will be printed to stdout (as a table) using the Lvis API.
Defaults to True.
:param evaluator_factory: Defaults to :class:`LvisEvaluator` constructor.
:param gt_api_def: Defaults to the list of supported datasets (LVIS is
supported in Avalanche through class:`LvisDataset`).
:return: A metric plugin that can compute metrics on the LVIS dataset.
"""
return DetectionMetrics(
evaluator_factory=evaluator_factory,
gt_api_def=gt_api_def,
save_folder=save_folder,
filename_prefix=filename_prefix,
iou_types=iou_types,
summarize_to_stdout=summarize_to_stdout,
)
|
cbb3df8d8e9daa7976a7be7d6c0588e943aecd5e
| 3,647,054
|
def _calculate_cos_loop(graph, threebody_cutoff=4.0):
"""
Calculate the cosine theta of triplets using loops
Args:
graph: List
Returns: a list of cosine theta values
"""
pair_vector = get_pair_vector_from_graph(graph)
_, _, n_sites = tf.unique_with_counts(graph[Index.BOND_ATOM_INDICES][:, 0])
start_index = 0
cos = []
for n_site in n_sites:
for i in range(n_site):
for j in range(n_site):
if i == j:
continue
vi = pair_vector[i + start_index].numpy()
vj = pair_vector[j + start_index].numpy()
di = np.linalg.norm(vi)
dj = np.linalg.norm(vj)
if (di <= threebody_cutoff) and (dj <= threebody_cutoff):
cos.append(vi.dot(vj) / np.linalg.norm(vi) / np.linalg.norm(vj))
start_index += n_site
return cos
|
3a3283a67c743b2bb7f7a9627e6847dcfc286276
| 3,647,055
|
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginCopyMethod_Firefox()
|
26df11d662b3d4f98a294df9c61841c1ab76e8fc
| 3,647,056
|
import logging
def temp_url_page(rid):
"""
Temporary page where receipts are stored. The user, which visits it first, get the receipt.
:param rid: (str) receipt id (user is assigned to receipt with this id)
"""
if not user_handler.assign_rid_user(rid, flask.session['username']):
        logging.warning('Trying to steal receipt! {ip} has visited page: {url}! Cancelling request!'.
                        format(ip=flask.request.remote_addr, url=flask.request.url))
flask.abort(400)
return
return flask.redirect(flask.url_for('dashboard_page'))
|
e5ebfe4602e427b4d96cdf1c0298057a5b472052
| 3,647,057
|
def extract_dependencies(content):
"""
Extract the dependencies from the CMake code.
The `find_package()` and `pkg_check_modules` calls must be on a single line
and the first argument must be a literal string for this function to be
able to extract the dependency name.
:param str content: The CMake source code
:returns: The dependencies name
:rtype: list
"""
return \
extract_find_package_calls(content) | \
_extract_pkg_config_calls(content)
|
d9f114695cb3622f4a8dbc23db3a97ed53b164ad
| 3,647,058
|
def _block(x, out_channels, name, conv=conv2d, kernel=(3, 3), strides=(2, 2), dilations=(1, 1), update_collection=None,
act=tf.nn.leaky_relu, pooling='avg', padding='SAME', batch_norm=False):
"""Builds the residual blocks used in the discriminator in GAN.
Args:
x: The 4D input vector.
out_channels: Number of features in the output layer.
name: The variable scope name for the block.
conv: Convolution function. Options conv2d or snconv2d
kernel: The height and width of the convolution kernel filter (Default value = (3, 3))
strides: Rate of convolution strides (Default value = (2, 2))
dilations: Rate of convolution dilation (Default value = (1, 1))
update_collection: The update collections used in the in the spectral_normed_weight. (Default value = None)
downsample: If True, downsample the spatial size the input tensor .
If False, the spatial size of the input tensor is unchanged. (Default value = True)
act: The activation function used in the block. (Default value = tf.nn.relu)
pooling: Strategy of pooling. Default: average pooling. Otherwise, no pooling, just using strides
padding: Padding type (Default value = 'SAME')
batch_norm: A flag that determines if batch norm should be used (Default value = False)
Returns:
A tensor representing the output of the operation.
"""
with tf.variable_scope(name):
if batch_norm:
bn0 = BatchNorm(name='bn_0')
bn1 = BatchNorm(name='bn_1')
input_channels = x.shape.as_list()[-1]
x_0 = x
x = conv(x, out_channels, kernel, dilations=dilations, name='conv1', padding=padding)
if batch_norm:
x = bn0(x)
x = act(x, name="before_downsampling")
x = down_sampling(x, conv, pooling, out_channels, kernel, strides, update_collection, 'conv2', padding)
if batch_norm:
x = bn1(x)
if strides[0] > 1 or strides[1] > 1 or input_channels != out_channels:
x_0 = down_sampling(x_0, conv, pooling, out_channels, kernel, strides, update_collection, 'conv3',
padding)
out = x_0 + x # No RELU: http://torch.ch/blog/2016/02/04/resnets.html
return out
|
21851730e1326b85023d88661da13020c37aa723
| 3,647,059
|
import numpy as np
def createLaplaceGaussianKernel(sigma, size):
    """Build a Laplacian-of-Gaussian (LoG) convolution kernel
    Args:
        sigma ([float]): standard deviation of the Gaussian
        size ([tuple]): size of the kernel, odd numbers
    Returns:
        [ndarray]: the Laplacian-of-Gaussian kernel
"""
H, W = size
r, c = np.mgrid[0:H:1, 0:W:1]
r = r - (H - 1) / 2
c = c - (W - 1) / 2
sigma2 = pow(sigma, 2.0)
norm2 = np.power(r, 2.0) + np.power(c, 2.0)
LoGKernel = (norm2 / sigma2 - 2)*np.exp(-norm2 / (2 * sigma2))
return LoGKernel
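# Hedged usage sketch (not part of the original snippet): a 5x5 LoG kernel with sigma=1.
if __name__ == "__main__":
    log5 = createLaplaceGaussianKernel(sigma=1.0, size=(5, 5))
    print(log5.shape)   # (5, 5)
    print(log5[2, 2])   # centre value: (0/1 - 2) * exp(0) = -2.0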
|
aae788ba324a243691391a61b02e6a5f1b358c4e
| 3,647,060
|
import os
def is_file_type(fpath, filename, ext_list):
"""Returns true if file is valid, not hidden, and has extension of given type"""
file_parts = filename.split('.')
# invalid file
if not os.path.isfile(os.path.join(fpath, filename)):
return False
# hidden file
elif filename.startswith('.'):
return False
# no extension
elif len(file_parts) < 2:
return False
# check file type
extension = file_parts[-1].lower()
if extension in ext_list:
return True
else:
return False
|
52213f023313e4edb5628fcebf47cb94bc2cfcbe
| 3,647,061
|
def xp_rirgen2(room, source_loc, mic_loc, c=340, fs=16000, t60=0.5,
beta=None, nsamples=None, htw=None, hpfilt=True, method=1):
"""Generates room impulse responses corresponding to each source-microphone pair placed in a room.
Args:
room (numpy/cupy array) = room dimensions in meters, shape: (3, 1)
source_loc (numpy/cupy array) = source locations in meters, shape: (3, nsrc)
mic_loc (numpy/cupy array) = microphone locations in meters, shape: (3, nmic)
kwargs:
c (float) = speed of sound in meters/second (default: 340)
fs (float) = sampling rate in Hz (default: 16000)
t60 (float) = t60 or rt60 in seconds or None to use beta parameters (default: 0.5)
beta (numpy/cupy array) = beta parameters of reflections for each side, shape (6,1) (default: None)
nsamples (int) = number of output samples (default: auto from t60)
htw (int) = half size in samples of the time window used for sinc function interpolation (default automatic)
hpfilt (bool) = use post-generation highpass filter or not (default True)
method (int) = 1 or 2, 2 is not tested thoroughly and is very slow, so use 1 always (default 1)
Returns:
room impulse responses in time-domain of shape (nsrc, nmic, nsamples)
Notes:
1. If input arrays are cupy arrays (on GPU), the code runs with cupy, otherwise with numpy
2. if you do not want to install cupy or not interested in GPU processing,
remove line "import cupy" and replace "xp=cupy.get..." with "xp=np"
.. seealso:: :func:`pyrirgen.RirGenerator`
.. seealso:: :url:https://github.com/ehabets/RIR-Generator/blob/master/rir_generator.cpp
>>> ### DOCTEST ###
>>> room = np.array([4,7,3]).reshape(3,1)
>>> source_loc = np.random.uniform(0,1,(3,2)) * room
>>> mic_loc = np.random.uniform(0,1,(3,4)) * room
>>> t60=0.3
>>> rirs_np = xp_rirgen(room, source_loc, mic_loc, t60=t60)
>>> #import matplotlib.pyplot as plt
>>> #plt.plot(rirs_np[0,0,:] , label='rir for src1 and mic1')
>>> croom = cupy.array(room)
>>> csource_loc = cupy.array(source_loc)
>>> cmic_loc = cupy.array(mic_loc)
>>> rirs_cp = xp_rirgen(croom, csource_loc, cmic_loc, t60=t60)
>>> cupy.testing.assert_allclose(rirs_np, cupy.asnumpy(rirs_cp), atol=1e-5, rtol=1e-5)
>>> beta = np.random.uniform(0.1, 0.9, size=6)
>>> rirs_np = xp_rirgen(room, source_loc, mic_loc, beta=beta, t60=None)
>>> cbeta = cupy.array(beta)
>>> rirs_cp = xp_rirgen(croom, csource_loc, cmic_loc, beta=cbeta, t60=None)
>>> cupy.testing.assert_allclose(rirs_np, cupy.asnumpy(rirs_cp), atol=1e-5, rtol=1e-5)
"""
    # Use cupy when the inputs are on GPU, otherwise fall back to numpy (see note 2 in the docstring).
    try:
        import cupy
        xp = cupy.get_array_module(room, source_loc, mic_loc, beta)
    except ImportError:
        xp = np
if beta is None and t60 is None:
raise Exception('Either t60 or beta array must be provided')
elif beta is None:
V = xp.prod(room)
S = 2 * (room[0] * room[2] + room[1] * room[2] + room[0] * room[1])
alpha = 24 * V * xp.log(10) / (c * S * t60)
if alpha < 1:
beta = xp.ones(6, ) * xp.sqrt(1 - alpha)
else:
raise Exception('t60 value {} too small for the room'.format(t60))
else:
if xp.max(beta) >= 1.0 or xp.min(beta) <= 0.0:
raise Exception('beta array values should be in the interval (0,1).')
if t60 is not None:
print('Overwriting provided t60 value using provided beta array')
alpha = 1 - beta**2
V = xp.prod(room)
Se = 2 * (room[1] * room[2] * (alpha[0] + alpha[1]) + room[0] * room[2] * (alpha[2] + alpha[3]) + room[0] * room[1] * (alpha[4] + alpha[5]))
        t60 = 24 * xp.log(10.0) * V / (c * Se)
if htw is None:
htw = np.minimum(32, int(xp.min(room) / 10 / c * fs))
tw_idx = xp.arange(0, 2 * htw).reshape(2 * htw, 1)
try:
assert(xp.all(room.T - mic_loc.T > 0) and xp.all(room.T - source_loc.T > 0))
assert(xp.all(mic_loc.T > 0) and xp.all(source_loc.T > 0))
except:
raise Exception('Room dimensions and source and mic locations are not compatible.')
cTs = c / fs
# convert distances in meters to time-delays in samples
room = room / cTs
mic_loc = mic_loc / cTs
src_loc = source_loc / cTs
nmic = mic_loc.shape[-1]
nsrc = source_loc.shape[-1]
if nsamples is None:
nsamples = int(fs * t60)
def get_reflection_candidates():
nxrefl = int(nsamples / (room[0]))
nyrefl = int(nsamples / (room[1]))
nzrefl = int(nsamples / (room[2]))
xro = xp.arange(-nxrefl, nxrefl + 1)
yro = xp.arange(-nyrefl, nyrefl + 1)
zro = xp.arange(-nzrefl, nzrefl + 1)
xr = xro.reshape(2 * nxrefl + 1, 1, 1)
yr = yro.reshape(1, 2 * nyrefl + 1, 1)
zr = zro.reshape(1, 1, 2 * nzrefl + 1)
RoughDelays = xp.sqrt((2 * xr * room[0]) ** 2 + (2 * yr * room[1]) ** 2 + (2 * zr * room[2]) ** 2)
RoughGains = (beta[0] * beta[1]) ** xp.abs(xr) * (beta[2] * beta[3]) ** xp.abs(yr) * (beta[4] * beta[5]) ** xp.abs(zr) / (
RoughDelays + 0.5 / c * fs) # assume src-mic distance at least .5 metres
maxgain = xp.max(RoughGains)
vreflidx = xp.vstack(xp.nonzero(xp.logical_and(RoughDelays < nsamples, RoughGains > maxgain / 1.0e4)))
nrefl = vreflidx.shape[-1]
reflidx = xp.arange(nrefl).reshape(1, 1, nrefl, 1, 1, 1)
xrefl = xro[vreflidx[..., reflidx][0]]
yrefl = yro[vreflidx[..., reflidx][1]]
zrefl = zro[vreflidx[..., reflidx][2]]
return xrefl, yrefl, zrefl
xrefl, yrefl, zrefl = get_reflection_candidates()
def get_delays_and_gains():
xside = xp.arange(0, 2).reshape(1, 1, 1, 2, 1, 1)
yside = xp.arange(0, 2).reshape(1, 1, 1, 1, 2, 1)
zside = xp.arange(0, 2).reshape(1, 1, 1, 1, 1, 2)
imic = xp.arange(nmic).reshape(1, nmic, 1, 1, 1, 1)
isrc = xp.arange(nsrc).reshape(nsrc, 1, 1, 1, 1, 1)
        Delays = xp.sqrt(
            (2 * xrefl * room[0] - mic_loc[0, imic] + (1 - 2 * xside) * src_loc[0, isrc]) ** 2
            + (2 * yrefl * room[1] - mic_loc[1, imic] + (1 - 2 * yside) * src_loc[1, isrc]) ** 2
            + (2 * zrefl * room[2] - mic_loc[2, imic] + (1 - 2 * zside) * src_loc[2, isrc]) ** 2)
Refl_x = beta[0] ** (xp.abs(xrefl - xside)) * beta[1] ** (xp.abs(xrefl))
Refl_y = beta[2] ** (xp.abs(yrefl - yside)) * beta[3] ** (xp.abs(yrefl))
Refl_z = beta[4] ** (xp.abs(zrefl - zside)) * beta[5] ** (xp.abs(zrefl))
Gains = Refl_x * Refl_y * Refl_z / (4 * np.pi * Delays * cTs)
# Gains[Delays > nsamples] = 0.0
return Delays, Gains
Delays, Gains = get_delays_and_gains()
rirs = xp.zeros((nsrc, nmic, nsamples), dtype=np.float32)
for src in xp.arange(nsrc):
for mic in xp.arange(nmic):
dnow = Delays[src, mic, ...].flatten()
gnow = Gains[src, mic, ...].flatten()
if method == 1:
gnow = gnow[dnow < nsamples - htw - 2]
dnow = dnow[dnow < nsamples - htw - 2]
dnow_floor = xp.floor(dnow)
dnow_dist = dnow - dnow_floor
dnow_floor = dnow_floor.reshape(1, dnow.shape[0])
dnow_dist = dnow_dist.reshape(1, dnow.shape[0])
gnow = gnow.reshape(1, dnow.shape[0])
dnow_ext = dnow_floor + tw_idx - htw + 1
garg = np.pi * (-dnow_dist + 1 + tw_idx - htw)
gnow_ext = gnow * 0.5 * (1.0 - xp.cos(np.pi + garg / htw)) * xp.where(garg == 0.0, 1.0, xp.sin(garg) / garg)
dnow = dnow_ext.flatten().astype(np.uint32)
gnow = gnow_ext.flatten().astype(np.float32)
rirnow = xp.zeros((nsamples,), dtype=np.float32)
if xp == np:
np.add.at(rirnow, dnow, gnow)
else:
xp.scatter_add(rirnow, dnow, gnow)
rirs[src, mic, ...] = rirnow
elif method == 2: ## this is too slow and may not be accurate as well
gnow = gnow[dnow < nsamples]
dnow = dnow[dnow < nsamples]
frange = xp.arange(0, 0.5 + 0.5 / nsamples, 1.0 / nsamples)
rirfft = xp.zeros(frange.shape, dtype=np.complex128)
for i in range(len(frange)):
rirfft[i] = xp.sum(gnow * xp.exp(-1j * 2 * np.pi * frange[i] * dnow))
rirs[src, mic, :] = xp.real(xp.fft.irfft(rirfft)).astype(dtype=np.float32)
if hpfilt:
        rirs[:, :, 1:-1] += -0.5 * rirs[:, :, 2:] - 0.5 * rirs[:, :, :-2]
return rirs
|
2528cf725df14febb20c5634fbe9acbeadfd5a46
| 3,647,062
|
import warnings
def mean_bias_removal(hindcast, alignment, cross_validate=True, **metric_kwargs):
"""Calc and remove bias from py:class:`~climpred.classes.HindcastEnsemble`.
Args:
hindcast (HindcastEnsemble): hindcast.
alignment (str): which inits or verification times should be aligned?
- maximize/None: maximize the degrees of freedom by slicing ``hind`` and
``verif`` to a common time frame at each lead.
- same_inits: slice to a common init frame prior to computing
metric. This philosophy follows the thought that each lead should be
based on the same set of initializations.
- same_verif: slice to a common/consistent verification time frame prior
to computing metric. This philosophy follows the thought that each lead
should be based on the same set of verification dates.
cross_validate (bool): Use properly defined mean bias removal function. This
excludes the given initialization from the bias calculation. With False,
include the given initialization in the calculation, which is much faster
but yields similar skill with a large N of initializations.
Defaults to True.
Returns:
HindcastEnsemble: bias removed hindcast.
"""
if hindcast.get_initialized().lead.attrs["units"] != "years":
warnings.warn(
"HindcastEnsemble.remove_bias() is still experimental and is only tested "
"for annual leads. Please consider contributing to "
"https://github.com/pangeo-data/climpred/issues/605"
)
def bias_func(a, b, **kwargs):
return a - b
bias_metric = Metric("bias", bias_func, True, False, 1)
# calculate bias lead-time dependent
bias = hindcast.verify(
metric=bias_metric,
comparison="e2o",
dim=[], # not used by bias func, therefore best to add [] here
alignment=alignment,
**metric_kwargs,
).squeeze()
# how to remove bias
if cross_validate: # more correct
mean_bias_func = _mean_bias_removal_cross_validate
else: # faster
mean_bias_func = _mean_bias_removal_quick
bias_removed_hind = mean_bias_func(hindcast._datasets["initialized"], bias, "init")
bias_removed_hind = bias_removed_hind.squeeze()
# remove groupby label from coords
for c in ["dayofyear", "skill", "week", "month"]:
if c in bias_removed_hind.coords and c not in bias_removed_hind.dims:
del bias_removed_hind.coords[c]
    # replace raw with the bias-removed initialized dataset
hindcast_bias_removed = hindcast.copy()
hindcast_bias_removed._datasets["initialized"] = bias_removed_hind
return hindcast_bias_removed
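# A minimal usage sketch, assuming `hindcast` is an already constructed
# climpred HindcastEnsemble with annual leads (hypothetical object):
#
#     hindcast_bias_free = mean_bias_removal(hindcast, alignment="same_verif",
#                                            cross_validate=True)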
|
01155462155d9f718fa2a12053297903d47b6661
| 3,647,063
|
import requests
def request_sudoku_valid(sudoku: str) -> bool:
    """Query the sudoku provider service and return whether the given board is valid."""
    is_valid = False
provider_request = requests.get(f"{base_url}/valid/{sudoku}")
if provider_request.status_code == 200:
request_data = provider_request.json()
is_valid = request_data["result"]
# TODO: else raise exception
return is_valid
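# Usage sketch, assuming `base_url` is a module-level constant pointing at a
# hypothetical sudoku validation service:
#
#     base_url = "http://localhost:5000"  # hypothetical endpoint
#     ok = request_sudoku_valid("530070000600195000098000060...")  # 81-char board string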
|
274a6f3e617273d1a1d81777788865337d4d36ae
| 3,647,064
|
def index():
"""
vista principal
"""
return "<i>API RestFull PARCES Version 0.1</i>"
|
8b8b963f75395df665bcf0283528c9641b3ea20e
| 3,647,065
|
def tag(dicts, key, value):
"""Adds the key value to each dict in the sequence"""
for d in dicts:
d[key] = value
return dicts
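# Quick example: tag() adds the key/value pair in place and returns the same list.
rows = [{"id": 1}, {"id": 2}]
assert tag(rows, "source", "api") == [{"id": 1, "source": "api"},
                                      {"id": 2, "source": "api"}]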
|
ffcfda13845fb8b522e50211184104a11da50398
| 3,647,066
|
def openpairshelf(filename, flag='c', protocol=None, writeback=False):
"""Returns a ProteinPairDB object, with similar functionality to shelve.open()"""
return ProteinPairDB(filename, flag, protocol, writeback)
|
886a474aa67f729461995fe5427d5f68b9db9fe0
| 3,647,067
|
def createUser(emailid, password, contact_no, firstname, lastname, category, address, description, company_url, image_url, con=None, cur=None, db=None):
"""
Tries to create a new user with the given data.
Returns:
        - dict: dict object containing all user data, if the query was successful
        - False: if the query was unsuccessful
"""
sql = """Insert into users(
emailid,
password,
firstname,
lastname,
contact_no,
category,
address,
description,
company_url,
image_url
) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
db(sql, (emailid,
password,
firstname,
lastname,
contact_no,
category,
address,
description,
company_url,
image_url))
con.commit()
    # fetch the newly created user record
user = getUserUsingEmail(emailid)
return user or False
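# A hedged usage sketch with hypothetical connection objects; `db` is assumed to be
# a helper that executes a parameterised query, `con` a DB-API connection, and
# getUserUsingEmail() is defined elsewhere in the project:
#
#     user = createUser("jane@example.com", hashed_pw, "555-0100", "Jane", "Doe",
#                       "supplier", "1 Main St", "Wholesale retailer",
#                       "https://example.com", "https://example.com/logo.png",
#                       con=con, db=db)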
|
05dc71db991e126d43fd9ddd044f1cf65f3e97c1
| 3,647,068
|
import pathlib
import subprocess
import json
import re
def sensor_pull_storage(appname, accesskey, timestring, *, data_folder=None, ttn_version=3):
"""
Pull data from TTN via the TTN storage API.
appname is the name of the TTN app
accesskey is the full accesskey from ttn. For TTN V3, this is is the
secret that is output when a key is created. For TTN V2, this is
the string from the console, starting with 'ttn-acount-v2.'
timestring indicates amount of data needed, e.g. '100h'.
ttn_version should be 2 or 3; 3 is default.
If data_folder is supplied, it is a string or a Path; it is taken as a directory,
and the name "sensors_lastperiod.json" is appended to form an output file name, and
the data is written to the resulting file, replacing any previous contents.
Otherwise, the data is returned as a Python array (for V3) or a string (for V2).
We've not really tested V2 extensively.
"""
args = [ "curl" ]
if ttn_version == 2:
args += [
"-X", "GET",
"--header", "Accept: application/json",
"--header", f"Authorization: key {accesskey}",
f"https://{appname}.data.thethingsnetwork.org/api/v2/query?last={timestring}"
]
elif ttn_version == 3:
args += [
"-G", f"https://nam1.cloud.thethings.network/api/v3/as/applications/{appname}/packages/storage/uplink_message",
"--header", f"Authorization: Bearer {accesskey}",
"--header", "Accept: text/event-stream",
"-d", f"last={timestring}",
"-d", "field_mask=up.uplink_message.decoded_payload",
]
else:
        raise FetchError(f"Illegal ttn_version {ttn_version} (must be 2 or 3)")
    # if the user supplied a data_folder, then tack "-o <output file>" onto the args;
    # `args += list2` appends each element of list2 to args, and pathlib.Path builds
    # the output path in a platform-independent way.
if data_folder != None:
args += [ "-o", pathlib.Path(data_folder, "sensors_lastperiod.json") ]
result = subprocess.run(
args, shell=False, check=True, capture_output=True
)
sresult = result.stdout
if ttn_version == 3:
return list(map(json.loads, re.sub(r'\n+', '\n', sresult.decode()).splitlines()))
else:
return sresult
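# A hedged usage sketch with placeholder credentials (the app name, access key and
# time window below are hypothetical):
#
#     records = sensor_pull_storage("my-ttn-app", "NNSXS.EXAMPLEKEY", "24h")
#     # or write the raw JSON to a folder instead of working with the return value:
#     sensor_pull_storage("my-ttn-app", "NNSXS.EXAMPLEKEY", "24h", data_folder="data")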
|
704a039d23443d4ec45968596ec948237e9a2c29
| 3,647,069
|
async def discordView(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
"""
Default url: /discord/view/{guild_id:\d+}
"""
PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
if not PhaazeDiscord:
return await cls.Tree.errors.notAllowed(cls, WebRequest, msg="Discord module is not active")
guild_id:str = WebRequest.match_info.get("guild_id", "")
Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))
if not Guild:
return await cls.Tree.Discord.discordinvite.discordInvite(WebRequest, msg=f"Phaaze is not on this Server", guild_id=guild_id)
ViewPage:HTMLFormatter = HTMLFormatter("Platforms/Web/Content/Html/Discord/view.html")
ViewPage.replace(
guild_id=Guild.id,
guild_icon_url=Guild.icon_url,
guild_name=Guild.name
)
site:str = cls.HTMLRoot.replace(
replace_empty=True,
title="Phaaze | Discord - View",
header=getNavbar(active="discord"),
main=ViewPage
)
return cls.response(
body=site,
status=200,
content_type='text/html'
)
|
76f222bdd5164c23c95803d47fc1af48d89192e2
| 3,647,070
|
def update_max_braking_decel(vehicle, mbd):
"""
Updates the max braking decel of the vehicle
:param vehicle: vehicle
:param mbd: new max braking decel
:type vehicle: VehicleProfile
:return: Updated vehicle
"""
return vehicle.update_max_braking_decel(mbd)
|
dea3bf14ca14363246539fd81cf853cd2c0ad980
| 3,647,071
|
from scipy.spatial.distance import pdist, squareform
def get_outlier_removal_mask(xcoords, ycoords, nth_neighbor=10, quantile=.9):
"""
Parameters
----------
xcoords :
ycoords :
nth_neighbor :
(Default value = 10)
quantile :
(Default value = .9)
Returns
-------
"""
D = squareform(pdist(np.vstack((xcoords, ycoords)).T))
    # distance from each point to its nth-closest point (row 0 of the sorted
    # distance matrix is the zero self-distance)
    distances = np.sort(D, axis=0)[nth_neighbor - 1, :]
return distances <= np.quantile(distances, quantile)
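# Quick example (np assumed imported, as in the function above): a far-away point
# gets a large nth-neighbour distance and is masked out, the clustered points are kept.
rng = np.random.default_rng(0)
xs = np.append(rng.normal(0, 1, 99), 100.0)   # last point is an obvious outlier
ys = np.append(rng.normal(0, 1, 99), 100.0)
mask = get_outlier_removal_mask(xs, ys, nth_neighbor=10, quantile=.9)
print(mask[-1], mask.sum())   # expected: False for the outlier, roughly 90 points kept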
|
8d01088401405613696ced2dbbd9c03940417f10
| 3,647,072
|
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
    if a.shape != b.shape or a.shape[-1] != 1:
        raise ValueError("a and b must share the same (N, m, 1) shape")
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
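# Quick check: _kp matches np.kron applied slice by slice.
a = np.arange(6, dtype=float).reshape(2, 3, 1)
b = np.arange(6, 12, dtype=float).reshape(2, 3, 1)
out = _kp(a, b)
assert out.shape == (2, 9, 1)
assert np.allclose(out[0], np.kron(a[0], b[0]))
assert np.allclose(out[1], np.kron(a[1], b[1]))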
|
b133557d88deac2d9357731d820de0522521d6f3
| 3,647,073
|
def strategy(history, alivePlayers, whoami, memory):
"""
history contains all previous rounds (key : id of player (shooter), value : id of player (target))
alivePlayers is a list of all player ids
whoami is your own id (to not kill yourself by mistake)
memory is None by default and transferred over (if you set it to 1, it will be 1 in the next round)
memory is NOT shared between games (subject to changes)
"""
# Your code would be here but this strategy is dumb...
"""
You must return an id of a player (if not : you shoot in the air)
Memory must be set to something but can be anything (None included )
"""
return alivePlayers[0], None
|
f211a0961269808d9a7b0a08758273d4a03b9136
| 3,647,074
|
def parse_fn(serialized_example: bytes) -> FeaturesType:
"""Parses and converts Tensors for this module's Features.
This casts the audio_raw_pcm16 feature to float32 and scales it into the range
[-1.0, 1.0].
Args:
serialized_example: A serialized tf.train.ExampleProto with the features
dict keys declared in the :py:class:Features enum.
Returns:
Tensor-valued dict of features. The keys are those declared in the
:py:class:Features enum.
"""
features = tf.io.parse_single_example(
serialized_example, {f.value.name: f.value.spec for f in Features})
audio_key: str = Features.AUDIO.value.name
features[audio_key] = tf.cast(tf.io.decode_raw(features[audio_key], tf.int16),
tf.float32) / np.iinfo(np.int16).max
return features
|
54e841987986027dc6d4d989fe6442ceecd022b8
| 3,647,075
|
import click
def cli(ctx: click.Context) -> int:
"""
Method used to declare root CLI command through decorators.
"""
return 0
|
be5016c5c38f435b8a213a6ce39b5571aee809f1
| 3,647,076
|
def parse_clock(line):
"""Parse clock information"""
search = parse(REGEX_CLOCK, line)
if search:
return int(search.group('clock'))
else:
return None
|
a4464c979d31bab463f949bec83da99e72af6ca6
| 3,647,077
|
import requests
def block_latest(self, **kwargs):
"""
Return the latest block available to the backends, also known as the tip of the blockchain.
https://docs.blockfrost.io/#tag/Cardano-Blocks/paths/~1blocks~1latest/get
:param return_type: Optional. "object", "json" or "pandas". Default: "object".
:type return_type: str
:returns BlockResponse object.
:rtype BlockResponse
:raises ApiError: If API fails
:raises Exception: If the API response is somehow malformed.
"""
return requests.get(
url=f"{self.url}/blocks/latest",
headers=self.default_headers
)
|
a14fc3512138c1d15b32b09bd20ea03678964437
| 3,647,078
|
def get_courses():
"""
Route to display all courses
"""
params = format_dict(request.args)
if params:
try:
result = Course.query.filter_by(**params).order_by(Course.active.desc())
except InvalidRequestError:
return { 'message': 'One or more parameter(s) does not exist' }, 400
else:
result = Course.query.order_by(Course.active.desc())
return { "courses": [c.serialize for c in result] }
|
6dcdcb5df4d0010661ffe92f55522638ae51a2b8
| 3,647,079
|
def zero_adam_param_states(state: flax.optim.OptimizerState, selector: str):
"""Applies a gradient for a set of parameters.
Args:
state: a named tuple containing the state of the optimizer
selector: a path string defining which parameters to freeze.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
step = state.step
params = flax.core.unfreeze(state.param_states)
flat_params = {
"/".join(k): v for k, v in traverse_util.flatten_dict(params).items()
}
for k in flat_params:
if k.startswith(selector):
v = flat_params[k]
# pylint: disable=protected-access
flat_params[k] = flax.optim.adam._AdamParamState(
jnp.zeros_like(v.grad_ema), jnp.zeros_like(v.grad_sq_ema)
)
new_param_states = traverse_util.unflatten_dict(
{tuple(k.split("/")): v for k, v in flat_params.items()}
)
new_param_states = dict(flax.core.freeze(new_param_states))
new_state = flax.optim.OptimizerState(step, new_param_states)
return new_state
|
8a7cb65028866e4a7f3a03b589fa1bf5798a25e0
| 3,647,080
|
import scipy
def leftFitNormal(population):
"""
Obtain mode and standard deviation from the left side of a population.
>>> pop = np.random.normal(loc=-20, scale=3, size=15000)
>>> mode, sigma = leftFitNormal(pop)
>>> -22 < mode < -18
True
>>> round(sigma)
3
>>> pop[pop > -18] += 10 # perturb right side
>>> mode, sigma = leftFitNormal(pop)
>>> -22 < mode < -18
True
>>> round(sigma) == 3
True
>>> pop[pop < -22] -= 10 # perturb left side
>>> mode, sigma = leftFitNormal(pop)
>>> -22 < mode < -18
True
>>> round(sigma) == 3
False
"""
# TODO: Can this function be omitted?
# Quick alternative robust fit:
# median = np.nanmedian(population)
# MADstd = np.nanmedian(np.abs(population - median)) * 1.4826
# Could still modify this estimator to ignore samples > median.
# Note, if the distribution is right-skewed or bimodal (e.g. if there is
# some land amongst mostly open water) then other relative frequencies
# will proportionally be depressed, favouring the fit of a broader
# Gaussian (perhaps also shifted slightly rightward) to the left side
# of the histogram (compared to if the distribution was normal).
# Could address this by normalising the interval area.
#
# Currently the tests for perturbed distributions bypass this limitation
# by _conditionally_ replacing existing samples, rather than by mixing
# additional components into the population i.e. avoiding
# pop[:5000] = np.linspace(-15, -5, 5000).
std = np.nanstd(population) # naive initial estimate
Y, X = hist_fixedwidth(population)
# Take left side of distribution
pos = Y.argmax()
mode = X[pos]
X = X[:pos+1]
Y = Y[:pos+1]
# fit gaussian to (left side of) distribution
def gaussian(x, mean, sigma):
return np.exp(-0.5 * ((x - mean)/sigma)**2) / (sigma * (2*np.pi)**0.5)
(mean, std), cov = scipy.optimize.curve_fit(gaussian, X, Y, p0=[mode, std])
return mode, std
|
28fbd93efa893dbb31e81d9875db97370f163716
| 3,647,081
|
from bs4 import BeautifulSoup
def get_stock_market_list(corp_cls: str, include_corp_name=True) -> dict:
""" 상장 회사 dictionary 반환
Parameters
----------
corp_cls: str
Y: stock market(코스피), K: kosdaq market(코스닥), N: konex Market(코넥스)
include_corp_name: bool, optional
if True, returning dictionary includes corp_name(default: True)
Returns
-------
dict of {stock_code: information}
상장 회사 정보 dictionary 반환( 회사 이름, 섹터, 물품)
"""
if corp_cls.upper() == 'E':
raise ValueError('ETC market is not supported')
corp_cls_to_market = {
"Y": "stockMkt",
"K": "kosdaqMkt",
"N": "konexMkt",
}
url = 'http://kind.krx.co.kr/corpgeneral/corpList.do'
referer = 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=loadInitPage'
market_type = corp_cls_to_market[corp_cls.upper()]
payload = {
'method': 'download',
'pageIndex': 1,
'currentPageSize': 5000,
'orderMode': 3,
'orderStat': 'D',
'searchType': 13,
'marketType': market_type,
'fiscalYearEnd': 'all',
'location': 'all',
}
stock_market_list = dict()
resp = request.post(url=url, payload=payload, referer=referer)
html = BeautifulSoup(resp.text, 'html.parser')
rows = html.find_all('tr')
for row in rows:
cols = row.find_all('td')
if len(cols) > 0:
corp_name = cols[0].text.strip()
stock_code = cols[1].text.strip()
sector = cols[2].text.strip()
product = cols[3].text.strip()
corp_info = {'sector': sector, 'product': product, 'corp_cls': corp_cls}
if include_corp_name:
corp_info['corp_name'] = corp_name
stock_market_list[stock_code] = corp_info
return stock_market_list
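# A hedged usage sketch; `request.post` above is assumed to be a project-local HTTP
# helper, so this only illustrates the call shape and the result layout:
#
#     kospi = get_stock_market_list('Y')                       # KOSPI-listed companies
#     kosdaq = get_stock_market_list('K', include_corp_name=False)
#     # kospi['005930'] -> {'sector': ..., 'product': ..., 'corp_cls': 'Y', 'corp_name': ...}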
|
c8e0242e1ddfcc4f32514f131f3a9797694202c1
| 3,647,082
|
def evaluate_template(template: dict) -> dict:
"""
This function resolves the template by parsing the T2WML expressions
and replacing them by the class trees of those expressions
:param template:
:return:
"""
response = dict()
for key, value in template.items():
if key == 'qualifier':
response[key] = []
for i in range(len(template[key])):
temp_dict = dict()
for k, v in template[key][i].items():
if isinstance(v, (ItemExpression, ValueExpression, BooleanEquation)):
col, row, temp_dict[k] = v.evaluate_and_get_cell(bindings)
temp_dict['cell'] = get_actual_cell_index((col, row))
else:
temp_dict[k] = v
if "property" in temp_dict and temp_dict["property"] == "P585":
if "format" in temp_dict:
try:
datetime_string, precision = parse_datetime_string(temp_dict["value"], additional_formats=[temp_dict["format"]])
if "precision" not in temp_dict:
temp_dict["precision"] = int(precision.value.__str__())
else:
temp_dict["precision"] = translate_precision_to_integer(temp_dict["precision"])
temp_dict["value"] = datetime_string
except Exception as e:
raise e
response[key].append(temp_dict)
else:
if isinstance(value, (ItemExpression, ValueExpression, BooleanEquation)):
col, row, response[key] = value.evaluate_and_get_cell(bindings)
if key == "item":
response['cell'] = get_actual_cell_index((col, row))
else:
response[key] = value
return response
|
596516f9dfb81170212020cfb053339ddb49b716
| 3,647,083
|
def get_CommandeProduits(path, prefix='CP_',cleaned=False):
"""
Read CSV (CommandeProduits) into Dataframe. All relevant columns are kept and renamed with prefix.
    Args:
        path (str): file path to CommandeProduits.csv
        prefix (str): all relevant columns are renamed with this prefix
        cleaned (bool): if True, read an already-cleaned, tab-separated CSV with the
            first column as index instead of selecting and renaming the raw columns
    Returns:
        df (DataFrame): resulting dataframe
    """
col = {'Id':prefix+'Id',
'Commande_Id':'Commande_Id',
'OffreProduit_Id':'OffreProduit_Id',
'QuantiteTotale':prefix+'QuantiteTotale',
'QuantiteUnite':prefix+'QuantiteUnite',
'QuantiteValeur':prefix+'QuantiteValeur',
'MontantTotal':prefix+'MontantTotal',
'Weight':prefix+'Weight'}
dt = {'Id': 'int64',
'Commande_Id': 'int64',
'OffreProduit_Id':'int64',
'QuantiteTotale':'float64',
'QuantiteUnite':'object',
'QuantiteValeur':'float64',
'MontantTotal':'float64',
'Weight':'float64'}
if not cleaned:
df = pd.read_csv(path, sep='\t', encoding='utf-8', usecols=list(col.keys()), dtype=dt)
df = df.rename(index=str, columns=col)
else:
df = pd.read_csv(path, sep='\t', encoding='utf-8',index_col=0)
return df
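# Usage sketch with a hypothetical file path:
#
#     cp = get_CommandeProduits("data/CommandeProduits.csv")
#     # columns: CP_Id, Commande_Id, OffreProduit_Id, CP_QuantiteTotale,
#     #          CP_QuantiteUnite, CP_QuantiteValeur, CP_MontantTotal, CP_Weight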
|
18c5c7e375abcc57c2cfcbc4f2c58ecec5aecf59
| 3,647,084
|
def hist_equal(image, hist):
"""
Equalize an image based on a histogram.
Parameters
----------
image : af.Array
- A 2 D arrayfire array representing an image, or
- A multi dimensional array representing batch of images.
hist : af.Array
- Containing the histogram of an image.
Returns
---------
output : af.Array
- The equalized image.
"""
output = Array()
safe_call(backend.get().af_hist_equal(c_pointer(output.arr), image.arr, hist.arr))
return output
|
70aeeb1822752c2f7fb5085d761bb9b309d29335
| 3,647,085
|
def get_close_icon(x1, y1, height, width):
    """Return the bounding box (x1, y1, x2, y2) of a fixed 15x15 close icon
    anchored at the top-left corner (x1, y1).

    Earlier percentage-based implementation, kept for reference:
        percentage = 0.1
        height = -1
        while height < 15 and percentage < 1.0:
            height = int((y2 - y1) * percentage)
            percentage += 0.1
        return (x2 - height), y1, x2, (y1 + height)
    """
    return x1, y1, x1 + 15, y1 + 15
|
78b65cdeeb4f6b3a526fd5dd41b34f35545f1e9d
| 3,647,086
|
def train_model(network, data, labels, batch_size,
epochs, validation_data=None, verbose=True, shuffle=False):
"""
Train
"""
model = network.fit(
data,
labels,
batch_size=batch_size,
epochs=epochs,
validation_data=validation_data,
shuffle=shuffle,
verbose=verbose)
return model
|
a2b093aef1b607cd34dd30e8c5f126e1efb3d409
| 3,647,087
|
def taoyuan_agrichannel_irrigation_transfer_loss_rate():
"""
Real Name: TaoYuan AgriChannel Irrigation Transfer Loss Rate
Original Eqn: 0
Units: m3/m3
Limits: (None, None)
Type: constant
Subs: None
This is "no loss rate" version.
"""
return 0
|
9fd8a84ae79cbeaf8c8259da815f9322f27b253f
| 3,647,088
|
def lambda_handler(event, context):
"""
Find and replace following words and outputs the result.
Oracle -> Oracle©
Google -> Google©
Microsoft -> Microsoft©
Amazon -> Amazon©
Deloitte -> Deloitte©
Example input: “We really like the new security features of Google Cloud”.
Expected output: “We really like the new security features of Google© Cloud”.
"""
    # Return 400 if the event is missing or strToReplace is absent or blank
    if not event or not event.get('strToReplace'):
return {
'statusCode': 400,
'body': "Input string not provided."
}
# Input String
replacementString = event['strToReplace']
# Dictionary of words with replacement words
wordsToReplaceDict = {'Oracle': 'Oracle©', 'Google': 'Google©', 'Microsoft': 'Microsoft©', 'Amazon': 'Amazon©', 'Deloitte': 'Deloitte©'}
# Iterate over all key-value pairs in dictionary
for key, value in wordsToReplaceDict.items():
# Replace words in string
replacementString = replacementString.replace(key, value)
return {
'statusCode': 200,
'body': replacementString
}
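# Quick example: exercising the handler locally with the docstring's sample input.
result = lambda_handler(
    {'strToReplace': 'We really like the new security features of Google Cloud'}, None)
assert result == {'statusCode': 200,
                  'body': 'We really like the new security features of Google© Cloud'}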
|
66dc2914dd04a2e265ed21542bd462b61344d040
| 3,647,089
|
from numpy.linalg import pinv

def update_inv(X, X_inv, i, v):
    """Computes a low-rank update of the inverse of a symmetrical matrix.

    Given a symmetrical matrix X and its inverse X^{-1}, this function computes
    the inverse of Y, which is a copy of X, with the i'th row & column replaced
    by the given vector v.
Parameters
----------
X : ndarray, shape (N, N)
A symmetrical matrix.
    X_inv : ndarray, shape (N, N)
        The inverse of X.
i : int
The index of the row/column to replace.
v : ndarray, shape (N,)
The values to replace the row/column with.
Returns
-------
Y_inv : ndarray, shape (N, N)
The inverse of Y.
"""
U = v[:, np.newaxis] - X[:, [i]]
mask = np.zeros((len(U), 1))
mask[i] = 1
U = np.hstack((U, mask))
V = U[:, [1, 0]].T
V[1, i] = 0
C = np.eye(2)
X_inv_U = X_inv.dot(U)
V_X_inv = V.dot(X_inv)
Y_inv = X_inv - X_inv_U.dot(pinv(C + V_X_inv.dot(U))).dot(V_X_inv)
return Y_inv
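# Numerical sanity check against a direct inversion (np assumed imported, as above).
rng = np.random.default_rng(0)
A = rng.normal(size=(5, 5))
X = A @ A.T + 5 * np.eye(5)            # symmetric and well conditioned
v = rng.normal(size=5)
Y = X.copy()
Y[2, :] = v                            # replace row and column 2 with v
Y[:, 2] = v
Y_inv = update_inv(X, np.linalg.inv(X), 2, v)
print(np.allclose(Y_inv, np.linalg.inv(Y)))   # expected: True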
|
c811dbf699d8f93fa2fa5b3f68c5b23cf4131e9f
| 3,647,090
|
import csv
def read_barcode_lineno_map(stream):
"""Build a map of barcodes to line number from a stream
This builds a one based dictionary of barcode to line numbers.
"""
barcodes = {}
reader = csv.reader(stream, delimiter="\t")
for i, line in enumerate(reader):
barcodes[line[0]] = i + 1
return barcodes
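# Quick example: the mapping is one-based and keyed on the first column.
import io
mapping = read_barcode_lineno_map(io.StringIO("AAACCC\tfoo\nGGGTTT\tbar\n"))
assert mapping == {"AAACCC": 1, "GGGTTT": 2}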
|
545a0d02dd76e774ba0de86431113ad9f36a098e
| 3,647,091
|
def match_in_candidate_innings(entry, innings, summary_innings, entities):
"""
:param entry:
:param innings: innings to be searched in
:param summary_innings: innings mentioned in the summary segment
:param entities: total entities in the segment
:return:
"""
entities_in_summary_inning = set()
for summary_inning in summary_innings:
intersection = get_matching_entities_in_inning(entry, summary_inning, entities)
entities_in_summary_inning.update(intersection)
entities_not_found = entities.difference(entities_in_summary_inning)
matched_inning = -1
if len(entities_not_found) > 1:
remaining_inings = set(innings).difference(set(summary_innings))
orderered_remaining_innings = [inning for inning in innings if inning in remaining_inings]
matched_inning = get_inning_all_entities_set_intersection(entry, orderered_remaining_innings, entities_not_found)
return matched_inning
|
3551212f79c6ecb298ec6b55aa7b68213b950394
| 3,647,092
|
from typing import Optional
from typing import Union
from typing import Callable
from typing import Any
def checkpoint(
name: Optional[str] = None,
on_error: bool = True,
cond: Union[bool, Callable[..., bool]] = False,
) -> Callable[[Callable], Any]:
"""
Create a checkpointing decorator.
Args:
        name (Optional[str]): Name of the checkpoint when saved; defaults to the wrapped function's name.
on_error (bool): Whether to save checkpoint when an error occurs.
cond (Union[bool, Callable[..., bool]]): Condition under which to save checkpoint.
If a Callable, all parameters of the wrapped function should be passed
and it has to return a boolean.
Returns:
A decorator function.
"""
def ckpt_worker(func: Callable):
if name is None:
ckpt_name = func.__name__
else:
ckpt_name = name
return CkptWrapper(func=func, ckpt_name=ckpt_name, on_error=on_error, cond=cond)
return ckpt_worker
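# A hedged usage sketch; CkptWrapper is defined elsewhere in the project, so this
# only illustrates the intended decorator call shape:
#
#     @checkpoint(name="train_epoch", on_error=True,
#                 cond=lambda epoch, **kwargs: epoch % 10 == 0)
#     def train_epoch(epoch, model, data):
#         ...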
|
39bab1a33523c34b04a2ed7f2efd6467de63b27b
| 3,647,093
|
def return_int(bit_len, unsigned=False):
"""
This function return the decorator that change return value to valid value.
The target function of decorator should return only one value
e.g. func(*args, **kargs) -> value:
"""
if bit_len not in VALID_BIT_LENGTH_OF_INT:
err = "Value of bit_len should be the one of {}, but your bit_len={}."
raise ByteDatasValueError(err.format(VALID_BIT_LENGTH_OF_INT, bit_len))
# calculate max_value for changing raw value to valid value
max_value = 2**bit_len
def decorator(function):
"""decorator function"""
@wraps(function)
def wrapper(*args, **kwargs):
"""
change valid to positive if value < 0
check value than call function or return False directly
"""
value = function(*args, **kwargs)
if value >= max_value or value < 0:
err = ("Returned value of {} should be between 0 and {}, but your "
"value = {}.")
raise ByteDatasValueError(err.format(function.__name__, max_value, value))
if unsigned is False:
# if value > max_value//2 , it means the top bit of value is
# 1 , it is a negative value, so we should change it to negative
value = value - max_value if value > max_value//2 else value
return value
return wrapper
return decorator
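# A hedged usage sketch (assumes 8 is listed in VALID_BIT_LENGTH_OF_INT and that
# ByteDatasValueError is defined alongside this decorator):
#
#     @return_int(8)
#     def read_status_byte():
#         return 0xFF          # raw unsigned byte from a device
#
#     read_status_byte()       # -> -1, i.e. the value reinterpreted as signed 8-bit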
|
66121d389a389c6152fd4491ed8a698336e042a2
| 3,647,094
|
def get_integral_curve(f, init_xy, x_end, delta):
"""
solve ode 'dy/dx=f(x,y)' with Euler method
"""
(x, y) = init_xy
xs, ys = [x], [y]
for i in np.arange(init_xy[0], x_end, delta):
y += delta*f(x, y)
x += delta
xs.append(x)
ys.append(y)
return xs, ys
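# Quick example: dy/dx = y with y(0) = 1; Euler should approach e at x = 1.
xs, ys = get_integral_curve(lambda x, y: y, (0.0, 1.0), 1.0, 0.001)
print(xs[-1], ys[-1])   # ~1.0 and ~2.717 (the exact solution gives e ≈ 2.71828)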
|
0526643acd37b8d7c2646d3a21d54e9d9f16ef58
| 3,647,095
|
def compute_atime_posteriors(sg, proposals,
global_srate=1.0,
use_ar=False,
raw_data=False,
event_idx=None):
"""
compute the bayesian cross-correlation (logodds of signal under an AR noise model)
for all signals in the historical library, against all signals in the current SG.
This is quite expensive so should in general be run only once, and the results cached.
"""
atime_lls = []
i = 0
for idx, (x, signals) in enumerate(proposals):
if event_idx is not None and event_idx != idx:
continue
sta_lls = dict()
for (sta, chan, band, phase), c in signals.items():
wns = sg.station_waves[sta]
if len(wns) == 0:
continue
elif len(wns) > 1:
raise Exception("haven't worked out correlation proposals with multiple wns from same station")
wn = wns[0]
if raw_data:
sdata = wn.get_value().data.copy()
sdata[np.isnan(sdata)] = 0.0
else:
sdata = wn.unexplained_kalman()
if use_ar:
lls = ar_advantage(sdata, c, wn.nm)
else:
normed_sdata = sdata / wn.nm_env.c #np.std(sdata)
lls = np.sqrt(iid_advantage(normed_sdata, c)) # sqrt for laplacian noise, essentially
tt_array, tt_mean = build_ttr_model_array(sg, x, sta, wn.srate, phase=phase)
origin_ll, origin_stime = atime_likelihood_to_origin_likelihood(lls, wn.st, wn.srate, tt_mean, tt_array, global_srate)
signal_scale = wn.nm_env.c
sta_lls[(wn.label, phase)] = origin_ll, origin_stime, signal_scale
sg.logger.info("computed advantage for %s %s %s" % (x, wn.label, phase))
i += 1
atime_lls.append((x, sta_lls))
return atime_lls
|
1029f57fe500ef6f08eec56ab34539d3f9a80637
| 3,647,096
|
def search4vowels(phrase: str) -> set:
    """Return any vowels found in a supplied word."""
    vowels = set('aeiou')
    return vowels.intersection(set(phrase))
|
8a45c50828b6ba8d173572ac771eb8fe5ddc5a42
| 3,647,097
|
def rsort(s):
"""Sort sequence s in ascending order.
>>> rsort([])
[]
>>> rsort([1])
[1]
>>> rsort([1, 1, 1])
[1, 1, 1]
>>> rsort([1, 2, 3])
[1, 2, 3]
>>> rsort([3, 2, 1])
[1, 2, 3]
>>> rsort([1, 2, 1])
[1, 1, 2]
>>> rsort([1,2,3, 2, 1])
[1, 1, 2, 2, 3]
"""
if len(s) <= 1:
return s
else:
        return [rmin(s)] + rsort(remove(rmin(s), s))
|
d9f67d713e55d50cd4468ad709f04c7bfea05c71
| 3,647,098
|
import os
def xdg_data_home():
"""Base directory where user specific data files should be stored."""
value = os.getenv('XDG_DATA_HOME') or '$HOME/.local/share/'
return os.path.expandvars(value)
|
db4212def5e4760bbe1da762a74cf09a9ee40d78
| 3,647,099
|