content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from pathlib import Path
from typing import Union
def get_resample_bandname(filename: Path, spatial_res: Union[int, float, str]) -> str:
    """Return the basename for a resampled band file.

    Args:
        filename: Path to the source band file.
        spatial_res: Target spatial resolution in metres; used verbatim
            in the output name.

    Returns:
        Basename of the form "<stem>-resmpl-<res>m.tif"; fmask bands use
        the fixed stem "fmask".
    """
    # fmask has a different naming convention: drop the original stem.
    if "fmask" in filename.stem.lower():
        return f"fmask-resmpl-{spatial_res}m.tif"
    return f"{filename.stem}-resmpl-{spatial_res}m.tif"
import os
def file_existed_checker(parser_in, in_file, action_type):
    """Resolve a path and check that it (or its parent) exists.

    Args:
        parser_in: argparse-like parser; its ``error()`` is invoked when a
            required input file is missing.
        in_file: Path to check; a trailing '/' is preserved on the result.
        action_type: 'in' for required input files; anything else is treated
            as an output path whose parent directory is created on demand.

    Returns:
        The resolved (realpath) path, with the original trailing '/' kept.
    """
    out_file = os.path.realpath(in_file)
    if in_file.endswith('/'):
        out_file += '/'
    if not os.path.exists(out_file):
        if action_type == 'in':
            # Required input is missing: delegate the fatal error to the parser.
            parser_in.error(f"{out_file} does NOT exist, check it.")
        else:
            # Output path: ensure the parent directory exists.
            # exist_ok avoids a race between the existence check and creation.
            os.makedirs(os.path.dirname(out_file), exist_ok=True)
    return out_file
import torch
def small_world_chunker(inputs, outputs, nnz):
    """Bisection search for a power-law connection-probability matrix.

    Builds P[i, j] = (|inputs_i - outputs_j| + 1) ** (-lambda) and bisects
    lambda in [1e-5, 5] until the expected connection count P.sum() is
    within ``error_threshold`` of ``nnz``.  Presented in the write up
    Bipartite_small_world_network.

    Args:
        inputs: 1-D tensor of input-unit coordinates.
        outputs: 1-D tensor of output-unit coordinates.
        nnz: target number of non-zero connections.

    Returns:
        Probability tensor P of shape (len(inputs), len(outputs)).

    Raises:
        AssertionError: if no suitable lambda is found within ``max_itr``
            bisection steps.
    """
    # Pairwise signed distances via broadcasting: (len(inputs), len(outputs)).
    pair_distance = inputs.view(-1, 1) - outputs
    # +1 keeps the base >= 1 so the power is well defined at distance 0.
    arg = torch.abs(pair_distance) + 1.
    # lambda search
    L, U = 1e-5, 5.  # current lower/upper bisection bounds for lambda
    lamb = 1.  # initial guess
    itr = 1
    error_threshold = 10.
    max_itr = 1000
    P = arg**(-lamb)
    P_sum = P.sum()
    error = abs(P_sum - nnz)
    while error > error_threshold:
        assert itr <= max_itr, 'No solution found; please try different network sizes and sparsity levels'
        if P_sum < nnz:
            # Too few expected connections: decrease lambda to raise P.
            U = lamb
            lamb = (lamb + L) / 2.
        elif P_sum > nnz:
            # Too many expected connections: increase lambda to lower P.
            L = lamb
            lamb = (lamb + U) / 2.
        P = arg**(-lamb)
        P_sum = P.sum()
        error = abs(P_sum - nnz)
        itr += 1
    return P | 5e8d11cc6c85e138ff4c4d144f6b98e86633fd8b | 48,619 |
def database_file_path():
    """Return the relative path of the library's SQLite database file."""
    return 'data/library.db'
def _digit_to_alpha_num(digit, base=52):
"""Convert digit to base-n."""
base_values = {
26: {j: chr(j + 65) for j in range(0, 26)},
52: {j: chr(j + 65) if j < 26 else chr(j + 71) for j in range(0, 52)},
62: {j: chr(j + 55) if j < 36 else chr(j + 61) for j in range(10, 62)},
}
if base not in base_values:
raise ValueError(
f"Base-{base} system is not supported. Supported bases are: "
f"{list(base_values.keys())}"
)
return base_values[base].get(digit, digit) | 30eb2bb3e9060b1c36ba31e27a61a806308bf72d | 48,621 |
def flatten_lists(listoflists):
    """
    Concatenate a python list of lists into a single flat list
    :param listoflists: (list(list))
    :return: (list)
    """
    flat = []
    for sublist in listoflists:
        flat.extend(sublist)
    return flat
def write_file(file_name, chunked_sequence):
    """
    Write the chunked codons into ``<file_name>.csv``.

    The first row holds 1-based positions, the second row the codon chunks;
    every cell is followed by a comma.

    Returns:
        True once the file has been written.
    """
    out_path = file_name + ".csv"
    print(len(chunked_sequence))
    print("\t-I- Writing file:", out_path)
    with open(out_path, 'wt') as output:
        # Header row: 1-based column indices.
        for position in range(1, len(chunked_sequence) + 1):
            output.write(str(position) + ",")
        output.write("\n")
        for chunk in chunked_sequence:
            output.write(chunk + ",")
    return True
def tokenize_sentence(tokenizer, sentence):
""" Tokenizes and lowercases the description using the English
Spacy tokenizer
"""
tokens = tokenizer(sentence)
words = [token.text for token in tokens]
words = [word.lower() for word in words]
return words | ffa8550f6780c79f44d5f4ea3750d992fba9106e | 48,626 |
import random
def random_code6e():
"""
生成随机6位代码 数字+小写字母
:return: string 代码
"""
ret = ""
for i in range(6):
num = random.randint(0, 9)
# num = chr(random.randint(48,57)) #ASCII表示数字
letter = chr(random.randint(97, 122)) # 取小写字母
s = str(random.choice([num, letter]))
ret += s
return ret | 549f32dec6612532256c555301d256408016eaeb | 48,629 |
import numpy
def rad_to_midi_note_number(F0s: numpy.ndarray, fs: int) -> numpy.ndarray:
    """Convert normalized angular frequencies [rad] into MIDI note numbers.

    Args:
        F0s (numpy.ndarray): Normalized angular frequencies of pitches [rad]
        fs (int): Sampling frequency
    Returns:
        numpy.ndarray: MIDI note numbers (truncated to int32)
    """
    hertz = F0s / (2.0 * numpy.pi) * fs
    midi = numpy.log2(hertz / 440) * 12 + 69
    return midi.astype('i')
import pickle
import random
def extrapolate_experiment_eval_data(mnist_path, digit, N_t, pred_angle_id=7, nr_angles=16):
    """
    Prepare validation dataset for the extrapolate experiment.

    Loads the (not shuffled) eval pickle for ``digit`` and splits it into
    observed views and the held-out prediction views.

    :param mnist_path: directory containing 'eval_data<digit>_not_shuffled.p'
    :param digit: which digit's eval file to load
    :param N_t: how many angles do we observe for each image in test set
    :param pred_angle_id: which angle to leave out for prediction
    :param nr_angles: size of object dataset (angles per object)
    :return: (observed_images, observed_aux_data, test_images, test_aux_data)

    NOTE(review): assumes eval_images / eval_aux_data support fancy
    (list/boolean) indexing, i.e. numpy arrays — confirm against the
    pickle's producer.
    """
    eval_data_dict = pickle.load(open(mnist_path + 'eval_data{}_not_shuffled.p'.format(digit), 'rb'))
    eval_images, eval_aux_data = eval_data_dict["images"], eval_data_dict["aux_data"]
    # Index of the held-out prediction angle for every object.
    pred_angle_mask = [pred_angle_id + i * nr_angles for i in range(int(len(eval_aux_data) / nr_angles))]
    not_pred_angle_mask = [i for i in range(len(eval_images)) if i not in pred_angle_mask]
    observed_images = eval_images[not_pred_angle_mask]
    observed_aux_data = eval_aux_data[not_pred_angle_mask]
    # randomly drop some observed angles
    if N_t < 15:
        # Per object: keep N_t of the remaining 15 angles, chosen at random.
        digit_mask = [True]*N_t + [False]*(15-N_t)
        mask = [random.sample(digit_mask, len(digit_mask)) for _ in range(int(len(eval_aux_data)/nr_angles))]
        flatten = lambda l: [item for sublist in l for item in sublist]
        mask = flatten(mask)
        observed_images = observed_images[mask]
        observed_aux_data = observed_aux_data[mask]
    test_images = eval_images[pred_angle_mask]
    test_aux_data = eval_aux_data[pred_angle_mask]
    return observed_images, observed_aux_data, test_images, test_aux_data | a734b1af7cd6dd5c8c09eea98cd71e20b81d174b | 48,631 |
import os
def build_relative_path(full_path, prefix='/', split_on='/data/'):
    """
    Given a full file path on the hard drive return a relative path to the data directory
    ie /full/path/to/data/my/file
    :param full_path: The original full path to a file
    :param prefix: The prefix of the result
    :param split_on: The string in the path to split on, included in the result.
    :return: the relative path, ie '/data/my/file
    """
    tail = full_path.split(split_on)[-1]
    return os.path.join(prefix, split_on, tail)
def matrix_row_basis_indices(m):
    """For efficient adding a bunch of vectors to a vector

    Appears to select row indices of ``m`` that span its row space: one
    row is discarded per left-kernel relation, and the final assert
    checks that exactly rank-many indices remain.

    NOTE(review): relies on Sage matrix methods (nrows, rank,
    left_kernel, echelon_form), not plain numpy — confirm.
    """
    good = set(range(m.nrows()))
    target_rank = m.rank()
    ker = m.left_kernel().matrix()
    # Reverse columns before/after echelonizing so pivots fall on the
    # highest-index rows of each relation.
    ker = ker[:,::-1].echelon_form()[:,::-1]
    for rel in ker:
        # Remove the last row that participates in this dependency relation.
        for i, take in reversed(list(enumerate(rel))):
            if take:
                assert i in good
                good.remove(i)
                break
    assert len(good) == target_rank
    return good | af23d8b030a6b6a44a6cb8ee34309121f5d77f7c | 48,633 |
def update_t_ci_dir_path(main, file):
    """Rename the '.travis' folder reference to '.ci' inside a travis file.

    :param main: helper exposing replace_in_file() and output_result_update()
    :param file: travis file path
    :return: True when a replacement was made, False otherwise
    """
    if not main.replace_in_file(file, ".travis", ".ci"):
        return False
    main.output_result_update("Update .travis -> .ci in travis file")
    return True
import os
def get_filename(filepath):
    """Return the final path component (basename) of a full filepath."""
    return os.path.basename(filepath)
def add_multiple_values(*args):
    """Add any number of integers.

    Arguments:
        args: integers to sum, e.g. 1, 2, 3, 4, 5

    Returns:
        The sum of all arguments (0 when called with none).
    """
    # sum() replaces the manual accumulator loop.
    return sum(args)
def check_classes_to_swap(source, target):
    """Check which item classes are better to use on a swap.

    For every class, computes source percentage minus target percentage and
    returns (class with the most negative delta, class with the most
    positive delta) as ints.
    """
    deltas = {
        class_: percentage - target['percentages'][class_]
        for class_, percentage in source['percentages'].items()
    }
    ranked = sorted(deltas.items(), key=lambda item: item[1])
    return int(ranked[0][0]), int(ranked[-1][0])
import sys
import six
def check_within(x, criterion, criterion_type="range"):
    """
    Check if x is "within" the criterion of a specific criterion_type,
    the returned bool(s) can be useful in slicing.

    Args:
        x: can be a number, an array or a column of a dataframe,
            even a dataframe is supported, at least in the form
        criterion: eg. [3, 5] or [None, 3] or [1, 3, 6, 7]
        criterion_type: "range" (exclusive bounds, None = unbounded)
            or "value" (membership), corresponding to the criterion

    Returns: a bool or an array/series/dataframe of bools

    Raises:
        ValueError: for a malformed range criterion.
        RuntimeError: for an unsupported criterion_type.
    """
    # BUG FIX: compare strings with ==, not "is" — identity of string
    # literals is an interning implementation detail.
    if criterion_type == "range":
        try:
            range_lw = criterion[0] if criterion[0] is not None \
                else -sys.float_info.max
            range_hi = criterion[1] if criterion[1] is not None \
                else sys.float_info.max
        except Exception:
            raise ValueError("Please input a two-element list "
                             "if you want a range!")
        # Element-wise comparison so pandas/numpy inputs work too
        # (a chained `lw <= x <= hi` would not broadcast).
        return (x > range_lw) & (x < range_hi)
    elif criterion_type == "value":
        # Wrap a bare string so membership tests the whole string,
        # not its characters.  (str replaces the six.string_types check.)
        criterion = criterion if not isinstance(criterion, str) \
            else [criterion]
        return x in criterion
    else:
        raise RuntimeError("Criterion_type {} is not supported yet."
                           "Please use range or value".
                           format(criterion_type))
def convert_array_list_to_x_y_lists(array_list):
    """
    Split a list of (x, y) arrays into separate x and y lists.

    Parameters
    ----------
    array_list : list (of arrays)
        List with numpy.arrays (with x, y coordinates)

    Returns
    -------
    list_x : list (of floats)
        List with x coordinates
    list_y : list (of floats)
        List with y coordinates
    """
    list_x = [entry[0] for entry in array_list]
    list_y = [entry[1] for entry in array_list]
    return list_x, list_y
import os
def get_dir_size(root, inodes):
    """
    Get the size of a directory and it's contents

    :param root: path to the directory
    :param inodes: a set with list of inodes that have been visited;
        mutated in place so hard links are only counted once across calls
    :return: number of bytes used by the directory and it's contents

    NOTE(review): only direct entries of ``root`` are examined — there is
    no recursion into subdirectories despite the docstring; confirm
    whether that is intended.
    """
    total_size = 0
    entries = os.listdir(root)
    for entry in entries:
        entry_name = os.path.join(root, entry)
        try:
            entry_stat = os.stat(entry_name)
        except OSError:
            # Entry vanished or is unreadable: skip it.
            continue
        if entry_stat.st_ino in inodes:
            # Already counted via another hard link.
            continue
        # Only regular, non-symlinked files contribute their size.
        if os.path.isfile(entry_name) and not os.path.islink(entry_name):
            try:
                total_size += entry_stat.st_size
                inodes.add(entry_stat.st_ino)
            except OSError:
                continue
    # Include the size of the directory entry itself.
    total_size += os.stat(root).st_size
    return total_size | 17cf4bf9008a4ec49aa349da340fc74e333e7d54 | 48,644 |
def _max(iterable):
"""
Max is zero, even if iterable is empty
>>> _max([])
0
>>> _max([5])
5
>>> _max([1, 2])
2
"""
try:
return max(iterable)
except ValueError:
return 0 | 8d77b443eaa9ee77a3fceecef986c265222af29d | 48,645 |
def _generate_weight(label, nl_symbol, n_fold):
""" Generate weights based on one multi-label label.
label (str): multi-label or multi-class label
nl_symbol (str): the symbol representing no label
n_fold (float): unlabeled has n_fold weight value comparing to labeled
===========================================================================
return (list): weights
"""
weights = [1] * len(label)
for i, lbl in enumerate(label):
if lbl == nl_symbol:
weights[i] = n_fold
return weights | b31c7d8848b46b9e6aebdb60ea5ea461388287dc | 48,646 |
def _handleTextNotes(s):
"""Split text::notes strings."""
ssplit = s.split('::', 1)
if len(ssplit) == 1:
return s
return u'%s<notes>%s</notes>' % (ssplit[0], ssplit[1]) | 11e8d38efc7f67f87185b81f8a5672a21101437f | 48,647 |
import io
import wave
def mono_to_stereo(file: str) -> bytes:
    """Return stereo converted wave data from a monaural wave file.

    Each mono sample is duplicated into the left and right channels.

    Args:
        file: path of the mono input wave file.

    Returns:
        The complete stereo WAV byte stream.
    """
    with io.BytesIO() as stream, \
            wave.open(file, 'rb') as wi, \
            wave.open(stream, 'wb') as wo:
        wo.setnchannels(2)
        sampwidth = wi.getsampwidth()
        wo.setsampwidth(sampwidth)
        wo.setframerate(wi.getframerate())
        nframes = wi.getnframes()
        wo.setnframes(nframes)
        # Read everything once and duplicate each mono sample in place
        # (the old code issued one readframes() call per frame and used a
        # list comprehension purely for its side effects).
        mono = wi.readframes(nframes)
        stereo = bytearray()
        for off in range(0, len(mono), sampwidth):
            sample = mono[off:off + sampwidth]
            stereo += sample + sample
        wo.writeframesraw(bytes(stereo))
        return stream.getvalue()
import random
def sporophyte_fitness(progeny_list):
    """Generate fitness cost of each karyotype in a set of progeny.

    The cost is the sum over the six chromosome groups of the squared
    difference between upper- and lower-case allele counts (dosage
    imbalance): 0, 4 or 16 per group.

    Note: ``progeny_list`` is shuffled in place before costing.

    :arg progeny_list: holds a list of karyotypes from one parent
    :type progeny_list: list[karyotype lists]
    :return: list of (karyotype, total_imbalance_cost) tuples
    """
    costed_progeny = []
    random.shuffle(progeny_list)
    # One (upper, lower) allele pair per chromosome group, in karyotype order.
    allele_pairs = [('A', 'a'), ('B', 'b'), ('C', 'c'),
                    ('D', 'd'), ('E', 'e'), ('F', 'f')]
    for progeny in progeny_list:
        # Replaces six copy-pasted per-chromosome difference lines.
        total_diffs = sum(
            ((progeny[i].count(upper) + 1) - (progeny[i].count(lower) + 1)) ** 2
            for i, (upper, lower) in enumerate(allele_pairs)
        )
        costed_progeny.append((progeny, total_diffs))
    return costed_progeny
def generate_html(fields, pidx, appdata):
    """Render each argument's type from ``appdata`` as an HTML fragment."""
    return "".join("type: %s" % (arg['type'],) for arg in appdata['arguments'])
import time
import statistics
def timereps(reps, func):
    """Call ``func`` ``reps`` times and return the mean wall-clock duration."""
    durations = []
    for _ in range(reps):
        started = time.time()
        func()
        durations.append(time.time() - started)
    return statistics.mean(durations)
import os
import sys
import subprocess
def getDllVersion(filename):
    """ Returns the DLL version number in the indicated DLL, as a
    string of comma-separated integers. Windows only.

    Shells out to the VersionInfo.vbs script (expected next to this
    script) via cscript, captures its dotted version output from a temp
    file, and exits the process on a non-zero cscript return code.
    """
    # This relies on the VBScript program in the same directory as
    # this script.
    thisdir = os.path.split(sys.argv[0])[0]
    versionInfo = os.path.join(thisdir, 'VersionInfo.vbs')
    tempfile = 'tversion.txt'
    tempdata = open(tempfile, 'w+')
    cmd = 'cscript //nologo "%s" "%s"' % (versionInfo, filename)
    print(cmd)
    result = subprocess.call(cmd, stdout = tempdata)
    if result:
        # cscript failed: propagate its exit code and abort.
        sys.exit(result)
    # Rewind to read back what cscript wrote to the temp file.
    tempdata.seek(0)
    data = tempdata.read()
    tempdata.close()
    os.unlink(tempfile)
    # Convert dotted version "1.2.3.4" to comma form "1,2,3,4".
    return ','.join(data.strip().split('.')) | c49e390f42ca26373071f54dd703c1837c6234f3 | 48,652 |
def reduce_terms(df_frequency, max_df=1.0, min_df=1, max_terms=None, keep_less_freq=False):
    """Remove unimportant terms from term-by-document matrix.

    Parameters
    ----------
    df_frequency : pd.DataFrame
        Term-by-document matrix (terms as rows, documents as columns).
    max_df : float , between [0, 1]
        Terms that appear in more % of documents will be ignored
    min_df : int
        Terms that appear in <= number of documents will be ignored
    max_terms : int , None
        If not None or 0, only top `max_terms` terms will be returned.
    keep_less_freq : bool
        Decides wherever to keep most frequent or least frequent words when `max_terms` < len.

    Returns
    -------
    pd.DataFrame
        Filtered copy of the input.
        NOTE(review): the helper column 'doc_frequency' is added but never
        dropped, so it is still present in the returned frame — confirm
        whether callers rely on that.
    """
    df = df_frequency.copy()
    corpus_size = df.shape[1]
    # Number of documents each term appears in (non-zero, non-NaN cells).
    df['doc_apperance'] = df.fillna(0).astype(bool).sum(axis=1)
    df['doc_frequency'] = df['doc_apperance'] / corpus_size
    df = df[df.doc_frequency <= max_df]
    df = df[df.doc_apperance > min_df]
    if max_terms is not None and max_terms != 0 and max_terms < df.shape[0]:
        # Ascending sort keeps the least frequent terms at the head.
        df = df.sort_values('doc_frequency', ascending=keep_less_freq)
        df = df.head(max_terms)
    return df.drop('doc_apperance', axis=1) | d245ba4d768797a2ad5c0c3483b6467221610a22 | 48,653 |
import glob
def get_files(path_name, file_ext='json'):
    """
    get the list of files in the path name
    :param path_name: <str> file path name to search.
    :param file_ext: <str> file extension to save.
    :return: <list> array of files found.
    """
    # NOTE: the pattern is '*<ext>' with no dot, exactly as before.
    pattern = '{}/*{}'.format(path_name, file_ext)
    return glob.glob(pattern)
import time
def __cpu_time_deltas(sample_duration):
    """Return a sequence of cpu time deltas for a sample period.

    elapsed cpu time samples taken at 'sample_time (seconds)' apart.
    each value in the sequence is the amount of time, measured in units
    of USER_HZ (1/100ths of a second on most architectures), that the system
    spent in each cpu mode: (user, nice, system, idle, iowait, irq, softirq, [steal], [guest]).
    on SMP systems, these are aggregates of all processors/cores.

    Linux only (reads /proc/stat).
    """
    # Both handles are opened up front, but /proc content is produced at
    # readline() time, so the two reads are sample_duration apart.
    with open('/proc/stat') as f1:
        with open('/proc/stat') as f2:
            line1 = f1.readline()
            time.sleep(sample_duration)
            line2 = f2.readline()
    # First token is the literal "cpu"; diff the remaining counters.
    deltas = [int(b) - int(a) for a, b in zip(line1.split()[1:], line2.split()[1:])]
    return deltas | 136aaffca61330ba12991e533f8b12e95fe3cc9d | 48,655 |
def I(pcset):
    """Returns inversion of pcset."""
    inverted = []
    for pc in pcset:
        inverted.append((12 - pc) % 12)
    return inverted
def is_type(dtype):
    """Build a predicate that tests isinstance against ``dtype``.

    :param dtype: type or tuple of type.
    :return: function obj -> bool
    """
    def _predicate(obj):
        """Return True when ``obj`` is an instance of the captured dtype."""
        return isinstance(obj, dtype)
    return _predicate
import argparse
def parseargs():
"""
Parse the commandline arguments
"""
descriptors = ['sift', 'surf', 'orb']
matchers = ['flann', 'brute']
parser = argparse.ArgumentParser(description='ImageMatching Experimentation')
parser.add_argument('-f', '--feature',
choices=descriptors,
default='sift',
dest='descriptor',
help='The descripter to use' )
parser.add_argument('-m', '--matcher',
choices=matchers,
default='flann',
dest='matcher',
help='The matcher to use')
parser.add_argument('-p', '--matcher_parameters',
dest='matcher_params',
help='A parameter string or file (JSON or YAML) parameterization for the matcher')
parser.add_argument('-d', '--descriptor_parameters',
dest='descriptor_params',
help='A parameter string or file (JSON or YAML) parameterization for the descriptor')
parser.add_argument('--images',
nargs='*',
dest='images',
help='Comma separated list of images')
parser.add_argument('--imagelist',
dest='imagelist',
help='A file containing a list of images')
parser.add_argument('--minmatch',
dest='min_match_count',
default=10,
help='The minimum number of matches after outlier detection')
return parser.parse_args() | 87eb6cb2fa9bbb275a72f6b64686a4728eb5626e | 48,660 |
def codegen_reload_data():
    """
    Parameters required reload codegen for the fn_splunk_integration package
    """
    reload_params = {
        "package": u"fn_splunk_integration",
        "message_destinations": [u"splunk_es_rest"],
        "functions": [u"splunk_update_notable",
                      u"splunk_add_intel_item",
                      u"splunk_delete_threat_intel_item",
                      u"splunk_search"],
        "workflows": [u"splunk_update_notable",
                      u"search_splunk_ip_intel",
                      u"splunk_add_new_ip_intel",
                      u"example_of_deleting_an_intel_entry_in_splunk_es"],
        "actions": [u"Search Splunk ES for an artifact",
                    u"Delete an intel entry in Splunk ES",
                    u"Add artifact to Splunk ES",
                    u"Update Splunk ES notable event"],
        "incident_fields": [],
        "incident_artifact_types": [],
        "datatables": [u"splunk_intel_results"],
        "automatic_tasks": [],
        "scripts": [],
    }
    return reload_params
import re
import sys
def check_peak_files( column_name_list, samplesheet_row_list ):
    """
    Check peak file names and exit on error.

    A peak file name is rejected when it is non-empty and either contains
    a NUL character or does not start with '/'.  All offenders are
    collected and printed before the process exits with status -1.

    :param column_name_list: per-column metadata dicts; only columns with
        ``type == 'peak_file'`` are inspected
    :param samplesheet_row_list: rows of cell strings, parallel to
        ``column_name_list``
    :return: 0 when every peak file name is acceptable
    """
    bad_peak_files_dict = {}
    for row_elements in samplesheet_row_list:
        for i in range( len( row_elements ) ):
            column_name_dict = column_name_list[i]
            element_string = row_elements[i]
            if( column_name_dict['type'] != 'peak_file' ):
                continue
            # Reject embedded NULs or a first character other than '/'.
            if( ( len( element_string ) > 0 ) and ( re.search(r'[\0]+', element_string ) or re.match(r'[^/]', element_string ) ) ):
                bad_peak_files_dict.setdefault( element_string, True )
    if( len( bad_peak_files_dict.keys() ) > 0 ):
        print('Unacceptable peak file names (must start with \'/\' and must not contain null characters):')
        for bad_peak_file in bad_peak_files_dict.keys():
            print( '  \'%s\'' % ( bad_peak_file ) )
        sys.exit( -1 )
    return( 0 ) | 42ff3b7993e532ce39dfd4b73f0248732410b11b | 48,662 |
def verifica_repetido(valor, resultado=0):
    """Check that a CPF is not made of one repeated digit.

    Compares the digit sum against ``len(valor) * first_digit``: the two
    are equal exactly when every digit matches the first.  (A plain
    "divisible by 11" test gives false positives, e.g. sum 55 % 11 == 0.)

    Args:
        valor (str): the CPF digit string
        resultado (int): starting accumulator (kept for interface
            compatibility)

    Returns:
        bool: True when the digits are NOT all identical.
    """
    resultado += sum(int(ch) for ch in valor)
    return resultado != len(valor) * int(valor[0])
import torch
def outputs_as_triplets(output, number_negative_samples):
    """
    Reverses the process of self.triplets_as_batches by splitting the tensors
    into anchor, pos and neg again.

    ``output`` holds image encodings, either as a single tensor or as a list
    of tensors (one per encoding level).  Each tensor stacks, in order, all
    anchor encodings, all positive encodings, then all negative encodings.

    :param output: image encodings; first dimension has size
        batch_size * (number_negative_samples + 2)
    :param number_negative_samples: negatives per anchor (0 means none)
    :return: list of dicts with keys "anchor", "pos" and — when negatives
        exist — "neg" (a list of ``number_negative_samples`` tensors of
        batch_size rows each)
    """
    def _split_batch(tensor):
        # Shared splitting logic (was duplicated across both branches).
        bs = int(tensor.shape[0] / (number_negative_samples + 2))
        if number_negative_samples > 0:
            anchor, pos, neg = torch.split(
                tensor, [bs, bs, number_negative_samples * bs], dim=0)
            return {
                "anchor": anchor,
                "pos": pos,
                "neg": [neg[i * bs:(i + 1) * bs]
                        for i in range(number_negative_samples)],
            }
        anchor, pos = torch.split(tensor, [bs, bs], dim=0)
        return {"anchor": anchor, "pos": pos}

    if isinstance(output, list):
        return [_split_batch(t) for t in output]
    return [_split_batch(output)]
def get_suits(cards):
    """
    Return the suit (last character) of each card in cards.

    ex.
        get_suits(['2S','3C','5C','4D','6D'])
        returns ['S','C','C','D','D']
    """
    suits = []
    for card in cards:
        suits.append(card[-1])
    return suits
def configure_redshift(redshift, *structs):
    """
    Check and obtain a redshift from given default and structs.

    Parameters
    ----------
    redshift : float
        The default redshift to use
    structs : list of :class:`~_utils.OutputStruct`
        A number of output datasets from which to find the redshift.

    Raises
    ------
    ValueError :
        If both `redshift` and *all* structs have a value of `None`, **or** if
        any of them are different from each other (and not `None`).
    """
    # Distinct redshifts found on the provided datasets.
    found = list({s.redshift for s in structs
                  if s is not None and hasattr(s, "redshift")})
    if len(found) > 1:
        raise ValueError("Incompatible redshifts in inputs")
    if len(found) == 1:
        if redshift is not None and found[0] != redshift:
            raise ValueError("Incompatible redshifts in inputs")
        return found[0]
    if redshift is None:
        raise ValueError(
            "Either redshift must be provided, or a data set containing it."
        )
    return redshift
def file2text(filepath, verbose=True):
    """Read a file into a single space-joined string.

    Every line is stripped of surrounding whitespace and blank lines are
    dropped, so all newline characters disappear.  Note that this is very
    radical for source code or similarly whitespace-sensitive text.

    Parameters
    ----------
    filepath : pathlib.Path
        Path to the file
    verbose : bool
        If True, we print the name of the file.

    Returns
    -------
    text : str
        All the text found in the input file.
    """
    with filepath.open("r") as handle:
        stripped = [raw.strip() for raw in handle]
    if verbose:
        print(filepath.name)
    return " ".join(line for line in stripped if line)
def post_list_mentions(db, usernick, limit=50):
    """Return a list of posts that mention usernick, ordered by date

    db is a database connection (as returned by COMP249Db())
    return at most limit posts (default 50)

    Returns a list of tuples (id, timestamp, usernick, avatar, content)
    """
    cursor = db.cursor()
    sql = """select id, timestamp, usernick, avatar, content from posts,
            users where posts.usernick = users.nick order by timestamp desc"""
    cursor.execute(sql)
    mentions = []  # renamed: previous name shadowed the builtin `list`
    for row in cursor.fetchall():
        # A mention is any occurrence of the nick inside the post content.
        if usernick in row[4]:
            mentions.append(row)
            if len(mentions) >= limit:
                # BUG FIX: the `limit` parameter was previously ignored.
                break
    return mentions
def get_random_urls(prefix='http://www.example-shop.com/product/', size=1000, start_index=None):
    """
    Create sequential url endpoints.

    Args:
        prefix (str): url prefix each numeric suffix is appended to.
        size (int): number of urls to be created.
        start_index (int): optional starting number (defaults to 1).

    Returns:
        list[str]: exactly ``size`` urls with consecutive numeric suffixes.
    """
    if not start_index:
        start_index = 1
    # BUG FIX: the previous range over-shot by one, yielding size + 1 urls.
    return [prefix + str(suffix) for suffix in range(start_index, start_index + size)]
def exists(*, uri: str) -> bool:
    """Check for the existance of a local/remote file.

    Usage::
        fs.exists("file:///...")
        fs.exists("s3://...")

    Args:
        uri: URI to the file to check.

    Returns:
        bool: True if the file exists, else False.
    """
    try:
        handle = open(uri, "rb")
    except IOError:
        return False
    handle.close()
    return True
def generate_serializer(option):
    """
    Return serializer by option object.

    If ``option.serializer_class`` is set, it is used directly; otherwise a
    serializer class is synthesized from ``option.base_serializer`` with the
    given model/fields/exclude, recursively generating serializers for every
    entry in ``option.nested_fields``.

    NOTE(review): assumes Django REST Framework serializer semantics
    (Meta class, get_fields) — confirm against the Option type.
    """
    if option.serializer_class:
        ResultClass = option.serializer_class
    else:
        class ResultClass(option.base_serializer):
            class Meta:
                model = option.model_class
                fields = option.fields
                exclude = option.exclude
            def get_fields(self):
                # Extend the base fields with recursively built nested
                # serializers, one per nested-field option.
                fields = super().get_fields()
                for key, value in option.nested_fields.items():
                    fields[key] = generate_serializer(value)
                return fields
    # Instantiate with the per-field options (e.g. many=True, read_only).
    return ResultClass(**option.field_options) | 928ae96e69031242ae89fc7b309c431e6b383fac | 48,672 |
import torch
def pairwise_distances_batch(x, y):
    """
    Batched squared Euclidean distances.

    Input: x dim (B, M, Q).
    Input: y dim (B, N, Q).
    Output: dist is a (B, M, N) batch of distance matrices, computed as
    ||x_i||^2 + ||y_j||^2 - 2 x_i . y_j.
    """
    batch, m, n = x.shape[0], x.shape[1], y.shape[1]
    sq_x = (x ** 2).sum(-1).view(batch, m, 1)
    sq_y = (y ** 2).sum(-1).view(batch, 1, n)
    cross = torch.bmm(x, torch.transpose(y, -1, -2))
    return sq_x + sq_y - 2.0 * cross
import random
def random_seq(length: int,
               charset: str = "ATCG") -> str:
    """Get a random sequence.

    :param length: Length of sequence.
    :param charset: Char population.
    :return: Random sequence
    """
    picked = random.choices(charset, k=length)
    return "".join(picked)
import re
def filter_ref_replace(data):
    """Convert Periscope-specific syntax into raw SQL code for further execution.

    Args:
        data (array of dict): original data; each row's 'SQL_CODE_RAW' is
            rewritten in place
    Returns:
        data (array of dict): data after removing all Periscope-specific SQL syntax
    """
    # for date aggregations, e.g. [created_at:week] -> created_at
    exp1 = r"\[([^][:]*)\:(sec|min|hour|day|date|week|month|year|month_of_year|aggregation)\]"
    # for [channel_grouping] reference in Periscope
    exp2 = r"\[channel_grouping\]"
    # for elements like where ... [] group by (order by):
    exp3 = r"(where((?!where).)*\[[^0-9].*?\].*?(?=(group\s*by|order\s*by|union|qualify|\)\s*\,|\)\s*order|\)\s*select)))|(where((?!where).)*\[[^0-9].*?\].*$)"
    # for elements like [X|Y] -> keep Y
    exp4 = r"\[([^][|]*)\|([^][|]*)\]"
    # Substitutions are applied in this fixed sequence; exp4 runs first so
    # its surviving text can still match the later patterns.
    for row in data:
        row['SQL_CODE_RAW'] = re.sub(exp4, r"\2", row['SQL_CODE_RAW'], flags = re.S|re.I)
        row['SQL_CODE_RAW'] = re.sub(exp1, r"\1", row['SQL_CODE_RAW'], flags = re.S|re.I)
        row['SQL_CODE_RAW'] = re.sub(exp2, r"null", row['SQL_CODE_RAW'], flags = re.S|re.I)
        row['SQL_CODE_RAW'] = re.sub(exp3, r"", row['SQL_CODE_RAW'], flags = re.S|re.I)
    return data | 847e193745753a7a82ff0fb3fbc9e36a27a952c7 | 48,675 |
import os
import logging
import json
def get_config():
    """
    Loads the ckanext-dataoverheid config.json file and returns its 'validation'
    key as a dictionary.

    The file is resolved relative to this module: ../../../config.json.

    :rtype: dict[str, Any]
    :return: The configuration dictionary containing the vocabularies and
             taxonomies

    :raises IOError: if config.json is missing.
    :raises KeyError: if the file has no 'validation' key.
    """
    config_file = os.path.join(os.path.dirname(__file__), '..', '..', '..',
                               'config.json')
    logging.info('config source %s', config_file)
    with open(config_file, 'r') as config_contents:
        config = json.load(config_contents)['validation']
    return config | 1191732497c97857622c8ccdfac1c2d892400341 | 48,677 |
from typing import Any
import os
import pickle
def restore_data(path: str) -> Any:
    """ Reads cached data from the given path

    Args:
        path: Path where cached data is stored

    Returns:
        The unpickled data (a tuple of images and image type or name),
        or an empty dict when the file does not exist.
    """
    data = dict()
    if os.path.isfile(path):
        # Context manager guarantees the handle is closed
        # (the previous version leaked the open file object).
        with open(path, 'rb') as file:
            data = pickle.load(file)
    return data
def bed_lines(request):
    """Return a iterable with bed lines"""
    header = "#Chromosome/scaffold name\tStart (bp)\tGene End (bp)\tHGNC symbol\tGene ID"
    records = [
        "13\t23708313\t23708703\tHMGA1P6\tENSG00000233440\n",
        "13\t23726725\t23726825\tRNY3P4\tENSG00000207157\n",
        "13\t23743974\t23744736\tLINC00362\tENSG00000229483\n",
    ]
    return [header] + records
from typing import Any
from typing import Literal
def _return_true(*_: Any) -> Literal[True]:
"""Always return true, discarding any arguments"""
return True | a72e74ecc942a36d472bae7e957a7a6fb0879403 | 48,682 |
import json
def json_response():
    """ fixture to return info body"""
    payload = {'name': 'Rosette API', 'versionChecked': True}
    return json.dumps(payload)
def strike_water_temp(init_grain_temp: float,
                      strike_water_vol: float,
                      grain_wt: float,
                      target_mash_temp,
                      equip_temp_loss: float = 1) -> float:
    """Computes strike water temperature based primarily on initial mash temp.

    Calculation here http://howtobrew.com/book/section-3/
    the-methods-of-mashing/calculations-for-boiling-water-additions

    Args:
        init_grain_temp: Initial grain temp in F
        strike_water_vol: Strike water volume in gal
        grain_wt: Grain weight in lbs
        target_mash_temp: Target mash temp in F
        equip_temp_loss: Equipment temperature loss in F (default is 1)

    Returns:
        Strike water temperature in F i.e. temp to heat water before adding grain
    """
    # Water-to-grist ratio in quarts per pound (4 quarts per gallon).
    ratio_qt_per_lb = strike_water_vol * 4.0 / grain_wt
    adjusted_target = target_mash_temp + equip_temp_loss
    return (0.2 / ratio_qt_per_lb) * (adjusted_target - init_grain_temp) + adjusted_target
def readInput(spectrum):
    """Read the input from textbox and convert them to float type
    """
    tokens = spectrum.strip().split("\t")
    return map(float, tokens)
return map(float, spectrum.strip().split("\t")) | f1968de2f2a6d31fdace5d2215df3a30fea505f9 | 48,685 |
from typing import Iterable
def check_for_func(sequence: Iterable) -> bool:
    """Detect whether any column name contains a parenthesis '(' — which
    indicates a SQL function that needs to be parsed out.

    Args:
        sequence (Iterable): list/tuple of column names

    Returns:
        bool: True if function found
    """
    # Stringify each element and look for the opening parenthesis.
    return any("(" in str(element) for element in sequence)
def ashape(a):
    """Get an array shape.

    Handles NNVM brain-damage around empty shapes.
    """
    # A 0-d array reports shape (); normalize it to (1,).
    return a.shape if a.shape != () else (1,)
def _ambisonics_channel_count_from_order(order, three_dim=True):
"""
Helper function that computes the number of channels for a given ambisonics order.
"""
return (order + 1)**2 if three_dim else (2 * order + 1) | c4393b29ca4adef07fefb706b73ea0b33d1f2049 | 48,690 |
def grouped(iterable, n):
    """
    s -> (s0,...,sn-1), (sn,...,s2n-1), (s2n,...,s3n-1), ...

    Chunk an iterable into non-overlapping n-tuples; a trailing
    remainder shorter than *n* is dropped (standard ``zip`` semantics).
    source: "https://stackoverflow.com/questions/5389507/iterating-over-every-two-elements-in-a-list"

    :param iterable: the iterable object to be grouped
    :param n: how many objects per group
    :return: an iterator of n-tuples

    Example:
        for i, k in grouped([1, 2, 3, 4, 5, 6], 2):
            print(i + k)   # prints 3, 7, 11
    """
    # One shared iterator repeated n times: zip pulls n consecutive
    # items for each output tuple.
    chunker = iter(iterable)
    return zip(*(chunker,) * n)
import requests
def request_waterinfo_timeseries(station_id, period="P3D"):
    """Request time series data from VMM Waterinfo.

    Parameters
    ----------
    station_id : string
        ts_id as defined by Waterinfo/Kisters/dbase
    period : string
        timeframe to request data for, e.g. 1 day: P1D or 10 days: P10D;
        defaults to a 3 day period

    Returns
    -------
    dict with the data and metadata fields from the request
    """
    returnfields = ["Timestamp", "Value", "Quality Code"]
    query = {
        "datasource": 0,
        "type": "queryServices",
        "service": "kisters",
        "request": "getTimeseriesValues",
        "ts_id": station_id,
        "format": "json",
        "period": period,
        "metadata": "true",
        "returnfields": ",".join(returnfields),
    }
    response = requests.get(
        "http://download.waterinfo.be/tsmdownload/KiWIS/KiWIS", params=query)
    # The service wraps the payload in a one-element list.
    return response.json()[0]
def complete(arg):
    """Mask a value by replacing every character with '*'.

    Args:
        arg (str): value to mask; converted with str() first.

    Returns:
        str: a run of '*' as long as str(arg).
    """
    masked_length = len(str(arg))
    return "*" * masked_length
import uuid
def _parse_legacy_uuid(doc):
"""Decode a JSON legacy $uuid to Python UUID."""
if len(doc) != 1:
raise TypeError('Bad $uuid, extra field(s): %s' % (doc,))
return uuid.UUID(doc["$uuid"]) | 6a238a637f98a5f0a7c79374f09bb72fc7b17ebe | 48,696 |
import numpy
def _get_id_mass(mass_db):
"""
Parse mass_db file (f_string) into two numpy arrays
"""
ids = []; mass = []
f = open(mass_db); count = 0
while True:
line = f.readline()
if not line:
break
words = line.split()
k = words[0]; m = float(words[1])
ids.append(k); mass.append(m)
count = count +1
ids = numpy.array(ids).reshape((count,1))
mass = numpy.array(mass).reshape((count,1))
return ids,mass | 2fd8f34efc2a7e745ab22d46812fe6b7ae9f5af8 | 48,697 |
def concatenate(*a, **kw):
    """
    Concatenate all positional arguments into a single string.

    Args:
        *a: the string pieces to join.
        **kw: supports ``newline`` (bool, default False); when True a
            newline is appended after every piece, including the last.

    Returns:
        str: the joined result ('' when no pieces are given).
    """
    if kw.get('newline', False):
        # '\n' after each item (also the final one), matching the
        # original loop, but in linear time via join instead of
        # quadratic repeated '+='.
        return ''.join(item + '\n' for item in a)
    return ''.join(a)
def _get_datasets(dataref):
"""
Generate a list containing the names of available sample datasets.
Parameters
----------
dataref: str
Location of dataset reference.
Returns
-------
list
Names of available sample datasets.
"""
return(list(dataref.keys())) | 9363983f6137f700b065881f6edd809dbeb657e2 | 48,699 |
def add_relative_src_offset(metadata):
    """Add source-offset columns relative to the lens center.

    Parameters
    ----------
    metadata : pd.DataFrame
        the metadata generated by Baobab

    Returns
    -------
    pd.DataFrame
        the same metadata, augmented in place with relative source
        offset columns
    """
    offset_columns = {
        'src_light_pos_offset_x': ('src_light_center_x', 'lens_mass_center_x'),
        'src_light_pos_offset_y': ('src_light_center_y', 'lens_mass_center_y'),
    }
    for new_col, (src_col, lens_col) in offset_columns.items():
        metadata[new_col] = metadata[src_col] - metadata[lens_col]
    return metadata
def sponsor_image_url(sponsor, name):
    """Return the URL of the sponsor image called *name*, or '' if absent.

    Only the first match is used, which avoids worrying about multiple
    files sharing the same name.
    """
    # Single filtered lookup instead of the original exists()+first()
    # pair, which evaluated the same filter (and hit the backing store)
    # twice.
    match = sponsor.files.filter(name=name).first()
    return match.item.url if match is not None else ''
import uuid
def random_name(module_name):
    """Generate a node name: *module_name* plus a short random suffix."""
    # uuid4().hex[:8] equals the first dash-separated group of str(uuid4()).
    suffix = uuid.uuid4().hex[:8]
    return f"{module_name}_{suffix}"
def heat_exchanger_utilities_from_units(units):
    """Return a list of heat utilities from all heat exchangers,
    including the condensers and boilers of distillation columns and
    flash vessel heat exchangers."""
    all_utilities = []
    for unit in units:
        all_utilities.extend(unit.heat_utilities)
    # Keep only utilities attached to a heat exchanger.
    return [utility for utility in all_utilities if utility.heat_exchanger]
def create(*, ModelClass, call_full_clean=True, save_kwargs=None, **values):
    """
    Create, optionally validate, and insert a new model instance.
    """
    instance = ModelClass(**values)
    if call_full_clean:
        # Validate before insert so invalid instances never reach the
        # database; uniqueness is left to the database constraint.
        instance.full_clean(validate_unique=False)
    extra_save_args = {} if save_kwargs is None else save_kwargs
    instance.save(force_insert=True, **extra_save_args)
    return instance
import os
def _get_abspath_relative_to_file(filename, ref_filename):
"""
Returns the absolute path of a given filename relative to a reference
filename (ref_filename).
"""
# we only handle files
assert os.path.isfile(ref_filename)
old_cwd = os.getcwd() # save CWD
os.chdir(os.path.dirname(ref_filename)) # we're in context now
abspath = os.path.abspath(filename) # bing0!
os.chdir(old_cwd) # restore CWD
return abspath | caa8323a7c58c257e49f11767f738a11537336e8 | 48,706 |
import os
def parse_filename(filepath):
    """Return the base filename of *filepath* without its extension."""
    base = os.path.basename(filepath)
    stem, _ext = os.path.splitext(base)
    return stem
def indicator(condition):
    """
    Implementation of the indicator function 1_{condition}(x).

    :param condition: a boolean statement on which to evaluate inputs.
    :type condition: bool
    :return: 1.0 if the condition holds, 0.0 otherwise.
    :rtype: float
    """
    # float(True) == 1.0 and float(False) == 0.0.
    as_number = float(condition)
    return as_number
def general_checks(all_methods_df):
    """
    Summary checks for an alignment-results DataFrame.

    Parameters
    ----------
    all_methods_df : Pandas.DataFrame

    Returns
    -------
    tuple
        - counts: per-method non-null entry counts for each column
        - nans:   per-method counts restricted to rows where "SI" is NaN
        - times:  per-method total time in minutes, rounded to 2 decimals
    """
    by_method = all_methods_df.groupby("method")
    counts = by_method.count()
    si_missing = all_methods_df["SI"].isnull()
    nans = all_methods_df.loc[si_missing].groupby("method").count()
    # 'time' is summed in the source unit then converted to minutes.
    times = round(by_method["time"].sum() / 60, 2)
    return counts, nans, times
def find_child(parent, child_tag, id=None):
    """
    Find an element with *child_tag* in *parent* and return ``(child, index)``
    or ``(None, None)`` if not found.  If *id* is provided it is matched
    either exactly or by its last comma-separated component; otherwise
    the first child with the tag is returned.
    """
    for index, node in enumerate(list(parent)):
        if node.tag != child_tag:
            continue
        if id is None:
            return (node, index)
        node_id = node.find(f'{child_tag}ID').text
        if node_id == id or node_id.split(',')[-1] == id.split(',')[-1]:
            return (node, index)
    return (None, None)
def create_reduce_job(results, wuid=None, job_num=None):
    """Test function for reduce job json creation."""
    if wuid is None or job_num is None:
        raise RuntimeError("Need to specify workunit id and job num.")
    # Collect the payload ids once; they feed both the command line and
    # the job parameters.
    payload_ids = " ".join(result['payload_id'] for result in results)
    return {
        "job_type": "job:parpython_reduce_job",
        "job_queue": "factotum-job_worker-large",
        "payload": {
            # sciflo tracking info
            "_sciflo_wuid": wuid,
            "_sciflo_job_num": job_num,
            "_command": "/usr/bin/echo {}".format(payload_ids),
            # job params
            "results": payload_ids,
        },
    }
def calcular_matrices_acoplamiento_transpuestas(matrices_acoplamiento):
    """
    Compute the transposes of the coupling matrices.

    Input parameters:
        * matrices_acoplamiento: DataFrame containing all of the
            coupling matrices; indexed by a two-level MultiIndex whose
            first level (M1, M2, ..., Mn) identifies each matrix.

    Output:
        * matrices_acoplamiento_transpuestas: DataFrame of the same
            shape with each per-matrix block transposed.
    """
    # Copy the input both to reserve the memory and to preserve the MultiIndex.
    matrices_acoplamiento_transpuestas = matrices_acoplamiento.copy()
    # Build the first-level index M1, M2, M3, ..., Mn (one entry per matrix).
    index = matrices_acoplamiento.index.droplevel(level=1).drop_duplicates()
    for M in index:
        # Fill in the transposed block for matrix M; .to_numpy() drops the
        # labels so the assignment is purely positional.
        matrices_acoplamiento_transpuestas.loc[M] = matrices_acoplamiento.loc[M].T.to_numpy()
    return matrices_acoplamiento_transpuestas
def PCopyTables (inUV, outUV, exclude, include, err):
    """Copy tables from one UV data object to another.

    inUV    = Python UV object
    outUV   = Output Python UV object, must be defined
    exclude = list of table types to exclude (list of strings);
              has priority over include
    include = list of table types to include (list of strings)
    err     = Python Obit Error/message stack
    """
    ################################################################
    # AIPS-backed UV data does not support this operation.
    if inUV.__dict__.get('myClass') == 'AIPSUVData':
        raise TypeError("Function unavailable for "+inUV.myClass)
    return inUV.CopyTables (outUV, exclude, include, err)
    # end PCopyTables
import traceback
def get_wage_replacement_details(obj):
    """Return the number of wage-replacement detail records on *obj*.

    (Original docstring: "报销与销账,明细" — reimbursement and write-off
    details.)

    :param obj: object exposing a ``WageReplacementDetails`` relation
        with a ``count()`` method.
    :return: the record count, or 0 if the lookup fails.
    """
    try:
        return obj.WageReplacementDetails.count()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; the best-effort fallback
        # to 0 (with a printed traceback) is preserved.
        traceback.print_exc()
        return 0
def traverse_list(n):
    """
    Walk a linked list to its end.

    Returns the last node and the number of hops taken to reach it
    (0 for a single-node list).
    """
    steps = 0
    node = n
    while node.next is not None:
        node = node.next
        steps += 1
    return node, steps
import torch
import sys
def compute_train_graph_metrics(params, trained_model, mr_dataloader_valid, mr_dataset_valid, metrics_logger, epoch, minibatch_number):
    """
    Compute loss and precision on a held out validation set using the train graph on a general MR set.

    Args:
        params: run configuration; must provide ``validation_batches``,
            ``batch_size_validation``, ``local_rank`` and ``node_rank``.
        trained_model: model under evaluation; called on a batch and
            queried via ``trained_model.loss(output)``.
        mr_dataloader_valid: iterable of validation batches (may yield None).
        mr_dataset_valid: the underlying dataset (used only for its length).
        metrics_logger: accumulator for validation statistics; returned.
        epoch: current epoch, passed through to the logger.
        minibatch_number: current minibatch, passed through to the logger.

    Returns:
        The same ``metrics_logger``, updated with the validation stats.
    """
    total_items = 0
    valid_steps = 0.0
    # Cap the number of items to validate by the dataset size.
    total_items_to_validate = params.validation_batches * params.batch_size_validation
    max_data_size = len(mr_dataset_valid)
    if max_data_size < total_items_to_validate:
        total_items_to_validate = max_data_size
    # Only rank (0, 0) logs, to avoid duplicate output in distributed runs.
    if params.local_rank == 0 and params.node_rank == 0:
        print('SystemLog: ---------------------------------------------------------')
        print('SystemLog: Computing Validation scores on %d items' % total_items_to_validate)
    for _, sample_batched in enumerate(mr_dataloader_valid):
        if sample_batched is not None:
            # Skip degenerate batches with at most one response tuple.
            if len(sample_batched['rsp_batch_tuple'][0]) > 1:
                with torch.no_grad():  # inference only; no gradients needed
                    output = trained_model(sample_batched) # need to update for matching_model and cvae
                    loss_dict = trained_model.loss(output)
                    total_items += len(sample_batched['msg_batch_tuple'][0]) # corresponds to the message
                    valid_steps += 1
                    metrics_logger.update_validation_run_stats(len(sample_batched['rsp_batch_tuple'][0]), loss_dict, epoch, minibatch_number)
                # Stop once enough items have been validated.
                if total_items >= total_items_to_validate:
                    break
    sys.stdout.flush()
    metrics_logger.set_validation_running_stats(params, epoch, minibatch_number)
    return metrics_logger
def freeze(target):
    """
    freeze and thaw are used together to create a "frozen" target, which
    can be connected to pipelines multiple times.  Feeding values into a
    frozen target does not return anything; only when the target is
    later "thawed" is a result produced.

    Typical use: freeze an aggregate (e.g. a min_() or mean() target),
    pipe several sources into it one after another, then call thaw() on
    the frozen target to obtain the combined result.
    """
    return target.freeze()
import torch
def get_top_spans(_mention_probs,
                  k=10, filter_spans=True):
    """
    Select up to *k* highest-scoring candidate mention spans.

    :param _mention_probs: max_passage_len x max_passage_len tensor of
        span scores; entry [i, j] scores the span (i, j) — presumably
        start/end token indices (confirm against the caller).
    :param k: top k spans to keep
    :param filter_spans: prevent nested mention spans (a span that
        contains or is contained by an already-selected span is skipped)
    :return: tensor of selected [start, end, score] rows, sorted by
        descending score
    """
    # Coordinates of all non-zero scores, and the scores themselves.
    spans = _mention_probs.nonzero(as_tuple=False)
    scores = _mention_probs[_mention_probs.nonzero(as_tuple=True)]
    # Rows of [start, end, score], sorted by score descending.
    spans_scores = torch.cat((spans, scores.unsqueeze(-1)), -1)
    sorted_spans_scores = spans_scores[spans_scores[:, -1].argsort(0, True)]
    selected_spans = []
    for start, end, s in sorted_spans_scores:
        start = start.long()
        end = end.long()
        # (0, 0) acts as a sentinel terminating the candidate list.
        if start.item() == 0 and end.item() == 0:
            break
        # Skip spans nested with (containing or contained in) a
        # previously selected span.
        if filter_spans and any(start.item() <= selected_start <=
                                selected_end <= end.item()
                                or selected_start <= start.item() <= end.item() <= selected_end
                                for selected_start, selected_end, _ in
                                selected_spans):
            continue
        selected_spans.append([start.item(), end.item(), s.item()])
        if len(selected_spans) == k:
            break
    selected_spans = torch.tensor(selected_spans)
    return selected_spans
def compare_lists(la, lb):
    """Compare two lists element-wise.

    Returns 0 when the lists have equal length and equal elements,
    1 otherwise (C-style comparison result).
    """
    if len(la) != len(lb):
        return 1
    for sa, sb in zip(la, lb):
        if sa != sb:
            return 1
    return 0
def str_len(str_in):
    """
    Return the display width of a string as a float.

    The formula counts each extra UTF-8 byte pair as one extra column,
    so 3-byte characters (e.g. CJK) count as two columns and ASCII as
    one — matching typical fixed-width terminal rendering.

    :return: the width, or None if *str_in* cannot be measured/encoded
        (e.g. it is not a string).
    """
    try:
        char_count = len(str_in)
        byte_count = len(str_in.encode('utf-8'))
    except Exception:
        # Narrowed from a bare ``except:``; non-string input still
        # yields None as before.  (The original also had an unreachable
        # trailing ``return None`` after the try/except — removed.)
        return None
    return (byte_count - char_count) / 2 + char_count
def _deserialize_input_argument(input_data, param_type, param_name):
    """
    Recursively deserialize / convert *input_data* to the exact type
    described by the schema.

    :param input_data: raw input value (dict, list/tuple, or scalar)
    :param param_type: subclass of AbstractParameterType describing the
        expected type; provides sample_data_type and, for containers,
        sample_data_type_map / sample_data_type_list
    :param param_name: name of the parameter, used in error messages
    :return: the converted value (dicts are mutated in place)
    :raises ValueError: when the input's container type does not match
        the schema
    :raises Exception: when a required dict key is missing
    """
    sample_data_type = param_type.sample_data_type
    if sample_data_type is dict:
        if not isinstance(input_data, dict):
            raise ValueError("Invalid input data type to parse. Expected: {0} but got {1}".format(
                sample_data_type, type(input_data)))
        sample_data_type_map = param_type.sample_data_type_map
        # parameters other than subclass of AbstractParameterType will not be handled
        for k, v in sample_data_type_map.items():
            # Every schema key must be present in the input.
            if k not in input_data.keys():
                raise Exception('Invalid input. Expected: key "{0}" in "{1}"'.format(k, param_name))
            input_data[k] = _deserialize_input_argument(input_data[k], v, k)
    elif sample_data_type in (list, tuple):
        sample_data_type_list = param_type.sample_data_type_list
        if not isinstance(input_data, list) and not isinstance(input_data, tuple):
            raise ValueError("Invalid input data type to parse. Expected: {0} but got {1}".format(
                sample_data_type, type(input_data)))
        # OpenAPI 2.x does not support mixed type in array
        if len(sample_data_type_list):
            # All elements share the first (only) declared element type.
            input_data = [_deserialize_input_argument(x, sample_data_type_list[0], param_name) for x in input_data]
    else:
        # non-nested input will be deserialized
        if not isinstance(input_data, sample_data_type):
            input_data = param_type.deserialize_input(input_data)
    return input_data
import torch
def omega_norm(Z, Omega):
    """
    Calculate the per-sample l2 norm of Omega applied to Z.

    :param Z: delta perturbation, shape (batch, ...); flattened per sample
    :param Omega: transformation matrix
    :return: tensor of shape (batch,) with norm(Omega @ z) for each sample z
    """
    flat = Z.view(Z.shape[0], -1)
    transformed = torch.matmul(Omega, flat.T)
    return transformed.T.norm(dim=1)
def replace_nones_in_dict(target, replace_value):
    """Recursively replace None values in a dictionary with *replace_value*.

    Lists are rebuilt with None elements replaced and dict elements
    recursed into; nested dicts are handled in place.  (Lists nested
    directly inside lists are not descended into — matching the
    original behavior.)  Returns the mutated *target*.
    """
    for key, value in target.items():
        if value is None:
            target[key] = replace_value
        elif type(value) is list:
            rebuilt = []
            for element in value:
                if type(element) is dict:
                    rebuilt.append(replace_nones_in_dict(element, replace_value))
                elif element is None:
                    rebuilt.append(replace_value)
                else:
                    rebuilt.append(element)
            target[key] = rebuilt
        elif type(value) is dict:
            replace_nones_in_dict(value, replace_value)
    return target
import copy
def create_unmodified_census(census):
    """
    Return a deep copy of a cell census with any 'modifications' entry
    removed.  The output is suitable for use as input to
    `simulate_water_quality`; the input census is left untouched.
    """
    unmodified = copy.deepcopy(census)
    unmodified.pop('modifications', None)
    return unmodified
def pairwise_difference(array):
    """
    Return the matrix of differences between every pair of elements:
    out[i, j] = array[j] - array[i] (via broadcasting).
    """
    column = array[:, None]
    return array - column
def greet(name):
    """Return a string to greet someone.

    Arguments:
        name -- name of the person to greet
    """
    # Using a docstring here is good practice.
    return "".join(["Hello, ", name, "."])
def percentage(x, pos):
    """
    Format a plot tick value as a whole-number percentage.

    *pos* is part of the tick-formatter callback signature but unused.
    """
    return f"{x:.0f}%"
import logging
def getLogger(cfg):
    """Get a logger configured from a ConfigParser.

    :param cfg: Application configuration; must provide option
        'loggerName' in section 'log'.
    :type cfg: ConfigParser
    :returns: Logger for this application, with level set to INFO.
    """
    name = cfg.get('log', 'loggerName')
    app_logger = logging.getLogger(name)
    app_logger.setLevel(logging.INFO)
    return app_logger
import numpy
def load_signal(filename):
    """Load a float32-valued signal from a text file, one value per line."""
    return numpy.fromfile(filename, dtype='f', sep='\n')
def _resize_row(array, new_len):
"""Alter the size of a list to match a specified length
If the list is too long, trim it. If it is too short, pad it with Nones
Args:
array (list): The data set to pad or trim
new_len int): The desired length for the data set
Returns:
list: A copy of the input `array` that has been extended or trimmed
"""
current_len = len(array)
if current_len > new_len:
return array[:new_len]
else:
# pad the row with Nones
padding = [None] * (new_len - current_len)
return array + padding | 810be7f94234e428135598dc32eca99dd0d3fcca | 48,738 |
def pdact(a: float) -> float:
    """Partial derivative of the activation function, expressed in
    terms of the activation value *a* (the a*(1-a) form — presumably a
    logistic/sigmoid activation; confirm against the caller)."""
    complement = 1 - a
    return a * complement
def deep_encode(e, encoding='ascii'):
    """
    Recursively encode all strings in *e* using *encoding* (default
    ascii).  Dict values and list elements are descended into; dict
    keys and non-string values are left unchanged.
    """
    if isinstance(e, dict):
        return {key: deep_encode(value, encoding) for key, value in e.items()}
    if isinstance(e, list):
        return [deep_encode(item, encoding) for item in e]
    if isinstance(e, str):
        return e.encode(encoding)
    return e
import subprocess
import time
def check_for_zoom():
    """
    Check whether the user is on an active Zoom call by looking for the
    CptHost process (Zoom's in-meeting process).

    This is done to ensure we don't interrupt an active call with an
    update prompt.  Polls every 30 seconds for up to ~5 minutes in case
    a call is about to end and we can catch the user right after.

    Returns:
        True  - still in a call after all retries (caller should not prompt)
        False - no call in progress (safe to proceed)
    """
    for i in range(10):
        # pgrep writes the matching PID to stdout; empty stdout means
        # no CptHost process is running.
        zoomprocess = subprocess.Popen('pgrep CptHost', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        zoompid, err = zoomprocess.communicate()
        # Final attempt and still in a call: give up and report True.
        if i == 9 and zoompid:
            return True
        if not zoompid:
            return False
        else:
            print("User is in a Zoom call, waiting...")
            time.sleep(30)
import os
import pickle
def read_dict(name, PATH, silent=False):
    """
    read_dict: read a dict() from a pickle file.

    input:
        name  : file name (without the '.pickle' extension)
        PATH  : directory path (must include the trailing separator)
        silent: if False, print() the path that was read
    output:
        the unpickled data, or None when the file does not exist
    """
    path = PATH + name + '.pickle'
    if not os.path.isfile(path):
        print('error:', name, PATH, silent)
        return None
    with open(path, 'rb') as fp:
        data = pickle.load(fp)
    if not silent:
        print('read:', path)
    return data
import difflib
def make_diff(content1, content2, mode="Differ"):
    """
    Build a diff of two texts (or line lists) using difflib.

    *mode* selects the backend: "Differ" (default, line-by-line
    comparison), "HtmlDiff" (HTML table, tabsize 4) or "unified"
    (unified diff format).  Raises AssertionError for an unknown mode.
    """
    def as_lines(content):
        if isinstance(content, (list, tuple)):
            return content
        return content.splitlines()

    lines1 = as_lines(content1)
    lines2 = as_lines(content2)
    if mode == "Differ":
        return difflib.Differ().compare(lines1, lines2)
    if mode == "HtmlDiff":
        return difflib.HtmlDiff(tabsize=4).make_table(lines1, lines2)
    if mode == "unified":
        return difflib.unified_diff(lines1, lines2)
    raise AssertionError("diff mode %r unknown." % mode)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.