| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def add_X_to_both_sides(latex_dict: dict) -> str:
"""
https://docs.sympy.org/latest/gotchas.html#double-equals-signs
https://stackoverflow.com/questions/37112738/sympy-comparing-expressions
Given a = b
add c to both sides
get a + c = b + c
>>> latex_dict = {}
>>> latex_dict['input'] = [{'LHS': parse_latex('a'), 'RHS': parse_latex('b')}]
>>> latex_dict['feed'] = [parse_latex('c')]
>>> latex_dict['output'] = [{'LHS': parse_latex('a + c'), 'RHS': parse_latex('b + c')}]
>>> add_X_to_both_sides(latex_dict)
'valid'
"""
trace_id = str(random.randint(1000000, 9999999))
logger.info("[trace start " + trace_id + "]")
d1 = sympy.simplify(
sympy.Add(latex_dict["input"][0]["LHS"], latex_dict["feed"][0])
- latex_dict["output"][0]["LHS"]
)
d2 = sympy.simplify(
sympy.Add(latex_dict["input"][0]["RHS"], latex_dict["feed"][0])
- latex_dict["output"][0]["RHS"]
)
if (d1 == 0) and (d2 == 0):
logger.info("[trace end " + trace_id + "]")
return "valid"
else:
logger.info("[trace end " + trace_id + "]")
return "LHS diff is " + str(d1) + "\n" + "RHS diff is " + str(d2)
| 5,337,500
|
def init_var_dict(init_args, var_list):
"""Init var with different methods.
"""
var_map = {}
_, max_val = init_args
for i, _ in enumerate(var_list):
key, shape, method = var_list[i]
if key not in var_map.keys():
if method in ['random', 'uniform']:
var_map[key] = Parameter(initializer(Uniform(max_val), shape, ms_type), name=key)
elif method == "one":
var_map[key] = Parameter(initializer("ones", shape, ms_type), name=key)
elif method == "zero":
var_map[key] = Parameter(initializer("zeros", shape, ms_type), name=key)
elif method == 'normal':
var_map[key] = Parameter(Tensor(np.random.normal(loc=0.0, scale=0.01, size=shape).
astype(dtype=np_type)), name=key)
return var_map
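A minimal usage sketch, assuming this sits in the module that already defines ms_type/np_type (e.g. mindspore.float32 / numpy.float32) and imports Parameter, Tensor, initializer and Uniform from MindSpore; the variable names below are made up:
init_args = (-0.01, 0.01)                    # only max_val (0.01) is used, for the Uniform init
var_list = [
    ("embedding", (10000, 16), "uniform"),   # (name, shape, init method)
    ("bias", (16,), "zero"),
    ("weight", (16, 1), "normal"),
]
var_map = init_var_dict(init_args, var_list)
print({k: v.shape for k, v in var_map.items()})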
| 5,337,501
|
def get_member_name(refobject):
""" return the best readable name
"""
try:
member_name = refobject.__name__
except AttributeError:
member_name = type(refobject).__name__
except Exception as error:
logger.debug('get_member_name :'+str(error))
member_name = str(refobject)
return member_name
| 5,337,502
|
def test_get_timeseries_cum():
"""Test if get_timeseries_cum returns the right timeseries list
Given an in_list"""
in_list = [[1, 245], [5, 375], [10, 411]]
duration = 13
x = an.get_timeseries_cum(in_list, duration, False)
answer = [0, 245, 245, 245, 245, 245 + 375, 245 + 375,
245 + 375, 245 + 375, 245 + 375, 245 + 375 + 411,
245 + 375 + 411, 245 + 375 + 411]
assert x == answer
| 5,337,503
|
def _check_path(path=None):
"""
Returns the absolute path corresponding to ``path`` and creates folders.
Parameters
----------
path : None, str or list(str)
Absolute path or subfolder hierarchy that will be created and returned.
If None, os.getcwd() is used.
"""
if path is None:
return os.getcwd()
if isinstance(path, str):
if not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
if not os.path.isdir(path):
os.mkdir(path)
return path
elif isinstance(path, list):
abs_path = ''
for partial_path in path:
abs_path = _check_path(os.path.join(abs_path, partial_path))
return abs_path
else:
message = 'Variable ``path`` is neither a string nor a list of strings.'
warnings.warn(message, UserWarning)
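A brief usage sketch (the folder names are hypothetical):
out_dir = _check_path(['results', 'run_01'])   # creates ./results/run_01 if needed
print(out_dir)                                 # absolute path ending in results/run_01
print(_check_path(None))                       # just the current working directory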
| 5,337,504
|
def _eval_bernstein_1d(x, fvals, method="binom"):
"""Evaluate 1-dimensional bernstein polynomial given grid of values.
experimental, comparing methods
Parameters
----------
x : array_like
Values at which to evaluate the Bernstein polynomial.
fvals : ndarray
Grid values of coefficients for Bernstein polynomial basis in the
weighted sum.
method: "binom", "beta" or "bpoly"
Method to construct Bernstein polynomial basis, used for comparison
of parameterizations.
- "binom" uses pmf of Binomial distribution
- "beta" uses pdf of Beta distribution
- "bpoly" uses one interval in scipy.interpolate.BPoly
Returns
-------
Bernstein polynomial at evaluation points, weighted sum of Bernstein
polynomial basis.
"""
k_terms = fvals.shape[-1]
xx = np.asarray(x)
k = np.arange(k_terms).astype(float)
n = k_terms - 1.
if method.lower() == "binom":
# Divide by 0 RuntimeWarning here
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
poly_base = stats.binom.pmf(k, n, xx[..., None])
bp_values = (fvals * poly_base).sum(-1)
elif method.lower() == "bpoly":
bpb = interpolate.BPoly(fvals[:, None], [0., 1])
bp_values = bpb(x)
elif method.lower() == "beta":
# Divide by 0 RuntimeWarning here
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
poly_base = stats.beta.pdf(xx[..., None], k + 1, n - k + 1) / (n + 1)
bp_values = (fvals * poly_base).sum(-1)
else:
raise ValueError("method not recogized")
return bp_values
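A brief sketch evaluating one coefficient vector with all three bases, assuming numpy, scipy.stats and scipy.interpolate are imported as the function body requires:
import numpy as np

fvals = np.array([0.0, 0.5, 1.0])      # coefficients of a degree-2 Bernstein expansion
x = np.linspace(0.0, 1.0, 5)
for m in ("binom", "beta", "bpoly"):
    print(m, _eval_bernstein_1d(x, fvals, method=m))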
| 5,337,505
|
def exp_bar(self, user, size=20):
"""\
Returns a string visualizing the current exp of the user as a bar.
"""
bar_length = user.exp * size // exp_next_lvl(user.lvl)
space_length = size - bar_length
bar = '#' * bar_length + '.' * space_length
return '[' + bar + ']'
| 5,337,506
|
def test_get_bucket_vs_certs():
"""Integration test for bucket naming issues."""
import boto.s3.connection
aws_access_key = os.getenv('AWS_ACCESS_KEY_ID')
# Add dots to try to trip up TLS certificate validation.
bucket_name = 'wal-e.test.dots.' + aws_access_key.lower()
with pytest.raises(boto.https_connection.InvalidCertificateException):
with FreshBucket(bucket_name, calling_format=SubdomainCallingFormat()):
pass
| 5,337,507
|
def linear(input_, output_size, scope=None, stddev=0.02, with_w=False):
"""Define lienar activation function used for fc layer.
Args:
input_: An input tensor for activation function.
output_dim: A output tensor size after passing through linearity.
scope: variable scope, if None, used independently.
stddev : user defined standard deviation for initialization.
with_w: if the weight is also needed as output.
Returns: logits of weights and biases.
"""
shape = input_.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
bias = tf.get_variable("bias", [output_size], initializer=tf.constant_initializer(0.0))
if with_w:
return tf.matmul(input_, matrix) + bias, matrix, bias
else:
return tf.matmul(input_, matrix) + bias
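A minimal TF1-style usage sketch (TensorFlow 1.x is assumed, as implied by tf.variable_scope / tf.get_variable; the shapes are made up):
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
logits = linear(x, 10, scope="classifier")
logits_w, w, b = linear(x, 10, scope="classifier_w", with_w=True)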
| 5,337,508
|
def generate_patches(patch_cache_location,
axis,
image_input_channels,
brain_mask_channel,
classification_mask,
patch_size,
k_fold_count,
patients=None,
excluded_patients=None):
"""Generate new patch sets for testing and training for given input channels"""
if excluded_patients is not None:
excluded_patients = np.array(excluded_patients)
patient_nrs = None
if patients: # patient override
print('Patient override:\n')
print(patients)
patient_nrs = np.array(patients)
else: # loop over patient nrs in input channel dirs
for input_channel in image_input_channels:
# get all dirs in given input channel path
input_channel_path = Path(input_channel['path'])
dirs = [f for f in input_channel_path.iterdir() if f.is_dir()]
# get all patient ids listed in input channel
new_patients = []
for pat_dir in dirs:
pat_id = basename(normpath(pat_dir))
new_patients.append(pat_id)
# calculate intersect in arrays so final patient nrs list only contains patients
# which are in all of the given input channels
if patient_nrs is not None:
patient_nrs = np.intersect1d(patient_nrs, np.array(new_patients))
else:
patient_nrs = np.array(new_patients)
patient_nrs.sort()
patient_nrs = np.array(patient_nrs)
if excluded_patients is not None:
excluded_indices = np.isin(patient_nrs, excluded_patients)
patient_nrs = np.delete(patient_nrs, excluded_indices.nonzero(), 0)
patient_shuffle = np.arange(patient_nrs.shape[0])
np_random_shuffle(patient_shuffle)
patient_nrs = patient_nrs[patient_shuffle]
del patient_shuffle
json_image_channels = json.dumps(image_input_channels, sort_keys=True).encode('utf-8')
input_channel_hash = str(hashlib.md5(json_image_channels).hexdigest())
pat_size_hashed_cache_path = join(patch_cache_location, input_channel_hash)
if not isdir(pat_size_hashed_cache_path):
makedirs(pat_size_hashed_cache_path)
with open(join(patch_cache_location,
input_channel_hash,
'_image_channels.json'), 'w') as o_file:
json.dump(image_input_channels, o_file)
fold_data_sets = []
fold_size = patient_nrs.shape[0] / k_fold_count
start = 0
for fold in range(k_fold_count):
fold_patients = patient_nrs[start:start+math.ceil(fold_size)]
start += math.ceil(fold_size)
if fold < (k_fold_count - 1):
fold_size = (patient_nrs.shape[0] - start) / (k_fold_count - (fold + 1))
fold_patches, fold_labels = patients_patches(fold_patients,
pat_size_hashed_cache_path,
image_input_channels,
brain_mask_channel,
classification_mask,
patch_size,
axis)
perm0 = np.arange(fold_patches.shape[0])
np_random_shuffle(perm0)
fold_patches = fold_patches[perm0]
fold_labels = fold_labels[perm0]
fold_data_set = DataWrapper(fold_patches,
fold_labels,
reshape=False,
patients=fold_patients)
fold_data_sets.append(fold_data_set)
print('Fetched all patient data')
for fold in range(k_fold_count):
print('\nFold {} Patches'.format(fold))
print(fold_data_sets[fold].images.shape)
print(fold_data_sets[fold].labels.shape)
return fold_data_sets
| 5,337,509
|
def _parse_locals_to_data_packet(locals_dict):
"""
Takes the locals() dict of a function (i.e. its inputs), maps keys through param_map, and drops None values.
TODO retire this function, it's pretty hacky
:param locals_dict:
:return: parsed locals object
"""
if 'self' in locals_dict:
locals_dict.pop('self')
if 'kwargs' in locals_dict:
kwargs = locals_dict.pop('kwargs')
locals_dict.update(kwargs)
return {(param_map[k] if k in param_map else k): v for k, v in locals_dict.items() if v is not None}
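A quick sketch, assuming a module-level param_map such as the hypothetical one below:
param_map = {'user_id': 'userId'}

def create_user(user_id, email=None, **kwargs):
    return _parse_locals_to_data_packet(locals())

print(create_user(42, plan='free'))   # {'userId': 42, 'plan': 'free'}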
| 5,337,510
|
def query_user_joins(user_group: Union[User, Sequence[User], None]) \
-> List[JoinRecord]:
"""
:param user_group: User or user group as an iterable of users.
:return: JoinRecords for the given user(s), ordered by timestamp; all records if no users are given.
"""
# Input validation
user_list = [user_group] if isinstance(user_group, User) else user_group
# Query
query = session.query(JoinRecord)
if user_list:
# noinspection PyUnresolvedReferences
query = query.filter(JoinRecord.user_id.in_(u.user_id for u in user_list))
results = query.order_by(JoinRecord.timestamp).all()
logger.info("query_user_joins: "
"Found {:d} records for user group: {!r}".format(len(results), user_group))
return results
| 5,337,511
|
def plotexpwake(Re_D, quantity, z_H=0.0, save=False, savepath="",
savetype=".pdf", newfig=True, marker="--ok",
fill="none", figsize=(10, 5)):
"""Plots the transverse wake profile of some quantity. These can be
* meanu
* meanv
* meanw
* stdu
"""
U = Re_D/1e6
label = "Exp."
folder = exp_path + "/Wake/U_" + str(U) + "/Processed/"
z_H_arr = np.load(folder + "z_H.npy")
i = np.where(z_H_arr==z_H)
q = np.load(folder + quantity + ".npy")[i]
y_R = np.load(folder + "y_R.npy")[i]
if newfig:
plt.figure(figsize=figsize)
plt.plot(y_R, q/U, marker, markerfacecolor=fill, label=label)
plt.xlabel(r"$y/R$")
plt.ylabel(ylabels[quantity])
plt.grid(True)
| 5,337,512
|
def cd(b, d, n):
"""Try to cd to the given path. In case of an error go back to ../../Scripts
and try again (maybe the last run had an error or
the script did not reach the end)."""
# Check if already there
try:
if [b, d, n] == get_input():
# print('Already there:', os.getcwd())
return
else:
os.chdir("../../Scripts")
# print('Went back to Scripts')
except AttributeError as e:
pass
# The script should be in the Scripts directory
if exists(b, d, n):
os.chdir('../Output/B' + str(b) + ' D' + str(d) + ' N' + str(n))
# print('Succes: ', os.getcwd())
else:
print('The specified directory does not exist!\n',
'Now in: ' + os.getcwd())
| 5,337,513
|
def save_vocab(count=[], name='vocab.txt'):
"""Save the vocabulary to a file so the model can be reloaded.
Parameters
----------
count : a list of tuple and list
count[0] is a list : the number of rare words\n
count[1:] are tuples : the number of occurrence of each word\n
e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
Examples
---------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = \
... tl.nlp.build_words_dataset(words, vocabulary_size, True)
>>> tl.nlp.save_vocab(count, name='vocab_text8.txt')
>>> vocab_text8.txt
... UNK 418391
... the 1061396
... of 593677
... and 416629
... one 411764
... in 372201
... a 325873
... to 316376
"""
pwd = os.getcwd()
vocabulary_size = len(count)
with open(os.path.join(pwd, name), "w") as f:
for i in xrange(vocabulary_size):
f.write("%s %d\n" % (tf.compat.as_text(count[i][0]), count[i][1]))
print("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd))
| 5,337,514
|
def is_running(process):
"""Returns True if the requested process looks like it's still running"""
if not process[0]:
return False # The process doesn't exist
if process[1]:
return process[1].poll() == None
try:
# check if the process is active by sending a dummy signal
os.kill(process[0]['pid'], 0)
except ProcessLookupError:
return False
return True
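A small sketch of the (info dict, Popen) pair the body above indexes, using subprocess (a POSIX `sleep` binary is assumed):
import subprocess

proc = subprocess.Popen(['sleep', '5'])
process = ({'pid': proc.pid}, proc)
print(is_running(process))   # True while the sleep is still running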
| 5,337,515
|
def rec_test(test_type: str):
"""
Rec test decorator: marks the wrapped function as a test, attaching is_test, test_type, test_desc and name attributes.
"""
def decorator(f):
@wraps(f)
def w(*args, **kwargs):
return f(*args, **kwargs)
# add attributes to f
w.is_test = True
w.test_type = test_type
try:
w.test_desc = f.__doc__.lstrip().rstrip()
except:
w.test_desc = ""
try:
# python 3
w.name = w.__name__
except:
# python 2
w.name = w.__func__.func_name
return w
return decorator
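A quick usage sketch of the decorator:
@rec_test(test_type="consistency")
def check_prices():
    """Prices must be positive."""
    return True

print(check_prices.is_test, check_prices.test_type, check_prices.test_desc)
# True consistency Prices must be positive.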
| 5,337,516
|
def display_convw(w, s, r, c, fig, vmax=None, vmin=None, dataset='mnist', title='conv_filters'):
"""
w2 = np.zeros(w.shape)
d = w.shape[1]/3
print w.shape
for i in range(w.shape[0]):
for j in range(w.shape[1]/3):
w2[i, j] = w[i, 3*j]
w2[i, j + d] = w[i, 3*j+1]
w2[i, j + 2*d] = w[i, 3*j+2]
w = w2
"""
numhid = w.shape[0]
size_x = s
size_y = s # For now.
num_channels = w.shape[1] // (size_x*size_y)
assert num_channels == 3
assert w.shape[1] % (size_x*size_y) == 0
if isinstance(w, np.ndarray):
vh = w.reshape(size_x*numhid*num_channels, size_y)
else:
vh = w.asarray().reshape(size_x*numhid*num_channels, size_y)
pvh = np.zeros((size_x*r, size_y*c, num_channels))
for i in range(r):
for j in range(c):
for ch in range(num_channels):
pvh[i*size_x:(i+1)*size_x, j*size_y:(j+1)*size_y, ch] = \
vh[(num_channels*(i*c+j)+ch)*size_x:(num_channels*(i*c+j)+ch+1)*size_x,:]
# pvh /= np.std(pvh)
plt.figure(fig)
plt.clf()
plt.title(title)
plt.imshow(pvh, vmax=vmax, vmin=vmin)
scale = 1
xmax = size_x*c
ymax = size_y*r
color = 'k'
for x in range(0, c):
plt.axvline(x=x*size_x/scale, ymin=0,ymax=ymax/scale, color = color)
for y in range(0, r):
plt.axhline(y=y*size_y/scale, xmin=0,xmax=xmax/scale, color = color)
plt.draw()
return pvh
| 5,337,517
|
def get_optional_list(all_tasks=ALL_TASKS, grade=-1, *keys) -> list:
"""获取可选的任务列表
:param keys: 缩小范围的关键字,不定长,定位第一级有一个键,要定位到第二级就应该有两个键
:param all_tasks: dict,两级, 所有的任务
:param grade: 字典层级 第0层即为最外层,依次向内层嵌套,默认值-1层获取所有最内层的汇总列表
:return:
"""
optional_list = []
# 按照指定层级获取相应的可选任务列表
if grade == -1:
# 获取最内层所有的具体任务
for key_grade_1 in all_tasks.keys():
for key_grade_2 in all_tasks[key_grade_1].keys():
optional_list.extend(all_tasks[key_grade_1][key_grade_2])
elif grade == 0:
# 获取最外层的宽泛任务
optional_list.extend(all_tasks.keys())
elif grade == 1:
key_grade_1 = keys[0] # 需取第一层级的值,就必须提供第0层的key
optional_list.extend(all_tasks[key_grade_1].keys())
elif grade == 2:
key_grade_1, key_grade_2 = keys[0], keys[1] # 需取第二层级的值,就必须提供第0层和第1层的key
optional_list.extend(all_tasks[key_grade_1][key_grade_2])
else:
print("超出任务字典的层级范围了哦")
return optional_list
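A quick sketch of the expected two-level task dict and a few calls (the ALL_TASKS contents here are made up):
ALL_TASKS = {
    'daily': {'cleanup': ['sweep', 'mop'], 'reports': ['sales']},
    'weekly': {'backup': ['db', 'logs']},
}

print(get_optional_list(ALL_TASKS))                         # all innermost tasks
print(get_optional_list(ALL_TASKS, 0))                      # ['daily', 'weekly']
print(get_optional_list(ALL_TASKS, 1, 'daily'))             # ['cleanup', 'reports']
print(get_optional_list(ALL_TASKS, 2, 'weekly', 'backup'))  # ['db', 'logs']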
| 5,337,518
|
def process_genotypes(filepath, snp_maf, snp_list=None, **kwargs):
"""
Process genotype file.
:param filepath:
:param snp_maf:
:param snp_list: get specified snp if provided
:param bool genotype_label: True if first column is the label of specimen, default False
:param bool skip_none_rs: True if skip None genotype, default True
:param bool fill_none: True if auto fill None genotype with most frequent genotype by MAF, default True
:return:
"""
conf = dict({
'genotype_label': False,
'skip_none_rs': True
}, **kwargs)
with open(filepath, encoding='utf-8') as fh:
if conf['genotype_label']:
df = genotype_with_label(fh, snp_maf=snp_maf, snp_list=snp_list, **conf)
else:
df = genotype_without_label(fh, snp_maf=snp_maf, snp_list=snp_list, **conf)
return df
| 5,337,519
|
def stop_trigger():
"""
Stops the Glue trigger so that the trigger does not run anymore.
"""
glue.stop_trigger(Name=GLUE_TRIGGER)
| 5,337,520
|
def check_output(*cmd):
"""Log and run the command, raising on errors, return output"""
print >>sys.stderr, 'Run:', cmd
return subprocess.check_output(cmd)
| 5,337,521
|
def table_exists(conn, table_name, schema=False):
"""Checks if a table exists.
Parameters
----------
conn
A Psycopg2 connection.
table_name : str
The table name.
schema : str, optional
The schema to which the table belongs. If falsy, the schema is not restricted.
"""
cur = conn.cursor()
table_exists_sql = ('select * from information_schema.tables '
f'where table_name={table_name!r}')
if schema:
table_exists_sql += f' and table_schema={schema!r}'
cur.execute(table_exists_sql)
return bool(cur.rowcount)
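A short usage sketch with psycopg2 (the connection parameters are placeholders):
import psycopg2

conn = psycopg2.connect(dbname='mydb', user='postgres')
print(table_exists(conn, 'users'))
print(table_exists(conn, 'users', schema='public'))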
| 5,337,522
|
def _dict_from_dir(previous_run_path):
"""
build dictionary that maps training set durations to a list of
training subset csv paths, ordered by replicate number
factored out as helper function so we can test this works correctly
Parameters
----------
previous_run_path : str, Path
path to directory containing dataset .csv files
that represent subsets of training set, created by
a previous run of ``vak.core.learncurve.learning_curve``.
Typically directory will have a name like ``results_{timestamp}``
and the actual .csv splits will be in sub-directories with names
corresponding to the training set duration
Returns
-------
train_dur_csv_paths : dict
where keys are duration in seconds of subsets taken from training data,
and corresponding values are lists of paths to .csv files containing
those subsets
"""
train_dur_csv_paths = {}
train_dur_dirs = previous_run_path.glob("train_dur_*s")
for train_dur_dir in train_dur_dirs:
train_dur = re.findall(TRAIN_DUR_PAT, train_dur_dir.name)
if len(train_dur) != 1:
raise ValueError(
f"did not find just a single training subset duration in filename:\n"
f"{train_subset_path}\n"
f"Instead found: {train_dur}"
)
train_dur = int(train_dur[0])
# sort by increasing replicate number -- numerically, not alphabetically
replicate_dirs = sorted(
train_dur_dir.glob("replicate_*"),
key=lambda dir_path: int(dir_path.name.split("_")[-1]),
)
train_subset_paths = []
for replicate_dir in replicate_dirs:
train_subset_path = sorted(replicate_dir.glob("*prep*csv"))
if len(train_subset_path) != 1:
raise ValueError(
f"did not find just a single training subset .csv in replicate directory:\n"
f"{replicate_dir}\n"
f"Instead found: {train_subset_path}"
)
train_subset_path = train_subset_path[0]
train_subset_paths.append(train_subset_path)
train_dur_csv_paths[train_dur] = train_subset_paths
return train_dur_csv_paths
| 5,337,523
|
def aggregate_pixel(arr,x_step,y_step):
"""Aggregation code for a single pixel"""
# Set x/y to zero to mimic the setting in a loop
# Assumes x_step and y_step in an array-type of length 2
x = 0
y = 0
# initialize sum variable
s = 0.0
# sum center pixels
left = int(ceil(x_step[x]))
right = int(floor(x_step[x+1]))
top = int(ceil(y_step[y]))
bottom = int(floor(y_step[y+1]))
s += arr[left:right,top:bottom].sum()
# Find edge weights
wl = left - x_step[x]
wr = x_step[x+1] - right
wt = top - y_step[y]
wb = y_step[y+1] - bottom
# sum edges - left
s += arr[left-1:left,top:bottom].sum() * wl
# sum edges - right
s += arr[right:right+1,top:bottom].sum() * wr
# sum edges - top
s += arr[left:right,top-1:top].sum() * wt
# sum edges - bottom
s += arr[left:right,bottom:bottom+1].sum() * wb
# sum corners ...
# ul
s += arr[left-1:left,top-1:top].sum() * wl * wt
# ur
s += arr[right:right+1,top-1:top].sum() * wr * wt
# ll
s += arr[left-1:left,bottom:bottom+1].sum() * wl * wb
# lr
s += arr[right:right+1,bottom:bottom+1].sum() * wr * wb
# calculate weight
weight = (x_step[x+1]-x_step[x])*(y_step[y+1]-y_step[y])
return s/float(weight)
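A tiny sketch of aggregating a fractional window, assuming numpy and math.ceil/math.floor are imported at module level as the body requires; the boundary arrays are made up:
import numpy as np

arr = np.arange(36, dtype=float).reshape(6, 6)
x_step = np.array([1.25, 3.75])   # fractional boundaries along the first axis
y_step = np.array([0.5, 2.5])     # fractional boundaries along the second axis
print(aggregate_pixel(arr, x_step, y_step))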
| 5,337,524
|
def plot_offer_utilization(df_offer):
"""
Make a plot for distribution of offer utilization.
Parameters
----------
df_offer: pandas.DataFrame
The data set of offer.
Returns
-------
None
"""
offer_use = df_offer.groupby(['person', 'is_offer_used']).count()['offer_id'].unstack().reset_index().fillna(0)
offer_use.columns = ['person', 'not_used', 'used']
offer_use['utilization'] = offer_use['used'] / (offer_use['not_used'] + offer_use['used'])
offer_use['utilization'].hist(bins=60)
plt.xlabel('Utilization')
plt.ylabel('Count')
plt.title('Offer Utilization Distribution')
plt.show()
| 5,337,525
|
def SetTexNodeColorSpace(texNode):
""" set Base Color to sRGB and all others to Non-Color """
try:
if texNode.label == 'Base Color':
texNode.image.colorspace_settings.name = 'sRGB'
else:
texNode.image.colorspace_settings.name = 'Non-Color'
except Exception:
print('Non-Standard Color Space Detected -- Please manually select')
| 5,337,526
|
def simplify_datatype(config):
""" Converts ndarray to list, useful for saving config as a yaml file """
for k, v in config.items():
if isinstance(v, dict):
config[k] = simplify_datatype(v)
elif isinstance(v, tuple):
config[k] = list(v)
elif isinstance(v, np.ndarray):
config[k] = v.tolist()
else:
config[k] = v
return config
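A quick sketch of the conversion on a nested config:
import numpy as np

config = {'lr': 0.01, 'shape': (3, 3), 'weights': np.zeros(2), 'nested': {'ids': np.arange(3)}}
print(simplify_datatype(config))
# {'lr': 0.01, 'shape': [3, 3], 'weights': [0.0, 0.0], 'nested': {'ids': [0, 1, 2]}}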
| 5,337,527
|
def _strict_random_crop_image(image,
boxes,
labels,
is_crowd,
difficult,
masks=None,
sem_seg=None,
min_object_covered=1.0,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.1, 1.0),
overlap_thresh=0.3):
"""Performs random crop.
Note: boxes will be clipped to the crop. Keypoint coordinates that are
outside the crop will be set to NaN, which is consistent with the original
keypoint encoding for non-existing keypoints. This function always crops
the image and is supposed to be used by `random_crop_image` function which
sometimes returns image unchanged.
Args:
image: rank 3 float32 tensor containing 1 image -> [height, width, channels]
with pixel values varying between [0, 1].
boxes: rank 2 float32 tensor containing the bounding boxes with shape
[num_instances, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
labels: rank 1 int32 tensor containing the object classes.
is_crowd: rank 1 tensor containing the crowd flag for each box.
difficult: rank 1 tensor containing the difficult flag for each box.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
sem_seg: (optional) rank 2 tensor containing a semantic segmentation map
with the same height and width as the input `image`.
min_object_covered: the cropped image must cover at least this fraction of
at least one of the input bounding boxes.
aspect_ratio_range: allowed range for aspect ratio of cropped image.
area_range: allowed range for area ratio between cropped image and the
original image.
overlap_thresh: minimum overlap thresh with new cropped
image to keep the box.
Returns:
image: image which is the same rank as input image.
boxes: boxes which is the same rank as input boxes.
Boxes are in normalized form.
labels: new labels.
If masks is not None, the function also returns:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
"""
with tf.name_scope('RandomCropImage', values=[image, boxes]):
image_shape = tf.shape(image)
# boxes are [N, 4]. Lets first make them [1, N, 4].
boxes_expanded = tf.expand_dims(
tf.clip_by_value(
boxes, clip_value_min=0.0, clip_value_max=1.0), 0)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
image_shape,
bounding_boxes=boxes_expanded,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=100,
use_image_if_no_bounding_boxes=True)
im_box_begin, im_box_size, im_box = sample_distorted_bounding_box
new_image = tf.slice(image, im_box_begin, im_box_size)
new_image.set_shape([None, None, image.get_shape()[2]])
# [1, 4]
im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0])
# [4]
im_box_rank1 = tf.squeeze(im_box)
boxlist = box_list.BoxList(boxes)
boxlist.add_field('labels', labels)
boxlist.add_field('is_crowd', is_crowd)
boxlist.add_field('difficult', difficult)
if masks is not None:
boxlist.add_field('masks', masks)
im_boxlist = box_list.BoxList(im_box_rank2)
# remove boxes that are outside cropped image
boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(
boxlist, im_box_rank1)
# remove boxes that do not sufficiently overlap the cropped window
overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
boxlist, im_boxlist, overlap_thresh)
# change the coordinate of the remaining boxes
new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
im_box_rank1)
new_boxes = new_boxlist.boxes
new_boxes = tf.clip_by_value(
new_boxes, clip_value_min=0.0, clip_value_max=1.0)
new_boxes.set_shape([None, 4])
result = [
new_image,
new_boxes,
overlapping_boxlist.get_field('labels'),
overlapping_boxlist.get_field('is_crowd'),
overlapping_boxlist.get_field('difficult'),
]
if masks is not None:
masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
masks_of_boxes_completely_inside_window = tf.gather(
masks_of_boxes_inside_window, keep_ids)
masks_box_begin = [0, im_box_begin[0], im_box_begin[1]]
masks_box_size = [-1, im_box_size[0], im_box_size[1]]
new_masks = tf.slice(
masks_of_boxes_completely_inside_window,
masks_box_begin, masks_box_size)
result.append(new_masks)
if sem_seg is not None:
sem_seg = tf.expand_dims(sem_seg, axis=-1)
new_sem_seg = tf.slice(sem_seg, im_box_begin, im_box_size)
new_sem_seg = tf.squeeze(new_sem_seg, axis=-1)
new_sem_seg.set_shape([None, None])
result.append(new_sem_seg)
return tuple(result)
| 5,337,528
|
def aggregate_by_player_id(statistics, playerid, fields):
"""
Inputs:
statistics - List of batting statistics dictionaries
playerid - Player ID field name
fields - List of fields to aggregate
Output:
Returns a nested dictionary whose keys are player IDs and whose values
are dictionaries of aggregated stats. Only the fields from the fields
input will be aggregated in the aggregated stats dictionaries.
"""
players = {}
# create nested dict with outer keys of player ids and inner dict of fields
for dic in statistics:
if dic[playerid] not in players:
players[dic[playerid]] = {playerid: dic[playerid]}
for field in fields:
players[dic[playerid]][field] = 0
# loop through statistics again, incrementing field values
for dic in statistics:
for field in fields:
players[dic[playerid]][field] += int(dic[field])
return players
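A small worked example of the aggregation (field values are strings, as the int() cast above expects):
stats = [
    {'player': 'p1', 'hits': '3', 'runs': '1'},
    {'player': 'p1', 'hits': '2', 'runs': '0'},
    {'player': 'p2', 'hits': '1', 'runs': '1'},
]
print(aggregate_by_player_id(stats, 'player', ['hits', 'runs']))
# {'p1': {'player': 'p1', 'hits': 5, 'runs': 1}, 'p2': {'player': 'p2', 'hits': 1, 'runs': 1}}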
| 5,337,529
|
def temp_volttron_home(request):
"""
Create a VOLTTRON_HOME and includes it in the test environment.
Creates a volttron home, config, and platform_config.yml file
for testing purposes.
"""
dirpath = tempfile.mkdtemp()
os.environ['VOLTTRON_HOME'] = dirpath
with open(os.path.join(dirpath, "platform_config.yml"), 'w') as fp:
fp.write(PLATFORM_CONFIG)
with open(os.path.join(dirpath, "config"), "w") as fp:
fp.write("[volttron]\n")
fp.write("instance-name = {}\n".format(INSTANCE_NAME))
yield dirpath
shutil.rmtree(dirpath, ignore_errors=True)
assert not os.path.exists(dirpath)
| 5,337,530
|
async def gtfo(ctx):
"""Makes Botboy leave (go offline)"""
conn.close()
await ctx.send("Bye!")
await bot.logout()
quit()
| 5,337,531
|
def _visualize(ax, data, labels, centers):
"""
Visualize the model's clustering results.
"""
colors = ["#82CCFC", "k", "#0C5FFA"]
ax.scatter(data[:, 0], data[:, 1], c=[colors[i] for i in labels], marker="o", alpha=0.8)
ax.scatter(centers[:, 0], centers[:, 1], marker="*", c=colors, edgecolors="white",
s=700., linewidths=2)
y_len = data[:, 1].max() - data[:, 1].min()
x_len = data[:, 0].max() - data[:, 0].min()
lens = max(y_len + 1, x_len + 1) / 2.
ax.set_xlim(data[:, 0].mean() - lens, data[:, 0].mean() + lens)
ax.set_ylim(data[:, 1].mean() - lens, data[:, 1].mean() + lens)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
| 5,337,532
|
def test_21_upgrade_baseline_current(capsys):
"""Verify baseline-current and baseline-info and get_version()"""
try:
os.unlink(TEST_DB_FILE)
except:
pass
config = pydbvolve.initialize(TEST_CONFIG_FILE, 'upgrade', 'r1.1.0', True, False)
assert (config is not None)
rc = pydbvolve.run_migration(TEST_CONFIG_FILE, 'upgrade', pydbvolve.LATEST_VERSION, True, False)
assert (rc == 0)
rc = pydbvolve.run_migration(TEST_CONFIG_FILE, 'baseline', pydbvolve.CURRENT_VERSION, True, False)
assert (rc == 0)
curr = pydbvolve.get_current(config)
assert curr is not None
base = pydbvolve.get_baseline(config)
assert base is not None
assert curr['version'] == base['version']
os.unlink(TEST_DB_FILE)
| 5,337,533
|
def loadStatesFromFile(filename):
"""Loads a list of states from a file."""
try:
with open(filename, 'rb') as inputfile:
result = pickle.load(inputfile)
except:
result = []
return result
| 5,337,534
|
def get_configuration_item(configuration_file, item, default_values):
"""Return configuration value on file for item or builtin default.
configuration_file Name of configuration file.
item Item in configuration file whose value is required.
default_values dict of default values for items.
Return "" if configuration file cannot be opened or read, after showing
a dialogue to tell the user.
Return "" if the item exists but has no value.
Return default value if the item does not exist and a default value exists.
Return "" if the item does not exist and a default value does not exist.
Return the item value if there is one.
Items occupy a single line formatted as (?P<item>[^\s]*)\s*(?P<value>.*)
"""
try:
of = open(configuration_file)
try:
config_text = of.read()
except Exception as exc:
tkinter.messagebox.showinfo(
parent=parent,
message="".join(
(
"Unable to read from\n\n",
configuration_file,
"\n\n",
str(exc),
'\n\n"" will be returned as value of ',
item,
)
),
title="Read File",
)
return ""
finally:
of.close()
except Exception as exc:
tkinter.messagebox.showinfo(
parent=parent,
message="".join(
(
"Unable to open\n\n",
configuration_file,
"\n\n",
str(exc),
'\n\n"" will be returned as value of ',
item,
)
),
title="Open File",
)
return ""
key = None
for i in config_text.splitlines():
i = i.split(maxsplit=1)
if not i:
continue
if i[0].startswith("#"):
continue
if i[0] != item:
continue
key = item
if len(i) == 1:
value = ""
else:
value = i[1].strip()
if key is None:
for k, v in default_values.items():
if k == item:
key = item
value = v
if key is None:
value = ""
return value
| 5,337,535
|
def tflite_stream_state_external_model_accuracy(
flags,
folder,
tflite_model_name='stream_state_external.tflite',
accuracy_name='tflite_stream_state_external_model_accuracy.txt',
reset_state=False):
"""Compute accuracy of streamable model with external state using TFLite.
Args:
flags: model and data settings
folder: folder name where model is located
tflite_model_name: file name with tflite model
accuracy_name: file name for storing accuracy in path + accuracy_name
reset_state: reset state between testing sequences.
If True - then it is a non-streaming testing environment: the state will be
reset at the beginning of every test sequence and will not be
transferred to the next one (as it is done in real streaming).
Returns:
accuracy
"""
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
path = os.path.join(flags.train_dir, folder)
logging.info('tflite stream model state external with reset_state %d',
reset_state)
audio_processor = input_data.AudioProcessor(flags)
set_size = audio_processor.set_size('testing')
interpreter = tf.lite.Interpreter(
model_path=os.path.join(path, tflite_model_name))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
inputs = []
for s in range(len(input_details)):
inputs.append(np.zeros(input_details[s]['shape'], dtype=np.float32))
total_accuracy = 0.0
count = 0.0
inference_batch_size = 1
for i in range(0, set_size, inference_batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
inference_batch_size, i, flags, 0.0, 0.0, 0, 'testing', 0.0, 0.0, sess)
# before processing new test sequence we can reset model state
# if we reset model state then it is not real streaming mode
if reset_state:
for s in range(len(input_details)):
inputs[s] = np.zeros(input_details[s]['shape'], dtype=np.float32)
if flags.preprocess == 'raw':
out_tflite = inference.run_stream_inference_classification_tflite(
flags, interpreter, test_fingerprints, inputs)
out_tflite_argmax = np.argmax(out_tflite)
else:
for t in range(test_fingerprints.shape[1]):
# get new frame from stream of data
stream_update = test_fingerprints[:, t, :]
stream_update = np.expand_dims(stream_update, axis=1)
# [batch, time=1, feature]
stream_update = stream_update.astype(np.float32)
# set input audio data (by default input data at index 0)
interpreter.set_tensor(input_details[0]['index'], stream_update)
# set input states (index 1...)
for s in range(1, len(input_details)):
interpreter.set_tensor(input_details[s]['index'], inputs[s])
# run inference
interpreter.invoke()
# get output: classification
out_tflite = interpreter.get_tensor(output_details[0]['index'])
# get output states and set it back to input states
# which will be fed in the next inference cycle
for s in range(1, len(input_details)):
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
inputs[s] = interpreter.get_tensor(output_details[s]['index'])
out_tflite_argmax = np.argmax(out_tflite)
total_accuracy = total_accuracy + (
test_ground_truth[0] == out_tflite_argmax)
count = count + 1
if i % 200 == 0 and i:
logging.info(
'tflite test accuracy, stream model state external = %f %d out of %d',
*(total_accuracy * 100 / count, i, set_size))
total_accuracy = total_accuracy / count
logging.info(
'tflite Final test accuracy, stream model state external = %.2f%% (N=%d)',
*(total_accuracy * 100, set_size))
with open(os.path.join(path, accuracy_name), 'wt') as fd:
fd.write('%f on set_size %d' % (total_accuracy * 100, set_size))
return total_accuracy * 100
| 5,337,536
|
def rm_magic(kernel, args):
"""Remove files on microcontroller
If path is a directory and the option -f is not specified, the command is silently ignored.
Examples:
%rm a # delete file a if it exists, no action if it's a directory, error otherwise
%rm -f a # delete file or directory a
%rm -rf a # delete a, if it's a directory, also delete contents
%rm -r a # delete a, if it's a directory, also delete all files but not directories recursively
%rm -rf / # wipe everything, really!
"""
try:
with kernel.device as repl:
for p in args.path:
repl.rm_rf(p, r=args.recursive, f=args.force)
except RemoteError as e:
kernel.stop(f"{str(e).splitlines()[-1]}")
| 5,337,537
|
def sexa2deg(ra, dec):
"""Convert sexagesimal to degree; taken from ryan's code"""
ra = coordinates.Angle(ra, units.hour).degree
dec = coordinates.Angle(dec, units.degree).degree
return ra, dec
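A usage sketch, assuming astropy's coordinates and units modules are imported under those names:
from astropy import coordinates, units

ra_deg, dec_deg = sexa2deg('05:34:31.94', '+22:00:52.2')
print(ra_deg, dec_deg)   # roughly 83.63, 22.01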
| 5,337,538
|
def verify_grad(fun, pt, n_tests=2, rng=None, eps=None,
out_type=None, abs_tol=None,
rel_tol=None, mode=None, cast_to_output_type=False):
"""Test a gradient by Finite Difference Method. Raise error on failure.
Example:
>>> verify_grad(theano.tensor.tanh,
(numpy.asarray([[2,3,4], [-1, 3.3, 9.9]]),),
rng=numpy.random)
Raises an Exception if the difference between the analytic gradient and
numerical gradient (computed through the Finite Difference Method) of a
random projection of the fun's output to a scalar exceeds the given
tolerance.
:param fun: a Python function that takes Theano variables as inputs,
and returns a Theano variable. For instance, an Op instance with
a single output.
:param pt: the list of numpy.ndarrays to use as input values.
These arrays must be either float32 or float64 arrays.
:param n_tests: number of times to run the test
:param rng: random number generator used to sample u, we test gradient
of sum(u * fun) at pt
:param eps: stepsize used in the Finite Difference Method (Default
None is type-dependent)
Raising the value of eps can raise or lower the absolute and
relative errors of the verification depending on the
Op. Raising eps does not lower the verification quality. It
is better to raise eps than raising abs_tol or rel_tol.
:param out_type: dtype of output, if complex (i.e. 'complex32' or
'complex64')
:param abs_tol: absolute tolerance used as threshold for gradient
comparison
:param rel_tol: relative tolerance used as threshold for gradient
comparison
:note: WARNING to unit-test writers: if `op` is a function that builds
a graph, try to make it a SMALL graph. Often verify grad is run
in debug mode, which can be very slow if it has to verify a lot of
intermediate computations.
:note: This function does not support multiple outputs. In
tests/test_scan.py there is an experimental verify_grad that
covers that case as well by using random projections.
"""
# The import is here to prevent circular import.
from theano import compile, shared
import theano.tensor
from theano.tensor import as_tensor_variable, TensorType
assert isinstance(pt, (list, tuple))
pt = [numpy.array(p) for p in pt]
for i, p in enumerate(pt):
if p.dtype not in ('float32', 'float64'):
raise TypeError(('verify_grad can work only with floating point '
'inputs, but input %i has dtype "%s".') % (i, p.dtype))
_type_tol = dict( # relative error tolerances for different types
float32=1e-2,
float64=1e-4)
if abs_tol is None:
abs_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
if rel_tol is None:
rel_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
if rng is None:
raise TypeError(('rng should be a valid instance of '
'numpy.random.RandomState. You may '
'want to use theano.tests.unittest'
'_tools.verify_grad instead of '
'theano.gradient.verify_grad.'))
# We allow input downcast in function, because numeric_grad works in the
# most precise dtype used among the inputs, so we may need to cast some.
def function(inputs, output):
if mode is None:
f = compile.function(inputs, output, accept_inplace=True,
allow_input_downcast=True,
on_unused_input='ignore')
else:
f = compile.function(inputs, output, accept_inplace=True,
allow_input_downcast=True, mode=mode,
on_unused_input='ignore')
return f
tensor_pt = [TensorType(
as_tensor_variable(p).dtype,
as_tensor_variable(p).broadcastable)(name='input %i' % i)
for i, p in enumerate(pt)]
# fun can be either a function or an actual Op instance
o_output = fun(*tensor_pt)
if isinstance(o_output, list):
raise NotImplementedError(('cant (yet) autotest gradient of fun '
'with multiple outputs'))
# we could make loop over outputs making random projections R for each,
# but this doesn't handle the case where not all the outputs are
# differentiable... so I leave this as TODO for now -JB.
o_fn = function(tensor_pt, o_output)
o_fn_out = o_fn(*[p.copy() for p in pt])
if isinstance(o_fn_out, tuple) or isinstance(o_fn_out, list):
raise TypeError('It seems like you are trying to use verify_grad '
'on an op or a function which outputs a list: there should'
' be a single (array-like) output instead')
# random_projection should not have elements too small,
# otherwise too much precision is lost in numerical gradient
def random_projection():
plain = rng.rand(*o_fn_out.shape) + 0.5
if cast_to_output_type:
return numpy.array(plain, o_output.dtype)
return plain
t_r = shared(random_projection())
t_r.name = 'random_projection'
# random projection of o onto t_r
# This sum() is defined above, it's not the builtin sum.
cost = theano.tensor.sum(t_r * o_output)
cost_fn = function(tensor_pt, cost)
symbolic_grad = grad(cost, tensor_pt,
disconnected_inputs='ignore')
grad_fn = function(tensor_pt, symbolic_grad)
for test_num in xrange(n_tests):
try:
num_grad = numeric_grad(cost_fn, [p.copy() for p in pt],
eps, out_type)
analytic_grad = grad_fn(*[p.copy() for p in pt])
# Since `tensor_pt` is a list, `analytic_grad` should be one too.
assert isinstance(analytic_grad, list)
max_arg, max_err_pos, max_abs_err, max_rel_err = num_grad.max_err(
analytic_grad, abs_tol, rel_tol)
if max_abs_err > abs_tol and max_rel_err > rel_tol:
raise verify_grad.E_grad(max_arg, max_err_pos,
max_abs_err, max_rel_err,
abs_tol, rel_tol)
# get new random projection for next test
if test_num < n_tests - 1:
t_r.set_value(random_projection(), borrow=True)
except Exception, e:
e.args += ("\nThe error happened with the following inputs:", pt,
"\nThe value of eps is:", eps,
"\nThe out_type is:", out_type)
raise
| 5,337,539
|
def get_filenames():
""" get file names given path """
files = []
for file in os.listdir(cwd):
if file.endswith(".vcf"):
fullPath = os.path.join(cwd, file)
files.append(fullPath)
return files
| 5,337,540
|
def is_mismatch_before_n_flank_of_read(md, n):
"""
Returns True if there is a mismatch before the first n nucleotides
of a read, or if there is a mismatch before the last n nucleotides
of a read.
:param md: string
:param n: int
:return is_mismatch: boolean
"""
is_mismatch = False
flank_mm_regex = r"^(\d+).*[ACGT](\d+)$"
flank_mm = re.findall(flank_mm_regex,md)
if flank_mm:
flank_mm = flank_mm[0]
if flank_mm[1]:
if int(flank_mm[1]) < n:
is_mismatch = True
if flank_mm[0]:
if int(flank_mm[0]) < n:
is_mismatch = True
return is_mismatch
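A small sketch with typical MD tags (the re module is assumed imported, as the body requires):
print(is_mismatch_before_n_flank_of_read("2A33", 5))    # True: mismatch 2 bases from the start
print(is_mismatch_before_n_flank_of_read("30A2", 5))    # True: mismatch 2 bases from the end
print(is_mismatch_before_n_flank_of_read("10A25", 5))   # False: away from both flanks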
| 5,337,541
|
def get_county() -> Dict:
"""Main method for populating county data"""
api = SocrataApi('https://data.marincounty.org/')
notes = ('This data only accounts for Marin residents and does not '
'include inmates at San Quentin State Prison. '
'The tests timeseries only includes the number of tests '
'performed and not how many were positive or negative. '
'Demographic breakdowns for testing are not available.')
return {
'name': 'Marin',
'update_time': get_latest_update(api).isoformat(),
# The county's data dashboard is at:
# https://coronavirus.marinhhs.org/surveillance
# Which links to the data portal category with the data sets we
# actually use at:
# https://data.marincounty.org/browse?q=covid
'source_url': 'https://coronavirus.marinhhs.org/surveillance',
'meta_from_source': '',
'meta_from_baypd': notes,
'series': {
'cases': get_timeseries_cases(api),
'deaths': get_timeseries_deaths(api),
'tests': get_timeseries_tests(api),
},
'case_totals': get_case_totals(api),
'death_totals': get_death_totals(api),
# Marin does not currently provide demographic breakdowns for
# testing, so no test totals right now.
}
| 5,337,542
|
def optimize_player_strategy(
player_cards: List[int], opponent_cards: List[int], payoff_matrix: Matrix
) -> Strategy:
"""
Get the optimal strategy for the player, by solving
a simple linear program based on payoff matrix.
"""
lp = mip.Model("player_strategy", solver_name=mip.CBC)
lp.verbose = False # the problems are simple and we don't need to see the output
x = [lp.add_var(f"x_{card}", var_type=mip.CONTINUOUS) for card in player_cards]
v = lp.add_var("v", var_type=mip.CONTINUOUS, lb=-mip.INF)
for opponent_card in opponent_cards:
transposed_row = [
payoff_matrix[(player_card, opponent_card)] for player_card in player_cards
]
constraint = (
mip.xsum(transposed_row[i] * x_i for i, x_i in enumerate(x)) - v >= 0
)
lp += constraint, f"strategy_against_{opponent_card}"
logging.debug(f"constraint={constraint}")
lp += mip.xsum(x) == 1, "probability_distribution"
lp.objective = mip.maximize(v)
# all variables are continuous so we only need to solve relaxed problem
lp.optimize(max_seconds=30, relax=True)
if lp.status is not mip.OptimizationStatus.OPTIMAL:
logging.error(f"lp.status={lp.status}")
raise RuntimeError(
f"Solver couldn't optimize the problem and returned status {lp.status}"
)
strategy = Strategy(
card_probabilities={
card: lp.var_by_name(f"x_{card}").x for card in player_cards
},
expected_value=lp.var_by_name("v").x,
)
logging.debug(f"strategy.expected_value={strategy.expected_value}")
logging.debug("\n")
return strategy
| 5,337,543
|
def _write_dihedral_information(gsd_snapshot, structure):
"""Write the dihedrals in the system.
Parameters
----------
gsd_snapshot :
The file object of the GSD file being written
structure : parmed.Structure
Parmed structure object holding system information
Warnings
--------
Not yet implemented for gmso.core.topology objects
"""
# gsd_snapshot.dihedrals.N = len(structure.rb_torsions)
# unique_dihedral_types = set()
# for dihedral in structure.rb_torsions:
# t1, t2 = dihedral.atom1.type, dihedral.atom2.type
# t3, t4 = dihedral.atom3.type, dihedral.atom4.type
# if [t2, t3] == sorted([t2, t3], key=natural_sort):
# dihedral_type = ('-'.join((t1, t2, t3, t4)))
# else:
# dihedral_type = ('-'.join((t4, t3, t2, t1)))
# unique_dihedral_types.add(dihedral_type)
# unique_dihedral_types = sorted(list(unique_dihedral_types), key=natural_sort)
# gsd_snapshot.dihedrals.types = unique_dihedral_types
# dihedral_typeids = []
# dihedral_groups = []
# for dihedral in structure.rb_torsions:
# t1, t2 = dihedral.atom1.type, dihedral.atom2.type
# t3, t4 = dihedral.atom3.type, dihedral.atom4.type
# if [t2, t3] == sorted([t2, t3], key=natural_sort):
# dihedral_type = ('-'.join((t1, t2, t3, t4)))
# else:
# dihedral_type = ('-'.join((t4, t3, t2, t1)))
# dihedral_typeids.append(unique_dihedral_types.index(dihedral_type))
# dihedral_groups.append((dihedral.atom1.idx, dihedral.atom2.idx,
# dihedral.atom3.idx, dihedral.atom4.idx))
# gsd_snapshot.dihedrals.typeid = dihedral_typeids
# gsd_snapshot.dihedrals.group = dihedral_groups
pass
| 5,337,544
|
def saveFIG(filename='tmp.pdf',
axis=False,
transparent=True):
"""save fig for publication
Args:
filename (str, optional): filename to save figure. (Default value = 'tmp.pdf')
axis (bool, optional): if True then show axis. (Default value = False)
transparent (bool, optional): if True background is transparent. (Default value = True)
Returns:
"""
import pylab as plt
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
hspace = 0, wspace = 0)
plt.margins(0,0)
if not axis:
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.savefig(filename,dpi=300, bbox_inches = 'tight',
pad_inches =.1,transparent=transparent)
return
| 5,337,545
|
def address_book(request):
"""
This Endpoint is for getting contact
details of all people at a time.
We will paginate this for 10 items at a time.
"""
try:
paginator = PageNumberPagination()
paginator.page_size = 10
persons = Person.objects.all()
paginated_persons = paginator.paginate_queryset(persons, request)
serializer = PersonDetailSerializer(paginated_persons, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
except:
print(traceback.format_exc())
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| 5,337,546
|
def decrement_items (inventory, items):
"""
:param inventory: dict - inventory dictionary.
:param items: list - list of items to decrement from the inventory.
:return: dict - updated inventory dictionary with items decremented.
"""
return add_or_decrement_items (inventory, items, 'minus')
| 5,337,547
|
def global_ave_pool(x):
"""Global Average pooling of convolutional layers over the spatioal dimensions.
Results in 2D tensor with dimension: (batch_size, number of channels) """
return th.mean(x, dim=[2, 3])
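A tiny sketch, assuming th is PyTorch imported as `import torch as th`:
import torch as th

x = th.randn(8, 16, 32, 32)        # (batch, channels, H, W)
print(global_ave_pool(x).shape)    # torch.Size([8, 16])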
| 5,337,548
|
def train_models(models, train_data, target, logger, dask_client=None, randomized_search=False, scoring_metric=None):
"""Trains a set of models on the given training data/labels
:param models: a dictionary of models which need to be trained
:param train_data: a dataframe containing all possible features (the actual features used are specific to each model) of the training data
:param target: the column which contains the actual labels for training data
:param logger: logger object passed from the calling module
:param dask_client: dask client to use for training/testing, defaults to None
:param randomized_search: flag specifying to tune params by randomized search
:param scoring_metric: scoring metric to be used for randomized hyper param search
:return: a dictionary of models after fitting training params on the data
"""
for modelkey, model_metadata in models.items():
logger.info("Training started for " + modelkey)
#model_metadata["MRR"] = 0
# resolve feature data/params
features = model_metadata["features"]
X_train = train_data[features]
y_train = train_data[target]
model_params = model_metadata["param_grid"]
model_type = model_metadata["type"]
if model_type == "classical":
model_pipeline = get_prediction_pipeline(model_metadata)
if dask_client:
with joblib.parallel_backend("dask"):
model_pipeline.fit(X_train, y_train)
else:
if randomized_search and modelkey not in ['LinReg']:
# randomized_search if time permits fix bugs
search = RandomizedSearchCV(estimator = model_pipeline,
param_distributions = model_params,
n_iter = 50,
cv = 5,
verbose=10,
random_state=35,
n_jobs = -1,
scoring = scoring_metric
)
try:
search.fit(X_train, y_train)
best_params = str(search.best_params_)
model_pipeline.set_params(**literal_eval(best_params))
model_pipeline.fit(X_train, y_train)
except Exception as e:
logger.info(" Exception {} while param search for {} switching to default fit".format(e, modelkey))
model_pipeline.fit(X_train, y_train)
else:
model_pipeline.fit(X_train, y_train)
if model_type == "DL":
model_fitted_params = fit_dl_model(
X_train,
y_train,
param_dict=model_params,
logger=logger,
scoring_metric = scoring_metric
)
for key in model_fitted_params:
models[modelkey][key] = model_fitted_params[key]
logger.info("Training ended for " + modelkey)
return models
| 5,337,549
|
def get_output(interpreter, top_k=1, score_threshold=0.0):
"""Returns no more than top_k classes with score >= score_threshold."""
scores = output_tensor(interpreter)
classes = [
Class(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(classes, key=operator.itemgetter(1), reverse=True)
| 5,337,550
|
def bag_of_words_features(data, binary=False):
"""Return features using bag of words"""
vectorizer = CountVectorizer(
ngram_range=(1, 3), min_df=3, stop_words="english", binary=binary
)
return vectorizer.fit_transform(data["joined_lemmas"])
| 5,337,551
|
def gen_batch_iter(random_instances, batch_s=BATCH_SIZE):
""" a batch 2 numpy data.
"""
num_instances = len(random_instances)
offset = 0
while offset < num_instances:
batch = random_instances[offset: min(num_instances, offset + batch_s)]
num_batch = len(batch)
lengths = np.zeros(num_batch, dtype=np.int64)
for i, (_, word_ids, _, _, _, _) in enumerate(batch):
lengths[i] = len(word_ids)
max_seq_len = lengths.max()
# if max_seq_len >= MAX_SEQ_LEN:
# offset = offset + batch_s
# continue
words_all, word_inputs, word_elmo_embeds, pos_inputs, graph_inputs, masks \
= data_ids_prep(num_batch, max_seq_len, batch)
offset = offset + batch_s
# numpy2torch
word_inputs = torch.from_numpy(word_inputs).long()
word_elmo_embeds = torch.from_numpy(word_elmo_embeds).float()
pos_inputs = torch.from_numpy(pos_inputs).long()
graph_inputs = torch.from_numpy(graph_inputs).byte()
masks = torch.from_numpy(masks).byte()
if USE_GPU:
word_inputs = word_inputs.cuda(CUDA_ID)
word_elmo_embeds = word_elmo_embeds.cuda(CUDA_ID)
pos_inputs = pos_inputs.cuda(CUDA_ID)
graph_inputs = graph_inputs.cuda(CUDA_ID)
masks = masks.cuda(CUDA_ID)
yield words_all, word_inputs, word_elmo_embeds, pos_inputs, graph_inputs, masks
| 5,337,552
|
def duration(func):
"""
Timing decorator: report how long the wrapped call took.
"""
def wrapper(*args, **kwargs):
start = time.time()
f = func(*args, **kwargs)
print("Scan finished, took " + str(int(time.time()-start)) + " seconds!")
return f
return wrapper
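A quick usage sketch (the time module must be imported, as the wrapper requires):
import time

@duration
def scan():
    time.sleep(1)

scan()   # prints roughly: Scan finished, took 1 seconds!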
| 5,337,553
|
def enumerate_assignments(max_context_number):
"""
enumerate all possible assignments of contexts to clusters for a fixed
number of contexts. Has the hard assumption that the first context belongs
to cluster #1, to remove redundant assignments that differ in labeling.
:param max_context_number: int
:return: list of lists, each a function that takes in a context id
number and returns a cluster id number
"""
cluster_assignments = [{}] # context 0 is always in cluster 1
for contextNumber in range(0, max_context_number):
cluster_assignments = augment_assignments(cluster_assignments, contextNumber)
return cluster_assignments
| 5,337,554
|
def main():
""" Execute package updater
"""
try:
updater = PackageUpdater()
updater.run(sys.argv[1:])
except Exception as error:
print(termcolor.colored("ERROR: {}".format(error), 'red'))
sys.exit(1)
| 5,337,555
|
def KL_monte_carlo(z, mean, sigma=None, log_sigma=None):
"""Computes the KL divergence at a point, given by z.
Implemented based on https://www.tensorflow.org/tutorials/generative/cvae
This is the part "log(p(z)) - log(q(z|x)) where z is sampled from
q(z|x).
Parameters
----------
z : (B, N)
mean : (B, N)
sigma : (B, N) | None
log_sigma : (B, N) | None
Returns
-------
KL : (B,)
"""
if log_sigma is None:
log_sigma = tf.math.log(sigma)
zeros = tf.zeros_like(z)
log_p_z = log_multivar_gaussian(z, mean=zeros, log_sigma=zeros)
log_q_z_x = log_multivar_gaussian(z, mean=mean, log_sigma=log_sigma)
return log_q_z_x - log_p_z
| 5,337,556
|
def get_order_discrete(p, x, x_val, n_full=None):
""" Calculate the order of the discrete features according to the alt/null ratio
Args:
p ((n,) ndarray): The p-values.
x ((n,) ndarray): The covaraites. The data is assumed to have been preprocessed.
x_val ((n_val,) ndarray): All possible values for x, sorted in ascending order.
n_full (int): Total number of hypotheses before filtering.
Returns:
x_order ((d,) ndarray): the order (of x_val) from smallest alt/null ratio to
the largest.
"""
n_val = x_val.shape[0]
# Separate the null and the alt proportion.
_, t_BH = bh_test(p, alpha=0.1, n_full=n_full)
x_null, x_alt = x[p>0.75], x[p<t_BH]
# Calculate the alt/null ratio.
cts_null = np.zeros([n_val], dtype=int)
cts_alt = np.zeros([n_val], dtype=int)
for i,val in enumerate(x_val):
cts_null[i] = np.sum(x_null==val)+1
cts_alt[i] = np.sum(x_alt==val)+1
p_null = cts_null/np.sum(cts_null)
p_alt = cts_alt/np.sum(cts_alt)
p_ratio = p_alt/p_null
# Calculate the order of x_val based on the ratio.
x_order = p_ratio.argsort()
return x_order
| 5,337,557
|
def revoke_grant(KeyId=None, GrantId=None):
"""
Revokes a grant. You can revoke a grant to actively deny operations that depend on it.
See also: AWS API Documentation
Examples
The following example revokes a grant.
Expected Output:
:example: response = client.revoke_grant(
KeyId='string',
GrantId='string'
)
:type KeyId: string
:param KeyId: [REQUIRED]
A unique identifier for the customer master key associated with the grant. This value can be a globally unique identifier or the fully specified ARN to a key.
Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012
:type GrantId: string
:param GrantId: [REQUIRED]
Identifier of the grant to be revoked.
:return: response = client.revoke_grant(
# The identifier of the grant to revoke.
GrantId='0c237476b39f8bc44e45212e08498fbe3151305030726c0590dd8d3e9f3d6a60',
# The identifier of the customer master key (CMK) associated with the grant. You can use the key ID or the Amazon Resource Name (ARN) of the CMK.
KeyId='1234abcd-12ab-34cd-56ef-1234567890ab',
)
print(response)
"""
pass
| 5,337,558
|
def _read_txt(file_path: str) -> str:
"""
Read specified file path's text.
Parameters
----------
file_path : str
Target file path to read.
Returns
-------
txt : str
Read txt.
"""
with open(file_path) as f:
txt: str = f.read()
return txt
| 5,337,559
|
def init_statick():
"""Fixture to initialize a Statick instance."""
args = Args("Statick tool")
return Statick(args.get_user_paths(["--user-paths", os.path.dirname(__file__)]))
| 5,337,560
|
def BssResultComparison(S_synth, tc_synth, S_pca, tc_pca, S_ica, tc_ica, pixel_mask, title):
""" A function to plot the results of PCA and ICA against the synthesised sources
Inputs:
S_synth | rank 2 array | synthesised sources images as rows (e.g. 2 x 5886)
tc_synth | rank 2 array | synthesised time courses as columns (e.g. 19 x 2)
S_pca | PCA sources
tc_pca | PCA time courses
S_ica | ica sources
tc_ica | ica time courses
pixel_mask | rank 2 pixel mask | used to convert column arrays to 2d masked arrays (i.e. for ifgs)
title | string | figure name
"""
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
def source_row(sources, tcs, grid):
""" Given a grid object, plot up to 6 spatial maps and time courses in a 2x6 grid
"""
def linegraph(sig, ax):
""" signal is a 1xt row vector """
times = sig.size
if times > 20:
times = 20
a = np.arange(times)
ax.plot(a,sig[:20], color='k')
ax.axhline(y=0, color='k', alpha=0.4)
ax.set_xticks([])
ax.set_yticks([])
#ax.set_aspect(1)
def ifg(sig, ax):
""" signal is a 1xt row vector """
from small_plot_functions import col_to_ma
#ax.imshow(col_to_ma(sig, pixel_mask), cmap = matplotlib.cm.coolwarm, vmin = -1, vmax = 1)
ax.imshow(col_to_ma(sig, pixel_mask), cmap = matplotlib.cm.coolwarm)
ax.set_xticks([])
ax.set_yticks([])
grid_inner = gridspec.GridSpecFromSubplotSpec(2, 6, subplot_spec=grid, wspace=0.0, hspace=0.0)
for j in np.arange(0,6):
if j < np.size(sources, axis= 0):
ax_ifg = plt.subplot(grid_inner[0, j])
ax_line = plt.subplot(grid_inner[1, j])
ifg(sources[j,:], ax_ifg)
linegraph(tcs[:,j], ax_line)
fig_extraSources_comps = plt.figure(title, figsize=(8,8))
grid_rows = gridspec.GridSpec(3, 1)
source_row(S_synth, tc_synth, grid_rows[0])
source_row(S_pca, tc_pca, grid_rows[1])
source_row(S_ica, tc_ica, grid_rows[2])
fig_extraSources_comps.tight_layout(rect =[0.05,0,1,1])
fig_extraSources_comps.text(0.05, 0.88, 'Sources', fontsize=12, rotation = 90, horizontalalignment='center')
fig_extraSources_comps.text(0.05, 0.55, 'sPCA', fontsize=12, rotation = 90, horizontalalignment='center')
fig_extraSources_comps.text(0.05, 0.24, 'sICA', fontsize=12, rotation = 90, horizontalalignment='center')
| 5,337,561
|
def test_can_tests_load():
"""Make sure pytest finds this test."""
print("I am a test.")
assert 1 == 1
| 5,337,562
|
def dedupe(entries):
"""
Uses fuzzy matching to remove duplicate entries.
"""
return thefuzz.process.dedupe(entries, THRESHOLD, fuzz.token_set_ratio)
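
# Hedged usage sketch added for illustration (not part of the original source);
# assumes the module defining dedupe() also provides the THRESHOLD constant and
# the thefuzz imports it relies on. The exact result depends on THRESHOLD.
def _example_dedupe():
    entries = ["Frodo Baggins", "Frodo Baggin", "Samwise Gamgee"]
    return dedupe(entries)  # roughly ["Frodo Baggins", "Samwise Gamgee"]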
| 5,337,563
|
def generate_openssl_rsa_refkey(key_pub_raw, # pylint: disable=too-many-locals, too-many-branches, too-many-arguments, too-many-statements
keyid_int, refkey_file,
key_size, encode_format="", password="nxp",
cert=""):
"""
Generate rsa reference key using openssl
:param key_pub_raw: Retrieved public key
:param keyid_int: Key index
:param refkey_file: File name to store reference key
:param key_size: RSA key size
:param encode_format: Encode format to store file
:param password: Password for encryption of pkcs12 reference key
:param cert: Input certificate
:return: Status
"""
# generate rsa key pair
key_openssl = rsa.generate_private_key(public_exponent=65537, key_size=key_size,
backend=default_backend())
key_prv_bytes = key_openssl.private_bytes(encoding=Encoding.DER,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
key_openssl_hex = binascii.hexlify(key_prv_bytes)
key_openssl_list = list()
for k in range(0, len(key_openssl_hex), 2):
key_openssl_list.append(key_openssl_hex[k:k + 2])
# convert the retrieved public key to hex format
key_pub_list = list(key_pub_raw)
# trim the header of public key
if key_size == 1024:
key_pub_no_header_list = key_pub_list[25:]
elif key_size in [2048, 3072, 4096]:
key_pub_no_header_list = key_pub_list[28:]
else:
log.error("key size: %s is not supported. Should be one of 1024, 2048, 3072, 4096",
(str(key_size),))
return apis.kStatus_SSS_Fail
key_pub_str_list = list()
for key_pub_no_header_item in key_pub_no_header_list:
key_pub_no_header_item = format(key_pub_no_header_item, 'x')
if len(key_pub_no_header_item) == 1:
key_pub_no_header_item = "0" + key_pub_no_header_item
key_pub_str_list.append(key_pub_no_header_item)
openssl_index = 7
# Public Key section
retrieved_pub_len = get_length(key_pub_str_list)
openssl_pub_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_pub_len, openssl_index,
key_pub_str_list, retrieved_pub_len)
openssl_index += retrieved_pub_len
# publicExponent section
openssl_index += get_length(key_openssl_list[openssl_index:])
# Private key Exponent section
openssl_index += get_length(key_openssl_list[openssl_index:])
# prime1 section
magic_prime1_data = ['02', '01', '01']
openssl_prime1_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_prime1_len, openssl_index,
magic_prime1_data, len(magic_prime1_data))
openssl_index += len(magic_prime1_data)
# convert keyID to hex format and add TLV
keyid_str = format("%08x" % keyid_int)
key_id_list = ['02']
if len(keyid_str) < 31:
key_id_len = int(len(keyid_str) / 2)
key_id_len_hex = format("%x" % key_id_len)
if len(key_id_len_hex) == 1:
key_id_len_hex = "0" + key_id_len_hex
key_id_list.append(key_id_len_hex)
for i in range(0, len(keyid_str), 2):
key_id_list.append(keyid_str[i:i + 2])
# prime 2 section
openssl_prime2_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_prime2_len,
openssl_index, key_id_list, len(key_id_list))
openssl_index += len(key_id_list)
# exponent1 section
openssl_index += get_length(key_openssl_list[openssl_index:])
# exponent2 section
openssl_index += get_length(key_openssl_list[openssl_index:])
# coefficient section
magic_mod_p = ['02', '04', 'a5', 'a6', 'b5', 'b6']
openssl_coefficient_len = get_length(key_openssl_list[openssl_index:])
key_openssl_list = replace_bytes(key_openssl_list, openssl_coefficient_len,
openssl_index, magic_mod_p,
len(magic_mod_p))
# Recalculate total length of the key
key_openssl_len = len(key_openssl_list) - 4
key_openssl_len_str = format("%04x" % key_openssl_len)
total_len_list = []
for i in range(0, len(key_openssl_len_str), 2):
total_len_list.append(key_openssl_len_str[i:i + 2])
key_openssl_list[2] = total_len_list[0]
key_openssl_list[3] = total_len_list[1]
# convert key to der or pem format
key_der_hex = ""
for key_openssl_item in key_openssl_list:
if isinstance(key_openssl_item, bytes):
key_der_hex += bytes.decode(key_openssl_item)
else:
key_der_hex += key_openssl_item
key_der = binascii.unhexlify(key_der_hex)
key_pem_obj = openssl.backend.load_der_private_key(key_der, None)
key_pem = key_pem_obj.private_bytes(Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
NoEncryption())
status = write_refkey_to_file(refkey_file, password,
key_pem, key_der, cert, encode_format)
return status
| 5,337,564
|
async def fetch_ongoing_alerts(
requester=Security(get_current_access, scopes=[AccessType.admin, AccessType.user]),
session=Depends(get_session)
):
"""
Retrieves the list of ongoing alerts and their information
"""
if await is_admin_access(requester.id):
query = (
alerts.select().where(
alerts.c.event_id.in_(
select([events.c.id])
.where(events.c.end_ts.is_(None))
)))
return await crud.base.database.fetch_all(query=query)
else:
retrieved_alerts = (session.query(models.Alerts)
.join(models.Events)
.filter(models.Events.end_ts.is_(None))
.join(models.Devices)
.join(models.Accesses)
.filter(models.Accesses.group_id == requester.group_id))
retrieved_alerts = [x.__dict__ for x in retrieved_alerts.all()]
return retrieved_alerts
| 5,337,565
|
def data_to_graph_csvs(corpus_context, data):
"""
Convert a DiscourseData object into CSV files for efficient loading
of graph nodes and relationships
Parameters
----------
    corpus_context : CorpusContext
        Corpus context; its configured temporary directory is used to store the CSV files
    data : :class:`~polyglotdb.io.helper.DiscourseData`
        Data to load into a graph
"""
directory = corpus_context.config.temporary_directory('csv')
rfs = {}
rel_writers = {}
token_headers = data.token_headers
for s in data.speakers:
for x in data.annotation_types:
path = os.path.join(directory, '{}_{}.csv'.format(s, x))
rfs[s, x] = open(path, 'a', newline='', encoding='utf8')
rel_writers[s, x] = csv.DictWriter(rfs[s, x], token_headers[x], delimiter=',')
subanno_files = {}
subanno_writers = {}
for sp in data.speakers:
for k, v in data.hierarchy.subannotations.items():
for s in v:
path = os.path.join(directory, '{}_{}_{}.csv'.format(sp, k, s))
subanno_files[sp, k, s] = open(path, 'a', newline='', encoding='utf8')
header = ['id', 'begin', 'end', 'annotation_id', 'label']
subanno_writers[sp, k, s] = csv.DictWriter(subanno_files[sp, k, s], header, delimiter=',')
segment_type = data.segment_type
for level in data.highest_to_lowest():
for d in data[level]:
if d.begin is None or d.end is None:
continue
token_additional = dict(zip(d.token_keys(), d.token_values()))
if d.super_id is not None:
token_additional[data[level].supertype] = d.super_id
s = d.speaker
if s is None:
s = 'unknown'
rel_writers[s, level].writerow(dict(begin=d.begin, end=d.end,
type_id=d.sha(corpus=corpus_context.corpus_name),
id=d.id, speaker=s, discourse=data.name,
previous_id=d.previous_id,
**token_additional))
if d.subannotations:
for sub in d.subannotations:
row = {'begin': sub.begin, 'end': sub.end, 'label': sub.label,
'annotation_id': d.id, 'id': sub.id}
subanno_writers[s, level, sub.type].writerow(row)
for x in rfs.values():
x.close()
for x in subanno_files.values():
x.close()
| 5,337,566
|
def process_discover(data_export, file, limit, environment_id):
"""
Convert the discovery query to a CSV, writing it to the provided file.
"""
try:
processor = DiscoverProcessor(
discover_query=data_export.query_info, organization_id=data_export.organization_id
)
except ExportError as error:
metrics.incr("dataexport.error", tags={"error": six.text_type(error)}, sample_rate=1.0)
logger.info("dataexport.error: {}".format(six.text_type(error)))
capture_exception(error)
raise error
writer = create_writer(file, processor.header_fields)
iteration = 0
with snuba_error_handler(logger=logger):
is_completed = False
while not is_completed:
offset = SNUBA_MAX_RESULTS * iteration
next_offset = SNUBA_MAX_RESULTS * (iteration + 1)
is_exceeding_limit = limit and limit < next_offset
raw_data_unicode = processor.data_fn(offset=offset, limit=SNUBA_MAX_RESULTS)["data"]
# TODO(python3): Remove next line once the 'csv' module has been updated to Python 3
# See associated comment in './utils.py'
raw_data = convert_to_utf8(raw_data_unicode)
raw_data = processor.handle_fields(raw_data)
if is_exceeding_limit:
# Since the next offset will pass the limit, just write the remainder
writer.writerows(raw_data[: limit % SNUBA_MAX_RESULTS])
else:
writer.writerows(raw_data)
iteration += 1
# If there are no returned results, or we've passed the limit, stop iterating
is_completed = len(raw_data) == 0 or is_exceeding_limit
| 5,337,567
|
def main():
"""
Test running the function in script mode
"""
process_cloudwatch_metric_event()
| 5,337,568
|
def breweryBeers(id):
"""Finds the beers that belong to the brewery with the id provided
id: string
return: json object list or empty json list
"""
try:
# [:-1:] this is because the id has a - added to the end to indicate
# that it is for this method, removes the last character from a string
return BreweryDb.brewery(id[:-1:] + "/beers")['data']
except Exception:
return id[:-1:] + "/beers"
| 5,337,569
|
def min_max_date(rdb, patient):
""" Returns min and max date for selected patient """
sql = """SELECT min_date,max_date FROM patient WHERE "Name"='{}'""".format(patient)
try:
df = pd.read_sql(sql, rdb)
min_date, max_date = df['min_date'].iloc[0].date(), df['max_date'].iloc[0].date()
    except Exception:
min_date, max_date = '', ''
return min_date, max_date
| 5,337,570
|
def integrate(f, a, b, N, method):
    """
    @param f: function to integrate
    @param a: initial point
    @param b: end point
    @param N: number of intervals for precision
    @param method: trapeze, rectangle, Simpson, Gauss2
    @return: integral from a to b of f(x)
    """
    from math import sqrt
    h = (b - a) / N
    Lhf = 0.0
    if method == "trapeze":
        for i in range(N):
            xi = a + i * h
            Lhf += f(xi) + f(xi + h)
        Lhf *= h / 2
    elif method == "rectangle":
        # midpoint rule: evaluate f at the centre of each sub-interval
        for i in range(N):
            xi = a + i * h
            Lhf += f(xi + h / 2)
        Lhf *= h
    elif method == "Simpson":
        for i in range(N):
            xi = a + i * h
            Lhf += f(xi) + 4 * f(xi + h / 2) + f(xi + h)
        Lhf *= h / 6
    elif method == "Gauss2":
        # two-point Gauss-Legendre quadrature on each sub-interval
        for i in range(N):
            xi = a + i * h
            Lhf += f(xi + h * 0.5 * (1 - 1 / sqrt(3))) + f(xi + h * 0.5 * (1 + 1 / sqrt(3)))
        Lhf *= h / 2
    else:
        raise ValueError("method must be one of: trapeze, rectangle, Simpson, Gauss2")
    return Lhf
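
# Hedged usage sketch added for illustration (not part of the original source);
# approximates the integral of x**2 over [0, 1] (exact value 1/3) with each
# rule supported by integrate() above. `_example_integrate` is hypothetical.
def _example_integrate():
    exact = 1.0 / 3.0
    for rule in ("trapeze", "rectangle", "Simpson", "Gauss2"):
        approx = integrate(lambda x: x ** 2, 0.0, 1.0, 100, rule)
        print(f"{rule:10s} {approx:.6f} (error {abs(approx - exact):.2e})")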
| 5,337,571
|
def test_get_all_names(code, target):
"""Tests get_all_names function."""
res = kale_ast.get_all_names(code)
assert sorted(res) == sorted(target)
| 5,337,572
|
def sum_naturals(n):
"""Sum the first N natural numbers.
>>> sum_naturals(5)
15
"""
total, k = 0, 1
while k <= n:
total, k = total + k, k + 1
return total
| 5,337,573
|
def load_data(data_map,config,log):
"""Collect data locally and write to CSV.
:param data_map: transform DataFrame map
:param config: configurations
:param log: logger object
:return: None
"""
for key,df in data_map.items():
(df
.coalesce(1)
.write
.csv(f'{config["output"]}/{key}', mode='overwrite', header=True))
return None
| 5,337,574
|
def getAdjacentes(qtde_v, MATRIZ):
"""Método getAdjacentes p/ pegar os adjacentes do Grafo"""
aMATRIZ = []
for i in range(qtde_v):
linha = []
for j in range(qtde_v):
if MATRIZ[i][j] == 1:
linha.append("v" + str(j))
aMATRIZ.append(linha)
y = 0
for i in aMATRIZ:
print("v" + str(y) + ": ", i)
y+=1
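
# Hedged usage sketch added for illustration (not part of the original source);
# builds the adjacency lists of a made-up 3-vertex graph. Prints each vertex
# with its neighbour labels, e.g. "v0: ['v1', 'v2']".
def _example_getAdjacentes():
    MATRIZ = [
        [0, 1, 1],
        [1, 0, 0],
        [1, 0, 0],
    ]
    getAdjacentes(3, MATRIZ)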
| 5,337,575
|
def root(ctx, sources, output, _open):
"""
Computes and shows the root of the biggest tree on a bibliography collection.
"""
show("root", ctx.obj["sapper"], sources, output, _open)
| 5,337,576
|
def get_config(config_file, exp_dir=None, is_test=False):
""" Construct and snapshot hyper parameters """
    config = edict(yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader))
# create hyper parameters
config.run_id = str(os.getpid())
config.exp_name = '_'.join([
config.model.model_name, config.dataset.dataset_name,
time.strftime('%Y-%b-%d-%H-%M-%S'), config.run_id
])
if config.train.is_resume and not is_test:
config.save_dir = config.train.resume_dir
save_name = os.path.join(config.save_dir, 'config_resume_{}.yaml'.format(config.run_id))
else:
config.save_dir = os.path.join(config.exp_dir, config.exp_name)
save_name = os.path.join(config.save_dir, 'config.yaml')
mkdir(config.exp_dir)
mkdir(config.save_dir)
yaml.dump(edict2dict(config), open(save_name, 'w'), default_flow_style=False)
#Seed and GPU
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
config.use_gpu = config.use_gpu and torch.cuda.is_available()
return config
| 5,337,577
|
def _filter_credential_warning(record) -> bool:
"""Rewrite out credential not found message."""
if (
not record.name.startswith("azure.identity")
or record.levelno != logging.WARNING
):
return True
message = record.getMessage()
if ".get_token" in message:
if message.startswith("EnvironmentCredential"):
print("Attempting to sign-in with environment variable credentials...")
if message.startswith("AzureCliCredential"):
print("Attempting to sign-in with Azure CLI credentials...")
if message.startswith("ManagedIdentityCredential"):
print("Attempting to sign-in with Managed Instance credentials...")
print("Falling back to interactive logon.")
return not message
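
# Hedged usage sketch added for illustration (not part of the original source);
# installs the filter on a handler so azure.identity credential warnings are
# rewritten as friendly progress messages. Plain callables are accepted by
# addFilter() since Python 3.2.
def _example_install_credential_filter():
    import logging
    handler = logging.StreamHandler()
    handler.addFilter(_filter_credential_warning)
    logging.getLogger().addHandler(handler)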
| 5,337,578
|
def import_module_from_path(mod_name, mod_path):
"""Import module with name `mod_name` from file path `mod_path`"""
spec = importlib.util.spec_from_file_location(mod_name, mod_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
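
# Hedged usage sketch added for illustration (not part of the original source);
# "helpers.py" is a hypothetical file sitting next to the calling module.
def _example_import_module_from_path():
    import os
    mod = import_module_from_path(
        "helpers", os.path.join(os.path.dirname(__file__), "helpers.py")
    )
    return mod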
| 5,337,579
|
def test_optimizer_can_fit():
"""Test that TestOptimizer can call fit with the proper API"""
syms = ['A', 'B', 'C']
targ = [-100, 0, 10]
opt = TestOptimizer(Database())
opt.fit(syms, {}, targ)
for sym, t in zip(syms, targ):
assert sym in opt.dbf.symbols
assert np.isclose(opt.dbf.symbols[sym], t)
| 5,337,580
|
def preprocessing(text, checkpoint_dir, minocc):
"""
This time, we cannot leave the file as it is. We have to modify it first.
- replace "\n" by " \n " -> newline is a word
- insert space between punctuation and last word of sentence
    - create vocab, but only for those words that occur more than `minocc` times
    - replace all words outside the vocab with "<unk>"
    returns the processed list of words we will use as the dataset as well as char2idx and idx2char
"""
splitted = prepare_text(text)
print("Total number of words:",len(splitted))
occurences = dict()
for word in splitted:
        if word in occurences:
occurences[word] += 1
else:
occurences[word] = 1
vocab = ["<unk>"]
for word in list(occurences.keys()):
if occurences[word] > minocc:
vocab.append(word)
    splitted = remove_unknowns(vocab, splitted)  # replace words outside the vocab with "<unk>"
print(splitted[0:250])
print("Number of unique relevant words:", len(vocab))
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
pickle_rick(checkpoint_dir, char2idx, 'char2idx')
pickle_rick(checkpoint_dir, idx2char, 'idx2char')
pickle_rick(checkpoint_dir, splitted, 'dataset')
return splitted, char2idx, idx2char
| 5,337,581
|
def test():
"""Run the prediction and routing tests"""
printf("Testing benzlim...")
test_predict()
test_route()
| 5,337,582
|
def run_histogram_extr(splopter=None, z_high=370.0, z_low=70.0, fig=None, show=False, normalise_v=True, species=2,
t_flag=False, fitter=None):
"""
NOTE: This has been implemented more generally as a method - splopter.extract_histograms() - and therefore this
function has now been deprecated.
"""
# if not splopter:
# splopter = spl.Splopter('bin/data_local/', 'benchmarking/', 'disttest_fullnogap/', prepare=False)
#
# region = {'Region': [z_low, z_high, y_low, y_high]}
# v_scale = 1000
# mass = {1: n.ELECTRON_MASS * splopter.denormaliser.mu,
# 2: n.ELECTRON_MASS}
#
# hists = splopter.extract_histograms(region, denormalise=normalise_v, v_scale=v_scale)
# hist_bins, hist = hists
#
# fitdata = get_histogram_fit(splopter, hist, hist_bins, fitter=fitter, v_scale=v_scale)
#
# if t_flag:
# T_e = (fitdata.fit_params[0].value * mass[species]) / (2 * n.ELEM_CHARGE)
# T_e_err = (fitdata.fit_params[0].error * mass[species]) / (2 * n.ELEM_CHARGE)
# print('T_e = {} +/- {}'.format(T_e, T_e_err))
# else:
# fitdata.fit_params[0].value *= v_scale ** 2
# fitdata.fit_params[0].error *= v_scale ** 2
# # fitdata.fit_params[1].value *= v_scale
# # fitdata.fit_params[1].error *= v_scale
# fitdata.print_fit_params()
#
# if not fig:
# fig = plt.figure()
#
# plt.plot(hist_bins, hist, label='z: {} - {}'.format(z_low, z_high))
# plt.plot(hist_bins, fitdata.fit_y, label="T_e = {t:2.1f}eV".format(t=fitdata.get_param(c.ELEC_TEMP, errors_fl=False)))
# plt.xlabel(r'Velocity ($m s^{-1}$)')
# plt.ylabel(r'Normalised f(v)')
# plt.legend()
#
# if show:
# plt.show()
#
# return fitdata
pass
| 5,337,583
|
def dwave_chimera_graph(
m,
n=None,
t=4,
draw_inter_weight=draw_inter_weight,
draw_intra_weight=draw_intra_weight,
draw_other_weight=draw_inter_weight,
seed=0,
):
"""
Generate DWave Chimera graph as described in [1] using dwave_networkx.
Parameters
----------
m: int
Number of cells per column
n: int
Number of cells per row
t: int
Number of nodes on each side of a bipartite cell subgraph
draw_inter_weight: function (seed) -> number
Function to call for weights of inter-cell edges
draw_intra_weight: function (seed) -> number
Function to call for weights of intra-cell edges
draw_other_weight: function (seed) -> number
        Function to call for weights of the remaining (other) edges
seed: integer, random_state, or None
Indicator of random number generation state
Returns
-------
graph: nx.Graph
The generated Chimera graph
References
----------
..[1] https://docs.ocean.dwavesys.com/en/latest/concepts/topology.html
"""
if not n:
n = m
g = dwave.chimera_graph(m, n, t)
_initialize_weights_chimera(
chimera_graph=g,
size=m,
draw_inter_weight=lambda: draw_inter_weight(seed),
draw_intra_weight=lambda: draw_intra_weight(seed),
draw_other_weight=lambda: draw_other_weight(seed),
)
return g
| 5,337,584
|
def extract_first_value_in_quotes(line, quote_mark):
"""
Extracts first value in quotes (single or double) from a string.
Line is left-stripped from whitespaces before extraction.
:param line: string
:param quote_mark: type of quotation mark: ' or "
:return: Dict: 'value': extracted value;
'remainder': the remainder after extraction
'error' empty string if success or 'syntax' otherwise;
"""
line = line.lstrip()
result = {'value': '', 'remainder': line, 'error': 'syntax'}
if len(line) < 2:
return result
if line[0] != quote_mark:
return result
next_qm_pos = line.find(quote_mark, 1)
if next_qm_pos == -1:
return result
result['value'] = line[1:next_qm_pos]
result['remainder'] = line[next_qm_pos + 1:]
result['error'] = ''
return result
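
# Hedged usage sketch added for illustration (not part of the original source);
# extracts the first double-quoted value and shows the remainder and error flag.
def _example_extract_first_value_in_quotes():
    result = extract_first_value_in_quotes('  "hello" world', '"')
    # result == {'value': 'hello', 'remainder': ' world', 'error': ''}
    return result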
| 5,337,585
|
def test_check_param_grids_single():
"""Test the check of a single parameter grid."""
init_param_grids = {'svr__C': [0.1, 1.0], 'svr__kernel': ['rbf', 'linear']}
param_grids = check_param_grids(init_param_grids, ['lr', 'svr', 'dtr'])
exp_param_grids = [
{'svr__C': [0.1], 'svr__kernel': ['rbf'], 'est_name': ['svr']},
{'svr__C': [1.0], 'svr__kernel': ['rbf'], 'est_name': ['svr']},
{'svr__C': [0.1], 'svr__kernel': ['linear'], 'est_name': ['svr']},
{'svr__C': [1.0], 'svr__kernel': ['linear'], 'est_name': ['svr']},
{'est_name': ['dtr']},
{'est_name': ['lr']},
]
assert all([param_grid in exp_param_grids for param_grid in param_grids])
| 5,337,586
|
def dynamic(graph):
"""Returns shortest tour using dynamic programming approach.
The idea is to store lengths of smaller sub-paths and re-use them
to compute larger sub-paths.
"""
adjacency_M = graph.adjacency_matrix()
tour = _dynamic(adjacency_M, start_node=0)
return tour
| 5,337,587
|
def perform_test_complete_operations(test_stat):
"""
Performs all operations related to end quiz
:param test_stat: TestStat object
:return: None
"""
if test_stat.has_completed:
return
test_stat.has_completed = True
test_stat.save()
send_test_complete_email(test_stat)
| 5,337,588
|
def read_login_file():
"""
Parse the credentials file into username and password.
Returns
-------
dict
"""
with open('.robinhood_login', 'r') as login_file:
credentials = yaml.safe_load(login_file)
return credentials
| 5,337,589
|
def flatten(ls):
"""
Flatten list of list
"""
return list(chain.from_iterable(ls))
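
# Hedged usage sketch added for illustration (not part of the original source);
# flatten() assumes `from itertools import chain` at module level.
def _example_flatten():
    assert flatten([[1, 2], [3], []]) == [1, 2, 3]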
| 5,337,590
|
def test_get_smoothies_recipes(test_client):
"""
GIVEN a Flask application configured for testing
WHEN the '/smoothies/' page is requested (GET)
THEN check the response is valid
"""
recipes = [b'Berry Smoothie', b'Chocolate Milk Shake']
response = test_client.get('/smoothies/')
assert response.status_code == 200
for recipe in recipes:
assert recipe in response.data
| 5,337,591
|
def gaussian_kernel(size, size_y=None):
""" Gaussian kernel.
"""
size = int(size)
if not size_y:
size_y = size
else:
size_y = int(size_y)
x, y = np.mgrid[-size:size+1, -size_y:size_y+1]
g = np.exp(-(x**2/float(size)+y**2/float(size_y)))
fwhm = size
fwhm_aper = photutils.CircularAperture((frame_center(g)), fwhm/2.)
fwhm_aper_phot = photutils.aperture_photometry(g, fwhm_aper)
g_norm = g/np.array(fwhm_aper_phot['aperture_sum'])
return g_norm/g_norm.max()
| 5,337,592
|
def test_remove_news_articles():
""" Checks that the function can remove news articles"""
news_articles.clear()
test_article = {
'title': 'test title',
'content': 'test content'
}
prev_removed_article = {
'title': 'previously removed',
'content': 'previously removed'
}
news_articles.append(test_article)
news_articles.append(prev_removed_article)
removed_articles.append(prev_removed_article)
# This should only remove the test article, because update=False
remove_news_articles(article2remove = 'test title', update = False)
assert test_article not in news_articles
# Since update=True this should remove prev_removed_article from
# news_articles, but not from removed_articles
remove_news_articles(update = True)
assert prev_removed_article not in news_articles
    # Finally, since prev_removed_article is not in news_articles and
    # update=True, prev_removed_article is removed from removed_articles
remove_news_articles(update = True)
assert prev_removed_article not in removed_articles
| 5,337,593
|
def parse_property_value(prop_tag: int, raw_values: list, mem_id: int = 0) -> Any:
"""
Parse property raw values
:param prop_tag: The property tag, see 'PropertyTag' enum
:param raw_values: The property values
:param mem_id: External memory ID (default: 0)
"""
if prop_tag not in PROPERTIES.keys():
return None
cls = PROPERTIES[prop_tag]['class'] # type: ignore
kwargs = PROPERTIES[prop_tag]['kwargs'] # type: ignore
kwargs['mem_id'] = mem_id # type: ignore
return cls(prop_tag, raw_values, **kwargs)
| 5,337,594
|
async def kickme(leave):
""" Basically it's .kickme command """
await leave.edit("**Nope, no, no, I go away**")
await leave.client.kick_participant(leave.chat_id, "me")
| 5,337,595
|
def scan_stanzas_string(
s: str,
*,
separator_regex: Optional[RgxType] = None,
skip_leading_newlines: bool = False,
) -> Iterator[List[Tuple[str, str]]]:
"""
.. versionadded:: 0.4.0
Scan a string for zero or more stanzas of RFC 822-style header fields and
return a generator of lists of ``(name, value)`` pairs, where each list
represents a stanza of header fields in the input.
The stanzas are terminated by blank lines. Consecutive blank lines between
stanzas are treated as a single blank line. Blank lines at the end of the
input are discarded without creating a new stanza.
.. deprecated:: 0.5.0
Use `scan_stanzas()` instead
:param s: a string which will be broken into lines on CR, LF, and CR LF
boundaries and passed to `scan_stanzas()`
:param kwargs: Passed to the `Scanner` constructor
:rtype: generator of lists of pairs of strings
:raises ScannerError: if the header section is malformed
"""
return scan_stanzas( # pragma: no cover
s,
separator_regex=separator_regex,
skip_leading_newlines=skip_leading_newlines,
)
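
# Hedged usage sketch added for illustration (not part of the original source);
# `_example_scan_stanzas_string` is a hypothetical helper showing two stanzas
# separated by a blank line. The expected output is indicative only.
def _example_scan_stanzas_string():
    text = "Foo: red\nBar: green\n\nFoo: blue\n"
    stanzas = list(scan_stanzas_string(text))
    # roughly: [[('Foo', 'red'), ('Bar', 'green')], [('Foo', 'blue')]]
    return stanzas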
| 5,337,596
|
def _StripLinkerAddedSymbolPrefixes(raw_symbols):
"""Removes prefixes sometimes added to symbol names during link
Removing prefixes make symbol names match up with those found in .o files.
"""
for symbol in raw_symbols:
full_name = symbol.full_name
if full_name.startswith('startup.'):
symbol.flags |= models.FLAG_STARTUP
symbol.full_name = full_name[8:]
elif full_name.startswith('unlikely.'):
symbol.flags |= models.FLAG_UNLIKELY
symbol.full_name = full_name[9:]
elif full_name.startswith('rel.local.'):
symbol.flags |= models.FLAG_REL_LOCAL
symbol.full_name = full_name[10:]
elif full_name.startswith('rel.'):
symbol.flags |= models.FLAG_REL
symbol.full_name = full_name[4:]
| 5,337,597
|
def format_dependency(dependency: str) -> str:
"""Format the dependency for the table."""
return "[coverage]" if dependency == "coverage" else f"[{dependency}]"
| 5,337,598
|
def _addSuffixToFilename(suffix, fname):
"""Add suffix to filename, whilst preserving original extension, eg:
'file.ext1.ext2' + '_suffix' -> 'file_suffix.ext1.ext2'
"""
head = op.split(fname)[0]
fname, ext = _splitExts(fname)
return op.join(head, fname + suffix + ext)
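
# Hedged usage sketch added for illustration (not part of the original source);
# relies on the _splitExts helper used by _addSuffixToFilename, and the path
# below is made up.
def _example_addSuffixToFilename():
    out = _addSuffixToFilename('_brain', '/data/subj1/T1.nii.gz')
    # expected: '/data/subj1/T1_brain.nii.gz'
    return out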
| 5,337,599
|