content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def getCountrangepartition(ratingstablename, numberofpartitions, openconnection):
    """Return the number of rows that fall into each rating-range partition.

    The [0, 5] rating range is split into `numberofpartitions` equal-width
    intervals; the first interval is closed on both ends, the rest are
    half-open on the left (lo, hi].

    :param ratingstablename: name of the ratings table to count from
    :param numberofpartitions: how many equal-width rating intervals to use
    :param openconnection: an open DB-API connection
    :return: list of row counts, one per partition
    """
    cursor = openconnection.cursor()
    width = 5.0 / numberofpartitions
    counts = []
    # First partition includes its lower bound (rating == 0).
    cursor.execute("select count(*) from {0} where rating >= {1} and rating <= {2}".format(ratingstablename, 0, width))
    counts.append(int(cursor.fetchone()[0]))
    low = width
    for _ in range(1, numberofpartitions):
        cursor.execute("select count(*) from {0} where rating > {1} and rating <= {2}".format(ratingstablename,
                                                                                              low,
                                                                                              low + width))
        low += width
        counts.append(int(cursor.fetchone()[0]))
    cursor.close()
    return counts
def get_default_app_name(site, user):
    """Pick the app shown by default for the current user.

    Authenticated users land on the site's home app; anonymous visitors
    get the index app instead.
    """
    if user.is_authenticated:
        return site.home_app_name
    return site.index_app_name
def h2(tabuleiro):
    """Manhattan-distance heuristic for the 3x3 sliding puzzle.

    Tile ``k``'s goal cell is ``(k // 3, k % 3)``, so the nine duplicated
    per-tile branches of the original collapse into one ``divmod``.  Note
    that, exactly like the branch version, the blank tile (0) is included
    in the sum.

    :param tabuleiro: 3x3 board as a list of lists of ints 0..8
    :return: total Manhattan distance to the goal configuration
    """
    h = 0
    for i in range(3):
        for j in range(3):
            goal_i, goal_j = divmod(tabuleiro[i][j], 3)
            h += abs(goal_i - i) + abs(goal_j - j)
    return h
def is_iterable(candidate) -> bool:
    """Tell whether *candidate* supports iteration.

    :param candidate: object whose iterability is being probed
    :return: True if ``iter(candidate)`` succeeds, False otherwise
    """
    try:
        iter(candidate)
    except TypeError:
        return False
    return True
def clip_min_max(img, val_min, val_max):
    """Clamp *img* in place to the closed range [val_min, val_max].

    Works on any array-like that supports boolean-mask assignment
    (e.g. a NumPy array).  Returns the same, mutated object.
    """
    too_low = img < val_min
    too_high = img > val_max
    img[too_low] = val_min
    img[too_high] = val_max
    return img
def create_demand_evaluator(data):
    """Build a demand callback closed over ``data['demands']``."""
    demands = data['demands']

    def demand_evaluator(manager, node):
        """Demand of *node*, resolved through the routing index manager."""
        return demands[manager.IndexToNode(node)]

    return demand_evaluator
def chunkify(list, n):
    """Yield a total of n splits of the given list (round-robin, not contiguous).

    Note: the parameter name shadows the ``list`` builtin; it is kept for
    caller compatibility.
    """
    return [list[start::n] for start in range(n)]
def from_args(args, key):
    """
    Lazy look into args for key.

    :param args: mapping of argument names to values
    :param key: name to look up
    :return: ``args[key]`` if present, otherwise the literal string 'ERROR'
    """
    # `key in args` replaces the non-idiomatic args.__contains__(key);
    # the f-string prefix on the constant 'ERROR' was pointless and is gone.
    return args[key] if key in args else 'ERROR'
import os
def getDemoFontPath():
    """Return the path to Data/DemoFont.ufo/."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "testdata", "DemoFont.ufo")
def multiply_2(factor):
    """Cube *factor* (factor ** 3).

    Example without annotations; input checking is done with ``assert``.

    :param factor: number to cube; must be exactly an int or a float
    :return: factor cubed
    """
    # Deliberately checks the exact type — bool is rejected even though it
    # subclasses int — matching the original type()-based test.
    assert type(factor) in (int, float), "MAMMA MIA!"
    return factor * factor * factor
def xywh2xyxy(bbox):
    """
    Coordinate conversion xywh -> xyxy.

    A 1-D box is promoted to shape (1, 4); the input tensor itself is not
    modified (a clone is returned).

    Note: the previous version wrote ``xc + 2 * half_w`` because ``xc`` was
    a live view of column 0 that had already been shifted by ``-half_w`` —
    correct, but silently dependent on view aliasing.  The centres are now
    copied first so the arithmetic is the plain, unaliased form.

    :param bbox: torch tensor of shape (4,) or (..., 4) in (xc, yc, w, h)
    :return: cloned tensor in (x1, y1, x2, y2) format, at least 2-D
    """
    out = bbox.clone()
    if len(out.size()) == 1:
        out = out.unsqueeze(0)
    xc = out[..., 0].clone()
    yc = out[..., 1].clone()
    half_w = out[..., 2] / 2
    half_h = out[..., 3] / 2
    out[..., 0] = xc - half_w
    out[..., 1] = yc - half_h
    out[..., 2] = xc + half_w
    out[..., 3] = yc + half_h
    return out
def is_backbone(atom, element, minimal=False):
    """
    Whether `atom` is a protein backbone atom or not.

    The element check guards against HETATM entries where, for example,
    'CA' is calcium rather than an alpha-carbon.

    Parameters
    ----------
    atom : str
        The atom name.
    element : str
        The element name.
    minimal : bool
        If `True` considers only `C` and `N` elements.
        `False`, considers also `O`.
    """
    name = atom.strip()
    elem = element.strip()
    allowed_elements = ('N', 'C') if minimal else ('N', 'C', 'O')
    return name in ('N', 'CA', 'C', 'O') and elem in allowed_elements
def knapsack(limit, values, weights):
    """Returns the maximum value that can be reached using given weights"""
    best = [0] * (limit + 1)
    for idx, weight in enumerate(weights):
        value = values[idx]
        # Walk capacities downward so each item is used at most once.
        for cap in range(limit, weight - 1, -1):
            candidate = value + best[cap - weight]
            if candidate > best[cap]:
                best[cap] = candidate
    return best[-1]
def hello(buffer, args, content_type):
    """Greet the caller named in *args* (defaults to 'world')."""
    name = args.get("name", "world")
    return "hello " + name
import math
def rotate(x, y, beta):
    """
    Rotate vector(x,y) by beta radians counterclockwise
    https://matthew-brett.github.io/teaching/rotation_2d.html
    """
    cos_b = math.cos(beta)
    sin_b = math.sin(beta)
    return (cos_b * x - sin_b * y, sin_b * x + cos_b * y)
def references(name, tag):
    """Provides suggested references for the specified data set

    Parameters
    ----------
    name : str
        Instrument name
    tag : str
        Instrument tag

    Returns
    -------
    refs : str
        Suggested Instrument reference(s)
    """
    refs = {
        'tec': {
            'vtec': "Rideout and Coster (2006) doi:10.1007/s10291-006-0029-5",
        },
    }
    instrument_refs = refs[name]
    return instrument_refs[tag]
def _make_alias_docstring(new_name, func_or_class):
"""Make deprecation alias docstring."""
if func_or_class.__doc__:
lines = func_or_class.__doc__.split('\n')
lines[0] += ' (deprecated)'
else:
lines = ['DEPRECATED CLASS']
first_line = lines[0]
notice_lines = [
('Warning: THIS CLASS IS DEPRECATED. It will be removed in a future '
'version.'),
'Please use %s instead.' % new_name
]
remaining_lines = []
remaining_lines_string = '\n'.join(lines[1:]).strip()
if remaining_lines_string:
remaining_lines = remaining_lines_string.split('\n')
lines = ([first_line, ''] + notice_lines +
(([''] + remaining_lines) if remaining_lines else []))
return '\n'.join(lines) | 7f28908e09d690937a55d2bf1cc392857eb2c0ac | 35,267 |
import click
def get_definition(spec_data, endpoint):
    """
    Returns object name by endpoint.

    Looks up *endpoint* under ``spec_data['paths']`` and returns the name of
    the schema object referenced by the first POST parameter carrying a
    ``$ref``.

    :param spec_data: parsed OpenAPI/Swagger specification dict
    :param endpoint: path string, e.g. ``'/pets'``
    :return: referenced definition name, or ``None`` when no ``$ref`` exists
    :raises click.ClickException: if the endpoint is absent from the spec
    """
    if endpoint not in spec_data['paths']:
        # Fixed message grammar ("does not found" -> "not found").
        raise click.ClickException('API endpoint {} not found'.format(endpoint))
    endpoint_spec = spec_data['paths'][endpoint]
    for param in endpoint_spec['post'].get('parameters', ''):
        schema = param.get('schema', '')
        if '$ref' in schema:
            return schema['$ref'].split('/')[-1]
def adj_r2(r2: float, sample_size: int, n_features: int) -> float:
    """Adjusted R-squared, penalising for the number of predictors.

    >>> round(adj_r2(0.8, 100, 5), 3)
    0.789
    >>> round(adj_r2(0.8, 20, 5), 3)
    0.729
    """
    penalty = (sample_size - 1) / (sample_size - n_features - 1)
    return 1 - penalty * (1 - r2)
def remove_quotes(string):
    """
    Strip at most one leading and one trailing double-quote from a path.

    :param string: Path that may be wrapped in double quotes.
    :return: Path without the surrounding quotes.
    """
    trimmed = string[1:] if string.startswith('"') else string
    if trimmed.endswith('"'):
        trimmed = trimmed[:-1]
    return trimmed
def normaliseMot(listeFonction, str):
    """Normalise a word by piping it through a list of functions.

    Each function in *listeFonction* is applied, in order, to the running
    string value.

    :param listeFonction: list of callables, each taking and returning a str
    :param str: string to normalise (name kept for caller compatibility;
        note it shadows the ``str`` builtin inside this function)
    :return: the normalised string
    """
    result = str
    for normaliser in listeFonction:
        result = normaliser(result)
    return result
def get_digits_from_left_to_right(number, lst=None):
    """Return digits of an integer excluding the sign.

    Recursively peels off the most-significant digits first, so the
    resulting tuple reads left to right.
    """
    if lst is None:
        lst = []
    number = abs(number)
    if number < 10:
        lst.append(number)
    else:
        get_digits_from_left_to_right(number // 10, lst)
        lst.append(number % 10)
    return tuple(lst)
def split_route(route):
    """
    Split a full route into nest-to-nest subroutes.

    Every node whose ``id`` is 0 (the nest) opens a new subroute; each
    completed subroute is closed with the route's first node.  The trailing
    subroute, which holds only the final nest, is dropped.
    """
    subroutes = []
    idx = -1
    for node in route:
        if node.id == 0:
            if idx != -1:
                # Close the finished subroute with the starting nest.
                subroutes[idx].append(route[0])
            subroutes.append([node])
            idx += 1
        else:
            subroutes[idx].append(node)
    return subroutes[:-1]
def split_variant(variant):
    """
    Split a multi-variant `HGVS` string into single-variant strings.

    A multi-variant looks like ``p.[A1T;B2C]`` and is expanded into
    ``['p.A1T', 'p.B2C']``; a single variant is returned as a one-element
    list.

    Parameters
    ----------
    variant : str
        A valid single or multi-variant `HGVS` string.

    Returns
    -------
    list[str]
        A list of single `HGVS` strings.
    """
    if len(variant.split(";")) == 1:
        return [variant]
    prefix = variant[0]
    # Drop the "X.[" head and "]" tail, then re-prefix each event.
    inner = variant[3:-1]
    return ["{}.{}".format(prefix, event.strip()) for event in inner.split(";")]
import json
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
          allow_nan=True, cls=None, indent=None, separators=None,
          default=None, sort_keys=False, **kw):
    """Serialize *obj* into a JSON string.

    All keyword arguments mirror :func:`json.dumps` and are forwarded
    verbatim.
    """
    return json.dumps(
        obj,
        skipkeys=skipkeys,
        ensure_ascii=ensure_ascii,
        check_circular=check_circular,
        allow_nan=allow_nan,
        cls=cls,
        indent=indent,
        separators=separators,
        default=default,
        sort_keys=sort_keys,
        **kw,
    )
def is_itits(node):
    """
    Return True when *node* is the pronoun "it" (tag PRP) or "its" (PRP$).
    """
    if len(node) != 1:
        return False
    word = node[0].lower()
    tag = node.label()
    if tag == 'PRP':
        return word == 'it'
    if tag == 'PRP$':
        return word == 'its'
    return False
def computePrediction(state, action, valueNetwork, device):
    """Wrapper for ValueNetwork.forward returning the chosen action's values.

    :param state: 2-D tensor of states (batch x features)
    :param action: int in {0, 1, 2, 3}
    :param valueNetwork: callable mapping states to per-action value rows
    :param device: unused here; kept for interface compatibility
    :return: 1-D tensor holding the value of *action* for each state row
    """
    assert len(state.shape) == 2  # state is a 2-D tensor
    assert action in (0, 1, 2, 3)  # action is an int in [0, 1, 2, 3]
    return valueNetwork(state)[:, action]
import re
def ShortenOnSentence(source, lengthHint=250):
    """Shorten source at a sentence boundary.

    Args:
      source: input text to shorten.
      lengthHint: length at which the input should be shortened.
    Returns:
      shortened text
    """
    if source and len(source) > lengthHint:
        source = source.strip()
        sentence_end = re.compile('[.!?]')
        sentences = sentence_end.split(source)
        shortened = ""
        index = 0
        while index < len(sentences):
            if index > 0:
                # Re-insert the punctuation character the split consumed.
                if len(shortened) < len(source):
                    shortened += source[len(shortened)]
            shortened += sentences[index]
            index += 1
            if index == len(sentences):
                if len(shortened) < len(source):
                    shortened += source[len(source) - 1]
            if len(shortened) > lengthHint:
                if len(shortened) < len(source):
                    shortened += source[len(shortened)]
                break
        if len(source) > len(shortened) + 1:
            # Truncated well short of the original: mark the cut.
            shortened += ".."
        source = shortened
    return source
def stack_follow(deck_size, position, *_):
    """Get new position after stacking deck.

    "Deal into new stack" reverses the deck, so an index is mirrored.
    """
    return deck_size - 1 - position
def get_lowres_image(img, mode):
    """Return the model-input version of *img* for the given mode.

    'denoise' resizes to a fixed 1024x720; 'delight' (and any other mode)
    returns the image untouched.
    """
    if mode == 'denoise':
        return img.resize((1024, 720))
    return img
import os
def path_to_dep(path):
    """Convert a recipe path to a dependency string.

    :param path: str - Path to recipe.
    :return: str - <cookbook>::<recipe>
    """
    cookbook, _, recipe_file = path.split(os.path.sep)[-3:]
    recipe = recipe_file.split('.')[0]
    return "{}::{}".format(cookbook, recipe)
import os
import logging
def createDirectory(newDirectoryPath):
    """
    Attempt to create a directory at the given absolute path.

    :testedWith: None - too small to be necessary
    :param newDirectoryPath: absolute path of new directory
    :return: bool signalling the success of the operation
    """
    try:
        os.mkdir(newDirectoryPath)
    except FileExistsError:
        return False
    # Record the successful creation.
    logging.info(f"Created directory: {newDirectoryPath}")
    return True
import re
def CleanText(text):
    """Cleans provided text by lower casing words, removing punctuation, and
    normalizing spacing so that there is exactly one space between each word.

    Args:
      text: Raw text to be cleaned.
    Returns:
      Cleaned version of text.
    """
    lowered = text.lower().strip()
    # Drop apostrophes entirely (it's -> its) before the punctuation pass.
    quoteless = re.sub(r'\'', '', lowered)
    # Any remaining non-word/non-space character (or underscore run) becomes
    # a space.  Raw strings fix the invalid-escape warnings the old
    # non-raw patterns triggered; the regexes themselves are unchanged.
    spaced = re.sub(r'[^\w\s]|_+', ' ', quoteless)
    return ' '.join(spaced.split())
from functools import reduce
def replace_all(s, reps):
    """
    Apply a sequence of replacements to *s*, left to right.

    Each item of *reps* is unpacked into ``str.replace`` arguments, so it may
    be an (old, new) pair or a two-character string "on".

    >>> replace_all("a", ("ab", "bc", "cd"))
    'd'
    >>> replace_all("ab", (("b", "c"), ("a", "B"), ("c", "A")))
    'BA'
    """
    result = s
    for rep in reps:
        result = result.replace(*rep)
    return result
def sum_even_fibonacci(n):
    """sum of the even-valued terms of the fibonacci sequence not exceeding n"""
    if n < 2:
        return 0
    total = 0
    a, b = 1, 1
    # n iterations is more than enough to exceed n, so the break triggers.
    for _ in range(n):
        a, b = b, a + b
        if a > n:
            break
        if a % 2 == 0:
            total += a
    return total
import os
def _is_executable(path):
"""
Tests whether path exists and is executable.
>>> Path('/usr/bin/python').is_executable()
True
"""
return os.access(str(path), os.X_OK) | 28e7be211ec59b01fc3419ad6b37274f1e772d6a | 35,300 |
def bin_frequencies(df, how='max', bin_size=5, n_bins=None):
    """
    bins spectral data frequencies to the specified bin size or number of bins
    :param df: dataframe of spectral data from single sensor
    :param how: how to aggregate the intensities for each bins. Any numpy aggregate function, default is max
    :param bin_size: size of frequency bins, default is 5. Overriden by n_bins if specified
    :param n_bins: number of bins of equal size to return. Overrides bin_size. Default is None
    :return: dataframe with same number or rows but reduced number of columns
    """
    df = df.T.reset_index()
    df['index'] = df['index'].astype('float')
    if n_bins:
        # Derive a bin width yielding roughly n_bins bins over the range.
        f_min = df['index'].min()
        f_max = df['index'].max()
        bin_size = (f_max - f_min) // n_bins
    # Both branches previously repeated this identical formula; compute once.
    df['freq_bin'] = (df['index'] // bin_size) * bin_size
    df = df.groupby('freq_bin').agg(how).drop('index', axis=1).T
    return df
import torch
def squash(inputs, axis=-1):
    """
    The non-linear activation used in Capsule networks: shrinks short
    vectors toward 0 and long vectors toward unit length, preserving
    direction.

    :param inputs: vectors to be squashed
    :param axis: the axis to squash
    :return: a Tensor with same size as inputs
    """
    norm = torch.norm(inputs, p=2, dim=axis, keepdim=True)
    squared = norm ** 2
    # The 1e-8 keeps the division finite for zero-length vectors.
    scale = squared / (1 + squared) / (norm + 1e-8)
    return scale * inputs
import os
def convert_df_to_manifest(df, dataset_root):
    """
    Converts a dataframe in the format of a VOiCES index file to a JSON string
    compatible with the NeMo ASR manifest format.

    Inputs:
      df - A pandas dataframe representing the index file of the dataset,
        with the default columns of VOiCES index files.
      dataset_root - A string with the absolute path to the root folder of the
        VOiCES dataset.
    Outputs:
      record_string - A string representing the nemo manifest, in newline
        delimited format.
    """
    # .copy() so the assignment below targets an owned frame instead of a
    # slice of the caller's df (avoids pandas' SettingWithCopyWarning).
    manifest = df[['filename', 'noisy_time', 'transcript']].copy()
    manifest = manifest.rename(columns={"filename": "audio_filepath",
                                        "noisy_time": "duration",
                                        "transcript": "text"})
    # Make every audio path absolute under the dataset root.
    manifest['audio_filepath'] = manifest['audio_filepath'].apply(
        lambda x: os.path.join(dataset_root, x))
    record_string = manifest.to_json(orient='records', lines=True)
    return record_string
def convert_sec_to_time(duration_in_sec: float):
    """converts time in seconds to HH:MM:SS

    Args:
        duration_in_sec (float): duration in seconds
    Returns:
        (str): the time in the format: HH:MM:SS
    """
    hh = int(duration_in_sec / 3600)
    mm = int((duration_in_sec % 3600) / 60)
    ss = int(duration_in_sec % 60)
    return f"{hh:02d}:{mm:02d}:{ss:02d}"
import json
import requests
def start_stream(project, location, token, s_config):
    """Start a stream in Google Cloud DataStream by PATCHing its state.

    :param project: Google Cloud project id mentioned in variables.py
    :param location: Google Cloud resource location, for example us-central1
    :param token: Google Cloud auth token
    :param s_config: stream config from variables.py
    :return: True when the API accepted the request, False otherwise
    """
    stream_id = s_config["stream_id"]
    name = s_config["stream_name"]
    url = (
        f"https://datastream.googleapis.com/v1/"
        f"projects/{project}/locations/{location}/streams/{stream_id}?"
        "updateMask=state"
    )
    headers = {
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    payload = json.dumps({
        "state": "RUNNING"
    })
    response = requests.request("PATCH", url, headers=headers, data=payload)
    if response.status_code != 200:
        print(f"Issue while starting stream: {response.text}")
        return False
    print(f"Stream {name} started successfully")
    return True
def to_device(data, device, print_flag=False):
    """Move tensor(s) to the chosen device, recursing into lists/tuples."""
    if print_flag:
        print(f"Moving a tensor to device ({device})")
    if isinstance(data, (list, tuple)):
        moved = []
        for item in data:
            moved.append(to_device(item, device, print_flag))
        return moved
    return data.to(device, non_blocking=True)
import re
def strip_json(string_value):
    """Strip a string containing a JSON document and remove all redundant white-space symbols.

    :param string_value: String containing a JSON document
    :type string_value: str
    :return: String containing a JSON document without redundant white-space symbols
    :rtype: str
    """
    compact = string_value.replace("\n", "")
    # Collapse whitespace runs that hug braces, then normalise commas.
    compact = re.sub(r"{\s+", "{", compact)
    compact = re.sub(r"\s+}", "}", compact)
    return re.sub(r",\s+", ", ", compact)
def _rsfx_count(oracle, s, count, hist, ab):
""" Accumulate counts for context """
trn_data = [oracle.data[n] for n in oracle.trn[s]]
for k in trn_data:
hist[ab[k]] += 1.0
count += 1.0
rsfx_candidate = oracle.rsfx[s][:]
while rsfx_candidate:
s = rsfx_candidate.pop(0)
trn_data = [oracle.data[n] for n in oracle.trn[s]]
for k in trn_data:
hist[ab[k]] += 1.0
count += 1.0
rsfx_candidate.extend(oracle.rsfx[s])
return count, hist | c3ccf3e29ae57a208ea1ac01680e0713bea0f0e3 | 35,311 |
import builtins
def _is_valid_padding(kernel_sdims, strides, padding):
"""Returns True if `padding` corresponds to "VALID" padding for a transposed convolution."""
# This is simply the padding == 'VALID' part of lax._conv_transpose_padding.
for (begin, end), k, s in zip(padding, kernel_sdims, strides):
pad_len = k + s - 2 + builtins.max(k - s, 0)
pad_a = k - 1
pad_b = pad_len - pad_a
if begin != pad_a or end != pad_b:
return False
return True | 8e3351110a8b7b06eff6432e0cbb705d172ccd88 | 35,313 |
import re
def dot2bracket(s: str) -> str:
    """Replace layer names with valid names for pruning.

    Every ``.N`` (a dot followed by digits) becomes ``[N]``.  Implemented as
    a single regex substitution: the previous character-list version
    inserted ``]`` into the list, shifting the positions of every later
    match and corrupting outputs such as ``"a.1b.2"``.

    Test:
    >>> dot2bracket("dense2.1.bn1.bias")
    'dense2[1].bn1.bias'
    >>> dot2bracket("dense2.13.bn1.bias")
    'dense2[13].bn1.bias'
    >>> dot2bracket("conv2.123.bn1.bias")
    'conv2[123].bn1.bias'
    >>> dot2bracket("dense2.6.conv2.5.bn1.bias")
    'dense2[6].conv2[5].bn1.bias'
    >>> dot2bracket("model.6")
    'model[6]'
    >>> dot2bracket("vgg.2.conv2.bn.2")
    'vgg[2].conv2.bn[2]'
    >>> dot2bracket("features.11")
    'features[11]'
    >>> dot2bracket("dense_blocks.0.0.conv1")
    'dense_blocks[0][0].conv1'
    """
    return re.sub(r"\.([0-9]+)", r"[\1]", s)
def normalize_application_tags(app_original, app_updated):
    """ Simple function to normalize application tags when application is created or updated.
    It aims to ensure that required tags are always well defined.

    :param app_original string: The ghost "app" object before modification.
    :param app_updated string: The ghost "app" object with the new modifications.
    :return list A list of dict. Each dict define a tag

    Test with only the default tag Name
    >>> from copy import deepcopy
    >>> from pprint import pprint
    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[]}}
    >>> app_updated = deepcopy(app_original)
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'ec2.prod.webfront.app1'}]

    Test with a custom Tag Name
    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[]}}
    >>> app_updated = deepcopy(app_original)
    >>> app_updated['environment_infos']['instance_tags'] = [{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]

    Test with a custom Tag Name build with variables
    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]}}
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'Prod.Server1'}]

    Test with a custom tag
    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[]}}
    >>> app_updated = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'billing', 'tag_value': 'account1'}]}}
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'ec2.prod.webfront.app1'},
     {'tag_name': 'billing', 'tag_value': 'account1'}]

    Test with a custom tag updated
    >>> app_original = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'billing', 'tag_value': 'account1'}]}}
    >>> app_updated = {'_id': 1111, 'env': 'prod', 'name': 'app1', 'role': 'webfront', 'modules': [{'name': 'mod1', 'git_repo': 'git@github.com/test/mod1'}, {'name': 'mod2', 'git_repo': 'git@github.com/test/mod2'}], 'environment_infos': {'instance_tags':[{'tag_name': 'billing', 'tag_value': 'account2'}]}}
    >>> pprint(sorted(normalize_application_tags(app_original, app_updated), key=lambda d: d['tag_name']))
    [{'tag_name': 'Name', 'tag_value': 'ec2.prod.webfront.app1'},
     {'tag_name': 'billing', 'tag_value': 'account2'}]
    """
    reserved_ghost_tags = ['app', 'app_id', 'env', 'role', 'color']
    default_name = "ec2.{env}.{role}.{app}".format(env=app_original['env'],
                                                   role=app_original['role'],
                                                   app=app_original['name'])
    custom_tags = app_updated['environment_infos'].get('instance_tags', [])
    normalized = []
    # Guarantee a Name tag; a user-supplied one takes precedence.
    if not any(tag['tag_name'] == 'Name' for tag in custom_tags):
        normalized.append({'tag_name': 'Name', 'tag_value': default_name})
    for tag in custom_tags:
        if tag['tag_name'] not in reserved_ghost_tags:
            normalized.append({'tag_name': tag['tag_name'], 'tag_value': tag['tag_value']})
    return normalized
def grelha_nr_linhas(g):
    """
    grelha_nr_linhas: grid --> positive integer

    Return the number of rows in grid *g*.
    """
    return len(g)
import sys
import linecache
def _exception_stack():
"""
Helper function to parse call stack of an exception
Returns
-------
List[Dict]
{'filename': str, 'lineno': int, 'line': str} for each traceback in the current exception
"""
try:
exctype, value, tb = sys.exc_info()
stack = []
tback = tb
while tback is not None:
frame = tback.tb_frame
filename = frame.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, tback.tb_lineno, frame.f_globals)
stack.append({'filename': filename,
'lineno': tback.tb_lineno,
'line': line.strip()})
tback = tback.tb_next
finally:
exctype = value = tb = None
return stack | 133fbbbef3f621ac97c57800d24ac833f99d6ded | 35,319 |
def counter2val(counter):
    """Extract current value of an `itertools.count()` w/o incrementing it."""
    # count.__reduce__() -> (count, (current[, step])); grab `current`.
    _, ctor_args = counter.__reduce__()[:2]
    return ctor_args[0]
def replace_recursive(text, to_match, repl=''):
    """
    Works the same as str.replace, but repeats passes until no matches remain.

    ``str.replace`` performs a single pass, so ``'wooorld'.replace('oo', 'o')``
    yields ``'woorld'``; this function keeps going until ``to_match`` no
    longer appears.  (If *repl* contains *to_match* this never terminates.)

    :param text: the text to operate on.
    :param to_match: the text to match.
    :param repl: what to replace any matches with.
    :return: text, guaranteed to not contain ANY instances of ``to_match``.
    """
    result = text
    while to_match in result:
        result = result.replace(to_match, repl)
    return result
def find_cwsus(txn, prod):
    """
    Provided a database transaction, return ids of CWSUs whose geometry
    either overlaps the discussion geometry (ST_Overlaps) or fully
    contains it (ST_Covers), in ascending id order.
    """
    wkt = "SRID=4326;%s" % (prod.geometry.wkt,)
    txn.execute(
        "select distinct id from cwsu WHERE st_overlaps(%s, geom) or "
        "st_covers(geom, %s) ORDER by id ASC",
        (wkt, wkt),
    )
    return [row["id"] for row in txn.fetchall()]
def cover(full_range, subsets):
    """Greedy set cover: use approximately as few subsets as possible.

    :param full_range: set of elements to cover
    :param subsets: mapping of subset name -> set of elements
    :return: set of chosen subset names
    """
    chosen = set()
    remaining = full_range
    while remaining:
        best_name = None
        best_covered = set()
        for name, members in subsets.items():
            covered = remaining & members
            if len(covered) > len(best_covered):
                best_name = name
                best_covered = covered
        if not best_covered:
            # Nothing covers what's left: stop instead of looping forever
            # (the old code spun endlessly on uncoverable input).
            break
        remaining = remaining - best_covered
        chosen.add(best_name)
    return chosen
def get_sim_time_span(n_interval, step_size):
    """Calculate the time span of the simulation.

    :param n_interval: number of intervals
    :type n_interval: integer
    :param step_size: length of one time step in minutes
    :type step_size: number
    :return: time delta in minutes
    :rtype: number
    """
    total_minutes = n_interval * step_size
    return total_minutes
def f2f1(f1, f2, *a, **k):
    """
    Apply the second function after the first.

    Args and kwargs go to `f1`; `f2` is called on its return value.

    Example
    -------
    >>> f2f1(str, int, 2)
    2
    """
    intermediate = f1(*a, **k)
    return f2(intermediate)
def getBounds(lvls_arr: list, n_lvl: float):
    """
    Helper for BN interpolation: bracket *n_lvl* between two levels.

    @param lvls_arr: The corruption levels list
    @param n_lvl: The current level to interpolate
    @return: (lower, upper) corruption levels surrounding n_lvl
    """
    lower, upper = lvls_arr[0], lvls_arr[1]
    for idx, level in enumerate(lvls_arr[:-1]):
        if n_lvl <= level:
            break
        lower, upper = level, lvls_arr[idx + 1]
    return lower, upper
from datetime import datetime
def find_interval_rates(in_data, patient):
    """Collect heart rates recorded at or after a given timestamp.

    Args:
        in_data (dict): carries "heart_rate_average_since", a
            '%Y-%m-%d %H:%M:%S' timestamp string marking the cutoff
        patient (dict): patient record found in the database, with a
            "heart_rate_history" list of {"timestamp", "heart_rate"} entries

    Returns:
        list: heart-rate values recorded at or after the cutoff (may be empty)
    """
    cutoff = datetime.strptime(in_data["heart_rate_average_since"],
                               '%Y-%m-%d %H:%M:%S')
    rates = []
    for entry in patient["heart_rate_history"]:
        recorded = datetime.strptime(entry["timestamp"],
                                     '%Y-%m-%d %H:%M:%S')
        # max(cutoff, recorded) == recorded  <=>  recorded >= cutoff
        if recorded >= cutoff:
            rates.append(entry["heart_rate"])
    return rates
def isw_mul(d):
    """Multiplication cost of an ISW masking scheme of order *d*: d*(d+1)/2.

    Uses integer floor division instead of the old float division so large
    orders don't lose precision to an intermediate float; d*(d+1) is always
    even, so the result is exact.
    """
    return int(d * (d + 1) // 2)
def income_tax(wage):
    """
    :param wage: gross monthly income

    Quick calculation: monthly taxable income (Ti) x applicable rate (Tr)
    - quick deduction (Qd), where Ti = wage - threshold.
    #       Ti            Tr     Qd
    -----------------------
    1         ~1500       3%      0
    2   1500~4500        10%    105
    3   4500~9000        20%    555
    4   9000~35000       25%   1005
    5  35000~55000       30%   2755
    6  55000~80000       35%   5505
    7  80000~            45%  13505
    """
    brackets = (
        (0.00000, 0.03, 0.00000),  # 1
        (1500.00, 0.10, 105.000),  # 2
        (4500.00, 0.20, 555.000),  # 3
        (9000.00, 0.25, 1005.00),  # 4
        (35000.0, 0.30, 2755.00),  # 5
        (55000.0, 0.35, 5505.00),  # 6
        (80000.0, 0.45, 13505.0),  # 7
    )
    threshold = 3500  # tax-free allowance
    taxable = wage - threshold
    if taxable <= 0:
        return 0
    level = 6  # top bracket unless a lower bound exceeds the income
    for idx, bracket in enumerate(brackets):
        if taxable < bracket[0]:
            level = idx - 1
            break
    _, rate, deduction = brackets[level]
    return taxable * rate - deduction
def correlate(x, y):
    """Pearson's correlation coefficient (assumes len(x) == len(y)).

    Returns 0 when either input has zero variance.
    """
    n = len(x)
    sum_x = float(sum(x))
    sum_y = float(sum(y))
    sum_x_sq = sum(xi * xi for xi in x)
    sum_y_sq = sum(yi * yi for yi in y)
    cross = sum(xi * yi for xi, yi in zip(x, y))
    numerator = cross - sum_x * sum_y / n
    denominator = ((sum_x_sq - sum_x ** 2 / n) * (sum_y_sq - sum_y ** 2 / n)) ** 0.5
    if denominator == 0:
        return 0
    return numerator / denominator
def trips_conflict_update():
    """
    Returns a tuple (condition, actions) for generating the trips ON CONFLICT UPDATE statement.
    """
    # Keys stay in this exact order so generated SQL is stable.
    actions = dict(
        trip_duration="EXCLUDED.trip_duration",
        trip_distance="EXCLUDED.trip_distance",
        route="cast(EXCLUDED.route as jsonb)",
        accuracy="EXCLUDED.accuracy",
        start_time="EXCLUDED.start_time",
        end_time="EXCLUDED.end_time",
        parking_verification_url="EXCLUDED.parking_verification_url",
        standard_cost="EXCLUDED.standard_cost",
        actual_cost="EXCLUDED.actual_cost",
        sequence_id="EXCLUDED.sequence_id",
    )
    return ("ON CONSTRAINT pk_trips", actions)
import os
def get_pytest_cmd_values(request):
    """
    Read pytest command line options from *request* and return them as a
    dict, substituting a default for every option that was not provided.
    """
    def _or_default(value, default):
        # Only None means "not given"; other falsy values are kept as-is.
        return default if value is None else value

    opts = request.config.option

    kubeconfig_value = opts.kubeconfig
    if kubeconfig_value is None:
        # Prefer a repo-local kubeconfig when present, else the usual one.
        if os.path.isfile('config/kubeconfig'):
            kubeconfig_value = 'config/kubeconfig'
        else:
            kubeconfig_value = '~/.kube/config'

    clusterconfig_value = opts.clusterconfig
    if clusterconfig_value is None:
        # Same pattern for the operator CR sample.
        if os.path.isfile('config/csiscaleoperators.csi.ibm.com_cr.yaml'):
            clusterconfig_value = 'config/csiscaleoperators.csi.ibm.com_cr.yaml'
        else:
            clusterconfig_value = '../../operator/config/samples/csiscaleoperators.csi.ibm.com_cr.yaml'

    return {
        "kubeconfig_value": kubeconfig_value,
        "clusterconfig_value": clusterconfig_value,
        "test_namespace": _or_default(opts.testnamespace, 'ibm-spectrum-scale-csi-driver'),
        "operator_namespace": _or_default(opts.operatornamespace, 'ibm-spectrum-scale-csi-driver'),
        "runslow_val": opts.runslow,
        "operator_file": _or_default(opts.operatoryaml, '../../generated/installer/ibm-spectrum-scale-csi-operator-dev.yaml'),
        "test_config": _or_default(opts.testconfig, "config/test.config"),
    }
import os
def has_xray_daemon():
    """
    X-Ray Daemon isn't integrated with SAM CLI yet, so tracing should only
    happen on a real Lambda runtime, never under `sam local`.

    Return: boolean
    """
    env = os.environ
    on_lambda_runtime = 'LAMBDA_TASK_ROOT' in env
    under_sam_local = 'AWS_SAM_LOCAL' in env
    return on_lambda_runtime and not under_sam_local
import re
def list_from_file (file_name, separator = '\\s+', convert_to = int):
    """Returns a 2-D list which contains the content of a file, with lines
    corresponding to sublists and elements being converted with function
    convert_to.
    separator is used (as a regexp) as a separator for each element."""
    with open (file_name) as data_file:
        return [
            [convert_to(token) for token in re.split(separator, line.strip())]
            for line in data_file
        ]
import re
def is_method_name(text):
    """
    >>> is_method_name('hello')
    False
    >>> is_method_name('hello()')
    True
    >>> is_method_name('Foo::Bar')
    False
    >>> is_method_name('Foo::Bar#baz')
    True
    >>> is_method_name('Foo::Bar#baz()')
    True
    >>> is_method_name('user/repo#14')
    False
    """
    pattern = re.compile(r'''
        (?:\w+(?:[.]|::))*  # Zero or more C++/Ruby namespaces
        \w+
        (?:
            [(][)]              # A standard function
            |
            [#]\w+(?:[(][)])?   # A Ruby Method
        )
        ''', re.VERBOSE)
    return pattern.match(text) is not None
def create_answer_mapping(annotations, ans2cat):
    """Returns mapping from question_id to answer.

    Only returns those mappings that map to one of the answers in ans2cat.

    Args:
        annotations: VQA annotations file.
        ans2cat: Map from answers to answer categories that we care about.

    Returns:
        answers: Mapping from question ids to answers.
        image_ids: Set of image ids.
    """
    answers = {}
    image_ids = set()
    for entry in annotations['annotations']:
        answer = entry['multiple_choice_answer']
        if answer not in ans2cat:
            continue
        answers[entry['question_id']] = answer
        image_ids.add(entry['image_id'])
    return answers, image_ids
def clean_join(separator, iterable):
    """
    Join only the truthy (non-empty) items of *iterable* with *separator*.
    """
    return separator.join(item for item in iterable if item)
def default_freq(**indexer) -> str:
    """Return the default frequency.

    Anchors at December ("AS-DEC") when the single indexer value mentions
    the DJF season, otherwise at January ("AS-JAN"). A non-sorted month
    list is not supported.
    """
    if not indexer:
        return "AS-JAN"
    group, value = indexer.popitem()
    freq = "AS-DEC" if "DJF" in value else "AS-JAN"
    if group == "month" and sorted(value) != value:
        raise NotImplementedError
    return freq
def dH_atoms(at):
    """
    Return the enthalpy correction of an element, in atomic units.

    Parameters:
        at (str): symbol of the element

    Returns:
        float: enthalpy correction for a known element; None for an unknown
        element, in which case an error line is appended to
        Thermochemistry.out.
    """
    h = 6.626070040*(10**-34)           # Planck constant [J s]
    Ry = 10973731.568508                # Rydberg constant [1/m]
    c = 299792458                       # speed of light [m/s]
    N_avo = 6.02214179000000*(10**+23)  # Avogadro's number [1/mol]
    au2kcm = 2 * Ry * h * c * N_avo / 4184
    kcm2au = 1 / au2kcm
    # Tabulated corrections (presumably kcal/mol, given the 4184 J/kcal
    # factor above — TODO confirm).
    # Sources noted in the original: Br from Phys. Chem. Chem. Phys., 2015,
    # 17, 3584-3598; I from JANAF tables.
    corrections = {
        "H": 1.01, "Li": 1.10, "Be": 0.46, "B": 0.29, "C": 0.25, "N": 1.04,
        "O": 1.04, "F": 1.05, "Na": 1.54, "Mg": 1.19, "Al": 1.08, "Si": 0.76,
        "P": 1.28, "S": 1.05, "Cl": 1.10, "K": 1.6926, "Ca": 1.3709,
        "Fe": 1.08, "Ga": 1.3291, "Ge": 1.104, "As": 1.23, "Se": 1.319,
        "Br": 2.930, "I": 1.58,
    }
    if at not in corrections:
        with open("Thermochemistry.out", "a") as ther_chem:
            ther_chem.write("Error: unknown element type encoutered in dH_atoms: " + str(at) + " \n")
        return None
    return corrections[at] * kcm2au
def get_e():
    """Return the effective ventilation rate ratio (constant).

    Returns:
        float: effective ventilation rate ratio, always 1.0
    """
    return 1.0
def bed_get_chromosome_ids(bed_file):
    """
    Read in .bed file, return chromosome IDs (column 1 IDs).
    Return dic with chromosome ID -> count mapping.

    >>> test_file = "test_data/test6.bed"
    >>> bed_get_chromosome_ids(test_file)
    {'chr1': 2, 'chr2': 2, 'chr3': 1}

    """
    ids_dic = {}
    with open(bed_file) as f:
        for line in f:
            # Chromosome ID is the first tab-separated column.
            chr_id = line.strip().split("\t")[0]
            ids_dic[chr_id] = ids_dic.get(chr_id, 0) + 1
    assert ids_dic, "No chromosome IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (bed_file)
    return ids_dic
def source_to_dict(source, info='lines'):
    """
    Convert a source dict to a dict keyed by isotope labels.

    Keys have the form "<A>_<symbol>_<i>", where i enumerates the entries
    of source[info].

    Raises:
        ValueError: if 'A', 'symbol' or *info* is missing from *source*.
    """
    reqs = ('A', 'symbol', info)
    if not all(req in source for req in reqs):
        # Fixed misspelled "reuqired" in the user-facing error message.
        raise ValueError('Missing required data in source dict: %s' % ', '.join(req for req in reqs if req not in source))
    return dict(('%i_%s_%i' % (source['A'], source['symbol'], i), l) for i, l in enumerate(source[info]))
def find_word(keyword, sentence, start=0, end=-1, strict=False):
    """
    Locate *keyword* within a tokenized sentence.

    keyword: str, e.g. 'abc'
    sentence: list of tokens, e.g. ['a', 'b', 'cd']
    return: (start_index, end_index) of the tokens covering the keyword,
        e.g. (0, 2); (-1, -1) when not found.

    When the keyword is not itself a token and *strict* is False, the
    tokens in the [start, end) window are concatenated and the keyword is
    searched across token boundaries.
    """
    if not sentence:
        return -1, -1
    if end == -1 or end > len(sentence):
        end = len(sentence)
    # Fast path: the keyword is a whole token.
    if keyword in sentence[start:end]:
        pos = sentence.index(keyword, start, end)
        return pos, pos
    if strict:
        return -1, -1
    window = sentence[start:end]
    char_idx = ''.join(window).find(keyword)
    if char_idx < 0:
        return -1, -1
    first = last = -1
    chars_seen = -1  # index of the last character covered so far
    for offset, token in enumerate(window):
        chars_seen += len(token)
        if first < 0 and chars_seen >= char_idx:
            first = offset + start
        if chars_seen >= char_idx + len(keyword) - 1:
            last = offset + start
            break
    return first, last
def get_intersections(path1, path2):
    """returns a list of the intersection points between the two paths

    The origin (0, 0) is never reported as an intersection.

    Args:
        path1: one path (list of tuples with consecutive integer x, y coords)
        path2: second path (see above)

    Returns:
        a list of all points of path1 (order and duplicates preserved)
        that also appear in path2
    """
    # Hash path2 once for O(1) membership tests instead of the original
    # O(len(path1) * len(path2)) list scans.
    path2_points = set(path2)
    return [pt for pt in path1 if pt in path2_points and pt != (0, 0)]
import torch
def get_idces_line_linear():
    """
    Build a selector that picks one random row of a fully connected layer.

    The returned function takes a layer with a 2-D ``weight`` tensor and
    returns (weight indices of shape (n_input, 2), the chosen row index).
    """
    def select_row(layer):
        n_output, n_input = layer.weight.data.shape
        row = torch.randint(0, n_output, (1,))
        # One (row, col) pair per input column of the chosen row.
        row_part = torch.ones(n_input, 1) * row
        col_part = torch.arange(0, n_input).reshape(n_input, 1)
        idces_w = torch.cat((row_part, col_part), dim=1).long()
        return idces_w, row
    return select_row
import random
def generate_contents(length):
    """
    Generate *length* distinct random bytes with values in range(100).

    Args:
        length: number of bytes to produce; must be <= 100 because values
            are sampled without replacement from range(100).

    Returns:
        bytes: the sampled values.
    """
    # Removed a leftover debug print(length); bytes(contents) replaces the
    # redundant memoryview(bytearray(...)).tobytes() round-trip.
    contents = random.sample(range(100), length)
    return bytes(contents)
def get_next_core_id(current_id_in_hex_str):
    """
    :param current_id_in_hex_str: hex string of the maximum core id assigned
        so far, without the leading 0x characters; may be None or empty
    :return: that id + 1 as a hex string, zero-padded to at least 4 digits
    """
    if not current_id_in_hex_str:
        # Nothing assigned yet: start the sequence.
        return '0001'
    next_id = int(current_id_in_hex_str, 16) + 1
    return format(next_id, '04x')
def extract_block(tensor,
                  block_dim_x,
                  block_dim_y,
                  block_dim_z,
                  subsample_factor,
                  height_jitter=0):
    """Extract train block from stored data block.

    Takes a centered crop along x and z, starts at ``height_jitter`` along
    y (the height axis), and subsamples every axis by ``subsample_factor``.

    Args:
        tensor: 3-D or 4-D tensor; only the first three dims are cropped.
        block_dim_x: output block size along x (after subsampling).
        block_dim_y: output block size along y (after subsampling).
        block_dim_z: output block size along z (after subsampling).
        subsample_factor: stride applied along each axis.
        height_jitter: starting offset along the y (height) axis.

    Returns:
        The sliced block, (block_dim_x, block_dim_y, block_dim_z) in the
        first three dimensions.
    """
    sz = tensor.get_shape().as_list()
    assert len(sz) == 4 or len(sz) == 3
    sz = sz[0:3]
    block_dim_in_tensor_x = block_dim_x * subsample_factor
    block_dim_in_tensor_y = block_dim_y * subsample_factor
    block_dim_in_tensor_z = block_dim_z * subsample_factor
    # Floor division: slice offsets must be ints. Under Python 3 the
    # original "/" produced floats, which break strided indexing.
    offset_x = (sz[0] - block_dim_in_tensor_x) // 2
    # y starts from height_jitter since it is the height axis, don't center-crop there.
    offset_y = height_jitter
    offset_z = (sz[2] - block_dim_in_tensor_z) // 2
    block = tensor[offset_x:offset_x + block_dim_in_tensor_x:subsample_factor,
                   offset_y:offset_y + block_dim_in_tensor_y:subsample_factor,
                   offset_z:offset_z + block_dim_in_tensor_z:subsample_factor]
    return block
import re
def ValidateAccountId(account_id):
    """Ensures an account id is well structured.

    A valid id is at most 63 characters, starts with a lowercase letter,
    continues with lowercase letters, digits or hyphens, and ends with a
    lowercase letter or digit (so it is at least two characters long).
    """
    if len(account_id) > 63:
        return False
    # This regex is from the protobuffer. fullmatch anchors it at both ends;
    # the original re.match only checked a prefix, so ids with trailing
    # garbage (e.g. "abc!!!") were accepted.
    return re.fullmatch(r'[a-z]([-a-z0-9]*[a-z0-9])', account_id) is not None
from typing import Tuple
def mean_grad(x: Tuple[float, float]) -> Tuple[float, float]:
    """A manually calculated gradient of the mean with respect to the inputs."""
    grad_first = 6 * x[0]
    grad_second = 3 * x[1] ** 2
    return grad_first, grad_second
def removeDuplicates(nums):
    """
    Remove duplicates from a sorted list in place.

    :type nums: List[int] - sorted
    :rtype: List[int] - the deduplicated prefix of nums
    """
    write = 0  # index of the last unique element written
    for value in nums:
        if value != nums[write]:
            write += 1
            nums[write] = value
    return nums[:write + 1]
def check_uc(*models):
    """Check if the unit cells of the given models are approximately the same.

    If any model has no unit cell, all of them must have none.
    """
    if any(m.uc is None for m in models):
        return all(m.uc is None for m in models)
    tolerance = 1e-6
    reference_uc = models[0].uc
    # Same as the original nested loops: fail on the first component pair
    # whose difference exceeds the tolerance.
    return not any(
        abs(x1 - x2) > tolerance
        for m in models
        for vec1, vec2 in zip(reference_uc, m.uc)
        for x1, x2 in zip(vec1, vec2)
    )
import re
def process_arguments(raw_arguments: list):
    """ Process the arguments from CLI. The value of sys.argv """
    # "name=value" (value optionally quoted) and simple "-flag" arguments;
    # anything else is silently ignored.
    value_pattern = re.compile(r"^([a-z][a-z0-9-]+?)=['\"]?(.+?)['\"]?$")
    flag_pattern = re.compile(r"^(-[a-z][a-z0-9-]*?)$")
    arguments = {}
    for raw in raw_arguments:
        value_match = value_pattern.search(raw)
        if value_match:
            arguments[value_match.group(1).lower()] = value_match.group(2)
            continue
        flag_match = flag_pattern.search(raw)
        if flag_match:
            arguments[flag_match.group(1)] = True
    return arguments
def combineImagePaths(centerImagePath, leftImagePath, rightImagePath, centerMeasurement, leftMeasurement, rightMeasurement):
    """
    Combine center/left/right image paths and measurements into two lists,
    each ordered center, left, right.
    """
    measurements = list(centerMeasurement) + list(leftMeasurement) + list(rightMeasurement)
    imagePaths = list(centerImagePath) + list(leftImagePath) + list(rightImagePath)
    return imagePaths, measurements
def _make_rpm_formatter(fmt=None):
""" function: Returns function that will format output of rpm query command """
if fmt is None:
fmt = [
'"name":"%{NAME}"',
'"epoch":"%{EPOCH}"',
'"version":"%{VERSION}"',
'"release":"%{RELEASE}"',
'"arch":"%{ARCH}"',
'"installtime":"%{INSTALLTIME:date}"',
'"buildtime":"%{BUILDTIME}"',
'"vendor":"%{VENDOR}"',
'"buildhost":"%{BUILDHOST}"',
'"sigpgp":"%{SIGPGP:pgpsig}"'
]
def inner(idx=None):
if idx:
return "\{" + ",".join(fmt[:idx]) + "\}\n"
else:
return "\{" + ",".join(fmt) + "\}\n"
return inner | c277d3716066c0be5c136107913ca5368a667864 | 35,368 |
def filter_rating(pages, min_val=20):
    """Pages with rating strictly above min_val, in their original order."""
    kept = []
    for page in pages:
        if page.rating > min_val:
            kept.append(page)
    return kept
import os
def get_pycsw_root_path(process_environment, request_environment=None,
                        root_path_key="PYCSW_ROOT"):
    """Get pycsw's root path.

    The root path is looked up in ``process_environment`` first, then in
    ``request_environment``; when neither defines it, it is derived from
    this module's location on disk.

    Parameters
    ----------
    process_environment: dict
        A mapping with the process environment.
    request_environment: dict, optional
        A mapping with the request environment. Typically the WSGI's
        environment
    root_path_key: str
        Name of the key in both environment mappings that specifies the
        path to pycsw's root path.

    Returns
    -------
    str
        Path to pycsw's root path, as read from the supplied configuration.
    """
    if request_environment is not None:
        req_env = dict(request_environment)
    else:
        req_env = {}
    # Fallback: two levels up from this file.
    derived_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    return process_environment.get(
        root_path_key, req_env.get(root_path_key, derived_root))
import argparse
import sys
def parse_arguments():
    """
    Parse the command line arguments of the program.

    Declares two positional outputs (the embedding file and the dictionary
    file, both optional with defaults) plus the word2vec-style
    hyper-parameters. When the program is invoked with no arguments at all,
    the usage message is printed and the process exits with status 1.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Generate word embedding from a corpus.')
    parser.add_argument(
        "output_file",
        type=str,
        nargs="?",
        help="The file where the resulting embedding will be saved",
        default="data.npy"
    )
    parser.add_argument(
        "-i",
        "--input_file",
        type=str,
        nargs="?",
        help="The file containing a corpus",
    )
    parser.add_argument(
        "output_dict",
        type=str,
        nargs="?",
        help="The file where the dict will be saved",
        default="dict.txt"
    )
    parser.add_argument(
        "-vs",
        "--vocabulary_size",
        type=int,
        nargs="?",
        help="How many words will be \"known\" to the model",
        default=50000
    )
    parser.add_argument(
        "-es",
        "--embedding_size",
        type=int,
        nargs="?",
        help="How many features will be used to describe a word (vector length)",
        default=128
    )
    parser.add_argument(
        "-bs",
        "--batch_size",
        type=int,
        nargs="?",
        help="Size of a batch",
        default=128
    )
    parser.add_argument(
        "-sw",
        "--skip_window",
        type=int,
        nargs="?",
        help="How many numbers to consider left and right",
        default=1
    )
    parser.add_argument(
        "-ns",
        "--num_skips",
        type=int,
        nargs="?",
        help="How many times to reuse an input to generate a label",
        default=2
    )
    parser.add_argument(
        "-nsa",
        "--num_sampled",
        type=int,
        nargs="?",
        help="Number of negative examples to sample",
        default=64
    )
    parser.add_argument(
        "-pd",
        "--precomputed_data_files",
        nargs="+",
        help="A list of .npy files containing the dataset (if it was precomputed)"
    )
    parser.add_argument(
        "-ld",
        "--log_dir",
        type=str,
        nargs="?",
        help="The folder were the training loss will be recorded. Will be created if not pre-existing"
    )
    # No arguments at all: show usage instead of silently running with
    # every default.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
import shutil
def check_valid_shell_command(cmd):
    """
    Determine whether a shell command's executable can be found on PATH.

    Args:
        cmd (str or list): Shell command. String of one command or list with
            the executable first, followed by its arguments.

    Returns:
        str or None: the resolved executable path (truthy) when found,
        otherwise None. The original docstring claimed bool; callers using
        the result in a boolean context are unaffected.
    """
    executable = cmd[0] if isinstance(cmd, list) else cmd
    return shutil.which(executable)
def break_inside(keyword):
    """``break-inside`` property validation."""
    allowed = ('auto', 'avoid', 'avoid-page', 'avoid-column')
    return keyword in allowed
from datetime import datetime
from typing import Optional


def get_fiscal_year(date_val: Optional[datetime] = None) -> int:
    """Return the fiscal year (April 1 - March 31) for the given date.

    Args:
        date_val: date to classify; defaults to the current time, evaluated
            at call time. (The original used ``datetime.now()`` as the
            default value, which is evaluated once at import and so froze
            the "current" date for the life of the process.)

    Returns:
        The fiscal year the date falls in.
    """
    if date_val is None:
        date_val = datetime.now()
    fiscal_year: int = date_val.year
    if date_val.month > 3:  # From April onward the date belongs to the next fiscal year.
        fiscal_year = fiscal_year + 1
    return fiscal_year
def exchangeCols(M, c1, c2):
    """Swap columns c1 and c2 of matrix M in place and return M."""
    for row in M:
        row[c1], row[c2] = row[c2], row[c1]
    return M
import os
def greatest_name(dirpath,
                  startswith,
                  endswith,
                  max_label=None,
                  dir_wanted=False):
    """
    Return greatest filename (or dirname) meeting given specs.

    Return the filename (or, optionally, directory name) in the given directory
    that begins and ends with strings startswith and endswith, respectively.
    If there is more than one such file, return the greatest (lexicographically)
    such filename. Raise an error if there are no such files.

    The portion between the prefix startswith and the suffix endswith is called
    the version label in the documentation.

    If max_label is given, only files or directories with a version label
    at most the given max_label will be considered.

    If switch "dir_wanted" is True, then return greatest directory name, not filename.

    Example: greatest_name(".", "foo", ".csv", max_label="-11-10")
    will return "foo-11-08.csv" from a directory containing files:
        "foo-11-13.csv"
        "foo-11-08.csv"
        "foo-11-07.csv" , and
        "zeb-12-12.csv" .
    """
    if max_label is None:
        max_filename = None
    else:
        # The cap must be a plain filename (prefix + label + suffix) so it is
        # comparable with the basenames from os.listdir. The original joined
        # the pieces as PATH components (os.path.join), which compared
        # basenames like "foo-11-13.csv" against "dir/foo/-11-10/.csv" and
        # made the max_label filter ineffective.
        max_filename = startswith + max_label + endswith
    selected_filename = ""
    for filename in os.listdir(dirpath):
        full_filename = os.path.join(dirpath, filename)
        if (dir_wanted == False and os.path.isfile(full_filename) or \
            dir_wanted == True and not os.path.isfile(full_filename)) and \
           filename.startswith(startswith) and \
           filename.endswith(endswith) and \
           filename > selected_filename and \
           (max_filename is None or filename <= max_filename):
            selected_filename = filename
    if selected_filename == "":
        # (Added the missing space between the concatenated message parts.)
        if dir_wanted == False:
            raise FileNotFoundError(("No files in `{}` have a name starting with `{}` "
                                     "and ending with `{}`.")
                                    .format(dirpath, startswith, endswith))
        else:
            raise FileNotFoundError(("No directories in `{}` have a name starting with `{}` "
                                     "and ending with `{}`.")
                                    .format(dirpath, startswith, endswith))
    return selected_filename
def _file_read(filename):
"""Read file and return text"""
# Open and read file to get text.
with open(filename, 'r') as f:
text = f.read()
return text | a5422d174c964f2fe5fd4a1847d4fd1b95431749 | 35,381 |
def GetRateURL(base, symbols, date="latest"):
    """Build the fixer.io API URL for a date and the chosen currencies."""
    return "http://api.fixer.io/{0}?base={1}&symbols={2}".format(date, base, symbols)
def unify_cloud_config(cloud_config_preserve, cloud_config):
    """ join the cloud config information into cloud_config_preserve.
    In case of conflict cloud_config_preserve preserves
    None is allowed

    Merged sections: "key-pairs" (deduplicated), "users" (deduplicated by
    name, merging their key-pairs), "boot-data-drive" (preserve side wins),
    "user-data" (concatenated, scalars wrapped), and "config-files"
    (deduplicated by "dest", preserve side wins). Empty sections are dropped
    from the result.

    NOTE(review): the user merge mutates the user dicts taken from the
    inputs in place (their "key-pairs" lists are extended), so callers must
    not rely on the inputs being unchanged afterwards — confirm this is
    intended.
    """
    if not cloud_config_preserve and not cloud_config:
        return None
    new_cloud_config = {"key-pairs":[], "users":[]}
    # key-pairs
    # Preserve side first, then the other side; duplicates are skipped, so
    # order favors cloud_config_preserve.
    if cloud_config_preserve:
        for key in cloud_config_preserve.get("key-pairs", () ):
            if key not in new_cloud_config["key-pairs"]:
                new_cloud_config["key-pairs"].append(key)
    if cloud_config:
        for key in cloud_config.get("key-pairs", () ):
            if key not in new_cloud_config["key-pairs"]:
                new_cloud_config["key-pairs"].append(key)
    if not new_cloud_config["key-pairs"]:
        del new_cloud_config["key-pairs"]
    # users
    new_cloud_config["users"] = []
    if cloud_config:
        new_cloud_config["users"] += cloud_config.get("users", () )
    if cloud_config_preserve:
        new_cloud_config["users"] += cloud_config_preserve.get("users", () )
    # Collapse users sharing a name: the first occurrence is kept and
    # absorbs the key-pairs of every later duplicate.
    index_to_delete = []
    users = new_cloud_config.get("users", [])
    for index0 in range(0,len(users)):
        if index0 in index_to_delete:
            continue
        for index1 in range(index0+1,len(users)):
            if index1 in index_to_delete:
                continue
            if users[index0]["name"] == users[index1]["name"]:
                index_to_delete.append(index1)
                for key in users[index1].get("key-pairs",()):
                    if "key-pairs" not in users[index0]:
                        users[index0]["key-pairs"] = [key]
                    elif key not in users[index0]["key-pairs"]:
                        users[index0]["key-pairs"].append(key)
    # Delete from the end so earlier indices stay valid.
    index_to_delete.sort(reverse=True)
    for index in index_to_delete:
        del users[index]
    if not new_cloud_config["users"]:
        del new_cloud_config["users"]
    #boot-data-drive
    # cloud_config first, then cloud_config_preserve overwrites: preserve wins.
    if cloud_config and cloud_config.get("boot-data-drive") != None:
        new_cloud_config["boot-data-drive"] = cloud_config["boot-data-drive"]
    if cloud_config_preserve and cloud_config_preserve.get("boot-data-drive") != None:
        new_cloud_config["boot-data-drive"] = cloud_config_preserve["boot-data-drive"]
    # user-data
    # Either side may hold a list or a single scalar entry; scalars are
    # appended as one item.
    new_cloud_config["user-data"] = []
    if cloud_config and cloud_config.get("user-data"):
        if isinstance(cloud_config["user-data"], list):
            new_cloud_config["user-data"] += cloud_config["user-data"]
        else:
            new_cloud_config["user-data"].append(cloud_config["user-data"])
    if cloud_config_preserve and cloud_config_preserve.get("user-data"):
        if isinstance(cloud_config_preserve["user-data"], list):
            new_cloud_config["user-data"] += cloud_config_preserve["user-data"]
        else:
            new_cloud_config["user-data"].append(cloud_config_preserve["user-data"])
    if not new_cloud_config["user-data"]:
        del new_cloud_config["user-data"]
    # config files
    # Entries from cloud_config first; a preserve-side file with the same
    # "dest" replaces it in place, otherwise it is appended.
    new_cloud_config["config-files"] = []
    if cloud_config and cloud_config.get("config-files") != None:
        new_cloud_config["config-files"] += cloud_config["config-files"]
    if cloud_config_preserve:
        for file in cloud_config_preserve.get("config-files", ()):
            for index in range(0, len(new_cloud_config["config-files"])):
                if new_cloud_config["config-files"][index]["dest"] == file["dest"]:
                    new_cloud_config["config-files"][index] = file
                    break
            else:
                new_cloud_config["config-files"].append(file)
    if not new_cloud_config["config-files"]:
        del new_cloud_config["config-files"]
    return new_cloud_config
from typing import Any
def get_value_from_args_if_exists(args: list, pos: int, default: Any, *exceptions) -> Any:
    """
    Fetch the argument at position *pos* from *args*, returning *default*
    when any of the given exception types is raised by the lookup.

    :param args: argument list to inspect
    :param pos: position of the wanted argument
    :param default: value returned when the lookup fails
    :param exceptions: exception classes that trigger the fallback
    :return: the value at *pos*, or *default*
    """
    try:
        value = args[pos]
    except tuple(exceptions):
        value = default
    return value
def split_words(text : str) -> list:
    """Break a command input such as 'hello foo bar' into individual words
    (split on single spaces after trimming the ends)."""
    return text.strip().split(' ')
def response(hey_bob):
    """
    Return lackadaisical teenager Bob's reply to *hey_bob*.

    Bob answers 'Sure.' to a question ("How are you?"),
    'Whoa, chill out!' when YELLED AT (all capitals),
    'Calm down, I know what I'm doing!' when a question is yelled,
    'Fine. Be that way!' when addressed without actually saying anything,
    and 'Whatever.' to anything else.

    His conversational partner always follows normal English sentence
    punctuation.

    :param hey_bob: what is said to Bob (may be None)
    :return: Bob's reply
    """
    # Silence: None or nothing but whitespace.
    if hey_bob is None or not hey_bob.strip():
        return 'Fine. Be that way!'
    # Yelling, possibly a yelled question.
    if hey_bob.isupper():
        if '?' in hey_bob:
            return "Calm down, I know what I'm doing!"
        return 'Whoa, chill out!'
    # A calm question.
    if hey_bob.strip().endswith('?'):
        return 'Sure.'
    return 'Whatever.'
def rgb_to_hsv(image):
    """
    Convert a Landsat image from RGB to the HSV color space.

    Bands B3, B2, B1 (scaled by 0.0001) are used as R, G, B; the source
    image's properties and system:time_start are copied onto the result.

    :param image: Landsat image to convert to the HSV color space
    :return: image containing three bands: hue, saturation and value
    """
    scaled_rgb = image.select(['B3', 'B2', 'B1']).multiply(0.0001)
    image_hsv = scaled_rgb.rgbToHsv()
    with_props = image_hsv.copyProperties(image)
    return with_props.set('system:time_start', image.get('system:time_start'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.