content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def cmp_float(var1, var2):
    """
    Compare two values after coercing both to float.

    Inputs that cannot be converted (bad strings, None, ...) are treated
    as 0.0, matching the original lenient behavior.

    Returns True if var1 > var2, False if var1 <= var2.
    """
    try:
        var1 = float(var1)
    except (TypeError, ValueError):
        var1 = 0.0
    try:
        var2 = float(var2)
    except (TypeError, ValueError):
        var2 = 0.0
    return var1 > var2
def unique(list):
    """
    Return a list with unique entries, ordered by first appearance
    (an ordered set).

    NOTE: the parameter shadows the built-in ``list``; the name is kept
    for backward compatibility with keyword callers.
    """
    # dict preserves insertion order (Python 3.7+), so fromkeys gives an
    # ordered set in one pass; unpacking avoids calling the shadowed list().
    return [*dict.fromkeys(list)]
def chk_int(line):
    """Function: chk_int
    Description: Checks to see if the string is an integer.
        NOTE: Does not work for floats.
    Arguments:
        (input) line -> String containing an integer.
        (output) True|False -> Whether the string is an integer.
    """
    # Guard: an empty string is not an integer (previously raised
    # IndexError on line[0]).
    if not line:
        return False
    # Remove positive/negative sign if present.
    if line[0] in ("-", "+"):
        return line[1:].isdigit()
    return line.isdigit()
def load_data(filename, is_lowercase):
    """
    Load alias pairs with negative aliases from a tab-separated file.

    Each line holds [kb_link, alias1, alias2, neg_alias(, extra)]; the
    neg_alias field joins its entries with '___'.

    Output:
        list of (alias1(str), alias2(str), (neg_alias(list[str]), []))
        tuples; pairs where either alias has length <= 1 are skipped.
    """
    data = list()
    # Context manager closes the file (the old code leaked the handle);
    # rstrip('\n') avoids chopping a real character when the last line
    # has no trailing newline.
    with open(filename, 'r') as fh:
        for ln in fh:
            items = ln.rstrip('\n').split('\t')
            if len(items) == 5:
                kb_link, alias1, alias2, neg_alias, _ = items
            else:
                kb_link, alias1, alias2, neg_alias = items
            if len(alias1) <= 1 or len(alias2) <= 1:
                continue
            if is_lowercase:
                alias1 = alias1.lower()
                alias2 = alias2.lower()
                neg_alias = neg_alias.lower()
            neg_alias = neg_alias.split('___')
            #if len(neg_alias) < 5:
            #    continue
            neg = neg_alias, list()
            data.append((alias1, alias2, neg))
    return data
import argparse
from pathlib import Path
def parse_arguments() -> argparse.Namespace:
    """Parse the Enduro Learner command-line arguments."""
    parser = argparse.ArgumentParser(description="Enduro Learner")
    # Declarative option table keeps flags and settings side by side.
    option_table = (
        ("--record", {"action": "store_true"}),
        ("--store_path", {"type": Path, "default": "temp/"}),
        ("--trial_name", {"type": str, "default": ""}),
    )
    for flag, settings in option_table:
        parser.add_argument(flag, **settings)
    return parser.parse_args()
def split_artifact_filename(s):
    """
    split_artifact_filename('artifact::filename') -> ('artifact', 'filename')

    Splits on the first "::"; raises ValueError if "::" is not found.
    """
    artifact, separator, filename = s.partition("::")
    if not separator:
        # Same exception type and message as str.index on a miss.
        raise ValueError("substring not found")
    return (artifact, filename)
import os
def _path_to_module(path):
"""Convert a Python rules file path to an importable module name.
For example, "rules/community/cloudtrail_critical_api_calls.py" becomes
"rules.community.cloudtrail_critical_api_calls"
Raises:
NameError if a '.' appears anywhere in the path except the file extension.
"""
base_name = os.path.splitext(path)[0]
if '.' in base_name:
raise NameError('Python file "{}" cannot be imported '
'because of "." in the name'.format(path))
return base_name.replace('/', '.') | 2b91ef64a53eb040cfc6b0f0e6cbce9d99e97d5c | 42,342 |
def idx2category(idx):
    """
    Map a training-image index to its category label.

    Every class contributes 24 consecutive training images.
    """
    images_per_class = 24
    labels = ('3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E',
              'H', 'J', 'K', 'M', 'N', 'O', 'P', 'R', 'S', 'T', 'U', 'V',
              'W', 'X', 'Y', 'Z')
    return labels[idx // images_per_class]
def whiten(x):
    """Whiten the data: returns (x - mean(x)) / std(x).

    Arguments:
        x {np.array} -- Input data
    Returns:
        np.array -- Whitened (zero mean, unit std) data
    """
    centered = x - x.mean()
    return centered / x.std()
def get_decile_from_row_number(x, total_num_rows):
    """
    Given (value, row number) in sorted RDD and total number of rows,
    return (decile, value).  Decile is in the integer interval [1, 10].

    Example:
        row_number = 219, total_num_rows = 1000 ->
        219/1000 = 0.219 * 10 ->
        int(2.19) + 1 = 3,
        so 219 is in the 3rd decile.

    The result is clamped to 10 so row_number == total_num_rows
    (sorted_rank == 1.0) does not produce an out-of-range 11th decile.
    """
    value, row_number = x
    sorted_rank = row_number / float(total_num_rows)
    decile = min(int(sorted_rank * 10) + 1, 10)
    return (decile, value)
def get_settings_from_conf(path):
    """Return settings as a mapping.

    Executes the file at *path* as Python code and returns the names it
    defines (its local namespace) as a dict.
    """
    settings = {}
    with open(path) as fp:
        # SECURITY: exec runs arbitrary code from the file; only ever use
        # this on trusted configuration files.
        exec(fp.read(), {}, settings)
    return settings
def NOT_IN(attribute, val):
    """
    Return True when *attribute* is not a member of *val*.
    """
    return not (attribute in val)
def find_second_largest(root_node):
    """
    Return the value of the second-largest node in a binary search tree.

    Time: O(h)
    Space: O(1)
    h: height of the tree (O(lg n) if the tree is balanced, O(n) otherwise); n: # of nodes

    Raises:
        ValueError: if the tree has fewer than 2 nodes.
    """
    def find_largest(node):
        # In a BST the largest node is the right-most one.
        if node is None:
            raise ValueError('Tree must have at least 1 node')
        while node.right:
            node = node.right
        return node
    if root_node is None or (root_node.left is None and root_node.right is None):
        raise ValueError('Tree must have at least 2 nodes')
    while root_node:
        # Case 1: current node is the largest (no right subtree), so the
        # second largest is the largest node of its left subtree.
        if root_node.left and not root_node.right:
            return find_largest(root_node.left).value
        # Case 2: the right child is a leaf, so it is the largest and the
        # current node is the second largest.
        if root_node.right and not root_node.right.left and not root_node.right.right:
            return root_node.value
        # Otherwise keep descending toward the largest node.
        root_node = root_node.right
def date_clean(date, dashboard_style=False):
    """
    Clean a numerical date value in order to present it.

    Args:
        date: numerical date (e.g. 20160205, YYYYMMDD)
        dashboard_style: if True return 'MM/DD/YYYY', otherwise ISO
            'YYYY-MM-DD'
    Returns:
        Stringified version of the input date ("2016-02-05")
    """
    dt = str(date)
    if dashboard_style:
        return '{}/{}/{}'.format(dt[4:6], dt[6:], dt[:4])
    return '{}-{}-{}'.format(dt[:4], dt[4:6], dt[6:])
import re
def c(regex):
    """A convenience alias for anchoring and compiling *regex*."""
    anchored = '^' + regex + '$'
    return re.compile(anchored)
def str_list(data_in, mode=0):
    """
    Convert between an ASCII string and a list of character codes.

    mode 0: return the list of ord() codes for each character of *data_in*.
    mode 1: return the string built with chr() from each element of
            *data_in*.  (Previously hard-coded to the first 16 elements;
            now generalized to any length, which is backward compatible
            for the original 16-byte block inputs.)
    """
    if mode == 0:
        return [ord(ch) for ch in data_in]
    return ''.join(chr(code) for code in data_in)
def rechunk(ner_output):
    """Regroup entities by type when they are consecutive.

    [('Jean', 'I-PERS'), ('Duvieusart', 'I-PERS')] thus becomes
    [('Jean Duvieusart', 'I-PERS')]
    """
    chunked, pos, prev_tag = [], "", None
    for i, word_pos in enumerate(ner_output):
        word, pos = word_pos
        # Same mergeable tag as the previous token: extend the last chunk
        # by tuple concatenation, producing (w1, tag, w2, tag, ...).
        if pos in ['I-PERS', 'I-LIEU', 'I-ORG'] and pos == prev_tag:
            chunked[-1] += word_pos
        else:
            chunked.append(word_pos)
        prev_tag = pos
    # Collapse the concatenated tuples: words sit at even positions, the
    # tag is the last element; plain 2-tuples pass through unchanged.
    clean_chunked = [tuple([" ".join(wordpos[::2]), wordpos[-1]])
                     if len(wordpos) != 2 else wordpos for wordpos in chunked]
    return clean_chunked
def nested_dict_sum(vals, x=0.0):
    """Recursively sum all numeric values in nested dictionaries.

    Also descends into non-string iterables (lists, tuples, sets, ...).
    Use *x* to give an initial value.
    """
    if isinstance(vals, dict):
        # Recurse per value; handles dicts whose values mix numbers and
        # nested containers without a bare except.
        for child in vals.values():
            x += nested_dict_sum(child)
    elif isinstance(vals, (int, float)):
        x += vals
    elif isinstance(vals, str):
        # Strings are iterable but recursing on them never terminates
        # (each character is itself iterable); treat them as non-numeric.
        print("issue with nested_dict_sum dict is not as expect ")
    elif hasattr(vals, '__iter__'):
        for item in vals:
            x += nested_dict_sum(item)
    else:
        print("issue with nested_dict_sum dict is not as expect ")
    return x
import torch
def map_x_to_u(data, batch_info):
    """ map the node features to the right row of the initial local context.

    Builds, per node, a one-hot encoding of its color concatenated with the
    node features placed at the row indexed by that color.
    NOTE(review): assumes data.coloring and batch_info['coloring'] are
    (num_nodes, 1) integer color ids referring to the same coloring --
    confirm against the caller.
    """
    x = data.x
    # One-hot of each node's color: (num_nodes, n_colors).
    u = x.new_zeros((data.num_nodes, batch_info['n_colors']))
    u.scatter_(1, data.coloring, 1)
    u = u[..., None]
    # Scatter node features into the row given by the node's color.
    u_x = u.new_zeros((u.shape[0], u.shape[1], x.shape[1]))
    n_features = x.shape[1]
    coloring = batch_info['coloring'] # N x 1
    expanded_colors = coloring[..., None].expand(-1, -1, n_features)
    u_x = u_x.scatter_(dim=1, index=expanded_colors, src=x[:, None, :])
    # Concatenate one-hot color and scattered features along the last dim.
    u = torch.cat((u, u_x), dim=2)
    return u
import datetime
def Business_Day(Date):
    """Shift a weekend date forward to the following Monday.

    Monday-Friday dates are returned unchanged.
    """
    # weekday(): 5 = Saturday (+2 days), 6 = Sunday (+1 day).
    shift = {5: 2, 6: 1}.get(Date.weekday())
    if shift:
        Date += datetime.timedelta(days=shift)
    return Date
def filterDups(coordList):
    """
    Return a new list with adjacent duplicates removed.

    The input list is not modified.  An empty input now returns []
    instead of raising IndexError on coordList[-1].
    """
    if not coordList:
        return []
    ret = [coordList[0]]
    for item in coordList[1:]:
        # Only keep the item when it differs from the last kept one.
        if item != ret[-1]:
            ret.append(item)
    return ret
def color_nodes(graph_description, node_set, color):
    """
    Changes the color of each node in node_set
    :param graph_description: A source string of a graphviz digraph
    :param node_set: A set of node names
    :param color: The color to set the nodes
    :return: A graphviz digraph source
    """
    for node in node_set:
        # Locate the node's attribute list ('\t<name> [...]') and find its
        # closing ']' so the style attributes can be spliced in before it.
        # NOTE(review): fragile string surgery -- assumes every node in
        # node_set appears with that exact formatting; confirm the source
        # is produced by graphviz's default renderer.
        place = graph_description.find(']', graph_description.find('\t{} ['.format(node)))
        graph_description = graph_description[:place] \
            + ' fontcolor=white fillcolor={} style=filled'.format(color) \
            + graph_description[place:]
    return graph_description
def one_hand_hash(x, y):
    """
    Check whether x and y differ by at most one character count, using a
    character-count dict.

    Known weaknesses (kept on purpose): counting is order invariant, so
    'pale' and 'elap' compare equal, and characters of y that never occur
    in x are silently ignored.
    """
    if x == y:
        return True
    # Tally characters of the first string.
    counts = {}
    for ch in x:
        counts[ch] = counts.get(ch, 0) + 1
    # Cancel out characters present in the second string.
    for ch in y:
        if ch in counts:
            counts[ch] -= 1
    # At most one character may remain unbalanced.
    mismatches = sum(1 for remaining in counts.values() if remaining != 0)
    return mismatches <= 1
import argparse
def parse_args() -> argparse.Namespace:
    """
    Create the parser and parse the arguments.
    :returns parser: argparse.ArgumentParser()
        Argument parser
    """
    parser = argparse.ArgumentParser()
    # (flags, options) table keeps each CLI option on one logical entry.
    cli_options = [
        (('--study_accession', "-s"),
         dict(type=str, help='Study accession in ENA')),
        (('--output_path', "-o"),
         dict(default='.',
              help='path where files will be downloaded: can be local or s3')),
        (('--threads', "-t"),
         dict(default=1, type=int,
              help='Number of processes to simultaneously spam')),
        (('--database', "-db"),
         dict(default='sra', type=str, choices=['sra', 'ena'],
              help='Which database to use: sra or ena')),
        (('--allowed', "-a"),
         dict(type=str,
              help='Full path to plain text file with line separated list of files that should be transferred.')),
        (("--respect-filenames", "-r"),
         dict(default=False, action='store_true', dest='respect_filename',
              help="Try to correct filenames based on cross-database metadata")),
    ]
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def x_spread(request):
    """
    Ratio of :math:`\\mu` to :math:`\\sigma` of gamma distribution.
    The spread uniquely determines the shape since
    .. math::
        \\frac{\\mu}{\\sigma} = \\frac{\\theta k}{\\theta \\sqrt{k}} = \\frac{1}{\\sqrt{k}}
    """
    # Looks like a pytest parametrized fixture: the spread value is
    # injected through request.param by the fixture's params list.
    return request.param
def _fni_input_validation(dist, tol, max_intervals):
"""
Input validation and standardization for _fast_numerical_inverse.
"""
has_pdf = hasattr(dist, 'pdf') and callable(dist.pdf)
has_cdf = hasattr(dist, 'cdf') and callable(dist.cdf)
has_ppf = hasattr(dist, 'ppf') and callable(dist.ppf)
has_isf = hasattr(dist, 'isf') and callable(dist.isf)
if not (has_pdf and has_cdf and has_ppf):
raise ValueError("`dist` must have methods `pdf`, `cdf`, and `ppf`.")
if not has_isf:
def isf(x):
return 1 - dist.ppf(x)
dist.isf = isf
tol = float(tol) # if there's an exception, raise it now
if int(max_intervals) != max_intervals or max_intervals <= 1:
raise ValueError("`max_intervals' must be an integer greater than 1.")
return dist, tol, max_intervals | f07c6f2708058930ae73ee867b75fa7992ab1937 | 42,366 |
import secrets
def generate_uid():
    """Generate a unique id in the form of an 8-char string. The value is
    used to uniquely identify the record of one user. Assuming a user
    who has been creating 100 records a day, for 20 years (about 1M records),
    the chance of a collision for a new record is about 1 in 50 milion.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # len(alphabet) == 52 => 52**8 => 53459728531456 possibilities
    return "".join(secrets.choice(alphabet) for _ in range(8))
def bearing_to_cartesian(heading):
    """
    Bearing (heading from North) to cartesian orientation CCW from east
    :param heading: CW from North, in degrees
    :type heading: float
    :returns: Cartesian direction, CCW from East, in degrees
    :rtype: float
    """
    east_ccw_angle = 90 - heading
    return east_ccw_angle
def actual_quadralinear_interp(field,x0,y0,z0,t0,
                               x_index,y_index,z_index,t_index,
                               x,y,z,t):
    """!Quadralinear (4-D linear) interpolation of *field* at the point
    (x0, y0, z0, t0).

    *field* is the 2x2x2x2 corner block surrounding the query point and
    x/y/z/t are the coordinate axes; each *_index points at the grid cell
    upper corner so the weights below are the opposing sub-volumes.
    NOTE(review): the original docstring mentions an @numba.jit decorator
    "just above this function", which introduces a small Order(1 second)
    compile overhead on first call -- the decorator is not visible here;
    confirm it is applied at the call site or module.
    """
    # Each term weights a corner value by the volume of the opposite
    # sub-box; the denominator normalizes by the full cell volume.
    field_interp = ((field[0,0,0,0]*
                     (x[x_index] - x0)*(y[y_index] - y0)*(z[z_index] - z0)*(t[t_index] - t0) +
                     field[0,1,0,0]*
                     (x[x_index] - x0)*(y[y_index] - y0)*(z0 - z[z_index-1])*(t[t_index] - t0) +
                     field[0,1,1,0]*
                     (x[x_index] - x0)*(y0 - y[y_index-1])*(z0 - z[z_index-1])*(t[t_index] - t0) +
                     field[0,0,1,0]*
                     (x[x_index] - x0)*(y0 - y[y_index-1])*(z[z_index] - z0)*(t[t_index] - t0) +
                     field[0,0,0,1]*
                     (x0 - x[x_index-1])*(y[y_index] - y0)*(z[z_index] - z0)*(t[t_index] - t0) +
                     field[0,1,0,1]*
                     (x0 - x[x_index-1])*(y[y_index] - y0)*(z0 - z[z_index-1])*(t[t_index] - t0) +
                     field[0,1,1,1]*
                     (x0 - x[x_index-1])*(y0 - y[y_index-1])*(z0 - z[z_index-1])*(t[t_index] - t0) +
                     field[0,0,1,1]*
                     (x0 - x[x_index-1])*(y0 - y[y_index-1])*(z[z_index] - z0)*(t[t_index] - t0) +
                     field[1,0,0,0]*
                     (x[x_index] - x0)*(y[y_index] - y0)*(z[z_index] - z0)*(t0 - t[t_index-1]) +
                     field[1,1,0,0]*
                     (x[x_index] - x0)*(y[y_index] - y0)*(z0 - z[z_index-1])*(t0 - t[t_index-1]) +
                     field[1,1,1,0]*
                     (x[x_index] - x0)*(y0 - y[y_index-1])*(z0 - z[z_index-1])*(t0 - t[t_index-1]) +
                     field[1,0,1,0]*
                     (x[x_index] - x0)*(y0 - y[y_index-1])*(z[z_index] - z0)*(t0 - t[t_index-1]) +
                     field[1,0,0,1]*
                     (x0 - x[x_index-1])*(y[y_index] - y0)*(z[z_index] - z0)*(t0 - t[t_index-1]) +
                     field[1,1,0,1]*
                     (x0 - x[x_index-1])*(y[y_index] - y0)*(z0 - z[z_index-1])*(t0 - t[t_index-1]) +
                     field[1,1,1,1]*
                     (x0 - x[x_index-1])*(y0 - y[y_index-1])*(z0 - z[z_index-1])*(t0 - t[t_index-1]) +
                     field[1,0,1,1]*
                     (x0 - x[x_index-1])*(y0 - y[y_index-1])*(z[z_index] - z0)*(t0 - t[t_index-1]))/
                    ((t[t_index] - t[t_index-1])*(z[z_index] - z[z_index-1])*
                     (y[y_index] - y[y_index-1])*(x[x_index] - x[x_index-1])))
    return field_interp
def calculate_monthly_interest(total_balance: float, monthly_interest: float) -> float:
    """
    Calculates the amount of interest added to the balance each month.

    :param total_balance: Total debt left
    :type total_balance: float
    :param monthly_interest: Monthly interest in %
    :type monthly_interest: float
    :return: Interest accrued for one month
    :rtype: float
    """
    return total_balance * monthly_interest / 100
def check_is_fid_valid(fid, raise_exception=True):
    """
    Check if FID is well formed.

    :param fid: functional ID
    :param raise_exception: Indicate if an exception shall be raised (True, default) or not (False)
    :type fid: str
    :type raise_exception: bool
    :returns: the status of the check
    :rtype: bool
    :raises TypeError: if FID is invalid
    :raises ValueError: if FID is not well formatted
    """
    def _fail(exc):
        # Either raise the prepared exception or report failure.
        if raise_exception:
            raise exc
        return False
    if fid is None:
        return _fail(ValueError("FID shall be set"))
    if not isinstance(fid, str):
        return _fail(TypeError("Type of fid: '%s' shall be str, not %s" % (fid, type(fid))))
    if len(fid) < 3:
        return _fail(ValueError("fid shall have at least 3 characters: '%s'" % fid))
    if " " in fid:
        return _fail(ValueError("fid shall not contains spaces: '%s'" % fid))
    return True
def timeleft(i, num, elapsed):
    """Estimate the time left, in secs, of a given operation.

    Useful for loops, where `i` is the current iteration, `num` is the
    total number of iterations, and `elapsed` is the time elapsed since
    starting.  Returns 0 when no progress or no time has elapsed yet.
    """
    try:
        progress_rate = i / float(elapsed)
        return (num - i) / progress_rate
    except ZeroDivisionError:
        return 0
def _plus(arg1, arg2):
"""int plus"""
return str(int(arg1) + int(arg2)) | 15f8d82736b90d5aa7ac94fecca0616651f54d20 | 42,376 |
def base64_add_padding(data):
    """
    Add enough padding for base64 encoding such that length is a multiple
    of 4.

    Already-aligned input is returned unchanged (the previous version
    appended four spurious '=' characters in that case).

    Args:
        data: unpadded string or bytes
    Return:
        bytes: The padded bytes
    """
    if isinstance(data, str):
        data = data.encode('utf-8')
    # -len % 4 is 0 when already aligned, else the count of '=' needed.
    missing_padding = -len(data) % 4
    if missing_padding:
        data += b'=' * missing_padding
    return data
def get_minification_delta(source_text, minified_text):
    """Return how many characters minification removed from the source."""
    return len(source_text) - len(minified_text)
import torch
def get_reconstruction_loss(x: torch.Tensor, z: torch.Tensor, decoder: torch.nn.Module) -> torch.Tensor:
    """
    Calculate the reconstruction loss of the VAE.

    Args:
    - x: original data, shape (N, C, H, W)
    - z: latent codes fed to the decoder
    - decoder: module mapping z to logits with the same shape as x

    Returns:
        Scalar mean continuous-Bernoulli negative log-likelihood of x.
    """
    x_reconstruct = decoder.forward(z=z) # (N, C, H, W)
    # Continuous Bernoulli models real-valued observations in [0, 1];
    # decoder outputs are interpreted as its logits.
    c_bernoulli = torch.distributions.continuous_bernoulli.ContinuousBernoulli(logits=x_reconstruct)
    reconstruction_loss = - c_bernoulli.log_prob(value=x) # (N, C, H, W)
    return torch.mean(input=reconstruction_loss)
import re
def sort_human(l):
    """Sort a list of strings in natural (human) order, in place.

    Numeric runs -- including signs and decimals such as '1.5' -- compare
    as numbers, other runs as text.  Tokens are tagged so numbers and
    strings never compare directly (the previous version raised TypeError
    on inputs like ['v1.5', 'v2'] because '1.5' failed isdigit() and was
    compared to a float).  Returns the (sorted) input list.
    """
    number = re.compile(r'([-+]?[0-9]*\.?[0-9]*)')

    def token_key(text):
        # (0, float) sorts all numeric tokens before (1, str) text tokens.
        try:
            return (0, float(text))
        except ValueError:
            return (1, text)

    l.sort(key=lambda item: [token_key(tok) for tok in number.split(item)])
    return l
import logging
import copy
def vector2profile(vector, exprofile):
    """Convert a state vector back into a profile dict.

    Takes *exprofile* as a template (deep-copied, never modified).
    The vector contains only: nlevels temperature values, 29 bottom ln(q)
    values, then Tsurf, ln(q) surf and Tskin.  Temperature and ln(q) are
    stored from top of atmosphere to ground.

    Returns the populated profile, or None when the vector length does not
    match the template's NLEVELS.
    """
    # Layout: nlevels T values + 29 ln(q) values + 3 surface values.
    nlevels = vector.shape[0] - 29 - 3
    if nlevels != exprofile['NLEVELS']:
        logging.error("vector and profile are not of the same dimension")
        return None
    profile = copy.deepcopy(exprofile)
    for i in range(nlevels):
        profile["T"][i] = vector[i]
    # Only the lowest 29 humidity levels are carried in the vector.
    for i in range(29):
        profile["Q"][nlevels - 29 + i] = vector[nlevels + i]
    profile["S2M"]["T"] = vector[nlevels + 29]
    profile["S2M"]["Q"] = vector[nlevels + 29 + 1]
    profile["SKIN"]["T"] = vector[nlevels + 29 + 2]
    return profile
from typing import Any
def can_be_parsed_as_bool(value: Any) -> bool:
    """Checks whether a value can be parsed as a boolean.

    True for actual bools and for the strings "true"/"false" in any case,
    with surrounding whitespace ignored.

    can_be_parsed_as_bool(True) -> True
    can_be_parsed_as_bool("true") -> True
    can_be_parsed_as_bool("false") -> True
    can_be_parsed_as_bool("TRUE") -> True
    can_be_parsed_as_bool(0) -> False

    Args:
        value: The value to be parsed.
    Returns:
        True if the value can be parsed as a boolean. False otherwise.
    """
    if isinstance(value, bool):
        return True
    if isinstance(value, str):
        return value.lower().strip() in ("true", "false")
    return False
def dedent_initial(s, n=4):
    # type: (str, int) -> str
    """Remove exactly *n* spaces of indentation from the start of *s*.

    The string is returned unchanged unless it begins with n spaces.
    """
    prefix = ' ' * n
    if s.startswith(prefix):
        return s[n:]
    return s
def _is_item_allowed(resource, item, resourcesalloweddict, resourcesuseddict):
"""
<Purpose>
Check if the process can acquire a non-fungible, non-renewable resource.
<Arguments>
resource:
A string with the resource name.
item:
A unique identifier that specifies the resource. It has some
meaning to the caller (like a port number for TCP or UDP), but is
opaque to the nanny.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
True or False
"""
if item in resourcesalloweddict[resource]:
# this is semi nonsensical, but allows us to indicate which ports are used
# through get_resource_information()
resourcesuseddict[resource].add(item)
return True
else:
return False | a098f6376576446156ea25c583520fdacbd7d2ed | 42,388 |
def dataset_constructor_kwargs(root_path, **kwargs):
    """Return key-value arguments for a dataset constructor.

    Defaults may be added to or overridden by passing keyword arguments;
    root_path must always be specified.

    Args:
        root_path: The root_path for the Dataset.
        kwargs: Arguments to update the defaults.

    Example use:
        # Set "version" to 2, add new parameter "new_par" to "new_value" and
        # remove "firmware_url":
        result = dataset_constructor_kwargs(root_path=root_path,
                                            version=2,
                                            new_par="new_value")
        del result["firmware_url"]
    """
    measurements_info = {
        "trace1": {
            "type": "power",
            "len": 1024,
        }
    }
    attack_points_info = {
        "key": {
            "len": 16,
            "max_val": 256
        },
    }
    defaults = dict(
        root_path=root_path,
        shortname="shortname",
        architecture="architecture",
        implementation="implementation",
        algorithm="algorithm",
        version=1,
        paper_url="http://paper.url",
        firmware_url="http://firmware.url",
        licence="CC BY 4.0",
        description="description",
        url="http://download.url",
        firmware_sha256="abc123",
        examples_per_shard=1,
        measurements_info=measurements_info,
        attack_points_info=attack_points_info,
    )
    defaults.update(kwargs)
    return defaults
def add_zeros(X, Y):
    """Pad each x-axis with one extra sample on both ends and each data
    series with zeros, to prevent interpolation in join_data from stacking
    too much up.  Mutates and returns X and Y.
    """
    for axis in X:
        axis.insert(0, axis[0] - 1)
        axis.append(axis[-1] + 1)
    for series in Y:
        for channel in series:
            channel.insert(0, 0.)
            channel.append(0.)
    return X, Y
import uuid
def generate_uuid(model=None):
    """Generate a fresh UUID.

    :param Any model: if given, the UUID is re-drawn until it does not
        collide with an existing ``model.uuid`` row.
    """
    candidate = uuid.uuid4()
    # Re-draw while the candidate already exists in the model's table.
    while model and model.query.filter_by(uuid=candidate).limit(1).all():
        candidate = uuid.uuid4()
    return candidate
def checkNomBall(bam, prefs):
    """ returns TRUE if the times have been calculated

    Walks every station in bam.stn_list and fails fast when one lacks a
    computed ballistic time.
    NOTE(review): when prefs.ballistic_en is False the condition fails for
    every station, so this returns False regardless of the data -- confirm
    that is the intended behavior.
    """
    for stn in bam.stn_list:
        # Check Ballistic
        if not (len(stn.times.ballistic) >= 1 and prefs.ballistic_en):
            return False
    return True
import os
import json
def parse_label_arg(s):
    """ If s is a path to an existing file load it as JSON, otherwise
    parse s itself as a JSON string."""
    if os.path.exists(s):
        # Context manager closes the handle (the old code leaked it).
        with open(s, 'r') as fp:
            return json.load(fp)
    return json.loads(s)
def normalize_data(vms, vm_statuses, nics, public_ips):
    """
    Normalize the data from a series of Azure cloud API calls into
    a Python dict object containing very specific portions of the original data.

    dict = { '<instance_name>': { 'public_ip': '<public_ip>',
                                  'public_dns_name': '<public_dns_name>',
                                  'status': '<Up|Down>', 'source': 'Azure' }
    }
    """
    normalized = {}
    for vm_id, vm_data in vms.items():
        # Chase the vm -> nic -> public-ip references.
        nic_data = nics[vm_data['nic_id']]
        ip_data = public_ips[nic_data['public_ip_id']]
        normalized[vm_data['name']] = {
            'public_ip': ip_data['address'],
            'public_dns_name': ip_data['fqdn'],
            'status': vm_statuses[vm_id],
            'source': "Azure",
        }
    return normalized
def preprocess_target(dframe):
    """Encode the 'diagnosis' column numerically (M -> 0, B -> 1).

    Mutates *dframe* in place and returns the encoded column."""
    encoding = {'M': 0, 'B': 1}
    dframe['diagnosis'] = dframe['diagnosis'].map(encoding)
    return dframe['diagnosis']
def form_tweets(sentences):
    """Join *sentences* into newline-separated tweets, starting a new
    tweet prefixed with "(cont'd.):" whenever the next sentence would push
    past 139 characters.  Consumes (empties) the input list."""
    tweets = []
    current = ""
    while sentences:
        if len(current) + len(sentences[0]) > 139:
            # Flush the current tweet; the sentence stays queued for the
            # continuation tweet.
            tweets.append(current)
            current = "(cont'd.):"
            continue
        separator = "\n" if current else ""
        current = current + separator + sentences.pop(0)
    tweets.append(current)
    return tweets
def _attr_files(ctx, name):
    """Returns the list of files for the current target's attribute.

    This is a convenience function since the aspect context does not expose the
    same convenience `file`/`files` fields used in rule contexts.

    Args:
        ctx: The Skylark (Starlark aspect) context.
        name: The name of the attribute.
    Returns:
        A list of Files.
    """
    # Flatten the files of every target listed under the attribute.
    return [f for t in getattr(ctx.rule.attr, name) for f in t.files]
def task_color(task_object, show_green=False):
    """ Return css class depending on Task execution status and execution outcome.

    By default, green is not returned for executed and successful tasks;
    show_green argument should be True to get green color.
    """
    if not task_object.task_executed:
        return 'class=muted'
    if task_object.outcome == task_object.OUTCOME_FAILED:
        return 'class=error'
    if show_green and task_object.outcome == task_object.OUTCOME_SUCCESS:
        return 'class=green'
    return ''
def get_scale_factor(scale, max_size, img_h, img_w):
    """
    Compute the resize factor so the short side matches *scale* without
    letting the long side exceed *max_size*.

    :param scale: min size during test
    :param max_size: max size during test
    :param img_h: orig height of img
    :param img_w: orig width
    :return: scale factor for resizing
    """
    short_side = min(img_w, img_h)
    long_side = max(img_w, img_h)
    # Degenerate image or disabled scale: leave the image untouched.
    if short_side <= 0 or scale <= 0:
        return 1.0
    return min(scale / short_side, max_size / long_side)
from typing import Iterable
def unique_letters(edges: Iterable) -> str:
    """Return unique letters in collection of edges.

    Args:
        edges (Iterable): Iterable containing edges, i.e. tuples.
            The last element in the tuples is assumed to contain the letter.

    Returns:
        str: each distinct letter once, in first-appearance order.  (The
        previous set-based version returned the letters in an order that
        varied between interpreter runs due to string hash randomization.)
    """
    return ''.join(dict.fromkeys(edge[-1] for edge in edges))
def _combine_array_of_u8_into_one_value(data, element_index, transfer_size_bytes, size_in_bits):
    """ This method is a helper to convert the array read from hardware
    and return a single element removing any excessive bits.

    First we combine the data into a single number while swapping the
    endianness.

    Whenever the array is longer than 1 word, the data is left justified, we
    need to shift the combined data to the right before doing the
    conversion.

    For example, if the element had a size in bits of 54, the 54 MSB would
    be the data bits. The 10 LSB of the combinedData must be shifted
    off in order to not mess up further calculations.
    """
    combinedData = 0
    if transfer_size_bytes >= 4:
        index = element_index
        element_end = element_index + transfer_size_bytes
        # Process 4-byte words; within each word the bytes are folded in
        # MSB-first order (index+3 down to index), swapping endianness.
        while index < element_end:
            combinedData = (combinedData << 8) + data[index + 3]
            combinedData = (combinedData << 8) + data[index + 2]
            combinedData = (combinedData << 8) + data[index + 1]
            combinedData = (combinedData << 8) + data[index]
            index += 4
    elif transfer_size_bytes == 2:
        # Two bytes: second byte is the high byte.
        combinedData = data[element_index + 1]
        combinedData = (combinedData << 8) + data[element_index]
    else:
        combinedData = data[element_index]
    if transfer_size_bytes > 4:
        # Multi-word values are left justified: drop the unused LSBs.
        combinedData = combinedData >> (8 * transfer_size_bytes - size_in_bits)
    return combinedData
import re
def sass_map_vars(string_sass):
    """Returns a dictionary with all the variables found with their respective values.

    :param string_sass: String with the sass code

    Example:
        >>> from phanterpwa.tools import sass_map_vars
        >>> sass_str = '''
        ... $BG: red
        ... $FG: green
        ... .my_class
        ...     background-color: $BG
        ...     color: $FG
        ... '''
        >>> sass_map_vars(sass_str)
        {'BG': 'red', 'FG': 'green'}
    """
    declaration = re.compile(
        r"^[\t ]*?(\$[A-Za-z_][A-Za-z0-9_\-]*\:{1}[ ]?[\w!#$%,\ .\-\(\)]+)\/*.*$",
        re.MULTILINE)
    variables = {}
    for match in declaration.findall(string_sass):
        name, _, value = match.partition(":")
        # Drop the leading '$' from the variable name.
        variables[name.strip()[1:]] = value.strip()
    return variables
def is_palindrome1(w):
    """Return True when *w* reads the same forwards and backwards."""
    return w == w[::-1]
import random
def random_user_agent():
    """Returns a randomly chosen User Agent header dict for the webscraper."""
    # NOTE(review): the second section is labelled Firefox but the strings
    # look like IE/Trident user agents -- confirm the labels.
    user_agent_list = [
        #Chrome
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        #Firefox
        'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
        'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)',
        'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)'
    ]
    user_agent = random.choice(user_agent_list)
    headers = {'User-Agent': user_agent}
    return headers
def report_insert_error(error, msg):
    """
    Return a text string with the cause of the insert error.

    :param error: numeric error code (-3, -2, -1, or any other value)
    :param msg: extra detail appended to the message
    """
    if error == -3:
        return "Lock timeout exceeded"
    elif error == -2:
        return "Duplicate entry:"+msg
    elif error == -1:
        return "method 'report_insert' error:"+msg
    else:
        # str() is required: concatenating the int code to the message
        # raised TypeError before.
        return "Unknown error:"+str(error)+"; "+msg
def energy_budget_rule(mod, g, h):
    """
    **Constraint Name**: GenHydro_Energy_Budget_Constraint
    **Enforced Over**: GEN_HYDRO_OPR_HRZS

    Require that the hydro energy delivered over a horizon equal that
    horizon's energy budget: the user-specified average power fraction
    (average capacity factor) times the period's installed capacity, the
    hours in each timepoint, and any availability derate, summed over the
    horizon's timepoints.

    WARNING: any availability derate lowers the effective average power
    fraction (and the associated budget) below the user-specified input.
    Example: a 50% fraction on a 1,000-MW unit over a 168-hour horizon
    gives a budget of 0.5 * 1,000 MW * 168 h = 84,000 MWh when fully
    available; if the unit is unavailable half the time, the budget is
    halved even though the input fraction is unchanged.
    """
    horizon_tmps = mod.TMPS_BY_BLN_TYPE_HRZ[
        mod.balancing_type_project[g], h]
    delivered_energy = sum(
        mod.GenHydro_Gross_Power_MW[g, tmp] * mod.hrs_in_tmp[tmp]
        for tmp in horizon_tmps
    )
    energy_budget = sum(
        mod.gen_hydro_average_power_fraction[g, h]
        * mod.Capacity_MW[g, mod.period[tmp]]
        * mod.Availability_Derate[g, tmp]
        * mod.hrs_in_tmp[tmp]
        for tmp in horizon_tmps
    )
    return delivered_energy == energy_budget
import subprocess
from io import StringIO
def generate_stream(processArgs):
    """Run *processArgs* as a subprocess and capture its combined output.

    Parameters
    ----------
    processArgs : list
        Command and arguments to execute.

    Returns
    -------
    tuple(StringIO, int)
        Captured stdout+stderr text and the process return code.
    """
    proc = subprocess.Popen(
        processArgs,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # interleave stderr into stdout
        universal_newlines=True,
    )
    captured, _ = proc.communicate()
    return StringIO(captured), proc.returncode
def strip_whitespace(content):
    """Strip surrounding whitespace from every entry of *content*.

    Args:
        content (list): list of strings (e.g. lines read from a file).

    Returns:
        list: the entries with leading/trailing whitespace removed.
    """
    return [entry.strip() for entry in content]
def S(**kwargs):
    """Build a dict from keyword arguments (concise dict-literal helper)."""
    return dict(**kwargs)
def read_list(filename):
    """Return the lines of *filename* with surrounding whitespace stripped."""
    with open(filename, "r") as handle:
        return [line.strip() for line in handle]
import os
def find_in_directory_path(filename, path):
    """
    Attempt to locate the given file in the specified directory or any ancestor
    directory, up to and including the filesystem root.

    :param filename: name of file to look for
    :type filename: str
    :param path: starting directory (a file path is replaced by its directory)
    :type path: str
    :return: fully-qualified path to filename or None if not found
    :rtype: str or None
    """
    path = os.path.abspath(path)
    if os.path.isfile(path):
        path = os.path.dirname(path)
    while True:
        candidate = os.path.abspath(os.path.join(path, filename))
        if os.path.exists(candidate):
            return candidate
        parent = os.path.split(path)[0]
        # Stop when the parent stops changing (the parent of the root is
        # itself).  The original compared against os.sep, which never
        # matches Windows drive roots such as 'C:\\' and looped forever,
        # and it also skipped checking the root directory itself.
        if not parent or parent == path:
            return None
        path = parent
def permutate(es, idxs, keys=None):
    """Map each element of *es* through the permutation described by *idxs*.

    >>> permutate((0,1,2,3),(0,2,3,1))
    (0, 2, 3, 1)
    >>> permutate((2,2,1), (0,2,1))
    (1, 1, 2)
    >>> permutate((2,3,1,3,3), (3,4,1,0,2))
    (1, 0, 4, 0, 0)
    """
    assert len(es) == len(idxs)
    assert keys is None or len(keys) == len(idxs)
    if keys is None:
        keys = range(len(idxs))
    # mapping[keys[j]] = keys[idxs[j]] -- the permutation as a lookup table
    mapping = {key: keys[i] for key, i in zip(keys, idxs)}
    return tuple(mapping[element] for element in es)
def find_stop_codon(exons, cds_position):
    """Return the cDNA offset of the base after the stop codon.

    Walks the exons in order, accumulating exon lengths until the exon
    containing the stop position is found; raises ValueError if no exon
    contains it.
    """
    if cds_position.is_forward_strand:
        stop = cds_position.chrom_stop
    else:
        stop = cds_position.chrom_start
    offset = 0
    for exon in exons:
        start = exon.tx_position.chrom_start
        end = exon.tx_position.chrom_stop
        if start <= stop <= end:
            if cds_position.is_forward_strand:
                return offset + stop - start
            return offset + end - stop
        offset += end - start
    raise ValueError('Stop codon is not in any of the exons')
def find_all(txt, char):
    """Return a tuple of every index in *txt* whose character occurs in *char*."""
    positions = []
    for index, letter in enumerate(txt):
        if letter in char:
            positions.append(index)
    return tuple(positions)
def parse_flags(flags, flags_dict, show_unknown_flags=True, separator=" "):
    """
    Render the set bits of *flags* as names drawn from *flags_dict*.

    Each known bit contributes its name; unknown set bits contribute
    "???" unless *show_unknown_flags* is False. Names are joined with
    *separator* and returned as a single string.
    """
    names = []
    bit = 0x01
    while bit <= flags:
        if flags & bit:
            if bit in flags_dict:
                names.append(flags_dict[bit])
            elif show_unknown_flags:
                names.append("???")
        bit <<= 1
    return separator.join(names)
def blend_into_next_effect(effects, point, incr, color):
    """Cross-fade *color* with the output of the last effect in *effects*.

    effects : list of callables taking (x, y, step) and returning (r, g, b)
    point   : (x, y, step) tuple forwarded to the effect
    incr    : current iteration; drives the blend weight (500 - incr) / 100
    color   : (r, g, b) tuple to blend from
    """
    x, y, step = point
    other = effects[-1](x, y, step)
    weight = (500.00 - incr) / 100.0
    # channel-wise linear interpolation between color and the effect output
    return tuple(c * weight + o * (1.0 - weight)
                 for c, o in zip(color, other))
def get_property(line):
    """Split a ``key=value`` string at the first ``=`` and return (key, value)."""
    assert '=' in line
    key, _, value = line.partition('=')
    return key, value
import torch
def _handle_one(parts, is_weight):
"""Make it look like a normal LayerNorm"""
n_parts = len(parts)
err_msg = f"Redundant ModelParallelFusedLayerNorm params have been updated."
if is_weight:
init = 1.0
assert not torch.logical_and(parts[0].ne(1), parts[1].ne(1)).any(), err_msg
else:
init = 0.0
assert not torch.logical_and(parts[0].ne(0), parts[1].ne(0)).any(), err_msg
ret_val = torch.cat([p.unsqueeze(-1) for p in parts], dim=1).sum(1) - (
init * (n_parts - 1)
)
return ret_val | b881dc04e48d192ee8c9b7aa02f6c1999c86ca36 | 42,427 |
def solution(S):
    """Return 1 if S is a properly nested string of '(' and ')', else 0."""
    open_count = 0
    for ch in S:
        open_count += 1 if ch == '(' else -1
        # a closing bracket without a matching opener is invalid
        if open_count < 0:
            return 0
    # valid only if every opener was closed
    return int(open_count == 0)
def drop_duplicate_rows(data_frame, column_name):
    """Return *data_frame* with duplicate values in *column_name* removed,
    keeping the first occurrence of each value."""
    return data_frame.drop_duplicates(subset=column_name, keep="first")
def get_dataset(city, dataset):
    """
    Resolve the CSV dataset path for a chosen city.

    :param city: City selected by the user (matched case-insensitively)
    :param dataset: Fallback value returned unchanged when the city is unknown
    :return: Path to the city's dataset, or *dataset* if the city is not
             recognized (an error message is printed in that case)
    """
    datasets = {
        'chicago': './data/chicago.csv',
        'new york': './data/new_york_city.csv',
        'washington': './data/washington.csv',
    }
    # Match case-insensitively: the original compared the raw string, so
    # 'Chicago' silently fell through every branch yet printed no error.
    key = city.lower()
    if key in datasets:
        return datasets[key]
    print('Oops! I didn\'t get that. Please input either Chicago, New York, or Washington.')
    return dataset
from typing import Any
from typing import Tuple
def extend_attr_to_tuple(
    val: Any,
    num_elem: int,
) -> Tuple[Any, ...]:
    """
    Ensure *val* is a tuple.

    A non-tuple value is replicated *num_elem* times; a tuple is returned
    unchanged.

    Args:
        val (Any): Value that we want to process.
        num_elem (int): Number of replications for a non-tuple value.

    Returns:
        A tuple.
    """
    if isinstance(val, tuple):
        return val
    return (val,) * num_elem
def illegal_input(row):
    """
    Validate a user-entered row of single letters separated by single spaces.

    :param row: str, user input such as "a b c d"
    :return: boolean -- True if well-formed; otherwise prints
             'Illegal input' and returns False
    """
    for i in range(len(row)):
        if i % 2 == 1:
            # odd positions must be the separating spaces
            if row[i] != ' ':
                print('Illegal input')
                return False
            elif len(row) > 7:
                # at most 4 letters / 3 separators allowed
                print('Illegal input')
                return False
        else:
            # even positions must be letters; the original compared the
            # bound method `row[i].isalpha` to False (never true), so
            # this check previously never fired
            if not row[i].isalpha():
                print('Illegal input')
                return False
    return True
def combine_two(a, b, delimiter='/'):
    """returns an n-nested array of strings a+delimiter+b
    a and b (e.g. uuids and object_keys) can be a singlet,
    an array, an array of arrays or an array of arrays of arrays ...
    example:
    >>> a = ['a','b',['c','d']]
    >>> b = ['e','f',['g','h']]
    >>> combine_two(a, b)
    ['a/e','b/f',['c/g','d/h']]
    """
    if isinstance(a, list):
        if not isinstance(b, list):
            raise Exception("can't combine list and non-list")
        if len(a) != len(b):
            raise Exception("Can't combine lists of different lengths")
        # propagate the delimiter into nested levels -- the original
        # recursion dropped it and always fell back to '/'
        return [combine_two(a_, b_, delimiter) for a_, b_ in zip(a, b)]
    else:
        return str(a) + delimiter + str(b)
def newman_conway(num):
    """Return the first *num* Newman-Conway numbers as a space-separated string.

    Time Complexity: O(n)
    Space Complexity: O(n)

    Raises ValueError for inputs below 1.
    """
    if num < 1:
        raise ValueError("Input must be an integer larger than 0")
    if num == 1:
        return "1"
    seq = [1, 1]
    # P(n) = P(P(n-1)) + P(n - P(n-1)), computed with 0-based indexing
    for i in range(3, num + 1):
        prev = seq[i - 2]
        seq.append(seq[prev - 1] + seq[i - prev - 1])
    return " ".join(str(value) for value in seq)
def cpe(*args):
    """Concatenate values as strings using ':', replace None with '*'

    :param args: sequence of values
    :return: string in CPE format
    """
    return ":".join("*" if value is None else str(value) for value in args)
import re
def text_parse_en(big_str_en):
    """
    Tokenize an English string into lowercase words longer than two characters.

    :param big_str_en: input text
    :return: list of lowercased tokens with length > 2
    """
    token_list = re.split(r'\W+', big_str_en)
    # re.split always returns at least one element, so the original
    # "empty result" print branch was unreachable and has been removed.
    return [tok.lower() for tok in token_list if len(tok) > 2]
def cli(ctx, entity):
    """Get the list of available fields for an entity

    Output:
        Fields information
    """
    entity_client = ctx.gi.entity
    return entity_client.get_fields(entity)
def none_for_empty_string(s: str):
    """Map empty-string values (as read from CSV) to None; pass others through."""
    return s if s else None
def float_one(vals):
    """Return a constant floating point value of 1.0, ignoring *vals*."""
    return float(1)
def json_patch_headers():
    """JSON headers for Invenio Records REST PATCH api calls."""
    header_map = {
        "Content-Type": "application/json-patch+json",
        "Accept": "application/json",
    }
    return list(header_map.items())
def _parse_args(p):
    """Command line arguments - all optional with [reasonable] defaults

    :param p: an argparse.ArgumentParser to populate
    :return: the parsed argparse.Namespace (parses sys.argv)
    """
    p.add_argument(
        "--version", action="store_true", help="show version information and exit"
    )
    # -v may be repeated to raise verbosity further (action="count")
    p.add_argument("-v", action="count", help="raise verbosity", default=0)
    # watch / daemon / kill are mutually exclusive run modes
    g = p.add_mutually_exclusive_group()
    g.add_argument(
        "--watch",
        action="store_true",
        help="watch and build in foreground",
        default=False,
    )
    g.add_argument(
        "--daemon", action="store_true", help="watch and build as daemon", default=False
    )
    g.add_argument(
        "--kill", action="store_true", default=False, help="exorcise daemon if running"
    )
    # message sent to i3 after each build; "nop" disables the refresh
    p.add_argument(
        "--i3-refresh-msg",
        action="store",
        default="reload",
        choices=["restart", "reload", "nop"],
        help="i3-msg to send after build",
    )
    p.add_argument(
        "--notify",
        action="store_true",
        default=False,
        help="show build notification via notify-send",
    )
    p.add_argument(
        "--log",
        action="store",
        default=None,
        help="i3configgerPath to where log should be stored",
    )
    # any remaining positional words are forwarded to i3configger
    p.add_argument("message", help="message to send to i3configger", nargs="*")
    return p.parse_args()
def count_leaves(dt, c=[0,0]):
"""
Count number of non-leaf and leaf branches.
Parameter:
dt -- the decision tree
c -- a counter
Return:
c -- a count for both non-leeaves and leaves
"""
c[0] += 1
leaves = dt.keys()
for leaf in leaves:
branches = dt[leaf].values()
for branch in branches:
if isinstance(branch, dict):
count_leaves(branch, c)
else:
c[1] += 1
return c | b9fc9749f5018712933becdd8e5e784ff190d695 | 42,445 |
import posixpath
import os
def PosixPath(file_path):
    """Converts the file path to posixpath format.

    NOTE(review): a leading os.sep yields an empty first component that
    posixpath.join drops, so absolute paths lose their root -- confirm
    callers only pass relative paths.
    """
    components = file_path.split(os.sep)
    return posixpath.join(*components)
from typing import OrderedDict
def _to_ordered_dict(d):
"""
Recursively converts a dict to OrderedDict.
This is needed to preserve `(key, value)` ordering
with iterating through `dict`'s between Python 2 and Python 3.
Parameters
----------
d : dict
Dictionary to order.
Returns
-------
OrderedDict
Recursively order representation of the input dictionary.
"""
d_ordered = OrderedDict()
for key, value in sorted(d.items()):
if isinstance(value, dict):
d_ordered[key] = _to_ordered_dict(value)
elif isinstance(value, list) and (all(isinstance(item, dict) for item in value)):
list_of_ordered_dicts = []
for item in value:
list_of_ordered_dicts.append(_to_ordered_dict(item))
d_ordered[key] = list_of_ordered_dicts
else:
d_ordered[key] = value
return d_ordered | 350556c1fc885e7b495509ca0ed9a0ff47f2eb70 | 42,447 |
import os
import re
import logging
def resolve_addon(toc):
    """Parse the TOC file of an addon to get meta data.

    Returns a dict with the TOC path/dir/name; on success it also carries a
    'toc' dict of '## Key: Value' headers, otherwise an 'error' entry.
    """
    toc_dir, toc_base = os.path.split(toc)
    toc_name = os.path.splitext(toc_base)[0]
    info = {
        'toc_path': toc,
        'toc_dir': toc_dir,
        'toc_name': toc_name,
    }
    # The TOC file must live inside a directory of the same name.
    if toc_name != os.path.basename(toc_dir):
        return dict(
            info, error="mismatch TOC and directory name")
    # parse '## Key : Value' header lines
    info['toc'] = {}
    header_re = re.compile(r'^##\s*(\w+)\s*:\s*(.+)$')
    log = logging.getLogger('resolve')
    log.info("read {}".format(toc))
    with open(toc, 'r') as handle:
        try:
            for line in handle.readlines():
                match = header_re.match(line)
                if match is None:
                    continue
                info['toc'][match.group(1)] = match.group(2).strip()
        except UnicodeDecodeError:
            # tolerate undecodable TOC files: keep whatever parsed so far
            pass
    return info
import subprocess
def git_modified_files(path, suffix):
    """Obtain a list of modified files since the last commit merged by GitHub.

    Args:
        path: path to examine.
        suffix: path suffix to filter with.
    Return:
        A list of strings providing the paths of modified files in the repo.
    """
    command = ['tools/git/modified_since_last_github_commit.sh', 'api', 'proto']
    try:
        output = subprocess.check_output(command)
        return output.decode().split()
    except subprocess.CalledProcessError as err:
        # Exit status 1 from the script means "no modified files";
        # anything else is a real failure and is re-raised.
        if err.returncode == 1:
            return []
        raise
import argparse
def parse_args():
    """Return the populated Namespace of command-line args (parses sys.argv)."""
    parser = argparse.ArgumentParser(prog='lyvi')
    parser.add_argument(
        'command', nargs='?',
        help='send a command to the player and exit')
    parser.add_argument(
        '-c', '--config-file',
        help='path to an alternate config file')
    parser.add_argument(
        '-l', '--list-players', action='store_true',
        help='print a list of supported players and exit')
    parser.add_argument(
        '-v', '--version', action='store_true',
        help='print version information and exit')
    return parser.parse_args()
def get_cell_entry(cell):
    """
    Read the value stored in an xls spreadsheet cell.

    input: cell (xlrd.sheet.cell object), cell to be read from
    output: entry (str), text content, integer part of a number, or ''
    """
    description = str(cell)  # cell repr, e.g. "text:u'L'" or "number:3.0"
    if 'text' in description:
        return description.split("'")[1]
    if 'number' in description:
        # keep only the integer part of e.g. "number:3.0"
        return description.split(':')[1].split('.')[0]
    return ''
import re
def linearize(multiline: str) -> str:
    """
    :param multiline: A multiline string as found in indented source code.
    :return: A stripped, one-line string. All newlines and multiple
        consecutive whitespace characters are replaced by a single space.
    """
    collapsed = re.sub(r"\s+", " ", multiline)
    return collapsed.strip()
def cond(pred, then_func, else_func):
    """Run an if-then-else using user-defined condition and computation

    This operator simulates an if-like branch which chooses to do one of
    the two customized computations according to the specified condition.

    `pred` is a scalar MXNet NDArray, indicating which branch of
    computation should be used.

    `then_func` / `else_func` are user-defined zero-argument functions with
    signature `func() => NDArray or nested List[NDArray]`. The `outputs`
    they produce should have the same number of elements, all of which
    should be in the same shape, of the same dtype and stype.

    Parameters
    ----------
    pred: a MXNet NDArray representing a scalar.
        The branch condition.
    then_func: a Python function.
        The computation to be executed if `pred` is true.
    else_func: a Python function.
        The computation to be executed if `pred` is false.

    Returns
    -------
    outputs: an NDArray or nested lists of NDArrays, representing the result of computation.

    Examples
    --------
    >>> a, b = mx.nd.array([1]), mx.nd.array([2])
    >>> pred = a * b < 5
    >>> then_func = lambda: (a + 5) * (b + 5)
    >>> else_func = lambda: (a - 5) * (b - 5)
    >>> outputs = mx.nd.contrib.cond(pred, then_func, else_func)
    >>> outputs[0]
    [42.]
    <NDArray 1 @cpu(0)>
    """
    def _to_python_scalar(inputs, type_, name):
        """Converts "inputs", possibly typed mxnet NDArray, a numpy ndarray, other python types,
        to the given type
        """
        if hasattr(inputs, "asscalar"):
            inputs = inputs.asscalar()
        try:
            inputs = type_(inputs)
        except (TypeError, ValueError):
            # narrowed from a bare `except:`, which would also have
            # swallowed KeyboardInterrupt and SystemExit
            raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__))
        return inputs
    branch = _to_python_scalar(pred, bool, "pred")
    if branch:
        return then_func()
    else:
        return else_func()
def load_overrides(config, cmd_params):
    """Overrides configuration parameters (at the first level only) with the given cmd_params

    :rtype: dict
    :param config: a python dict containing the configuration parameters
    :param cmd_params: a python list containing the overriding key, values i.e. value follows key in the list
    :return: a python dict containing the overridden configuration parameters
             (the original *config* object is returned unchanged when there
             are no cmd_params)
    """
    if not cmd_params:
        return config
    modified = dict(config)
    for param in cmd_params:
        param = str(param)
        if not param.startswith('-'):
            continue
        position = cmd_params.index(param)
        if position + 1 >= len(cmd_params):
            continue
        key, value = param[1:], cmd_params[position + 1]
        # only keys already present in the config may be overridden
        if key in modified:
            print('Overriding configuration key: %s value: %s with provided value: %s'
                  % (key, modified[key], value))
            modified[key] = value
    return modified
from typing import Dict
from typing import Any
def extremise_distribution(distribution: Dict[Any, float],
                           tau: float = 1) -> Dict[Any, float]:
    """Sharpen a probability distribution using the temperature *tau*.

    Each value is raised to 1 / tau and the result renormalised:
    tau == 1 merely normalises the input, while tau -> 0 concentrates
    all probability mass on the largest entry.

    Parameters
    ----------
    distribution
        A dictionary with values equal to positive floats.
    tau
        A parameter between 1 and 0 defining the reciprocal exponent or
        'temperature' of the extremised distribution.

    Returns
    -------
    dict:
        A probability distribution with the same keys as the input and
        values proportional to the input values exponentiated to 1 / tau.
    """
    assert tau > 0
    assert min(distribution.values()) >= 0
    # rescale by the peak value first for numerical stability
    peak = max(distribution.values())
    rescaled = {key: value / peak for key, value in distribution.items()}
    normaliser = sum(value ** (1 / tau) for value in rescaled.values())
    return {key: (value ** (1 / tau) / normaliser)
            for key, value in rescaled.items()}
import copy
def gene_indexing(node_list_gene):
    """
    Invert the disorder-indexed "gene" product so relations are indexed by gene.

    :param node_list_gene: list of disorder dicts, each carrying a
        "DisorderGeneAssociation" list whose entries contain a "Gene" dict
    :return: list of gene dicts; each gene carries its own properties plus a
        "GeneDisorderAssociation" list in which every association embeds the
        originating disorder (minus its association list) under "disorder",
        e.g. {"Symbol": "KIF7", ..., "GeneDisorderAssociation": [
            {"SourceOfValidation": ..., "disorder": {...}}, ...]}
    """
    gene_dict = dict()
    for disorder in node_list_gene:
        # work on a deep copy so the caller's structures are untouched
        disorder_info = copy.deepcopy(disorder)
        # after the pop, disorder_info only describes the disorder itself
        association_list = disorder_info.pop("DisorderGeneAssociation")
        for association_info in association_list:
            # each association carries a Gene; re-index by its Symbol and
            # substitute the gene with the owning disorder
            gene_info = association_info.pop("Gene")
            gene_index = gene_info["Symbol"]
            if gene_index not in gene_dict:
                # first occurrence: start an empty association list here.
                # (The original reset "GeneDisorderAssociation" to [] on
                # *every* occurrence, so a gene linked to several
                # disorders kept only its last association.)
                gene_dict[gene_index] = {"GeneDisorderAssociation": []}
            # copy/refresh the gene's own properties
            for gene_prop, gene_prop_value in gene_info.items():
                gene_dict[gene_index][gene_prop] = gene_prop_value
            # insert disorder_info in the association_info
            association_info["disorder"] = disorder_info
            # extend the GeneDisorderAssociation with this new relation
            gene_dict[gene_index]["GeneDisorderAssociation"].append(association_info)
    return list(gene_dict.values())
def work_strategy2(lx, l, Ve, Vr, Vf, wf, wr):
    """
    Return the work done by a swimmer who escapes a rip current via
    strategy 2 (swim out of the rip channel, then parallel to shore, then
    straight back in through the feeder channel).
    """
    # work within the rip channel
    work_rip = ((Ve**2 + Vr**2)**1.5) * (wr / (2 * Ve))
    # work outside the rip, swimming parallel to shore
    work_parallel = (Ve**2) * (lx - (wr / 2))
    # work outside the rip, swimming directly back to shore
    work_return = (Ve**2) * (l - wf)
    # work in the feeder channel, swimming directly back to shore
    work_feeder = ((Ve**2 + Vf**2)**1.5) * (wf / Ve)
    if l - wf < 0:
        # starting distance offshore must be beyond the feeder width
        return 10
    return work_rip + work_parallel + work_return + work_feeder
from typing import Optional
import calendar
def parse_month(name: str) -> Optional[int]:
    """Return the month that unambiguously matches name partially or fully.

    Return None if zero or more than one month matches. Month names that
    are matched against are localized according to the currently active
    system locale.
    """
    needle = name.lower()
    matches = [
        number
        for number, month in enumerate(calendar.month_name)
        if number and month.lower().startswith(needle)
    ]
    if len(matches) == 1:
        return matches[0]
    return None
import math
def distance2D(loc2d1=tuple(), loc2d2=tuple()):
    """
    Euclidean distance between two 2-D points.

    loc2d1 : (x1, y1)
    loc2d2 : (x2, y2)
    return : distance
    """
    dx = loc2d1[0] - loc2d2[0]
    dy = loc2d1[1] - loc2d2[1]
    return math.sqrt(dx**2 + dy**2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.