content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import base64
import json
def build_async_task_result(content, content_type, filename):
    """Build an async-task result string by JSON-encoding the payload.

    This is necessary if we want to return multiple values, as the task
    result by default is just a plain string.

    Args:
        content: Raw payload bytes to embed (base64-encoded in the result).
        content_type: MIME type describing ``content``.
        filename: Name to associate with the payload.

    Returns:
        A JSON string with keys ``content`` (base64 text), ``content_type``
        and ``filename``.
    """
    payload = {
        # b64encode returns bytes; decode to ASCII so json.dumps accepts it
        # (passing bytes to json.dumps raises TypeError on Python 3).
        'content': base64.b64encode(content).decode('ascii'),
        'content_type': content_type,
        'filename': filename,
    }
    result = json.dumps(payload)
    return result
import sys
def prependBOMifNotUTF8(data):
    """
    Ensures that def.json is either UTF16 or UTF-8 with BOM.
    data = byte sequence
    Returns the same byte sequence, optionally prefixed with a UTF8 BOM.
    """
    BOM = b'\xEF\xBB\xBF'
    if data.startswith(BOM):
        # Already carries a UTF-8 BOM; nothing to add.
        return data
    try:
        data.decode('utf-16')
        if b'\x00[' in data or b'[\x00' in data:
            # If encoded as UTF-16, then it must contain the [ somewhere,
            # together with a NUL byte. A valid def.json contains a "[".
            # The "[" character is encoded as "\x00[" or "[\x00" in UTF-16.
            return data
        # Decoded as UTF-16 but no NUL-adjacent "[": treat as a false
        # positive and fall through to the UTF-8 check below.
    except UnicodeDecodeError:
        # If decoding fails, then it is not UTF-16
        pass
    # Try to interpret the data as UTF-8.
    try:
        data.decode('utf-8')
        # Valid UTF-8 without a BOM: prefix one so the consumer accepts it.
        return BOM + data
    except UnicodeDecodeError as e:
        # http://extension.maxthon.com/upload rejects files that are not
        # encoded as UTF-16 or UTF-8.  Warn and return the data unchanged.
        sys.stderr.write("WARNING: def.json is not encoded as UTF-16 or ")
        sys.stderr.write("UTF-8. Please save the file as UTF-8 or UTF-16.")
        sys.stderr.write("\nUnicodeDecodeError: %s\n" % e)
        return data
def get_size_and_path(line):
    """From a 'ls -l' line, return columns 4 (size) and 8 (path)."""
    fields = line.split()
    return int(fields[4]), fields[8]
def isMember(a, B):
    """Return True if sequence ``a`` matches, element by element, any
    sequence in ``B``.

    Two sequences match when they have the same length and equal elements
    at every position (container types may differ, e.g. list vs tuple).

    Args:
        a: Candidate sequence.
        B: Iterable of sequences to compare against.

    Returns:
        bool: True if some ``b`` in ``B`` matches ``a``, else False.
    """
    # any/all with zip replaces the original manual match counter.
    return any(
        len(a) == len(b) and all(x == y for x, y in zip(a, b))
        for b in B
    )
def _as_float(string, default: float = 0.0):
"""Return first sequence as float."""
return float(string.strip().partition(" ")[0] or default) | e0d199ab8e71f9e87174f4fb78cdd351ccfc69d0 | 49,139 |
def legendre_symbol(a, p):
    """
    Compute the Legendre symbol via Euler's criterion.
    :param a: number
    :param p: prime number
    :return: 1 if `a` has a square root modulo p, -1 otherwise
        (0 when p divides a).
    """
    euler = pow(a, (p - 1) // 2, p)
    # Euler's criterion yields p-1 for non-residues; map it to -1.
    return -1 if euler == p - 1 else euler
def FE_concatenate_multiple_columns(df, cols, filler=" ", drop=True):
    """
    Combine several string columns into one NLP text column named 'combined'.

    The combined column can then be fed to TFIDF or BERT-style embeddings.

    Inputs
    ---------
    df: pandas dataframe
    cols: string columns to concatenate into the single combined column
    filler: string (default: " ") used to join the column values
    drop: default True. If True, drop the input columns; if False, keep them.

    Outputs:
    ----------
    df: copy of the input with a new ['combined'] column appended.
    """
    result = df.copy(deep=True)
    joined = result[cols].apply(
        lambda r: filler.join(r.values.astype(str)), axis=1)
    result['combined'] = joined
    if drop:
        return result.drop(cols, axis=1)
    return result
def tabla_mulitplos(inicio: int, fin: int, base: int) -> int:
    """Print the sequence from ``inicio`` to ``fin`` (both inclusive),
    annotating each multiple of ``base`` with the running total of the
    multiples seen so far, and return that accumulated sum.

    :param inicio: First number of the sequence.
    :param fin: Last number of the sequence (inclusive).
    :param base: Base used to test for multiples.
    :return: Accumulated sum of the multiples of ``base``.
    """
    acumulado = 0
    for n in range(inicio, fin + 1):
        if n % base != 0:
            print(n)
            continue
        acumulado += n
        print(f"{n} múltiplo de {base} - suma acumulada: {acumulado}")
    return acumulado
def turning(player_data, p, t):
    """Return True when player ``p`` changed heading between ticks t-1 and t.

    >>> player_data = {}
    >>> player_data['a'] = pd.DataFrame({'tick':[4,5],'angle':[10,10]}, [4,5])
    >>> turning(player_data, 'a', 5)
    False
    >>> turning(player_data, 'a', 6)
    False
    >>> player_data['a'] = pd.DataFrame({'tick':[4,5],'angle':[10,11]}, [4,5])
    >>> turning(player_data, 'a', 5)
    True
    """
    # NOTE(review): `t in player_data[p]['tick']` tests Series *index*
    # membership, not the tick values; it works because the frames are
    # indexed by tick (see doctest) -- confirm callers keep that invariant.
    if not (t in player_data[p]['tick']) or not ((t-1) in player_data[p]['tick']):
        return False
    # Tolerance guards against float rounding noise in stored angles.
    if abs(player_data[p].loc[t,'angle'] - player_data[p].loc[t-1,'angle']) > 1e-8:
        return True
    return False
def _validate_asset_type(value):
"""
:param value:
:return:
"""
return value in ['I', 'E', 'F', 'FD'] | 811483fd811758a599161720bf41b9b7e784ceb0 | 49,148 |
def gen_tuple_type(full_name, *args):
    """
    Generate direct c++ wrapper code for a particular Tuple type.
    Args:
        full_name: fully specified dotted name from codebase,
            module.class.subclass. ... .typename
        *args: sequence of python Types
    Returns:
        A list of strings, containing c++ code implementing this wrapper.
    """
    # Unqualified type name, e.g. "pkg.mod.T" -> "T".
    name = full_name.rsplit(".", 1)[-1]
    # Element accessors are named a0, a1, ... in declaration order.
    keys = ["a" + str(i) for i in range(len(args))]
    items = list(zip(keys, list(args)))
    # Reversed order: destructors must run last-element-first.
    revkeys = list(keys)[::-1]
    ret = list()
    ret.append(f"// Generated Tuple {name}")
    for key, value in items:
        ret.append(f"// {key}={value}")
    ret.append(f"class {name} {{")
    ret.append("public:")
    for key, value in items:
        ret.append(f" typedef {value} {key}_type;")
    # Each accessor reinterprets a slice of the flat data buffer; the offset
    # is the sum of the sizes of all preceding elements (size1 + size2 + ...).
    for i, (key, value) in enumerate(items):
        offset = (
            "" if i == 0 else " + " + " + ".join(["size" + str(j) for j in range(1, i + 1)])
        )
        ret.append(f" {key}_type& {key}() const {{ return *({key}_type*)(data{offset}); }}")
    ret.append(" static Tuple* getType() {")
    ret.append(" static Tuple* t = Tuple::Make({")
    ret.append(
        ",\n".join(
            [f" TypeDetails<{name}::{key}_type>::getType()" for key in keys]
        )
    )
    ret.append(" });")
    ret.append(" return t;")
    ret.append(" }")
    ret.append("")
    # Conversion from a Python object via the PyInstance machinery.
    ret.append(f" static {name} fromPython(PyObject* p) {{")
    ret.append(f" {name} l;")
    ret.append(
        " PyInstance::copyConstructFromPythonInstance"
        "(getType(), (instance_ptr)&l, p, ConversionLevel::ImplicitContainers);"
    )
    ret.append(" return l;")
    ret.append(" }")
    ret.append("")
    ret.append(" PyObject* toPython() {")
    ret.append(
        " return PyInstance::extractPythonObject((instance_ptr)this, getType());"
    )
    ret.append(" }")
    ret.append("")
    # Copy assignment: element-wise assignment through the accessors.
    ret.append(f" {name}& operator = (const {name}& other) {{")
    for key in keys:
        ret.append(f" {key}() = other.{key}();")
    ret.append(" return *this;")
    ret.append(" }")
    ret.append("")
    # Copy constructor: placement-new each element from the source.
    ret.append(f" {name}(const {name}& other) {{")
    for key in keys:
        ret.append(f" new (&{key}()) {key}_type(other.{key}());")
    ret.append(" }")
    ret.append("")
    # Destructor: destroy elements in reverse declaration order.
    ret.append(f" ~{name}() {{")
    for key in revkeys:
        ret.append(f" {key}().~{key}_type();")
    ret.append(" }")
    ret.append("")
    # Default constructor: exception-safe -- per-element init flags let a
    # partially constructed tuple be unwound if an element ctor throws.
    ret.append(f" {name}() {{")
    for key in keys:
        ret.append(f" bool init{key} = false;")
    ret.append(" try {")
    for key in keys:
        ret.append(f" new (&{key}()) {key}_type();")
        ret.append(f" init{key} = true;")
    ret.append(" } catch(...) {")
    ret.append(" try {")
    for key in revkeys:
        ret.append(f" if (init{key}) {key}().~{key}_type();")
    ret.append(" } catch(...) {")
    ret.append(" }")
    ret.append(" throw;")
    ret.append(" }")
    ret.append(" }")
    ret.append("")
    # Element-wise constructor (only for non-empty tuples), with the same
    # exception-safety pattern as the default constructor.
    if len(keys) > 0:
        ret.append(
            f" {name}("
            + ", ".join([f"const {key}_type& {key}_val" for key in keys])
            + ") {"
        )
        for key in keys:
            ret.append(f" bool init{key} = false;")
        ret.append(" try {")
        for key in keys:
            ret.append(f" new (&{key}()) {key}_type({key}_val);")
            ret.append(f" init{key} = true;")
        ret.append(" } catch(...) {")
        ret.append(" try {")
        for key in revkeys:
            ret.append(f" if (init{key}) {key}().~{key}_type();")
        ret.append(" } catch(...) {")
        ret.append(" }")
        ret.append(" throw;")
        ret.append(" }")
        ret.append(" }")
    ret.append("private:")
    # sizeN constants record each element's byte size; the raw buffer is
    # the concatenation of all element storage.
    for i, key in enumerate(keys):
        ret.append(f" static const int size{i + 1} = sizeof({key}_type);")
    ret.append(
        " uint8_t data[{}];".format(
            " + ".join(["size" + str(i) for i in range(1, len(keys) + 1)])
        )
    )
    ret.append("};")
    ret.append("")
    # TypeDetails specialization: exposes the Tuple* and sanity-checks that
    # the compiled layout matches the interpreter's bytecount.
    ret.append("template <>")
    ret.append(f"class TypeDetails<{name}> {{")
    ret.append("public:")
    ret.append(" static Type* getType() {")
    ret.append(f" static Type* t = {name}::getType();")
    ret.append(" if (t->bytecount() != bytecount) {")
    ret.append(
        f' throw std::runtime_error("{name} somehow we have the wrong bytecount!");'
    )
    ret.append(" }")
    ret.append(" return t;")
    ret.append(" }")
    ret.append(" static const uint64_t bytecount = ")
    ret.append(" +\n".join([f" sizeof({name}::{key}_type)" for key in keys]) + ";")
    ret.append("};")
    ret.append("")
    ret.append(f"// END Generated Tuple {name}")
    ret.append("")
    # Newline-terminate every generated line.
    return [e + "\n" for e in ret]
def get_nlat_nlon(n_nodes, lonlat_ratio):
    """Recover the (n_lat, n_lon) dimensions of an equiangular grid given in 1D.

    Parameters
    ----------
    n_nodes : int
        Total number of nodes (n_lat * n_lon).
    lonlat_ratio : int
        lonlat_ratio = n_lon / n_lat, the aspect ratio used to reshape the
        1D input into a 2D image. A ratio of 2 means the grid has the same
        resolution in latitude and longitude.
    """
    n_lat = int((n_nodes / lonlat_ratio) ** 0.5)
    n_lon = int((n_nodes * lonlat_ratio) ** 0.5)
    if n_lat * n_lon != n_nodes:
        # Rounding made the product miss; try re-deriving one side from the
        # other (first n_lon from n_lat, then n_lat from the updated n_lon).
        if n_nodes % n_lat == 0:
            n_lon = n_nodes // n_lat
        if n_nodes % n_lon == 0:
            n_lat = n_nodes // n_lon
    assert n_lat * n_lon == n_nodes, f'Unable to unpack nodes: {n_nodes}, lonlat_ratio: {lonlat_ratio}'
    return n_lat, n_lon
def flags_to_run_name(flag_values):
    """Build a compact run-name string from a flags object."""
    if flag_values.num_output_propagation_steps < 0:
        # A negative value means "mirror the input propagation depth".
        nop_steps = flag_values.num_input_propagation_steps
    else:
        nop_steps = flag_values.num_output_propagation_steps
    terms = [
        ('mk', flag_values.model_kind),
        ('h', flag_values.hidden_dim),
        ('nip', flag_values.num_input_propagation_steps),
        ('nop', nop_steps),
        ('mgn', float(flag_values.max_gradient_norm)),
        ('bspd', flag_values.batch_size_per_device),
        ('lr', flag_values.learning_rate),
        ('mvb', flag_values.max_validation_batches),
    ]
    if flag_values.model_kind.startswith('transformer'):
        terms.append(('nh', flag_values.num_transformer_attention_heads))
        terms.append(('rb', flag_values.use_relational_bias))
    if flag_values.max_num_subtokens != 0:
        terms.append(('ns', flag_values.max_num_subtokens))
    if flag_values.warmup_steps_fraction:
        terms.append(('wsf', float(flag_values.warmup_steps_fraction)))
    if flag_values.model_initialization_seed:
        terms.append(('seed', flag_values.model_initialization_seed))
    return '_'.join(f'{short_name}{value}' for short_name, value in terms)
def list2str(l):
    """
    Convert a list to a single concatenated string.

    Uses ``str.join`` instead of repeated ``+`` concatenation, which is
    quadratic in the number of elements.

    :param l: list of arbitrary items
    :returns: the concatenation of ``str(item)`` for every item
    """
    return ''.join(str(item) for item in l)
def remove_linear_chains(d):
    """
    Function to remove nodes that have exactly one parent and one child.
    Parameters
    ----------
    d: ehreact.diagram.diagram.Diagram
        A Hasse diagram.
    Returns
    -------
    d: ehreact.diagram.diagram.Diagram
        The modified Hasse diagram without linear chain nodes.
    """
    # Snapshot the node keys up front: the node dict is mutated while we walk.
    node_list = list(d.nodes)
    for n in node_list:
        key_node = n
        # "" denotes the root node, which has no parent edge to inspect.
        if key_node == "":
            continue
        # assumes every non-root node has at least one parent edge -- TODO confirm
        key_parent = d.nodes[n].edges_to_parent[0].parent_node.key
        # A single child makes this node a link in a linear chain: reattach
        # its child to the grandparent, then drop this node's edges.
        if len(d.nodes[n].edges_to_child) == 1:
            d.move_node(
                key_node=d.nodes[n].edges_to_child[0].child_node.key,
                key_parent_new=key_parent,
            )
            d.delete_edges_around_node(d.nodes[n].key)
    return d
def cal_out_shape(shape, stride_h, stride_w):
    """
    Return the output shape after scaling H and W by the given strides.

    The input is an NC1HWC0 5-tuple; only the H and W axes change.
    """
    n, c1, h, w, c0 = shape
    return (n, c1, h * stride_h, w * stride_w, c0)
import re
def extract_number(f):
    """
    Extract the trailing integer value from a filename.

    :param f: filename string
    :return: tuple of (trailing integer, or -1 when absent; original
        filename), suitable as a sort key ordering by number then name.
    DUPLICATE FROM ASSETS.PY
    To connect to both run-makehuman and run-textures
    """
    # Raw string fixes the invalid-escape-sequence warning from "\d+$".
    s = re.findall(r"\d+$", f)
    return (int(s[0]) if s else -1, f)
def is_valid_color(color, threshold):
    """Return True if each of the r, g, b components of ``color``
    (r, g, b[, a]) is strictly above ``threshold``.

    The alpha channel, when present, is ignored (as in the original).
    """
    return all(component > threshold for component in color[:3])
import csv
def readCSV(path2File):
    """
    Return a list of links to pdfs.
    Assumes the first column of the csv file corresponds to the links.
    """
    with open(path2File, newline='') as csvfile:
        reader = csv.reader(csvfile)
        return [row[0] for row in reader]
import asyncio
def run_in_loop(future):
    """Run a co-routine in the default event loop.

    Blocks until ``future`` completes and returns its result.  Exceptions
    raised by the co-routine are printed and then re-raised unchanged.
    """
    try:
        # NOTE(review): get_event_loop() is deprecated for this pattern on
        # newer Python versions; asyncio.run() would change loop lifetime
        # semantics, so the call is kept -- confirm the target Python version.
        result = asyncio.get_event_loop().run_until_complete(future)
    except Exception as ex:
        print("Exception: {}".format(ex))
        raise
    return result
def list_to_tran(list):
    """
    Turn a list of predicted transliteration tags into a printable string.
    :param list: list of tags
    :return: string of transliteration
    """
    transcription = ""
    for tag in list:
        if tag.endswith("(0)"):
            # Drop the determinative marker.
            transcription += tag[:-3]
        elif tag.endswith("(0)-") or tag.endswith("(0)."):
            # Drop the marker but keep the trailing separator.
            transcription += tag[:-4] + tag[-1]
        elif tag.endswith(")") or tag.endswith(")-") or tag.endswith(")."):
            # Non-zero variant markers are skipped entirely (no spacing).
            continue
        else:
            transcription += tag
        if tag[-1] not in "-.":
            transcription += " "
    return transcription
def fun(s):
    """Determine if the passed in email address is valid based on the following rules:
    It must have the username@websitename.extension format type.
    The username can only contain letters, digits, dashes and underscores [a-z], [A-Z], [0-9], [_-].
    The website name can only have letters and digits [a-z][A-Z][0-9].
    The extension can only contain letters [a-z][A-Z].
    The maximum length of the extension is 3.
    Args:
        s (str): Email address to check
    Returns:
        (bool): Whether email is valid or not
    """
    # Fix: the original fell off the end (returning None) when the counts
    # were wrong; always return an explicit bool.
    if s.count("@") != 1 or s.count(".") != 1:
        return False
    user, domain = s.split("@")
    if "." not in domain:
        # The single dot sits in the username part, so there is no
        # extension; the original raised ValueError on this input.
        return False
    website, extension = domain.split(".")
    if not user.replace("-", "").replace("_", "").isalnum():
        return False
    # Fix: isalpha (not isalnum) enforces the letters-only extension rule.
    return website.isalnum() and extension.isalpha() and len(extension) <= 3
def make_exec001_scen():
    """Specifies the conditions of each scenario. Indexing follows:
    [n_pops,pop_mode,pop1_culture,pop2_culture,pop_start,pop_hire]"""
    # Scenarios are returned in order as one literal list of condition lists.
    return [
        # Scenario 1: One population, any culture is equally likely
        [1, 'uniform_2var'],
        # Scenario 2: One population, average culture
        [1, 'beta_2var', {'min': 0.1, 'max': 0.9}],
        # Scenario 3: Two populations, pop2 is more inclusive
        [
            2,
            'beta_2var',
            {'min': [0.5], 'max': [0.9]},
            {'min': [0.1], 'max': [0.9, 'index_p1_culture']},
            {'min': [0.5], 'max': [0.9]},
            'index_p1_start',
        ],
        # Scenario 4: Two populations, pop2 is more contest
        [
            2,
            'beta_2var',
            {'min': [0.1], 'max': [0.9]},
            {'min': [0.5, 'index_p1_culture'], 'max': [0.9]},
            {'min': [0.5], 'max': [0.9]},
            'index_p1_start',
        ],
        # Scenario 5: Two populations, more inclusive pop2 enters workforce
        [
            2,
            'beta_2var',
            {'min': [0.5], 'max': [0.9]},
            {'min': [0.1], 'max': [0.9, 'index_p1_culture']},
            {'min': [0.6], 'max': [0.9]},
            {'min': [0.5], 'max': ['index_p1_start']},
        ],
        # Scenario 6: Two populations, more contest pop2 enters workforce
        [
            2,
            'beta_2var',
            {'min': [0.1], 'max': [0.9]},
            {'min': [0.5, 'index_p1_culture'], 'max': [0.9]},
            {'min': [0.6], 'max': [0.9]},
            {'min': [0.5], 'max': ['index_p1_start']},
        ],
    ]
import base64
def decode_bytes(s: str) -> bytes:
    """Decode base64-encoded bytes, skipping the leading marker character.
    Args:
        s: Encoded binary content; the first character is dropped before
            decoding.
    Returns:
        Decoded binary data.
    Raises:
        binascii.Error: If the data isn't valid.
    """
    payload = s[1:]
    return base64.b64decode(payload)
def queryset_to_dict(self):
    """ Convert each element of the QuerySet into its (unexpanded) dict form. """
    return [element.to_dict(expand=False) for element in self]
def ctof(temp_c):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    scale = 9 / 5
    return temp_c * scale + 32
def rename_cols(data):
    """Return a copy of the DataFrame with every column name capitalized."""
    capitalized = data.rename(columns=str.capitalize)
    return capitalized
def about():
    """Return the contents of ./about.html (the About page)."""
    # Context manager fixes the original's leaked file handle, which was
    # never closed and lingered until garbage collection.
    with open('./about.html', 'r') as page:
        return page.read()
import re
def is_esemble_id(name):
    """
    Return True when ``name`` is a stable Ensembl ID.
    Stable IDs are created in the form
    ENS[species prefix][feature type prefix][a unique eleven digit number].
    """
    pattern = r"ENS.*G[0-9]{11}"
    return re.match(pattern, name) is not None
import struct
def serialize_ident(digest: str, schema: str) -> bytes:
    """
    Serialize as: len_digest digest_str len_schema schema_str,
    where each length is a little-endian uint32 preceding the bytes.
    """
    def _length_prefixed(text: str) -> bytes:
        # Note: pack length/width from the str length, as the original did.
        return struct.pack(f'<I{len(text)}s', len(text), text.encode())
    return b''.join([_length_prefixed(digest), _length_prefixed(schema)])
def addouter(C, b, factor=1):
    """Add in place `factor` times the outer product of vector `b`,
    without any dimensional consistency checks.
    """
    rows, cols = C.shape
    for r in range(rows):
        scaled = factor * b[r]
        for c in range(cols):
            C[r, c] += scaled * b[c]
    return C
import argparse
def get_args(add_help=True):
    """Parse command-line arguments for inference.
    Args:
        add_help: Whether to add the -h option to the parser.
    Returns:
        An argparse.Namespace holding the parsed parameters.
    """
    parser = argparse.ArgumentParser(
        description='PaddlePaddle Args', add_help=add_help)
    return parser.parse_args()
def product_rule(u, v, du, dv):
    """The product rule of calculus: d/dx (u*v) = u*dv + v*du."""
    first_term = u * dv
    second_term = v * du
    return first_term + second_term
def sum_sub(a, b):
    """Return the sum and the difference of ``a`` and ``b`` as a tuple."""
    return a + b, a - b
def rgb(red, green, blue):
    """
    Build a tkinter-compatible "#rrggbb" hex color from 8-bit components.
    """
    return f"#{red:02x}{green:02x}{blue:02x}"
import torch
def create_features(order_param_input, atom_inputs, atom_indices, zeros):
    """ Stitch together the fixed atom-type inputs and the learnable order
    parameters, ported and adapted from Tess Smidt's code.
    """
    order_param = torch.cat((zeros, order_param_input), dim=0)
    # Gather one atom-type row per index: result is [N, atom_types].
    atom_types = torch.cat([atom_inputs[idx] for idx in atom_indices], dim=0)
    return torch.cat((atom_types, order_param), dim=-1)
def _get_game_points_color(points: int) -> str:
"""Returns the markup color that should be used to display the game score."""
assert 0 <= points < 7, "Invalid game points: %s" % points
if points < 4:
return "33aa33" # green
if points == 4:
return "ffffff" # white
if points == 5:
return "ffff33" # yellow
return "ff3333" # red | 6b6b54b7ebd8be681929af57abd609732e6aa79c | 49,185 |
def transform_dict(d, xformer):
    """
    Transform values of ``d`` in place: for every key the two dicts share,
    replace ``d[key]`` with ``xformer[key](d[key])``.  Returns ``d``.
    """
    shared_keys = (key for key in xformer if key in d)
    for key in shared_keys:
        d[key] = xformer[key](d[key])
    return d
def float_else_zero(sstring):
    """Return the string converted to float, or zero when conversion fails.
    :param sstring: String to be converted
    :return: ``float(sstring)`` if ``sstring`` can be converted to float
        (e.g. ``"3.14"``), else ``0``
    """
    try:
        value = float(sstring)
    except ValueError:
        return 0
    return value
def _local_name(element):
"""Strip namespace from the XML element's name"""
if element.tag and element.tag[0] == '{':
return element.tag.rpartition('}')[2]
return element.tag | 0cf7b7d2aa3571679e49a4a5fb18c7e7a4193683 | 49,188 |
import binascii
import codecs
def hexToBase64(hex):
    """Convert a hex string to its base64 encoding (bytes, no trailing newline)."""
    raw = codecs.decode(hex, 'hex')
    # b2a_base64 always appends a newline; slice it off.
    return binascii.b2a_base64(raw)[:-1]
from datetime import datetime
def date():
    """
    Return today's date formatted like "January 1, 2024".
    """
    now = datetime.today()
    return f"{now.strftime('%B')} {now.day}, {now.year}"
def variable(library):
    """
    Example variable: the vital-statistics entry of the ECON section.
    """
    econ_section = library['ECON']
    return econ_section['VIT_STAT']
def build_event_info(info, time):
    """Return a copy of the event info dict with 'created_at' set to ``time``."""
    enriched = dict(info)
    enriched['created_at'] = time
    return enriched
def aggregate(sequence, func, seed=None):
    """
    Apply an accumulator function over a sequence (a left fold).
    Args:
        sequence: iterable
            Sequence of items to go through.
        func: callable
            Accumulator function expecting two arguments (res, next).
        seed: any
            Initial aggregation value. If set to None, the first item is
            used.
    Returns:
        any
    """
    iterator = iter(sequence)
    if seed is None:
        # No seed: start from the first item (None when the input is empty).
        accumulated = next(iterator, None)
    else:
        accumulated = seed
    for item in iterator:
        accumulated = func(accumulated, item)
    return accumulated
def get_carl_location(record):
    """
    Uses 945 field to extract library code, bib number, and item number
    from record.

    :param record: MARC21 record
    :return: dict with 'site-code', 'ils-bib-number' and 'ils-item-number'
        keys (only those actually present in the subfield); empty dict when
        the 945 field or its $a subfield is missing.
    """
    output = {}
    field945 = record['945']
    if field945 is None:
        return output
    subfield_a = field945['a']
    if subfield_a is None:
        return output
    keys = ('site-code', 'ils-bib-number', 'ils-item-number')
    # zip() truncates, so a malformed subfield with fewer than three tokens
    # yields a partial dict instead of raising IndexError as before.
    output.update(zip(keys, subfield_a.split(" ")))
    return output
def intersection(a, b):
    """
    Return the first node of list ``b`` that also appears in list ``a``
    (the intersection node), or None when the lists do not intersect.

    Runs in O(A + B) time and uses O(A) extra memory for the ``seen`` set.

    Bug fix: the original stored ``1`` as the dict value and returned that
    flag instead of the intersection node itself.
    """
    seen = set()
    node = a.head
    while node is not None:
        seen.add(node)
        node = node._next
    node = b.head
    while node is not None:
        if node in seen:
            return node
        node = node._next
    return None
import torch
def word_embed(data, embedding_dict, pad_size=60):
    """
    data: the dataset as a list of sentences, e.g. [[sentence1], ..., [sentence_m]].
    embedding_dict: maps a word to a 300-d vector
    pad_size: number of word slots per sentence (unused slots stay zero)
    return:
        word_embeddings: a [len(data), pad_size, 300] tensor of GloVe vectors
    """
    word_embeddings = torch.zeros(len(data), pad_size, 300)
    for sentence_idx, sentence in enumerate(data):
        for word_idx, word in enumerate(sentence):
            word_embeddings[sentence_idx][word_idx] = embedding_dict[word]
    return word_embeddings
import re
def sanitize_license_plate(number):
    """Sanitize a license plate number to [A-Z0-9]+, no dashes/spaces."""
    cleaned = re.sub(r'[ -]', '', number.strip().upper())
    if re.match(r'^[A-Z0-9]+$', cleaned):
        return cleaned
    return None
def with_last_multiplicity(template_layers_config, multiplicity):
    """
    Change the multiplicity of the last layer in a layered micro-service
    application, while keeping the average service time for a user request
    constant (work per replica scales down as replicas scale up).
    """
    layers_config = list(template_layers_config)
    last_layer = layers_config[-1]
    layers_config[-1] = last_layer._replace(
        average_work=last_layer.average_work / multiplicity,
        multiplicity=multiplicity)
    return layers_config
def greater_than_previous_counter(iterable: list) -> int:
    """
    Count how many adjacent pairs are strictly increasing, i.e. how often
    item[i+1] > item[i] (items are compared as ints).
    """
    return sum(
        int(nxt) > int(prev)
        for prev, nxt in zip(iterable, iterable[1:])
    )
def merge_prop_labels(labels):
    """After joining multiple propositions, we need to decide the new type.
    Rules:
    1. if the span is a single prop, keep the label
    2. if the span props have the same type, use that type
    3. Else, rules from Jon: policy>value>testimony>reference>fact
    """
    if len(labels) == 1:
        return labels[0]
    unique = set(labels)
    if len(unique) == 1:
        return next(iter(unique))
    # Rule 3: highest-priority label wins.
    for label in ('policy', 'value', 'testimony', 'reference', 'fact'):
        if label in unique:
            return label
    raise ValueError("weird labels: {}".format(" ".join(unique)))
import os
import subprocess
def install(pw_root, env):
    """Installs rust tools using cargo.

    Args:
        pw_root: Path to the Pigweed checkout root.
        env: Environment wrapper supporting prepend/set and use as a
            context manager.

    Returns:
        True on success; any failing cargo invocation raises
        subprocess.CalledProcessError instead.
    """
    prefix = os.path.join(pw_root, '.cargo')
    # Adding to PATH at the beginning to suppress a warning about this not
    # being in PATH.
    env.prepend('PATH', os.path.join(prefix, 'bin'))
    if 'CARGO_TARGET_DIR' not in os.environ:
        env.set('CARGO_TARGET_DIR', os.path.expanduser('~/.cargo-cache'))
    # packages.txt contains packages one per line with two fields: package
    # name and version.
    package_path = os.path.join(pw_root, 'pw_env_setup', 'py', 'pw_env_setup',
                                'cargo_setup', 'packages.txt')
    with env(), open(package_path, 'r') as ins:
        for line in ins:
            line = line.strip()
            # Skip blank lines and comments.
            if not line or line.startswith('#'):
                continue
            package, version = line.split()
            cmd = [
                'cargo',
                'install',
                # If downgrading (which could happen when switching branches)
                # '--force' is required.
                '--force',
                '--root', prefix,
                '--version', version,
                package,
            ]  # yapf: disable
            subprocess.check_call(cmd)
    return True
def huntingBehaviorVerification(behList, huntBehList, sigh):
    """
    Check that every behavior ID referenced by the hunting section exists in
    the sighting's behavior section; return one error row per missing ID.
    """
    return [
        ["-", "BehaviorId in huntingQuery that does not exist in behavior section", b, sigh]
        for b in huntBehList
        if b not in behList
    ]
import struct
def bytes_to_fp32(bytes_data, is_big_endian=False):
    """
    Unpack 4 bytes as an IEEE-754 float32.
    :param bytes_data: bytes
    :param is_big_endian: is big endian or not, default is False.
    :return: fp32
    """
    layout = '>f' if is_big_endian else '<f'
    (value,) = struct.unpack(layout, bytes_data)
    return value
def load_attribute_from_file(file_path: str):
    """
    Load attributes from a file of whitespace-separated floats.
    :param file_path: path to file
    :return: list of attributes (one list of floats per line)
    """
    with open(file_path) as source:
        return [[float(token) for token in line.split()] for line in source]
def mph(mps):
    """
    Convert meters per second to miles per hour.
    """
    # A mile is 0.0254 m/inch * 12 inch/foot * 5280 feet.
    conversion = 3600.0 / (0.0254 * 12.0 * 5280.0)
    return mps * conversion
def new_state():
    """
    Return a fresh, empty state dictionary.
    """
    state = {'thresholds': {}}
    return state
import zlib
def adler32(filepath, blocksize=2**20):
    """
    Return the adler32 of a file as an 8-character lowercase hex string.
    `blocksize` adjusts how much of the file is read into memory at a time,
    which is useful for large files.
    2**20 = 1024 * 1024 = 1 mb
    2**12 = 4 * 1024 = 4 kb
    """
    checksum = 1  # adler32's defined starting value
    with open(filepath, 'rb') as source:
        for chunk in iter(lambda: source.read(blocksize), b''):
            checksum = zlib.adler32(chunk, checksum)
    # Mask to an unsigned 32-bit value for a stable result across Python
    # versions/platforms, then zero-pad to 8 hex digits.
    return format(checksum & 0xffffffff, '08x')
import os
import shelve
def mood_schedule_retrieval(author):
    """Retrieve the mood schedule data (days and times) for a user.

    Args:
        author: User identifier; stringified to build the shelve filename
            "<author>_moodfile".

    Returns:
        Tuple (mood_days, mood_times): mood_days is a list of seven booleans
        (one per weekday) and mood_times a list of scheduled times; defaults
        are all-False days and an empty times list when nothing is stored.
    """
    mood_filename = str(author) + "_moodfile"
    # NOTE(review): assumes the shelve backend writes a ".dat" file, which is
    # platform/dbm dependent -- confirm on the deployment platform.
    if not os.path.exists(mood_filename + ".dat"):
        mood_days = [False, False, False, False, False, False, False]
        mood_times = []
    else:
        schedule = shelve.open(mood_filename)
        # Each key may be missing independently; fall back per key.
        if 'days' in schedule:
            mood_days = schedule['days']
        else:
            mood_days = [False, False, False, False, False, False, False]
        if 'times' in schedule:
            mood_times = schedule['times']
        else:
            mood_times = []
        schedule.close()
    return mood_days, mood_times
def islandPerimeter(grid):
    """
    Compute the island perimeter of a binary grid: each land cell contributes
    4 edges, minus 2 for every edge it shares with the land cell above or to
    its left.
    :type grid: List[List[int]]
    :rtype: int
    """
    if not grid or not grid[0]:
        return 0
    land_cells = 0
    shared_edges = 0
    for r, line in enumerate(grid):
        for c, cell in enumerate(line):
            if cell != 1:
                continue
            land_cells += 1
            if r > 0 and grid[r - 1][c] == 1:
                shared_edges += 1
            if c > 0 and line[c - 1] == 1:
                shared_edges += 1
    return 4 * land_cells - 2 * shared_edges
def compare_dict(old, new):
    """Compare two nested dictionaries with the same format.

    Only compares common top-level keys present in both dictionaries.  For
    each shared key whose sub-dict differs, the result maps that key to the
    list of changed sub-fields.

    Bug fix: the changed-field list is now created per key; previously a
    single list accumulated across keys, so later entries also reported
    earlier keys' changed fields.
    """
    change = {}
    sharedKeys = set(old.keys()).intersection(new.keys())
    for key in sharedKeys:
        changed_fields = [
            sub_field for sub_field in old[key]
            if old[key][sub_field] != new[key][sub_field]
        ]
        if changed_fields:
            change[key] = changed_fields
    return change
def make_batches(data, bsz):
    """Split ``data`` into consecutive chunks of at most ``bsz`` examples."""
    starts = range(0, len(data), bsz)
    return [data[start:start + bsz] for start in starts]
def lmap(*args):
    """Eagerly-evaluated map(); mirrors Python 2's list-returning behavior."""
    return [*map(*args)]
def hexstr_to_dbytes(h, swap=True):
    """ Convert a hex-string hash representation to bytes (byte-swapped to
    little-endian by default) for generating a digest. """
    if h.startswith("0x"):
        h = h[2:]
    if len(h) % 2:
        # Odd number of nibbles: left-pad with a zero.
        h = h.zfill(len(h) + 1)
    byte_order = "little" if swap else "big"
    return int(h, 16).to_bytes(len(h) // 2, byte_order)
import re
def api_result_to_release_format(api_df, id_lookup_dict=None, verbose=False):
    """
    Reindex a PyCAP API result to an NCANDA release format.
    REDCap API, when used with PyCAP, returns results as a DataFrame indexed by
    NCANDA ID (study_id - X-00000-Y-0) and combined event + arm
    (redcap_event_name)
    On the other hand, release files are typically indexed by XNAT ID
    (NCANDA_S0?????; mri_xnat_id in Redcap).
    This function will:
    1. Convert Redcap IDs to NCANDA SIDs using id_lookup_dict (as generated by
       `get_id_lookup_from_demographics_file`) or the `mri_xnat_sid` column
       (if present in api_df),
    2. Drop Redcap IDs that cannot be converted in that way,
    3. Separate event and arm to individual columns and make their names
       release-compatible,
    4. Return DataFrame indexed by release primary keys (subject, arm, visit).
    """
    # Work on a copy; the caller's frame is never mutated.
    df = api_df.copy(deep=True)
    df.reset_index(inplace=True)
    # Prefer the explicit lookup dict; fall back to the mri_xnat_sid column.
    if id_lookup_dict:
        df['subject'] = df['study_id'].map(id_lookup_dict)
    elif 'mri_xnat_sid' in df.columns:
        df['subject'] = df['mri_xnat_sid']
    else:
        raise IndexError("You must supply id_lookup_dict, or api_df has to "
                         "have the mri_xnat_sid column")
    # Rows whose study_id has no SID mapping end up NaN and are dropped.
    nan_idx = df['subject'].isnull()
    if verbose:
        study_id_nans = df.loc[nan_idx, 'study_id'].tolist()
        print ("Dropping study IDs without corresponding NCANDA SID: " +
               ", ".join(study_id_nans))
    df = df[~nan_idx]
    # redcap_event_name looks like "<event>_arm_<n>"; split it apart.
    df[['visit', 'arm']] = (df['redcap_event_name']
                            .str.extract(r'^(\w+)_(arm_\d+)$'))
    def clean_up_event_string(event):
        """
        If possible, convert Redcap event name to NCANDA release visit name.
        If conversion fails, return the original string.
        Intended to be passed to pd.Series.map.
        """
        # NOTE: Only accounts for full Arm 1 events
        match = re.search(r'^(baseline|\dy)', event)
        if not match:
            return event
        elif re.match('^\d', match.group(1)):
            # "<n>y..." events become "followup_<n>y"-style visit names.
            return "followup_" + match.group(1)
        else:
            return match.group(1)
    df['visit'] = df['visit'].map(clean_up_event_string)
    def clean_up_arm_string(arm):
        """
        If possible, convert Redcap arm name to NCANDA release arm name.
        If conversion fails, return the original string.
        Intended to be passed to pd.Series.map.
        """
        arm_dict = {'arm_1': 'standard',
                    'arm_2': 'recovery',
                    'arm_3': 'sleep',
                    'arm_4': 'maltreated'}
        if arm not in arm_dict:
            return arm
        else:
            return arm_dict[arm]
    df['arm'] = df['arm'].map(clean_up_arm_string)
    # Release primary keys become the index.
    return df.set_index(['subject', 'arm', 'visit'])
from typing import List
from typing import Tuple
def swap_rows(matrix: List[List[float]], rows: Tuple[int, int]) -> List[List[float]]:
    """
    Mutate matrix by swapping rows[0] and rows[1].

    Preconditions:
    - len(matrix) > 0
    - all(len(row) > 0 for row in matrix)
    - sum([1 for i in range(1, len(matrix)) if len(matrix[i]) != len(matrix[0])]) == 0
    - all([0 <= i < len(matrix) for i in rows])

    Returns:
        The same (mutated) matrix object, for caller convenience.
    """
    # Idiomatic tuple swap; no temporary variable needed.
    matrix[rows[0]], matrix[rows[1]] = matrix[rows[1]], matrix[rows[0]]
    return matrix  # | 666df5872d468086dc97614dfc39007e182cd7bc | 49,220 |
import os
def read_local_version():
    """
    Extracts the version number of the currently installed release from the
    local version.txt.

    Returns:
        str: The first line of version.txt without trailing whitespace,
        or "-1" if no local version file exists.
    """
    if os.path.exists("version.txt"):
        # Context manager closes the handle deterministically (the original
        # left the file open until garbage collection).
        with open("version.txt") as version_file:
            version = version_file.readline()
        return version.rstrip()
    else:
        print("no local version file exists!")
        return "-1"  # | 519045eb1edde33359ee4c46d011e4abaac89854 | 49,221 |
def DataFrame_to_JsonFile(pandas_data_frame,file_name="test.json"):
    """Converts a pandas.DataFrame to a JsonFile using orient='records';
    inverse of JsonFile_to_DataFrame.

    Args:
        pandas_data_frame (pandas.DataFrame): Frame to serialize.
        file_name (str): Destination path for the JSON file.

    Returns:
        str: The path the JSON was written to (file_name).
    """
    # NOTE(review): to_json writes to disk when given a path, so the local
    # `json` binding is unused; the path itself is what gets returned.
    json=pandas_data_frame.to_json(file_name,orient='records')
    return file_name | 0d5e86d465d38cc3c0d433b2d92574527b3c5761 | 49,222 |
def toDigits(dictList=None, li=None):
    """Convert string numbers to digits inside a list of dicts and a list of lists.

    Conversion happens in place on the contained dict/list objects
    (only the outer lists are shallow-copied).

    Args:
        dictList (list): list of dicts with string values.
        li (list): list of lists of strings.

    Returns:
        dict: {"dict": <converted dictList>, "list": <converted li>},
        or {} when neither argument is supplied.
    """
    # None defaults avoid the shared-mutable-default pitfall; semantics are
    # unchanged for all existing call patterns.
    if dictList is None:
        dictList = []
    if li is None:
        li = []
    chk = not len(dictList) and not len(li)
    if chk:
        print("toDigits doesn't got needed args!")
        return {}
    d = []
    l = []
    # Convert digit strings inside the list of dicts.
    if dictList and (type(dictList) is list) and len(dictList):
        d = [x for x in dictList]
        for x in d:
            for i in x:
                if x[i].isdigit():
                    x[i] = int(x[i])
    # Convert digit strings inside the list of lists.
    if li and (type(li) is list) and len(li):
        l = [k for k in li]
        for x in l:
            for j in range(len(x)):
                if x[j].isdigit():
                    x[j] = int(x[j])
    # return the new data modified
    return {"dict": d, "list": l}  # | cec7a3fe1e22f25a960ef4c0d32ee9f63090f010 | 49,223 |
def flatten_requirement(requirement):
    """
    Return only the package name from a requirement.

    Arguments:
        requirement (pkg_resources.Requirement): A requirement object.

    Returns:
        string: Package name (the requirement's ``key``, which
        pkg_resources normalizes to lower case).
    """
    return requirement.key | 9a2e493a97763417aef41d18405c609e65e28875 | 49,224 |
from typing import Callable
from datetime import datetime
def parse_time(string: str, parser: Callable = datetime.strptime)->datetime:
    """
    Parse an ISO-8601 "Zulu" timestamp, with or without fractional seconds.

    :param string: date and time as a string
    :param parser: function to convert string to datetime
    :return: datetime.datetime
    :raises ValueError: if the string matches neither supported format
    """
    # Try the fractional-seconds form first, then whole seconds.
    date_formats = ["%Y-%m-%dT%H:%M:%S.%fZ",
                    '%Y-%m-%dT%H:%M:%SZ']
    for df in date_formats:
        try:
            return parser(string, df)
        except ValueError:
            pass
    raise ValueError('Invalid time format in string %s' % string) | c8a937842cf8878a3442a53ae8fd5dc780404daf | 49,225 |
import math
def betalambda(mass, freq, w):
    """Return value of beta*lambda of beam.

    Arguments:
    mass(double): mc^2 of beam particle in MeV
    freq(double): frequency in MHz
    w(double): Kinetic energy in MeV

    Returns:
        double: relativistic beta times the RF wavelength, in metres.
    """
    c = 2.99792458e8 # speed of light, m/s
    wavelength = c / (freq * 1.0e6)  # MHz -> Hz
    gamma = 1.0 + w / mass  # total energy / rest energy
    beta = math.sqrt(1.0 - 1/(gamma * gamma))
    return beta * wavelength | 75ac561912822bb28c6fbbefbf7a6ea930ad291f | 49,227 |
import random
import collections
def xor_encode(data, seed_key=None, encoding='utf-8'):
    """
    Encode data using the XOR algorithm. This is not suitable for encryption
    purposes and should only be used for light obfuscation. The key is
    prepended to the data as the first byte which is required to be decoded
    by the :py:func:`.xor_decode` function.

    :param bytes data: The data to encode.
    :param int seed_key: The optional value to use as the seed for the XOR key.
    :param str encoding: Encoding used to convert *data* when it is a str.
    :return: The encoded data.
    :rtype: bytes
    """
    if isinstance(data, str):
        data = data.encode(encoding)
    if seed_key is None:
        # Random key => output differs between calls for identical input.
        seed_key = random.randint(0, 255)
    else:
        seed_key &= 0xff  # constrain the key to a single byte
    encoded_data = collections.deque([seed_key])
    last_key = seed_key
    # Chained XOR: each byte is XORed with the previous *encoded* byte.
    for byte in data:
        e_byte = (byte ^ last_key)
        last_key = e_byte
        encoded_data.append(e_byte)
    return bytes(encoded_data) | 7ef08439f504a6c928634ad8cceea97096d0fe80 | 49,228 |
import json
def json_dump(d: dict) -> str:
    """Dumps a dictionary in UTF-8 format (no ASCII-escaping) using json.

    Effectively the `json` counterpart to `orjson_dump`.
    """
    return json.dumps(d, ensure_ascii=False) | 8b623a5284aa770d762eaebf7a6f90539b2e67bd | 49,229 |
import os
def get_config_dir(app_name: str) -> str:
    """Return application configuration directory.

    Idea borrowed from click.utils:get_app_dir.

    Args:
        app_name: Human-readable name; spaces become hyphens, lower-cased.

    Returns:
        Path like ``$XDG_CONFIG_HOME/.<app-folder>/config.json``
        (falls back to ``~/.config`` when XDG_CONFIG_HOME is unset).
    """
    config_dir = os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
    app_folder = "-".join(app_name.lower().split(" "))
    return os.path.join(config_dir, f".{app_folder}", "config.json") | aab60b8906bb624bea11072f4d45dde56a62df77 | 49,230 |
import subprocess
def _parse_text_section_bounds(chrome_binary):
    """Find out the boundary of text section from readelf.

    Args:
        chrome_binary: Path to unstripped Chrome binary.

    Returns:
        (offset, size): offset and size of text section, parsed as hex ints.

    Examples:
        Section output looks like:
        [Nr] Name  Type     Address  Off     Size    ES Flg Lk Inf Al
        [16] .text PROGBITS 01070000 1070000 9f82cf6 00 AX  0  0  64
        With offset being the fourth field, and size being the sixth field.
    """
    readelf = subprocess.check_output(
        ['llvm-readelf', '--sections', '--wide', chrome_binary]).splitlines()
    # check_output returns bytes, hence the b'...' membership test.
    text = [x.strip() for x in readelf if b'.text' in x]
    if len(text) != 1:
        raise ValueError('Expected exactly one .text section; got: %s' % text)
    text_line = text[0]
    fields = text_line.split()
    # NOTE(review): fields[3] lines up with the Address column of the header
    # above (fields[4] would be Off); for Chrome binaries the two appear to
    # coincide -- confirm against actual llvm-readelf output.
    offset, size = fields[3], fields[5]
    return int(offset, 16), int(size, 16) | fc08441c5c393b14e05a456b033d615c0939e7d5 | 49,232 |
import subprocess
import re
def get_remote(target_repo, cfg_root):
    """
    Get the organization/repo_name from a repo.

    Args:
        target_repo: Path to the git working directory to query.
        cfg_root: Mapping that may provide a "remote" key (default "origin").

    Returns:
        str: "org/name" extracted from the remote URL (SSH or HTTPS form).

    Raises:
        ValueError: if the remote URL matches neither github.com form.
    """
    remote = cfg_root.get("remote", "origin")
    cmd = ["git", "config", f"remote.{remote}.url"]
    res = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=target_repo)
    url = res.stdout.rstrip().decode()
    # SSH form: git@github.com:org/name.git
    m = re.match(r"^git@github.com:(?P<repo>\S+)\.git$", url)
    if m is not None:
        return m.group("repo")
    # HTTPS form: https://github.com/org/name.git
    m = re.match(r"^https://github.com/(?P<repo>\S+)\.git$", url)
    if m is not None:
        return m.group("repo")
    raise ValueError(url) | 38be2b91476c09f7be9f8caf9680611542cac5a0 | 49,234 |
def edits0(word):
    """
    Return all strings that are zero edits away (i.e. the word itself),
    as a single-element set.
    """
    return{word} | 06836ba7da2a02eb7d9e9d1a2d12d74fc10f95e8 | 49,235 |
def decker_sum(a, b):
    """Computationally equivalent to knuth_sum, but formally a bit cheaper.

    Only works for floats though (and not arrays), and the branch make it in
    fact less favorable in terms of actual speed.

    Returns:
        (x, y): x is the rounded float sum a + b; y is the rounding error
        (Dekker's two-sum error term -- presumably exact when inputs are
        ordinary floats; confirm against knuth_sum).
    """
    x = a + b
    # Recover the part of the smaller operand lost when forming x.
    y = b - (x - a) if abs(a) > abs(b) else a - (x - b)
    return x, y | 576f3f5489431a19785823d6c1d79cee5f30736d | 49,236 |
import torch
def calc_metrics_binary_val(preds, labels, criterion):
    """
    return BCE, ACC (List)

    Args (assumed shapes -- TODO confirm against caller):
        preds: float tensor (b, N, 2), per-class scores.
        labels: tensor squeezable to (b, N); values > 0 mean class 1.
        criterion: loss callable applied as criterion(preds, one_hot).

    Returns:
        (bce, acc): the loss value, and a length-N tensor with the
        per-position fraction of correct argmax predictions over the batch.
    """
    # method 1: cross entropy:
    labels = labels.squeeze()
    labels = labels > 0  # binarize: positive values -> class 1
    labels = labels.long().unsqueeze(dim=2)  # (b, N, 1) class indices
    b, N, _ = labels.shape
    # One-hot encode the class indices into shape (b, N, 2).
    one_hot = torch.zeros(b, N, 2).scatter_(2, labels, torch.ones(b, N, 1))
    one_hot = one_hot.float()
    bce = criterion(preds, one_hot)
    # calculate acc: compare predicted vs. true argmax class per position.
    preds_b = preds.argmax(dim=2)
    one_hot_b = one_hot.argmax(dim=2)
    total = torch.sum(one_hot_b == preds_b, dim=0).float()  # correct per position
    acc = total / b
    return bce, acc | d34abca722fd1a37baa668b223c7db186a944ff3 | 49,238 |
from bs4 import BeautifulSoup
def extract_html_text(text):
    """Extract plain-text passages from an HTML document.

    Args:
        text: String, the HTML source.

    Returns:
        passages: List[str], the non-empty text paragraphs; each has
        NBSP/newline/tab characters removed and a trailing newline appended.
        (Original docstring claimed List<tuple>, but the code builds strings.)
    """
    soup = BeautifulSoup(text, "html.parser")
    text = soup.get_text()
    # Split on newlines, scrub whitespace artifacts, drop empty paragraphs.
    passages = [t.replace("\xa0", "").replace("\n", "").replace("\t", "")+"\n" for t in text.split("\n")
                if t.replace("\xa0", "").replace("\n", "").replace("\t", "")]
    return passages | 0b2ef8770289ca15bb20492dfa46e5f3169947c5 | 49,239 |
def microsecs_to_sec(microsec):
    """
    Given microseconds, returns seconds.

    :param: microsec, number of microseconds
    :type: integer
    :returns: seconds (microsec / 1,000,000) :type: float
    :raises ValueError: if microsec is not exactly of type int
    """
    if type(microsec) is not int:
        raise ValueError("microsec must be integer")
    return float(microsec) / 1000000 | d0b27d81828c1569d56305b9906886e299485233 | 49,240 |
def media_attachment(url, content=None, options=None):
    """iOS media_attachment builder.

    :keyword url: String. Specifies the URL to be downloaded by the UA
        Media Attachment extension.
    :keyword content: Optional dictionary. Describes portions of the
        notification that should be modified if the media attachment
        succeeds. See :func:`content`.
    :keyword options: Optional dictionary. Describes how to display the
        resource given by the URL. See :func:`options`.

    :return: Dict containing only the keys whose values are not None.
    """
    payload = {"url": url, "content": content, "options": options}
    # Drop None-valued keys so they are omitted from the payload entirely.
    return {key: val for key, val in iter(payload.items()) if val is not None} | 609df92f17fcabce653c41542eaccb2e5efef3b1 | 49,241 |
def update_structure(xl, structure):
    """
    Record each sheet's header row (first row) in ``structure``.

    :param xl: an xlrd workbook object (provides sheet_names/sheet_by_name)
    :param structure: dict mapping sheet name -> list of column names;
        mutated in place and also returned.
    """
    for sheet_name in xl.sheet_names():
        sheet = xl.sheet_by_name(sheet_name)
        column_names = sheet.row_values(0)  # first row holds the headers
        # mongodb doesn't like '.' in field names
        column_names = [c.replace('.', '') for c in column_names]
        structure[sheet_name] = column_names
    return structure | 35fc0e5da62cb01cce5b57e253bb9f71840caad5 | 49,242 |
def getTokenToTokenPrice(orfeed_i, tokenSrc, tokenDst, dex, amount_src_token=1):
    """Get the rate of swap tokenSrc to tokenDst in a given Dex

    Args:
        orfeed_i (OrFeed): The instance of OrFeed class
        tokenSrc (Symbol): Symbol of src token
        tokenDst (Symbol): Symbol of dst token
        dex (str): The Dex where the rate is going to be requested
        amount_src_token (int, optional): Amount of src token. Defaults to 1 src token unit.

    Returns:
        Dict: keys tokenSrc, tokenDst, tokenPair ("SRC-DST"), provider, price
    """
    # Delegate the actual rate lookup to the OrFeed instance.
    res = orfeed_i.getExchangeRate(tokenSrc, tokenDst, dex, amount_src_token)
    return {
        "tokenSrc": tokenSrc,
        "tokenDst": tokenDst,
        "tokenPair": tokenSrc + "-" + tokenDst,
        "provider": dex,
        "price": res,
    } | 1a0299f03a1e002f5c2a3a613a54443cd6ac4f07 | 49,243 |
def poly_negate_mod(op, coeff_mod):
    """Negate polynomial and modulo every coefficient with coeff_mod.

    Args:
        op (list): Polynomial coefficients, each expected in [0, coeff_mod).
        coeff_mod (int): Coefficient modulus (must be non-zero).
        (Original Args described op1/op2 multiplicands -- copy-paste error.)

    Returns:
        A list with polynomial coefficients of -op mod coeff_mod.

    Raises:
        ValueError: if coeff_mod is 0 (checked per coefficient).
        OverflowError: if a coefficient is >= coeff_mod.
    """
    coeff_count = len(op)
    result = [0] * coeff_count
    for i in range(coeff_count):
        if coeff_mod == 0:
            raise ValueError("Modulus cannot be 0")
        if op[i] >= coeff_mod:
            raise OverflowError("operand cannot be greater than modulus")
        non_zero = op[i] != 0
        # Branchless negate: coeff_mod - op[i] for non-zero coefficients,
        # but 0 (not coeff_mod) when op[i] == 0, via an all-ones/zero mask.
        result[i] = (coeff_mod - op[i]) & (-int(non_zero))
    return result | 47c4e634f84f15e004e70b4d6ec408b8d864512b | 49,244 |
def _replace_comments(s):
    """Replaces matlab comment markers with python ones in string s.

    Note: every '%' is replaced, including any inside string literals.
    """
    s = s.replace('%', '#')
    return s | 0420a4cc2f54fea3e2b3e35e185758cd826380c7 | 49,245 |
def get_indels_regions(read):
    """Get indel region start and end positions of a read.

    Args:
        read: aligned read (pysam AlignedSegment-like -- TODO confirm);
            must provide get_blocks(), reference_start and reference_end.

    Returns:
        List[tuple]: (start, end) reference coordinates of the gaps between
        aligned blocks, including the spans before the first block and after
        the last block.
    """
    indels_blocks = []
    aligned_regions = read.get_blocks()
    start = read.reference_start; end = read.reference_end
    # Gap before the first aligned block.
    indels_blocks.append((start, aligned_regions[0][0]))
    # Gaps between consecutive aligned blocks.
    for i in range(len(aligned_regions)-1):
        indels_blocks.append((aligned_regions[i][1], aligned_regions[i+1][0]))
    # Gap after the last aligned block.
    indels_blocks.append((aligned_regions[-1][1], end))
    return indels_blocks | 342c48d936d7c4264d087904eba0023dd4f0610d | 49,246 |
def handle400error(ns, message):
    """
    Function to handle a 400 (bad arguments code) error.

    :param ns: namespace-like object providing ``abort`` (e.g. flask-restx).
    :param message: status message to return to the client.
    """
    return ns.abort(400, status=message, statusCode="400") | f7a0ae35337b38dfb49e6f197c8e67ab4508ead4 | 49,247 |
def mapGridPosition(densMap, atom):
    """
    Returns the index of the nearest pixel to an atom, and atom mass (4 values in list form).

    Arguments:
       *densMap*
           Map instance the atom is to be placed on.
       *atom*
           Atom instance.

    Returns:
        (x_pos, y_pos, z_pos, mass) when the atom lies inside the map;
        otherwise the int 0 (note the inconsistent return type).
    """
    origin = densMap.origin
    apix = densMap.apix  # map spacing per pixel -- units per densMap convention
    box_size = densMap.box_size()
    # Nearest voxel index along each axis, relative to the map origin.
    x_pos = int(round((atom.x-origin[0])/apix,0))
    y_pos = int(round((atom.y-origin[1])/apix,0))
    z_pos = int(round((atom.z-origin[2])/apix,0))
    if((densMap.x_size() > x_pos >= 0) and (densMap.y_size() > y_pos >= 0) and (densMap.z_size() > z_pos >= 0)):
        return (x_pos, y_pos, z_pos, atom.mass)
    else:
        # Atom falls outside the map bounds.
        return 0 | 58b3125d17d0274db22489d75003576828545702 | 49,248 |
def p_norm(vector, p=2):
    """Solution to exercise C-1.28.

    Give an implementation of a function named norm such that norm(v, p)
    returns the p-norm value of v and norm(v) returns the Euclidean norm of v.
    You may assume that v is a list of numbers.

    The p-norm is (sum(|x|**p))**(1/p); taking abs() is required so that
    negative components contribute positively (the original omitted abs(),
    giving wrong results for odd p).
    """
    return sum(abs(x) ** p for x in vector) ** (1 / p)  # | 65c187e1d177100f69f8728b982f95f99966b5eb | 49,249 |
def get_local_keys(view, prefix):
    """ Returns a dictionary of keyname -> target_list mapping for all names
    that start with ``prefix`` on engines in ``view``.
    """
    def get_keys_engine(prefix):
        # Executed remotely on each engine: list its matching global names.
        return [key for key in globals() if key.startswith(prefix)]
    # target id -> list of matching key names on that engine
    keys_from_target = view.apply_async(get_keys_engine, prefix).get_dict()
    # Invert the mapping: key name -> list of targets that hold it.
    targets_from_key = {}
    for target, keys in keys_from_target.items():
        for key in keys:
            targets_from_key.setdefault(key, []).append(target)
    return targets_from_key | f760c92d275b9ecc5ae19cc827e288a127b2daba | 49,253 |
import torch
def ACC(output, target):
    """
    calc accuracy.

    :param output: predicted labels, shape [b]
    :param target: ground-truth labels, shape [b]
    :return: float in [0, 1], fraction of exact element-wise matches
    """
    # `+ 0.0` forces float division by the batch size.
    return float(torch.sum(torch.eq(output, target))) / (output.size()[0] + 0.0) | 9da0f8b81a5d504b03a904d45f80fecdbbd3e532 | 49,254 |
def euler_problem_68():
    """
    Return the precomputed answer to Project Euler problem 68.

    I looked this up and found a clever pen-and-paper solution.
    https://www.mathblog.dk/project-euler-68-what-is-the-maximum-16-digit-string-for-a-magic-5-gon-ring/
    """
    return 6531031914842725 | 425e32b95fcbea4dc619b77f8f5f61094f635dde | 49,255 |
def check_results(table):
    """ Check if the table is in the right order.

    :arg table, the GtkTable to browse which is the board of the game.
    :returns: True when the 4x4 board reads '1'..'15' left-to-right,
        top-to-bottom with the blank tile ('') last.
    """
    rows = {}
    for child in table.get_children():
        # Grid coordinates of this tile within the GtkTable.
        (col, row) = table.child_get(child, 'left-attach', 'top-attach')
        if row in rows.keys():
            rows[row][col] = child.get_label()
        else:
            rows[row] = ["", "", "", ""]
            rows[row][col] = child.get_label()
    return rows[0] == ['1', '2', '3', '4'] \
        and rows[1] == ['5', '6', '7', '8'] \
        and rows[2] == ['9', '10', '11', '12'] \
        and rows[3] == ['13', '14', '15', ''] | c3812a47f5ddc50e5f47f9482865c2aec5e29fc8 | 49,256 |
def group(merge_func, tokens):
    """
    Group together those of the tokens for which the merge function returns
    true. The merge function should accept two arguments/tokens and should
    return a boolean indicating whether the strings should be merged or not.
    Helper for tokenise(string, ..).

    Note: the first argument passed to merge_func is the *accumulated*
    (already merged) token, not the raw previous token.
    """
    output = []
    if tokens:
        output.append(tokens[0])
    for token in tokens[1:]:
        prev_token = output[-1]
        if merge_func(prev_token, token):
            output[-1] += token  # extend the current merged run
        else:
            output.append(token)  # start a new run
    return output | 4912e4a20b2313b34617cd9dd23bd33fe5b9e4bc | 49,257 |
async def prometheus_charm(ops_test):
    """Prometheus charm used for integration testing.

    Builds the charm from the current directory via ops_test and returns
    whatever build_charm yields (presumably the built charm path -- confirm
    against pytest-operator).
    """
    charm = await ops_test.build_charm(".")
    return charm | 97f2fda5b3534d37aa9b68b18cc550b05157df1d | 49,258 |
def assign_scores(game):
    """Cleanup games without linescores to include team score
    based on team totals.

    Mutates game['team'] entries in place: any team lacking 'score' but
    having 'totals' gets score = totals['source__B_R'].
    """
    for team in game['team'].values():
        if 'score' not in team and 'totals' in team:
            team['score'] = team['totals']['source__B_R']
    return game | 87eb0d1d4453ab5734a7ce7c10d8d8828fc34071 | 49,259 |
import uuid
def session_unified_id(session_id, session_timestamp):
    """
    Returns a uuid based on the session timestamp
    and random session id

    :param session_id: uuid.UUID used as the uuid5 namespace.
    :param session_timestamp: value stringified into the uuid5 name.
    :return: deterministic uuid.UUID for this (id, timestamp) pair.
    """
    return uuid.uuid5(session_id, str(session_timestamp)) | 9985137c9a9860ccbd52acde2ccce46700a55d5d | 49,260 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.