content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
def extract_urls(requests):
    """Extract page-image URLs from the driver's captured requests.

    Returns a dict mapping page number -> URL with the width query
    rewritten to ``&w=69420``.
    """
    pattern = r"url='(https:\/\/[^']+content[^']+pg=[A-Z]+([0-9]+)[^']+)(&w=[0-9]+)'"
    pages = {}
    for full_url, page_number, _width in re.findall(pattern, requests):
        pages[int(page_number)] = full_url + "&w=69420"
    return pages
import numpy as np
def dr(prox_f, prox_g, dim, gamma=1, rho=1, Z0=None, tol=None):
    """Douglas-Rachford proximal splitting with two proximal mappings.

    Runs the Douglas-Rachford splitting algorithm for the objective
    f + g, where:
        1. prox_f(Z, gamma) and prox_g(Z, gamma) are functions that
           compute the proximal mappings of gamma*f and gamma*g at Z.
        2. dim is the dimension (shape) of the iterates.
    If f and g are convex, X = argmin(f(X) + g(X)).

    Returns:
        tuple: (X, Z_fix, num_iter, D) where
            X      -- solution,
            Z_fix  -- fixed point of the Douglas-Rachford iteration,
            num_iter -- total number of iterations,
            D      -- Z_fix - X scaled by 1/gamma (dual solution when
                      f and g are convex).

    Options:
        rho   -- step length of the fixed-point update
                 Z_{k+1} = Z_k + rho*(Y_k - X_k), 0 < rho < 2 (default 1).
        gamma -- prox scaling (default 1).
        Z0    -- initial point of the fixed-point iteration (default 0).
        tol   -- tolerance for zero entries and for stopping once
                 norm(Y_k - X_k, 'fro') < tol (default sqrt(eps)).
    """
    # Defaults for the starting point and the tolerance.
    if Z0 is None:
        Z = np.zeros(dim)
    else:
        Z = Z0
    if tol is None:
        tol = np.sqrt(np.finfo(float).eps)
    num_iter = 0  # renamed from `iter` (shadowed the builtin)
    X = np.zeros(dim)
    Y = np.zeros(dim)
    err_XY = tol + 1  # larger than tol so the loop starts
    err_bound = 1e-1  # step-size for progress display
    while err_XY >= tol:
        num_iter += 1
        # Display error between X_k and Y_k each time it drops a decade.
        if err_XY <= err_bound:
            print('Error between X_' + str(num_iter) + ' and ' + 'Y_' + str(num_iter) + ' <= ' + str(err_bound))
            err_bound /= 10
        # Douglas-Rachford steps; keep only the primary output when the
        # prox operators return extra values in a tuple.
        X = prox_f(Z=Z, gamma=gamma)
        if isinstance(X, tuple):
            X = X[0]
        Y = prox_g(Z=2 * X - Z, gamma=gamma)
        if isinstance(Y, tuple):
            Y = Y[0]
        Z = Z + rho * (Y - X)
        # Iteration error ||X_k - Y_k||_F.
        err_XY = np.linalg.norm(X - Y, ord='fro')
    # Final solution.
    X = Y
    # Dual variable.
    D = (Z - X) / gamma
    # Bug fix: original zeroed entries with |Z| < 0, which is never true;
    # the intent (per the docstring) is to zero entries below `tol`.
    Z[np.absolute(Z) < tol] = 0
    # Fixed point (the while loop always runs at least once, so num_iter > 0).
    if num_iter > 0:
        Z_fix = Z
    else:
        Z_fix = X
    X[np.absolute(X) < tol] = 0
    return X, Z_fix, num_iter, D
def convertParty(partyCode):
    """
    Parameters :
        partyCode : Party code number for member
    Returns :
        Str name for party affiliation ("Dem", "Rep", or "Ind")
    """
    # 100/200 are the standard VoteView codes; everything else is independent.
    party_names = {100: "Dem", 200: "Rep"}
    return party_names.get(partyCode, "Ind")
import json
def is_json(myjson):
    """Return a truthy type code if the source is valid JSON.

    Args:
        myjson: a str or bytes candidate JSON document.

    Returns:
        int: 0 if not valid JSON, 1 if directly parseable,
             2 if parseable after UTF-8 decoding (bytes input).
    """
    jsontype = 0
    try:
        json.loads(myjson)
        jsontype = 1  # parseable as-is
    except (ValueError, TypeError):
        pass
    try:
        json.loads(myjson.decode("utf-8"))
        jsontype = 2  # parseable after decoding (bytes input)
    except (ValueError, TypeError, AttributeError, UnicodeDecodeError):
        pass
    # Bug fix: the original returned the `json` module object itself,
    # which is always truthy — every input looked "valid".
    return jsontype
from uwsgidecorators import timer
def u_timer():
    """
    Return uwsgi timer
    """
    # Returns the uwsgidecorators.timer decorator object itself (not an
    # invocation of it) — presumably an indirection so callers can obtain
    # the decorator lazily; confirm against call sites.
    return timer
def get_event_description(e):
    """Return the description field for the event (None when absent)."""
    description = e.get('description')
    return description
def count_trees_encountered(input: list[str], x_step: int, y_step: int) -> int:
    """Count trees ("#") hit while sledding down the slope from [0,0]
    taking steps of (right x_step, down y_step); columns wrap around.
    """
    width = len(input[0])
    n_steps = int((len(input) - 1) / y_step)
    col = 0
    row = 0
    trees = 0
    for _ in range(n_steps):
        col = (col + x_step) % width
        row += y_step
        if input[row][col] == "#":
            trees += 1
    return trees
def get_references(name, references):
    """Generate a reST ``seealso`` section with references for the given
    operator or module; empty string when there are none."""
    key = name[12:]  # strip the "nvidia.dali." prefix
    if key not in references:
        return ""
    lines = [".. seealso::\n"]
    lines.extend(f"  * `{desc} <../{url}>`_\n" for desc, url in references[key])
    return "".join(lines)
import re
def _get_cc_list(cc_value):
"""Parse cc list.
Derived from from trac.ticket.model._fixup_cc_list (Trac-1.0).
"""
cclist = []
for cc in re.split(r'[;,\s]+', cc_value):
if cc and cc not in cclist:
cclist.append(cc)
return cclist | 04849fd0181fca968355a2368180ad066fc19a6a | 39,480 |
def make_options_bank_drop(values):
    """
    Helper function to generate the data format the dropdown dash component wants
    """
    return [{"label": value, "value": value} for value in values]
from typing import Iterable
def round_values(data, places=3):
    """
    Round values in nested structures.

    Args:
        data: Value / structure with values (dict, iterable, or number).
        places (int): Decimal places.

    Returns:
        Rounded value / structure with rounded values. Non-dict iterables
        are returned as lists; strings/bytes are returned unchanged.
    """
    if isinstance(data, dict):
        return {k: round_values(v, places) for k, v in data.items()}
    if isinstance(data, (str, bytes)):
        # Bug fix: strings are Iterable, and iterating a 1-char string
        # yields itself — the original recursed infinitely on any str.
        return data
    if isinstance(data, Iterable):
        return [round_values(x, places) for x in data]
    return round(data, places)
from typing import List
import json
def list_to_str(ls: List) -> str:
    """Serialize a list to its JSON string representation."""
    serialized = json.dumps(ls)
    return serialized
def dr_evil(amount):
    """
    >>> dr_evil(10)
    '10 dollars'
    >>> dr_evil(1000000)
    '1000000 dollars (pinky)'
    >>> dr_evil(2000000)
    '2000000 dollars (pinky)'
    """
    suffix = " (pinky)" if amount >= 1000000 else ""
    return f"{amount} dollars{suffix}"
from typing import Iterable
from typing import List
def sort_unique_lex(iterable: Iterable) -> List:
    """Return list in lexically ascending order and without
    duplicate entries."""
    return sorted(set(iterable))
def sort_by_index(names):
    """
    Sort image file names by the numeric index embedded after the
    "left"/"right" marker (e.g. "left12.png" -> 12).
    :param names: List with names
    :return: List of names sorted by index
    """
    indexed = {}
    for name in names:
        marker = "right" if "right" in name else "left"
        index = int(name.split(marker)[1].split(".png")[0])
        indexed[index] = name
    return [indexed[key] for key in sorted(indexed)]
def sequential_weighted_avg(x, weights):
    """Return a sequence by weighted averaging of x (a sequence of vectors).

    Args:
        x: batch * len2 * hdim
        weights: batch * len1 * len2, sum(dim = 2) = 1
    Output:
        x_avg: batch * len1 * hdim
    """
    x_avg = weights.bmm(x)
    return x_avg
def ASGDConfig(argument_parser):
    """
    Set CLI arguments

    :param argument_parser: argument parser
    :type argument_parser: ```ArgumentParser```
    :returns: argument_parser
    :rtype: ```ArgumentParser```
    """
    # NOTE(review): the options and defaults appear to mirror
    # torch.optim.ASGD's signature — keep them in sync with the
    # optimizer this config is meant to drive; confirm with callers.
    argument_parser.description = """Implements Averaged Stochastic Gradient Descent.
    It has been proposed in `Acceleration of stochastic approximation by
    averaging`_.
    .. _Acceleration of stochastic approximation by averaging:
        https://dl.acm.org/citation.cfm?id=131098"""
    argument_parser.add_argument(
        "--params",
        help="iterable of parameters to optimize or dicts defining parameter groups",
        required=True,
    )
    argument_parser.add_argument("--lr", type=float, help="learning rate", default=0.01)
    argument_parser.add_argument(
        "--lambd", type=float, help="decay term", default=0.0001
    )
    argument_parser.add_argument(
        "--alpha", type=float, help="power for eta update", default=0.75
    )
    argument_parser.add_argument(
        "--t0", type=float, help="point at which to start averaging", default=1000000.0
    )
    argument_parser.add_argument(
        "--weight_decay", type=int, help="weight decay (L2 penalty)", default=0
    )
    return argument_parser
def expand_initial_state(bqm, initial_state):
    """Determine the values for the initial state for a binary quadratic model
    generated from a higher order polynomial.

    Args:
        bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains
            its reduction info.
        initial_state (dict):
            An initial state for the higher order polynomial that generated the
            binary quadratic model.

    Returns:
        dict: A fully specified initial state.
    """
    # Developer note: this function relies heavily on assumptions about the
    # existance and structure of bqm.info['reduction']. We should consider
    # changing the way that the reduction information is passed.
    if not bqm.info['reduction']:
        return initial_state  # saves making a copy
    initial_state = dict(initial_state)  # so we can edit it in-place
    for (u, v), changes in bqm.info['reduction'].items():
        uv = changes['product']
        # The product variable must equal u*v for the state to be consistent.
        initial_state[uv] = initial_state[u] * initial_state[v]
        if 'auxiliary' in changes:
            # need to figure out the minimization from the initial_state
            aux = changes['auxiliary']
            # Energy contribution of the auxiliary variable given its
            # neighbours' assigned values.
            en = (initial_state[u] * bqm.adj[aux].get(u, 0) +
                  initial_state[v] * bqm.adj[aux].get(v, 0) +
                  initial_state[uv] * bqm.adj[aux].get(uv, 0))
            # Choose the vartype value (e.g. -1/+1 or 0/1) that minimises en*val.
            initial_state[aux] = min(bqm.vartype.value, key=lambda val: en*val)
    return initial_state
import math
def hosts_from_prefixlength(prefixlength):
    """Calculate the number of hosts supported by a given prefix length.

    Args:
        prefixlength (int): CIDR mask to find the number of supported hosts.
    Returns:
        hosts (int): Hosts in a subnet of the given prefix length
            (minus network and broadcast addresses).
    """
    host_bits = 32 - prefixlength
    return int(math.pow(2, host_bits) - 2)
def init():
    """Return True if the plugin has loaded successfully."""
    # No initialization work is required, so loading always succeeds.
    return True
import os
import re
def get_prefix_and_epoch(params_path):
    """Get prefix and epoch from a path to an MXNet parameter file.

    Args:
        params_path: Path to parameter file (e.g. "model-0010.params").
    Return:
        (prefix, epoch) tuple, e.g. ("model", 10).
    """
    file_name = os.path.basename(params_path)
    prefix, _, remainder = file_name.partition('-')
    epoch = int(remainder.split('.')[0])
    return (prefix, epoch)
def find_height_profile_dimension(dat_file):
    """Find the dimension of the provided height profile .dat file.
    1D files have 2 columns, 2D - 8 columns.

    Args:
        dat_file (str): full path to height profile .dat file.
    Returns:
        dimension (int): found dimension (1 or 2).
    """
    with open(dat_file, 'r') as f:
        header = f.readline().strip().split()
    # Two columns means a 1-D profile; anything else is treated as 2-D.
    return 1 if len(header) == 2 else 2
def _get_last_doc_ref(path, doc):
"""Mutate a document top-down using a path.
:param pathlib.Path path: The path.
:param dict doc: The document.
:rtype: dict
"""
for part in path.parts:
if part not in doc:
doc[part] = {}
doc = doc[part]
return doc | 316c2579897e1ce5834a2bf34fc7c5929c7868c9 | 39,501 |
from typing import Dict
from typing import List
def generate_reverse_lookup(mapping_digest: Dict[str, List[str]]) -> Dict[str, str]:
    """
    :param mapping_digest: created by ext_to_int_sample_map
    :return: reverse mapping of sample -> participant
    """
    lookup: Dict[str, str] = {}
    for participant, samples in mapping_digest.items():
        for sample in samples:
            lookup[sample] = participant
    return lookup
def is_strictly_legal_content(content):
    """
    Filter out things that would violate strict mode. Illegal content
    includes:
    - A content section that starts or ends with a newline
    - A content section that is blank
    - A content section that contains blank lines
    """
    if content.strip("\r\n") != content:
        return False
    if not content.strip():
        return False
    return "\n\n" not in content
def set_var_conditional(context, condition=None, condition_var=None, compare=None, else_value=None, **kwds):
    """
    Sets the given variables to provided values. Kind of like the 'with' block, only it isn't a block tag
    :param context: template context (automatically provided by django)
    :param kwds: named parameters with their respective values
    :param condition_var: pair with compare to obtain True or False whether to use original assignment or else_value
    :param compare: pair with condition_var to obtain True or False whether to use original assignment or else_value
    :param condition: alternative to condition_var & compare: original assignment if truthy or else_value if falsy
    :param else_value: value to be assigned to the variable(s) when condition is falsy
    :return: this tag doesn't render
    """
    # condition_var/compare take precedence over an explicit condition.
    if condition_var is not None:
        condition = (condition_var == compare)
    for key, value in kwds.items():
        if condition:
            context[key] = value
        else:
            context[key] = else_value
    return ''
import socket
def GetArpHostAliases(hst_addr):
    """Reverse-resolve an address to (address, hostname, aliases).

    This must be thread-safe.

    Args:
        hst_addr: IP address (or host name) to reverse-resolve.
    Returns:
        tuple: (hst_addr, host_name, aliases). On resolution failure the
        address itself is used as the host name and aliases is empty.
    """
    try:
        host_name, aliases, _ = socket.gethostbyaddr(hst_addr)
    except (socket.herror, socket.gaierror):
        # Bug fix: gethostbyaddr also raises gaierror for malformed or
        # unresolvable addresses; the original only caught herror and
        # crashed on such inputs.
        host_name = hst_addr
        aliases = []
    return hst_addr, host_name, aliases
import re
def gruppercase(value):
    """Correctly uppercases all Gr characters in a string"""
    # Parallel lists: each *lowercase* entry maps to the entry at the same
    # position in the corresponding *_to_uppercase list. Accented letters and
    # letters with diaeresis ("solvents") map to the plain unaccented capital,
    # which str.upper() alone would not produce (it keeps the accent).
    grletters = [u'α', u'β', u'γ', u'δ', u'ε', u'ζ', u'η', u'θ', u'ι', u'κ', u'λ', u'μ', u'ν', u'ξ', u'ο', u'π', u'ρ', u'σ', u'τ', u'υ', u'φ', u'χ', u'ψ', u'ω']
    grletters_accent = [u'ά', u'έ', u'ή', u'ί', u'ό', u'ύ', u'ώ']
    grletters_upper_accent = [u'Ά', u'Έ', u'Ή', u'Ί', u'Ό', u'Ύ', u'Ώ']
    grletters_upper_solvents = [u'ϊ', u'ϋ']
    grletters_other = [u'ς']
    grletters_to_uppercase = [u'Α', u'Β', u'Γ', u'Δ', u'Ε', u'Ζ', u'Η', u'Θ', u'Ι', u'Κ', u'Λ', u'Μ', u'Ν', u'Ξ', u'Ο', u'Π', u'Ρ', u'Σ', u'Τ', u'Υ', u'Φ', u'Χ', u'Ψ', u'Ω']
    grletters_accent_to_uppercase = [u'Α', u'Ε', u'Η', u'Ι', u'Ο', u'Υ', u'Ω']
    grletters_upper_accent_to_uppercase = [u'Α', u'Ε', u'Η', u'Ι', u'Ο', u'Υ', u'Ω']
    grletters_upper_solvents_to_uppercase = [u'Ι', u'Υ']
    grletters_other_to_uppercase = [u'Σ']
    grlowercase = grletters + grletters_accent + grletters_upper_accent + grletters_upper_solvents + grletters_other
    gruppercase = grletters_to_uppercase + grletters_accent_to_uppercase + grletters_upper_accent_to_uppercase + grletters_upper_solvents_to_uppercase + grletters_other_to_uppercase
    grkeys = dict(zip(grlowercase, gruppercase))
    pattern = "|".join(grkeys.keys())
    # NOTE(review): value.upper() runs first, so in practice only characters
    # that survive upper() (e.g. accented capitals such as 'Ά') are rewritten
    # by the substitution; the plain-lowercase keys appear to be redundant —
    # confirm before simplifying.
    return re.sub(pattern, lambda m: grkeys[m.group()], value.upper())
def make_unique_node(graph, name):
    """ Add as much postfix-'_' to `name` as necessary to make unique name for new node in `graph`.

    Parameters
    ----------
    graph : nx.Graph
        graph, for which the node is created.
    name : str
        name of new node.

    Returns
    -------
    Resulting name. Composed from `name` and possibly several '_'-characters.
    """
    candidate = name
    while candidate in graph:
        candidate += '_'
    return candidate
import sys
def match_contigs(gtf_file, sizes_file):
    """Check if genome and annotation have (properly) matching contig names"""
    # Contig name of the first non-comment GTF line, if any.
    gtf_id = None
    with open(gtf_file, "r") as gtf:
        for line in gtf:
            if not line.startswith("#"):
                gtf_id = line.split("\t")[0]
                break
    if gtf_id is None:
        sys.stderr.write(f"No genes found in {gtf_file}. Skipping sanitizing.")
        return None  # cannot continue
    # True when any genome header's first field matches the GTF contig.
    with open(sizes_file, "r") as sizes:
        return any(line.split("\t")[0] == gtf_id for line in sizes)
def annual_standertize(data, time_dim='time', std_nan=1.0):
    """just divide by the time.month std()"""
    # NOTE(review): expects an xarray object with a datetime coordinate named
    # `time_dim` — confirm with callers.
    # Preserve metadata: the groupby division below drops attrs.
    attrs = data.attrs
    # Long-term standard deviation per calendar month.
    std_longterm = data.groupby('{}.month'.format(time_dim)).std(keep_attrs=True)
    if std_nan is not None:
        # Replace NaN stds (e.g. all-NaN months) so the division below
        # doesn't propagate NaNs.
        std_longterm = std_longterm.fillna(std_nan)
    data = data.groupby('{}.month'.format(time_dim)) / std_longterm
    data = data.reset_coords(drop=True)
    data.attrs.update(attrs)
    return data
def get_crate_from_line(line):
    """Get crate name from use statements"""
    # Only external-crate `use` lines qualify; `use crate::...` is local.
    if not line.startswith("use ") or line.startswith("use crate"):
        return None
    # Drop the "use " prefix and the trailing ';'.
    return line[4:-1]
def scale_edge_weights(edge_weights, w):
    """ Scales all the edge-weights described by a dictionary. """
    return {edge: w * weight for edge, weight in edge_weights.items()}
def compute_random_variance(dataf):
    """
    Compute random variance, according to random effect model.
    The Q value is a measure of the dispersion of the effect sizes.
    This measure follows the chi square distribution with k-1 degrees of freedom,
    where k is the total number of effect sizes.
    v0 is the variance due to intrinsic sampling error, according to random effect models
    See Lipsey & Wilson, 2001; Nakagawa & Cuthill, 2007
    :param dataf: source dataframe
    :return: return Q and v0 computations
    """
    # NOTE(review): expects columns 'wxES2', 'wxES', 'sum_wi', 'sum_wi2', 'k'
    # to be present — confirm against the upstream aggregation step.
    return (dataf
            # Q statistic (dispersion of effect sizes).
            .assign(Q = lambda d: abs(d['wxES2']-(d['wxES']**2/d['sum_wi'])))
            # For struture with only 1 effect size, the denominator of v0 will automatically
            # be equal to 0. hence v0 will not be computed.
            .assign(v0 = lambda d: (d['Q']-(d['k']-1))/(d['sum_wi']-
                                   (d['sum_wi2']/d['sum_wi'])))
           )
from typing import Any
from typing import Dict
import yaml
def load_yaml(yaml_filepath: str, safe_load: bool = True, **kwargs: Any) -> Dict:
    """
    Load a YAML file.

    Parameters
    ----------
    yaml_filepath : str
    safe_load : bool, optional (default: True)
        This triggers the usage of yaml.safe_load.
        yaml.load can call any Python function and should only be used if the
        source of the configuration file is trusted.
    **kwargs : Any
        Arbitrary keyword arguments passed to yaml.load (ignored when
        safe_load is True).

    Returns
    -------
    config : Dict
    """
    with open(yaml_filepath) as stream:
        if safe_load:
            return yaml.safe_load(stream)
        return yaml.load(stream, **kwargs)  # noqa
def remove_port_parameter(sw, port, col, keys):
    """Removes 'keys' in 'col' section from 'port' on 'sw'."""
    key_list = ' '.join(str(key) for key in keys)
    return sw("remove port %s %s %s" % (port, col, key_list), shell='vsctl')
def rectSet(rectList):
    """Returns a list of rect without doublons"""
    # Works with unhashable rects: membership uses == per element,
    # keeping the first occurrence order.
    unique = []
    for rect in rectList:
        if not any(rect == seen for seen in unique):
            unique.append(rect)
    return unique
import argparse
def cmdline_parser():
    """Build the argparse instance for the edgeR merge tool."""
    arg_parser = argparse.ArgumentParser(description="""merges edgeR comparison files
    to one csv file""")
    arg_parser.add_argument(
        "-i", "--INPUT_FOLDER", help="folder that has the edgeR results"
    )
    return arg_parser
import re
def remove_block(text, block_tag):
    """Remove the specified block from the template text.

    A block is marked by [[block_name]]block contents[[/block_name]].

    Parameters
    ----------
    text : str
        The template text to remove the block from.
    block_tag : str
        The name of the block to remove. We will search for
        [[block_tag]]contents[[/block_tag]] and remove it.
    """
    pattern = f"\\[\\[{block_tag}\\]\\](.|\\r|\\n)*\\[\\[/{block_tag}\\]\\]"
    return re.sub(pattern, "", text, flags=re.MULTILINE)
def unproxy(obj):
    """Return the Python interface from a proxy object"""
    try:
        convert = obj.__moyapy__
    except AttributeError:
        # Not a proxy; hand it back untouched.
        return obj
    return convert()
def get_line_data(line):
    """Get the data from a diff line: status is "i" for inserted,
    "d" for deleted, None for context lines."""
    if line.old_lineno == -1:
        status = "i"
    else:
        status = "d" if line.new_lineno == -1 else None
    return {"status": status, "content": line.content}
from typing import Sequence
from typing import List
from typing import Tuple
from typing import Dict
import copy
import random
def configuration_model(
    *, degrees: Sequence[int], max_trials: int = 10, max_fails: int = 1000
) -> List[Tuple[int, int]]:
    """Configuration model from degree list.

    Generates undirected simple graph: no self-loops nor multiedges.
    Returns empty list if not feasible.

    Args:
        degrees: Degree list.
        max_trials: Max number of trials with this degree sequence.
        max_fails: Max number of fails (not added pair) in a trial.

    Returns:
        adjacency: Adjacency list with tuples of pairs (n1, n2), with
            n1 < n2.

    Raises:
        ValueError: If the sum of degrees is uneven.
    """
    # check if sum of stubs is even
    if sum(degrees) % 2 != 0:
        err = f"Sum of degrees ({sum(degrees)}) must be even."
        raise ValueError(err)
    # backup stubs and edges so each trial starts fresh
    stubs_bu = []
    edges_bu: Dict[int, List[int]] = {}
    for i, el in enumerate(degrees):
        aux = [i] * el
        stubs_bu += aux[:]
        edges_bu[i] = []
    trials = 0
    while trials < max_trials:
        stubs = copy.copy(stubs_bu)
        edges = copy.deepcopy(edges_bu)
        fails = 0
        while stubs:
            # pick two distinct stubs at random
            n1 = random.choice(stubs)
            aux = stubs[:]
            aux.remove(n1)
            n2 = random.choice(aux)
            if n1 != n2 and n2 not in edges[n1]:
                edges[n1].append(n2)
                edges[n2].append(n1)
                stubs.remove(n1)
                stubs.remove(n2)
            else:
                fails += 1
                if fails > max_fails:
                    trials += 1
                    break
        else:
            # Bug fix: only build/return the adjacency when every stub was
            # paired (loop finished without break). The original returned a
            # *partial* adjacency after a failed trial instead of retrying.
            adjacency = [(i, j) for i in edges for j in edges[i] if i < j]
            return adjacency
    return []
from typing import List
from pathlib import Path
def get_filepaths(tutor_data_path: str) -> List[Path]:
    """
    Get the paths of the .npy data files in the directory, searching
    recursively.

    Parameters
    ----------
    tutor_data_path : str
        String representing the directory path.

    Returns
    -------
    List
        List of the paths of the files.
    """
    return [p for p in Path(tutor_data_path).rglob('*.npy')]
def subreadNamesToZmwCoverage(qnames):
    """From list of PacBio subread names, report number of ZMWs represented.

    QNAME of a PacBio subread has the following convention:
    {movieName}/{holeNumber}/{qStart}_{qEnd}
    A given hole (ZMW) may yield multiple subreads, so we count the
    distinct {movieName}/{holeNumber} pairs.

    Parameters
    ----------
    qnames : list
        read names of PacBio subreads

    Returns
    -------
    int
        Number of ZMWs represented by the above subreads
    """
    unique_zmws = {"/".join(name.split("/")[:2]) for name in qnames}
    return len(unique_zmws)
def song_to_slices(song, size, overlap=0):
    """Slices a song in whatever form it is into overlapping slices.

    Args:
        song: 2-D array-like supporting `song[:, a:b]` slicing.
        size (int): Size of the slice.
        overlap (int): Overlap between the slices.
    Returns:
        slices (list): Slices along the second axis.
    """
    slices = []
    step = size - overlap
    begin = 0
    while begin + size < len(song[0]):
        slices.append(song[:, begin:begin + size])
        begin += step
    return slices
def prevent_sentence_boundary_detection(doc):
    """
    Disable the sentence splitting done by Spacy
    More info: https://github.com/explosion/spaCy/issues/1032
    :param doc: a Spacy doc
    :return: the same doc with sentence detection disabled
    """
    # Marking every token as "not a sentence start" entirely disables
    # spaCy's sentence detection.
    for tok in doc:
        tok.is_sent_start = False
    return doc
def base_convert(n, b, bigendian=False):
    """Convert a decimal integer to a digit list in another base.

    Base 1 yields unary (a list of n ones). Digits are most-significant
    first unless bigendian is True.
    """
    if b < 1:
        raise Exception("Base must be greater than 1.")
    if b == 1:
        return [1] * n
    if n == 0:
        return [0]
    digits = []
    while n > 0:
        n, remainder = divmod(n, b)
        digits.append(remainder)
    return digits if bigendian else digits[::-1]
def _classdir(klass):
"""Return a set of the accessible attributes of class/type klass.
This includes all attributes of klass and all of the base classes
recursively.
"""
names = set()
ns = getattr(klass, '__dict__', None)
if ns is not None:
names.update(ns)
bases = getattr(klass, '__bases__', None)
if bases is not None:
# Note that since we are only interested in the keys, the order
# we merge classes is unimportant
for base in bases:
names.update(_classdir(base))
return names | d1011da63d9205b5a4abf20d8877b1c20a7fea44 | 39,537 |
def pruner(data):
    """
    Prune the data to remove the data that is not needed.
    :param data: The data to be pruned.
    :type data: dict
    :return: The pruned data (entries equal to "", None or {} removed,
        recursively).
    :rtype: dict
    """
    pruned = {}
    for key, value in data.items():
        if isinstance(value, dict):
            value = pruner(value)
        if value not in ("", None, {}):
            pruned[key] = value
    return pruned
import os
def get_folders_date_sorted(directory):
    """
    Get entries of a directory sorted by modification time (oldest first).

    Args:
        directory (str): A directory path.

    Returns:
        list: Entry names sorted by mtime.
    """
    def mtime(entry):
        return os.stat(os.path.join(directory, entry)).st_mtime
    return sorted(os.listdir(directory), key=mtime)
import re
def match_img_type(path):
    """
    Given path to group folder, determine image type for the group folder
    and build the matching regex pattern.
    """
    # Inspect only the first tif found — assumes the folder is homogeneous.
    sample = next(path.rglob('*.tif'))
    img_type = {
        'isZstack': re.search(r'_Z(\d+)_', sample.name) is not None,
        'isTimelapse': re.search(r'_T(\d+)_', sample.name) is not None,
        'isStitch': re.search(r'_(\d{5})_', sample.name) is not None,
    }
    # Naming scheme: 'prefix_Timelapse_XY_Stitch_Zstack_Channel.tif'
    pattern = r'(?P<prefix>\w+)'
    if img_type['isTimelapse']:
        pattern += r'_(?P<T>T{1}\d+)'
    pattern += r'_XY(?P<XY>\d+)'
    if img_type['isStitch']:
        pattern += r'_(?P<stitch>\d{5})'
    if img_type['isZstack']:
        pattern += r'_(?P<Z>Z{1}\d+)'
    pattern += r'_(?P<CH>.*).tif'
    return (img_type, pattern)
import uuid
def _get_random_string() -> str:
"""Create random string to be used."""
return uuid.uuid4().hex | 594bf0034c2812cfd1443152f22aa302c01cefb9 | 39,548 |
import torch
def MeanSquareError(y_out, y, squared=True):
    """
    args: two pytorch tensors, one for the prediction and other for the target
    fun: returns the mean squared error (or mean absolute error when
         squared=False) as a scalar tensor
    """
    diff = y_out - y
    loss_matrix = diff ** 2 if squared else torch.abs(diff)
    return torch.mean(loss_matrix)
import random
def generate_address_zipcode():
    """Create random 5-digit number."""
    zipcode = random.randint(0, 99999)
    return "{0:05d}".format(zipcode)
def valid_field_in_graph(arch):
    """ Children of ``graph`` can only be ``field`` """
    return all(child.tag == 'field' for child in arch.xpath('/graph/child::*'))
import numpy as np
def transform_to_2d(data, max_axis):
    """
    Projects 3d data cube along one axis using maximum intensity with
    preservation of the signs. Adapted from nilearn.

    Args:
        data: 3-D array to project.
        max_axis (int): axis (0, 1 or 2) to project along.
    Returns:
        2-D array of signed maximum-absolute values, rotated 90 degrees
        for display orientation.
    """
    # get the shape of the array we are projecting to
    new_shape = list(data.shape)
    del new_shape[max_axis]
    # generate a 3D indexing array that points to max abs value in the
    # current projection
    a1, a2 = np.indices(new_shape)
    inds = [a1, a2]
    inds.insert(max_axis, np.abs(data).argmax(axis=max_axis))
    # take the values where the absolute value of the projection
    # is the highest.
    # Bug fix: index with a tuple — indexing with a *list* of index arrays
    # is deprecated and removed in modern NumPy.
    maximum_intensity_data = data[tuple(inds)]
    return np.rot90(maximum_intensity_data)
def find_sr(session):
    """Return the storage repository to hold VM images"""
    # The SR must be flagged as local storage (via the 'i18n-key' entry in
    # other_config) and be plugged, through a PBD, into the host this
    # session is connected to.
    host = session.get_xenapi_host()
    sr_refs = session.get_xenapi().SR.get_all()
    for sr_ref in sr_refs:
        sr_rec = session.get_xenapi().SR.get_record(sr_ref)
        if not ('i18n-key' in sr_rec['other_config'] and
                sr_rec['other_config']['i18n-key'] == 'local-storage'):
            continue
        for pbd_ref in sr_rec['PBDs']:
            pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
            if pbd_rec['host'] == host:
                return sr_ref
    # No local-storage SR is attached to this host.
    return None
def align_corpora(old_corpus, new_corpus, remove_empty=True):
    """Takes two Corpus objects `old_corpus` and `new_corpus` and returns
    a copy of `new_corpus` with the following modifications: (1) the
    word to integer mapping agrees with that of `old_corpus` and (2)
    words in `new_corpus` which do not appear in `old_corpus` are
    removed from the corpus. Empty documents are removed.
    """
    # Drop words the old corpus does not know.
    new_words = [w for w in new_corpus.words if w not in old_corpus.words]
    out = new_corpus.apply_stoplist(new_words)
    if remove_empty:
        out.remove_empty()
    # Build a mapping from the new corpus's integer codes to the old one's.
    int_words = out.words
    words_int = old_corpus.words_int
    int_int = {}
    for i in range(len(int_words)):
        int_int[i] = words_int[int_words[i]]
    # Re-encode every token with the old corpus's integer codes.
    for i in range(len(out.corpus)):
        out.corpus[i] = int_int[out.corpus[i]]
    # Adopt the old corpus's full vocabulary and rebuild the reverse map.
    out.words = old_corpus.words.copy()
    out._set_words_int()
    return out
def _gr_remove_ ( graph , remove ) :
    """Remove points that do not satisfy the criteria
    >> graph = ...
    >>> graph.remove ( lambda s : s[0]<0.0 )
    """
    old_len = len ( graph )
    removed = []
    # Collect the indices of points flagged for removal.
    for point in graph :
        if remove ( *graph [ point ] ) :
            removed.append ( point )
    ir = 0
    # RemovePoint shifts later indices left, so offset each index by the
    # number of points already removed (ir). A non-negative return from
    # RemovePoint signals a successful removal.
    for i in removed :
        d = graph.RemovePoint ( i - ir )
        if 0 <= d : ir += 1
    # Negative of the number of points removed.
    return len ( graph ) - old_len
def run(request):
    """run(request) - Execute module and return a string
    """
    message = 'sample module complete'
    return message
import asyncio
def create_loop(coro_func):
    """Perform an async start of a new_sysyem, returning a promise
    """
    print('Start', coro_func)
    # NOTE(review): get_event_loop() is deprecated outside a running loop on
    # Python 3.10+ — confirm callers set up a loop before invoking this.
    loop = asyncio.get_event_loop()
    # Schedule the coroutine on the loop without awaiting it here.
    co_promise = loop.create_task(coro_func)
    return loop, co_promise
def extract_record(item):
    """ Extract and return data from a single record """
    # NOTE(review): `item` looks like a BeautifulSoup tag for one Amazon
    # search-result card — confirm with the caller.
    # Description and url
    atag = item.h2.a
    description = atag.text.strip()
    url_product = 'https://www.amazon.com'+atag.get('href')
    try:
        # Price
        price_parent = item.find('span', 'a-price')
        price = price_parent.find('span', 'a-offscreen').text
    except AttributeError:
        # No price on the card: record is unusable (implicitly returns None).
        return
    try:
        # Rank and rating
        rating = item.i.text
        review_count = item.find('span', {'class': 'a-size-base', 'dir': 'auto'}).text
    except AttributeError:
        # Rating is optional — fall back to empty strings.
        rating = ''
        review_count = ''
    result = [description, price, rating, review_count, url_product]
    return result
def get_prob(nv1, nv2):
    """
    Get the penalty from two natural vectors.
    :param nv1, nv2: two natural vectors (length >= 12)
    :return: the averaged penalty for 'A', 'C', 'T', 'G'
    """
    ep = 10**(-5)
    # Element-wise absolute differences.
    diff = [abs(x1 - x2) for x1, x2 in zip(nv1, nv2)]

    def normalize(segment):
        # Normalize a 4-element segment to probabilities; all zeros when
        # the segment sum is (numerically) zero.
        total = sum(segment)
        if total > ep:
            return [x / total for x in segment]
        return [0, 0, 0, 0]

    prob_1 = normalize(diff[4:8])
    prob_2 = normalize(diff[8:12])
    return [(x1 + x2) / 2 for x1, x2 in zip(prob_1, prob_2)]
import os
def _extract_ssl_config(tls_config):
    """
    Get the SSL-oriented parameters from the "tls" part of the configuration
    of a controller, if it is present.

    Args:
        tls_config (krake.data.config.TlsClientConfiguration): the "tls"
            configuration part of a controller.

    Returns:
        tuple: three elements: the certificate path, its key path, and the
        client authority certificate path (which may be ``None``).

    Raises:
        FileNotFoundError: if any non-empty path does not point to a file.
    """
    paths = (tls_config.client_cert, tls_config.client_key, tls_config.client_ca)
    for candidate in paths:
        # Only validate paths that are actually set.
        if candidate and not os.path.isfile(candidate):
            raise FileNotFoundError(candidate)
    return paths
import re
def get_metrics_lines(lines):
    """
    Return the three lines starting at the '## METRICS CLASS' header.

    Raises IndexError when no such header line exists.
    """
    header_indices = [i for i, line in enumerate(lines)
                      if re.search('^## METRICS CLASS', line)]
    start = header_indices[0]
    return lines[start:start + 3]
import os
def opener(path, flags):
    """File opener that creates new files with 0o700 permissions."""
    creation_mode = 0o700
    return os.open(path, flags, creation_mode)
def get_comp_level_octo(year, match_number):
    """Map an octofinals-format match number to its competition level.

    No 2015 support (the *year* argument is currently unused).
    """
    thresholds = ((24, 'ef'), (36, 'qf'), (42, 'sf'))
    for upper_bound, level in thresholds:
        if match_number <= upper_bound:
            return level
    return 'f'
def simple_validator_cmds(*args, **kwargs):
    """Used with SetSawtoothHome in integrationtools, to have more control
    at the test file level over how the validator is started.

    Arguments are accepted for interface compatibility and ignored.

    Returns:
        list of str: The validator startup command(s).
    """
    commands = ['sawtooth-validator -v']
    return commands
from typing import Union
from pathlib import Path
import os
def is_directory_traversal(directory: Union[str, Path]) -> bool:
    """Check whether *directory* escapes the current working directory.

    The requested path is resolved relative to the CWD; it counts as a
    traversal when the resolved path is not contained in the CWD tree.

    Args:
        directory: path (absolute or relative) requested by the caller.

    Returns:
        True when the resolved path lies outside the current working
        directory.
    """
    cwd = os.path.abspath(os.getcwd())
    requested_path = os.path.relpath(directory, start=cwd)
    requested_path = os.path.abspath(requested_path)
    # BUG FIX: os.path.commonprefix is a character-wise prefix, so it
    # wrongly treated e.g. /home/username as inside /home/user.
    # commonpath compares whole path components.
    return os.path.commonpath([requested_path, cwd]) != cwd
def curl_field_vector(x, y, z, px, py, pz):
    """Calculate a 3D velocity field as the curl of the vector potential
    (px, py, pz), using central finite differences.

    The y and z potential components are sampled at offset positions so
    the three noise channels decorrelate.
    """
    eps = 1.0e-4
    offset = 100.0

    def central_diff(hi, lo):
        return (hi - lo) / (2.0 * eps)

    # Partial derivatives of each potential component; the diagonal terms
    # (dPx/dx, dPy/dy, dPz/dz) are not needed for the curl.
    x_dy = central_diff(px(x, y + eps, z), px(x, y - eps, z))
    x_dz = central_diff(px(x, y, z + eps), px(x, y, z - eps))
    y_dx = central_diff(py(offset + x + eps, y, z), py(offset + x - eps, y, z))
    y_dz = central_diff(py(offset + x, y, z + eps), py(offset + x, y, z - eps))
    z_dx = central_diff(pz(x + eps, offset + y, z), pz(x - eps, offset + y, z))
    z_dy = central_diff(pz(x, offset + y + eps, z), pz(x, offset + y - eps, z))
    # curl = (dPz/dy - dPy/dz, dPx/dz - dPz/dx, dPy/dx - dPx/dy)
    return z_dy - y_dz, x_dz - z_dx, y_dx - x_dy
def optimal_weight(capacity, weights):
    """
    Calculate the optimal weight for a rucksack from a given list of weights
    (0/1 knapsack by weight).

    Args:
        capacity: max capacity of the rucksack
        weights: list of item weights

    Returns:
        Max possible total weight that does not exceed *capacity*.

    Examples:
        >>> optimal_weight(165, [23, 31, 29, 44, 53, 38, 63, 85, 89, 82])
        165
    """
    # best[c] = heaviest subset weight achievable with capacity c.
    # 1-D rolling table: O(capacity) memory instead of the full
    # (len(weights)+1) x (capacity+1) matrix.
    best = [0] * (capacity + 1)
    for weight in weights:
        # Iterate capacities downwards so each item is used at most once.
        for cap in range(capacity, weight - 1, -1):
            candidate = best[cap - weight] + weight
            if candidate > best[cap]:
                best[cap] = candidate
    # Explicit final answer (the original returned via leaked loop variables).
    return best[capacity]
import itertools
def next_data(it):
    """
    Advance a character iterator past the next '"' and collect everything
    up to (but not including) the following '"'.

    :param it: Character iterator.
    :returns: Data found between the quotation marks.
    """
    def not_quote(ch):
        return ch != '"'

    tail = itertools.dropwhile(not_quote, it)
    next(tail)  # consume the opening quotation mark
    return ''.join(itertools.takewhile(not_quote, tail))
def treatment_link(flora_id, taxon_id):
    """Build a link to the efloras treatment page for the given taxon."""
    base = 'http://www.efloras.org/florataxon.aspx?'
    return f'{base}flora_id={flora_id}&taxon_id={taxon_id}'
def dims_to_targetshape(data_dims, batch_size=None, placeholder=False):
    """Prepend a batch dimension to a data shape.

    Args:
        data_dims: list, shape of the data ignoring the batch size.
            For an RGB image this could be [224, 224, 3] for example.
        batch_size: scalar, the SGD batch size; ``None``/0 means unknown.
        placeholder: bool, whether the result will be used to construct a
            TF placeholder (unknown batch becomes ``None`` instead of -1).

    Returns:
        A list: the batch dimension followed by *data_dims*.
    """
    if batch_size is not None and batch_size != 0:
        leading = batch_size
    elif placeholder is True:
        leading = None
    else:
        leading = -1
    shape = [leading]
    shape.extend(data_dims)
    return shape
import os
def get_path(entry: dict, key: str = "name") -> str:
    """Get the full path to a render output from a manifest *entry*.

    ``entry[key]`` is the fallback; an explicit ``output_path`` wins and,
    when relative, is joined onto ``entry["output_dir"]``.
    """
    fallback = entry[key]
    if "output_path" not in entry:
        return fallback
    path = entry["output_path"]
    if os.path.isabs(path):
        return path
    return os.path.join(entry["output_dir"], path)
def format_number(number):
    """
    Format a number into a more readable form; 10000 -> 10,000.

    ``None`` maps to 'Unknown'; non-int numbers are shown with two decimals.
    """
    if isinstance(number, int):
        return f'{number:,d}'
    if number is None:
        return 'Unknown'
    return f'{number:3.2f}'
def getUserAgent():
    """Return a real browser user-agent string to hide the default Python
    user-agent, picked at random from a small pool.

    The original always returned the first entry, leaving the rest of the
    pool dead data.
    """
    import random  # local import keeps this helper drop-in for the file

    useragents = [
        # firefox
        'Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0',
        'Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0',
    ]
    return random.choice(useragents)
def _procBhalf(E, P, Q, order, B, half):
    """Helper function for getEPQ1 and getEPQ_pow.

    Right-multiplies P (and, when ``order == 1``, Q) by B when B is given;
    otherwise, when ``half`` is set, keeps only the first half of their
    columns.
    """
    if B is not None:
        P = P.dot(B)
        if order == 1:
            Q = Q.dot(B)
    elif half:
        num_cols = P.shape[1]
        if num_cols % 2:
            raise ValueError(
                "`A` must have an even number of rows/cols (or use ``half=False``"
            )
        half_cols = num_cols // 2
        P = P[:, :half_cols]
        if order == 1:
            Q = Q[:, :half_cols]
    return E, P, Q
def _convertType(iterable, newType):
    """Cast every element of *iterable* to *newType*, preserving the
    container type of *iterable*."""
    containerType = type(iterable)
    converted = [newType(element) for element in iterable]
    return containerType(converted)
import json
def derive_mapping_dict(obj):
    """Fetch the mapping dict from an object.

    :param obj: The mapping object
    :return: Mappings dict, or ``None`` for a falsy *obj*
    :rtype: `dict`
    """
    if not obj:
        return None
    data = dict(obj.__dict__)
    # Drop SQLAlchemy bookkeeping, if any.
    data.pop("_sa_instance_state", None)
    # Mappings, when present, are stored JSON-encoded: decode them.
    if data.get("mappings", None):
        data["mappings"] = json.loads(data["mappings"])
    return data
def avoidhexsingularity(rotation):
    """Avoid rotations that are exact multiples of 15 degrees, which
    produce NaN's in hextransformEE().

    Parameters
    ----------
    rotation : int or float
        rotation in degrees

    Returns
    -------
    float
        *rotation*, with epsilon = 1.0e-12 degrees added when it is an
        exact multiple of 15. Precondition before using rotation degrees
        in Affine2d for hex geometries.
    """
    epsilon = 1.0e-12
    fractional_part = rotation / 15.0 - int(rotation / 15.0)
    if abs(fractional_part) < epsilon / 2.0:
        return rotation + epsilon
    return rotation
import argparse
def create_args_parser():
    """Create an argparse parser with two arguments: 1) a yaml format file
    containing model parameter input, and 2) an optional json dictionary
    string that can override that input.

    Intended to work in concert with :func:`repast4py.parameters.init_params`,
    where the parsed arguments are passed on to that function.

    Examples:
        >>> parser = create_args_parser()
        ...
        >>> args = parser.parse_args()
        >>> params = init_params(args.parameters_file, args.parameters)
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "parameters_file", help="parameters file (yaml format)")
    arg_parser.add_argument(
        "parameters", nargs="?", default="{}", help="json parameters string")
    return arg_parser
def main():
    """
    Prompt for a number and a count, then print that number's
    multiplication table.

    Returns
    -------
    int
        returns integer 0 for safe executions.
    """
    print("Program to print multiplication table of a number.")
    number = float(input("Enter the number: "))
    num_elements = int(input("Number of elements: "))
    print("Multiplication table of", number)
    for multiplier in range(1, num_elements + 1):
        print(number, "×", multiplier, "=", number * multiplier)
    return 0
from pathlib import Path
def filesize(fname):
    """
    Simply returns the size of the file fname in bytes
    """
    size_in_bytes = Path(fname).stat().st_size
    return size_in_bytes
def sort_proxy_stats_rows(proxy_stats, column):
    """
    Sorts proxy statistics by the specified column (ascending).

    Args:
        proxy_stats: A list of proxy statistics.
        column: An int representing a column number the list should be sorted by.

    Returns:
        A list sorted by the specified column.
    """
    def column_key(row):
        return row[column]

    return sorted(proxy_stats, key=column_key)
from typing import List
def are_distinct(data: List[int]) -> bool:
    """Returns True if all numbers in data are different from each
    other; otherwise, False.

    Uses a set of seen values so each membership test is O(1), making the
    whole check O(n) instead of the O(n^2) list-scan of the naive version.
    """
    seen = set()
    for value in data:
        if value in seen:
            return False
        seen.add(value)
    return True
import pickle
def load_pickle(file_name):
    """data = load_pickle(file_name)

    Loads a pickle with core_data dictionaries; assumes the first entry is
    a list of the names of all following entries.

    Returns a dictionary mapping each name to its unpickled data.
    """
    # 'with' guarantees the file is closed even if unpickling raises
    # (the original left the handle open on error).
    with open(file_name, 'rb') as pkl_file:
        names = pickle.load(pkl_file)
        # One pickled object per name, in order.
        return {key: pickle.load(pkl_file) for key in names}
def __getpixel(image, x, y):
    """Get the pixel of *image* at (x, y).

    Out-of-range coordinates yield a non-stroke (white) pixel in the
    image's own mode: an RGB triple, or a single grey level otherwise.

    Arguments:
    image -- The PIL image object.
    x -- X coordinate of the pixel.
    y -- Y coordinate of the pixel.
    """
    inside = 0 <= x < image.size[0] and 0 <= y < image.size[1]
    if inside:
        return image.getpixel((x, y))
    return (255, 255, 255) if image.mode == 'RGB' else 255
def first(container):
    """
    Returns the first element in a set (or ``None`` when empty). Even with
    the overhead of the function call, this is faster than
    next(iter(container)).
    """
    for element in container:
        return element
    return None
import subprocess
from sys import path
def close_simulation() -> int:
    """Kill all simulators by running the ``cleanup.sh`` script that lives
    next to this module.

    Returns:
        int: the script's exit status (0; ``subprocess.check_call`` raises
        ``CalledProcessError`` on non-zero exit).
    """
    import os

    # BUG FIX: the original built the script path as
    # ``str(sys.path) + "/cleanup.sh"`` -- the repr of a Python list,
    # never a real file. Use this module's directory instead.
    script = os.path.join(os.path.dirname(os.path.abspath(__file__)), "cleanup.sh")
    reply = int(subprocess.check_call(script))
    return reply
def selections(node):
    """Return tree of field name selections."""
    children = getattr(node.selection_set, 'selections', [])
    tree = {}
    for child in children:
        tree[child.name.value] = selections(child)
    return tree
def word_to_bag(word):
    """Convert *word* to a bag-of-chars: its unique characters, sorted."""
    unique_chars = sorted(set(word))
    return ''.join(unique_chars)
def load_rfam_urs_accessions_from_file(urs_acc_list):
    """
    Loads all existing Rfam URS accessions in a python dictionary.

    urs_acc_list: A .txt file with all URS accessions already in Rfam,
    one per line.

    return: A python dictionary with all URS accessions as keys (values
    are empty strings).
    """
    rfam_urs_accs = {}
    # 'with' closes the file even on error (the original relied on a
    # manual close that was skipped when an exception was raised).
    with open(urs_acc_list, 'r') as fp:
        for line in fp:
            accession = line.strip()
            if accession not in rfam_urs_accs:
                rfam_urs_accs[accession] = ""
    return rfam_urs_accs
def convert_decimal_to_other_system(num, system, max_length):
    """Convert a non-negative decimal *num* into a string of *max_length*
    digits in base *system*, most significant digit first.

    Note: for system > 10 each "digit" is rendered as its decimal value,
    so digits may span several characters. Generic enough to be reused by
    other projects.
    """
    digits = []
    remaining = num
    # Walk the place values from most to least significant; divmod replaces
    # the original separate // and % steps, and reversed(range(...)) the
    # direct .__reversed__() dunder call.
    for position in reversed(range(max_length)):
        digit, remaining = divmod(remaining, system ** position)
        digits.append(str(digit))
    return "".join(digits)
import math
def degrees(rad_angle):
    """Converts an angle in radians to degrees, mapped to the range [-180,180]"""
    deg_angle = rad_angle * 180 / math.pi
    # Note: this assumes the radians angle is positive, as that's what MMTK does
    while deg_angle > 180:
        deg_angle -= 360
    return deg_angle
def audit(log):
    """
    Single method to ensure that the log object is an audit log (by binding
    the audit log param).

    :param log: a bound log object
    :returns: a bound log object with the keyword that marks it as an
        audit log already bound
    """
    audit_bound = log.bind(audit_log=True)
    return audit_bound
import threading
import socket


def setup_client_style_socket(ip_addr, port_number):
    """Return a client-style socket once it connects to an already expecting server-style socket.

    The socket is returned whether or not the connection succeeded;
    callers presumably check connectivity themselves -- verify.
    """
    print("\nBeginning setup_client_style_socket()")
    # NOTE(review): these empty marker files look like a crude cross-process
    # trace/debug mechanism (one file per event) -- confirm before removing.
    f = open("Beginning_setup_client_style_socket()"+str((ip_addr, port_number))+str(threading.current_thread().ident), "w+")
    f.close()
    # Create client-style socket
    client_sock_for_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_sock_for_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Connect to already listening server-style socket of other agent on another machine
    # connect_ex returns 0 on success and an errno value on failure,
    # instead of raising like connect().
    conn_result = client_sock_for_server.connect_ex((str(ip_addr), port_number))
    if conn_result == 0: # this call requires server socket to be already listening to accept
        print_msg = "Connected successfully as client to: " + str((ip_addr, port_number)) +" with socket: " + str(client_sock_for_server)
        print(print_msg)
        f = open("Connected_successfully_as_client_to_"+str(ip_addr)+':'+str(port_number), "w+")
        f.close()
    else:
        print("Failed to connect as the client to: ", (ip_addr, port_number))
        f = open("Failed_to_connect_as_client_to_"+str(ip_addr)+':'+str(port_number)+"_conn_result_"+str(conn_result), "w+")
        f.close()
    return client_sock_for_server
def conditions(ctx):
    """
    Tests for stack to verify that our glitch worked
    ie. a fault occured during the AES computation

    Returns True when the 16 bytes at stack[192:208] differ from the
    reference (unfaulted) value.
    """
    observed = bytes(ctx['stack'][192:208])
    expected_hex = "3ad77bb40d7a3660a89ecaf32466ef97"
    return observed.hex() != expected_hex
from pydantic import BaseModel # noqa: E0611
def get_model_defaults(model_class: BaseModel):
"""Return the default values for fields in a Pydantic BaseModel.
If a field doesn't have a default then return None.
Default values may also be None.
Returns
-------
dict
"""
return {x: y.get("default") for x, y in model_class.schema()["properties"].items()} | f68483fbb59f0fa44365ac04ffd4514e4518efe7 | 39,607 |
def _create_rest_error_output(error_message, error_code):
    """Build the standard REST-service error envelope for a failed request."""
    return {
        "success": "false",
        "data": {},
        "error": {
            "code": error_code,
            "message": error_message,
        },
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.