content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def average_of_array(array):
    """Return the arithmetic mean of the values in ``array``.

    Raises ZeroDivisionError for an empty array, matching the original
    contract.
    """
    return sum(array) / len(array)
|
7ea1eba9d5423de380e3876669dc1362d0d8e82a
| 68,382
|
import re
def cmdstr(cmd):
    """Render ``cmd`` (a list of arguments) as a readable shell command.

    Arguments containing characters outside a conservative safe set are
    wrapped in double quotes.
    """
    safe_arg = re.compile(r"^[a-zA-Z0-9/_.=-]+$")
    quoted = [arg if safe_arg.match(arg) else f'"{arg}"' for arg in cmd]
    return " ".join(quoted)
|
1256c1bb2bf01ff0081c0864080ba386ec0246e3
| 68,384
|
def avg(lis, exception=0.0):
    """Return the mean of the non-None entries of ``lis``.

    ``exception`` is returned when no values remain, avoiding a
    divide-by-zero; it defaults to 0.0 because the main usage is in
    percentage calculations.
    """
    values = [item for item in lis if item is not None]
    if not values:
        return exception
    return sum(values) / len(values)
|
49b633231e9fb375b54bb4c9926166331a50f877
| 68,388
|
from typing import Any
from typing import Callable
from typing import Sequence
def to_dtype(inp: Any, dtype: Callable) -> Any:
    """
    helper function to convert a sequence of arguments to a specific type
    Args:
        inp (Any): any object which can be converted by dtype; if a
            (non-string) sequence is detected, dtype is applied to the
            individual elements and the sequence type is preserved
        dtype (Callable): callable to convert arguments
    Returns:
        Any: converted input
    """
    # str/bytes are Sequences too, but element-wise conversion of a string
    # is never intended (the old code turned "12" into the str of a list);
    # treat them as atomic values instead.
    if isinstance(inp, Sequence) and not isinstance(inp, (str, bytes)):
        return type(inp)([dtype(i) for i in inp])
    return dtype(inp)
|
ca4173360c340f7e0dfe5970e3a026df0cd66231
| 68,390
|
def trivial(target):
    """Cheap irreducibility screen for a polynomial.

    Returns True if ``target`` is irreducible, False if reducible, or
    None if this quick test cannot decide:
    (1) zero constant term   -> reducible (False)
    (2) otherwise degree <= 1 -> irreducible (True)
    (3) otherwise             -> undecided (None)
    """
    if not target[0]:
        return False
    if target.degree() <= 1:
        return True
    return None
|
5b43e8f19796a1a4075fddef3cf4a52254dedf85
| 68,393
|
def to_int(variable):
    """Coerce ``variable`` to int when possible.

    Args:
        variable (str): a string holding either an integer or plain text.
    Returns:
        int or str: the parsed integer, or the original value when it
        does not represent one.
    """
    try:
        converted = int(variable)
    except ValueError:
        return variable
    return converted
|
c8efcd993139e9f21027660c587f910d8262496c
| 68,394
|
from bs4 import BeautifulSoup
def info_from_html(html, name_selector, author_selector):
    """Extract (song name, author name) from an HTML page.

    name_selector / author_selector are CSS selectors; the first match
    of each is used and its text content is stripped.
    """
    soup = BeautifulSoup(html, 'html.parser')

    def first_text(selector):
        # select() may return several tags; the page is assumed to have
        # at least one match and the first is taken.
        return soup.select(selector)[0].get_text().strip()

    return (first_text(name_selector), first_text(author_selector))
|
06eeafa40418730a98d25a28b65740b25e1f17e0
| 68,395
|
def consolidate_bins(labels, n_bins_in, n_bins_out):
    """Consolidate fine-grained bin labels into coarser bins.
    Args:
        labels (array or int): Input bin labels in ``range(n_bins_in)``.
        n_bins_in (int): Number of bins the input labels use.
        n_bins_out (int): Desired number of output bins; must divide
            ``n_bins_in`` evenly.
    Returns:
        Labels consolidated into ``range(n_bins_out)``.  The integer
        consolidation factor keeps integer labels integral (the old
        float division produced float labels).
    """
    # Message previously had the two counts swapped: it is n_bins_in
    # that must be divisible by n_bins_out, as the condition checks.
    assert n_bins_in % n_bins_out == 0, \
        (f"The initial number of bins ({n_bins_in}) must be divisible by "
         f"the number of output bins ({n_bins_out}).")
    bin_consolidation_factor = n_bins_in // n_bins_out
    return labels // bin_consolidation_factor
|
aac0efd0d97754be8eb1ee78c940d5848fd02ea9
| 68,398
|
def technologies_set(projects, sorted=True):
    """Collect the unique technologies used across all given projects.

    :param projects: list of project dicts, each with a 'technologies' key
    :type projects: list
    :param sorted: whether to return the list in sorted order
        (the name shadows the builtin; kept for API compatibility)
    :type sorted: bool
    :return: list of unique technology names
    """
    unique = set()
    for project in projects:
        unique |= set(project['technologies'])
    result = list(unique)
    if sorted:
        result.sort()
    return result
|
4c7908c32be4dd44312d0139deece27ac74f8252
| 68,399
|
def get_docserver_setup(public, stable, server, intranet, group):
    """Build the value for BOB_DOCUMENTATION_SERVER.

    The returned entries depend on ``public``/``stable``:

    * public and stable: public stable templates only
    * public and not stable: public master template
    * not public: the matching private templates are appended as well
      (private access requires ``intranet=True``)

    Args:
        public: include only publicly accessible locations when True
        stable: include only stable (versioned) locations when True
        server: base address of the documentation server
        intranet: whether private (intranet-only) paths may be used
        group: gitlab namespace of the package (e.g. "bob" or "beat")

    Returns: a string usable by bob.extension to locate dependent
        documentation projects.

    Raises:
        RuntimeError: when private channels are requested with
            ``intranet=False``.
    """
    if (not public) and (not intranet):
        raise RuntimeError(
            "You cannot request for private channels and set"
            " intranet=False (server=%s) - these are conflicting options"
            % server
        )

    def doc_urls(prefix):
        # The %(name)s / %(version)s placeholders are filled in later by
        # bob.extension, so they stay literal here.
        base = server + prefix + "/docs/" + group + "/%(name)s/"
        if stable:
            return [base + "%(version)s/", base + "stable/"]
        return [base + "master/"]

    entries = doc_urls("/software/%s" % group)
    if not public:
        # Private docs live under /private and are only reachable from
        # inside the intranet.
        entries += doc_urls("/private")
    return "|".join(entries)
|
6c8bb570702f781aebe7b401bcd01a536f603f84
| 68,400
|
def example_loss(tensor):
    """Return the scalar sum of the squared entries of ``tensor``."""
    squared = tensor ** 2
    return squared.view(-1).sum(0)
|
2571c6b2c353e7405d9616b2095c8c4e7b1c90ca
| 68,409
|
def find_item_by_key_in_list(item, key, list_to_search, empty_item=None):
    """
    Find the first dict in ``list_to_search`` whose ``key`` equals ``item``.
    :param item: the item we're looking for
    :param key: the key by which the item can be identified
    :param list_to_search: list of dict-like items
    :param empty_item: returned when the item could not be found;
        defaults to a fresh empty dict.  (The previous mutable default
        ``{}`` was a single shared object: a caller mutating the returned
        "empty" dict would corrupt every later call.)
    :return: the matching entry, or ``empty_item``
    """
    if empty_item is None:
        empty_item = {}
    for list_item in list_to_search:
        if list_item.get(key) == item:
            return list_item
    return empty_item
|
c183f78878737fce0cd7de38c2d5369cfc398e5d
| 68,414
|
def remove_sublists(lst):
    """
    Drop every list that is a (set-)subset of another list in ``lst``.
    :param lst: list
    :return: list
    >>> remove_sublists([[1, 2, 3], [1, 2]])
    [[1, 2, 3]]
    >>> remove_sublists([[1, 2, 3], [1]])
    [[1, 2, 3]]
    >>> remove_sublists([[1, 2, 3], [1, 2], [1]])
    [[1, 2, 3]]
    >>> remove_sublists([[1, 2, 3], [2, 3, 4], [2, 3], [3, 4]])
    [[1, 2, 3], [2, 3, 4]]
    """
    kept = []    # surviving elements, as sets, largest first
    output = []
    # Visiting largest-first guarantees a subset is always seen after
    # its superset.
    for candidate in sorted(map(set, lst), key=len, reverse=True):
        if any(candidate <= existing for existing in kept):
            continue
        kept.append(candidate)
        output.append(list(candidate))
    return output
|
f41bb66a70dc15825ce4a57b41b0f326bde4fb84
| 68,419
|
def _get_qt_qmake_config(qmake_config, qt_version):
""" Return a dict of qmake configuration values for a specific Qt version.
"""
qt_qmake_config = {}
for name, value in qmake_config.items():
name_parts = name.split(':')
if len(name_parts) == 2 and name_parts[0] == qt_version:
qt_qmake_config[name_parts[1]] = value
return qt_qmake_config
|
b417ab153f0aac75a39d2f4ad99ada8a01fcf994
| 68,420
|
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs, sorted by attr_name.
"""
attrs = getattr(obj.__class__, "__attrs_attrs__")
attr_names = (a.name for a in attrs)
return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
|
958b66538bfb1e96fda5c844ef87e3b684f26565
| 68,422
|
def max_margin_to_assets_ratio_to_N_to_1_leverage(max_margin_to_assets_ratio):
    """Invert N_to_1_leverage_to_max_margin_to_assets_ratio.

    Derivation: R = (N-1)/N  =>  RN = N-1  =>  N(1-R) = 1  =>  N = 1/(1-R).
    """
    remaining_fraction = 1 - max_margin_to_assets_ratio
    return 1 / remaining_fraction
|
58cc78563f6f381640ebc33c22c718ce0f207695
| 68,425
|
def read_file(filename, delimiter=None, startline=0):
    """Read a text file into a 2D list (one inner list per line).

    Args:
        filename: path of the file to read.
        delimiter: passed to ``str.split``; None splits on any whitespace
            (which also drops the trailing newline).
        startline: number of leading rows to drop from the result.

    Returns:
        list[list[str]]: the split rows, starting at ``startline``.
    """
    data_list = []
    # The old mode 'rU' was removed in Python 3.11; plain 'r' already
    # performs universal newline translation on Python 3.  A context
    # manager guarantees the handle is closed even on error.
    with open(filename, 'r') as ifile:
        for line in ifile:
            if delimiter:
                data_list.append(line.split(delimiter))
            else:
                data_list.append(line.split())
    return data_list[startline:]
|
c2e5b5abc8077534ea0e534891871223f8acfafc
| 68,432
|
def edowham(alpha_p, e_eff):
    """
    Elasticity parameter of a lubricated contact after Dowson-Hamrock.
    Parameters
    ----------
    alpha_p: ndarray, scalar
        The pressure-viscosity coefficient of the lubricant.
    e_eff: ndarray, scalar
        The effective modulus of the contact problem.
    Returns
    -------
    ndarray, scalar
        The elasticity parameter, the product ``alpha_p * e_eff``.
    """
    return alpha_p * e_eff
|
d7386d1c670892ee0687109c4e0a6f4441754cb8
| 68,434
|
def requires_training_wheels(f):
    """
    A decorator for model functions where the model needs training first.

    Raises TypeError when ``model._trained`` is falsy; otherwise calls
    through to ``f`` unchanged.
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(f)  # preserve f's name/docstring for introspection and debugging
    def wrapper(model, *args, **kwargs):
        if not model._trained:
            raise TypeError("the model needs training first")
        return f(model, *args, **kwargs)
    return wrapper
|
a2b882b2709b546895b7f9f6df095ae75f5c42a9
| 68,435
|
def compute_kxy(kxx, dTx, dTy, width, length, Resistance=5000):
    """
    Computes the thermal Hall conductivity aka kxy in W / Km
    Parameters:
    ----------------------------------------------------------------------------
    kxx: 1d array
        The values of the longitudinal thermal conductivity.
    dTx: 1d array
        The values of the longitudinal temperature gradient in the sample.
    dTy: 1d array
        The values of the transverse temperature gradient in the sample.
    width: float
        The sample's width in meters
    length: 1d array
        The sample's length in meters
    Resistance: float, optional
        Unused in the computation; kept so existing callers that pass it
        keep working.
    """
    geometry_factor = length / width
    gradient_ratio = dTy / dTx
    return kxx * gradient_ratio * geometry_factor
|
75125182a9e84cc82f94949b3d5ede1476b53ccc
| 68,436
|
import socket
def scan_port(host, port):
    """Report whether TCP ``port`` on ``host`` accepts connections.

    Returns "Open" when a connection succeeds, or "Closed: <reason>" on
    any failure.  The probe socket is now closed deterministically (the
    original leaked one socket per successful probe).
    """
    try:
        # `with` closes the socket even though we return from inside it.
        with socket.create_connection((host, port)):
            return "Open"
    except Exception as e:
        return "Closed: %s" % e
|
9fa6a2b92e20c562802ea484e96fcb6d183973e3
| 68,440
|
import csv
def csv_to_list(path, head=False):
    """Load a CSV file as a 2D list.
    Args:
        path (str): csv path
        head (bool, optional): skip the CSV header row when True.
            Defaults to False.
    Returns:
        List: one inner list per CSV row
    """
    with open(path, "r") as f:
        rows = csv.reader(f)
        if head:
            next(rows)
        return list(rows)
|
780fd83600d67107581813422f0dece445b03e9c
| 68,442
|
def check_head_water(aFlowline_in, pVertex_start_in):
    """Check whether a vertex is a headwater of the flowline network.

    A vertex is a headwater when it appears exactly once among all
    flowline endpoints (start or end).

    Args:
        aFlowline_in (list): all flowlines; each exposes ``pVertex_start``
            and ``pVertex_end`` attributes.
        pVertex_start_in: the vertex of interest.

    Returns:
        int: 1 if the vertex is a headwater, -1 otherwise.  (The old
        docstring claimed 0 for "not headwater", but the code has always
        returned -1; the documentation is corrected to match the code so
        callers are not misled.)
    """
    iCount = 0
    for pFlowline in aFlowline_in:
        if pFlowline.pVertex_start == pVertex_start_in:
            iCount += 1
        if pFlowline.pVertex_end == pVertex_start_in:
            iCount += 1
    return 1 if iCount == 1 else -1
|
e4f0932582f5b0c53f62e0265772bc1d17fe307b
| 68,443
|
def verify_allow(value, expected):
    """
    Check that an Allow header lists exactly the ``expected`` methods.

    ``value`` is the raw header string (comma-separated, possibly with a
    single trailing comma); ``expected`` is an iterable of method names.
    """
    if value is None:
        return False
    # Tolerate one trailing comma, as the original parser did.
    if value[-1] == ",":
        value = value[:-1]
    methods = [entry.strip() for entry in value.split(",")]
    if len(methods) != len(expected):
        return False
    return all(method in methods for method in expected)
|
a697f073b5e81913d05fb2c64064f46c1dc6c39f
| 68,448
|
from typing import Union
from typing import Dict
from typing import Hashable
from typing import Any
from typing import List
from typing import Tuple
def find_key_value(
    d: Union[Dict[Hashable, Any], List[Any]], key: Hashable, value: Hashable
) -> Tuple[List[Any], ...]:
    """
    Find the route to key: value pairs in a dictionary.
    This function works on nested dictionaries, lists, and tuples.
    Parameters
    ----------
    d
        A dict or list of dicts.
    key
        A dictionary key.
    value
        A value.
    Returns
    -------
    A tuple of routes to where the matches were found, in depth-first
    discovery order.  (Routes were previously collected in a set, which
    made the output order arbitrary and contradicted the documented
    example; a dict keeps insertion order while still de-duplicating.)
    Examples
    --------
    >>> data = {
    ...     "a": [0, {"b": 1, "x": 3}],
    ...     "c": {"d": {"x": 3}}
    ... }
    ... find_key_value(data, "x", 3)
    (['a', 1], ['c', 'd'])
    """
    found_items = {}  # used as an insertion-ordered set of route tuples

    def _lookup(obj, path=()):
        if isinstance(obj, dict):
            if key in obj and obj[key] == value:
                found_items[path] = None
            for k, v in obj.items():
                _lookup(v, path + (k,))
        elif isinstance(obj, (list, tuple)):
            for i, v in enumerate(obj):
                _lookup(v, path + (i,))

    _lookup(d)
    return tuple(list(path) for path in found_items)
|
587660f1796baa561913ffdab5e280872831ea4b
| 68,451
|
def std_pin(tmp_path):
    """Write a small standard pin file into ``tmp_path`` and return its path."""
    out_file = tmp_path / "std_pin"
    contents = (
        "sPeCid\tLaBel\tpepTide\tsCore\tscanNR\tpRoteins\n"
        "DefaultDirection\t-\t-\t-\t1\t-\t-\n"
        "a\t1\tABC\t5\t2\tprotein1\tprotein2\n"
        "b\t-1\tCBA\t10\t3\tdecoy_protein1\tdecoy_protein2"
    )
    with open(str(out_file), "w+") as pin:
        pin.write(contents)
    return out_file
|
5f0b5a61a3e0e366b84fc4c69c534c6aeef3d85c
| 68,459
|
from typing import Dict
from typing import Any
def key_strings_from_dict(dictionary: Dict[str, Any]):
    """
    Returns the set of keys from a dictionary.
    """
    # Iterating a dict yields its keys directly.
    return set(dictionary)
|
99e775cbd11995b08c4fff2b33d43d8b70ccdeda
| 68,462
|
def find_best_hit(query):
    """ Extract the best BLAST hit and bit score from a query record.

    Returns (query_id, best_hit_title, best_bit_score, next_bit_score).
    ``best_hit`` is "No hit" and both scores are None when there are no
    alignments; ``next_bit_score`` is None without a second alignment.
    """
    best_hit = "No hit"
    best_score = None
    next_score = None
    alignments = query.alignments
    if alignments:
        best_hit = alignments[0].title
        best_score = alignments[0].hsps[0].bits
        if len(alignments) > 1:
            next_score = alignments[1].hsps[0].bits
    return query.query, best_hit, best_score, next_score
|
47ffcbe54223200975618d89910e75821a7c29cd
| 68,465
|
def is_prefixed(text: str, prefix: str) -> bool:
    """Token-aware prefix test on a token value.

    Unlike a plain ``text.startswith(prefix)``, the prefix must end at a
    token boundary: the whole text, a prefix ending in ':', or a
    following ' ' or '('.  E.g. "acc loop" is prefixed with "acc", but
    "accelerate" is not.
    """
    if not text.startswith(prefix):
        return False
    remainder = text[len(prefix):]
    if not remainder or prefix[-1] == ':':
        return True
    return remainder.startswith((' ', '('))
|
d26032169b98d894c0e39a19129033ae1e6eab95
| 68,468
|
import six
def _build_label_filter(category, *args, **kwargs):
"""Construct a filter string to filter on metric or resource labels."""
terms = list(args)
for key, value in six.iteritems(kwargs):
if value is None:
continue
suffix = None
if key.endswith(('_prefix', '_suffix', '_greater', '_greaterequal',
'_less', '_lessequal')):
key, suffix = key.rsplit('_', 1)
if category == 'resource' and key == 'resource_type':
key = 'resource.type'
else:
key = '.'.join((category, 'label', key))
if suffix == 'prefix':
term = '{key} = starts_with("{value}")'
elif suffix == 'suffix':
term = '{key} = ends_with("{value}")'
elif suffix == 'greater':
term = '{key} > {value}'
elif suffix == 'greaterequal':
term = '{key} >= {value}'
elif suffix == 'less':
term = '{key} < {value}'
elif suffix == 'lessequal':
term = '{key} <= {value}'
else:
term = '{key} = "{value}"'
terms.append(term.format(key=key, value=value))
return ' AND '.join(sorted(terms))
|
b46b954deee59609f28bed6d7b5a4b2b820a76ca
| 68,469
|
def fastexp(a: float, n: int) -> float:
    """Exponentiation by squaring (recursive).
    >>> fastexp( 3, 11 )
    177147
    """
    if n == 0:
        return 1
    half = fastexp(a, n // 2)
    squared = half * half
    # Odd exponents need one extra factor of a.
    return squared * a if n % 2 else squared
|
492643463bef9861db51baeb9a4e28460dc1951a
| 68,473
|
def integral_closure(x):
    """
    Return the integral closure of ``x``, delegating to the object's own
    ``integral_closure`` method.
    EXAMPLES::
        sage: integral_closure(QQ)
        Rational Field
        sage: K.<a> = QuadraticField(5)
        sage: O2 = K.order(2*a); O2
        Order in Number Field in a with defining polynomial x^2 - 5
        sage: integral_closure(O2)
        Maximal Order in Number Field in a with defining polynomial x^2 - 5
    """
    closure = x.integral_closure()
    return closure
|
828abb6ea0fbf748e0718e0c53ca72744027f024
| 68,478
|
def get_full_vname(namespaces: list, table_name: str, vname: str):
    """Build the fully-qualified C++ variable name.

    Joins the namespaces with '::' and appends the (optionally
    table-prefixed) variable name; e.g. ['bssn'], 'BH1_', 'MASS' gives
    'bssn::BH1_MASS'.  Centralizing this avoids repeated string glue.

    Parameters
    ----------
    namespaces : list
        The namespaces this parameter belongs to.
    table_name : str
        Prefix shared by a table of parameters ('' when unused).
    vname : str
        The variable name itself (the key of the corresponding table).

    Returns
    -------
    str
        The full variable name.
    """
    return "::".join(namespaces) + "::" + table_name + vname
|
b05c0afd857aed67f5f09791975718a24397de43
| 68,479
|
import click
import re
def validate_ticket_arg(ctx, param, name):
    """Click callback: validate a ticket id like 'ABC-123' and return it.

    Raises click.BadParameter when ``name`` is missing or malformed.
    The pattern is deliberately unanchored at the end, so ids carrying a
    slug suffix (e.g. 'ABC-123-fix-thing') are accepted too.
    """
    if name is None:
        raise click.BadParameter('Ticket number is required')
    if not re.match(r'^[a-zA-Z]+-\d+', name):
        raise click.BadParameter('Invalid ticket format {}'.format(name))
    return name
|
0876f49c4ad0eef40f6ba8ec12e1ef0de473fcd1
| 68,485
|
def get_unique_strings(input_strings):
    """De-duplicate strings case-insensitively, preferring uppercase.

    For strings equal ignoring case, the lexicographically smallest one
    wins — uppercase letters sort before lowercase, so 'ABC' beats 'abc'.
    Returns the surviving strings as a dict values view.
    """
    survivors = {}
    for candidate in input_strings:
        folded = candidate.lower()
        previous = survivors.get(folded, candidate)
        survivors[folded] = min(candidate, previous)
    return survivors.values()
|
ccab65dbc5d7e8b9f7c0b559d5dad1c1de067c49
| 68,487
|
def _best_local_candidate(local_candidates, git_repo):
"""
Given @local_candidates, a list of LocalCandidate named tuples,
scraped from a diffscuss file, return the best candidate.
The best candidate is:
* the earliest candidate in the list where the matching line was
found
* or the earliest candidate in the list, if none of the matching
lines were found
"""
best_candidate = None
for candidate in local_candidates:
if best_candidate is None:
best_candidate = candidate
elif candidate.found_match and not best_candidate.found_match:
best_candidate = candidate
return best_candidate
|
31fa9db46f63c8e94b1d6d769e4da6c0012dea74
| 68,489
|
def get_column_counts(workbook_dict):
    """
    Map each file id to the per-sheet column counts of its workbook.

    Expects workbook dictionaries that have been cleaned (consistent
    sheet layout); the column count of a sheet is the number of entries
    in its row '1'.
    """
    return {
        name: [len(sheet['1']) for sheet in workbook]
        for name, workbook in workbook_dict.items()
    }
|
aaf18880d968024482b288a6d07baf745d96aeb4
| 68,491
|
def get_record(record_line):
    """
    Split a record line into (record_id, record_name) at its first
    space: the id is everything before the space, the name everything
    after it.
    """
    boundary = record_line.find(' ')
    record_id = record_line[:boundary]
    record_name = record_line[boundary + 1:]
    return record_id, record_name
|
d5bffd599cb0ed2420baf826a8b810f089c27c33
| 68,496
|
from typing import List
from typing import Optional
def searchStrInLines(stringToSearch: str, lines: List[str], exactMatch: bool = False) -> Optional[int]:
    """ Return the index of the first line matching ``stringToSearch``,
    or None when nothing matches.
    Args:
        stringToSearch: string to look for.
        lines: list of strings to scan.
        exactMatch: when True the whole line must equal the string;
            otherwise a substring hit is enough.
    """
    for index, line in enumerate(lines):
        hit = stringToSearch == line if exactMatch else stringToSearch in line
        if hit:
            return index
    return None
|
d0a24089094d1fc15b038a7ddbf8fa2a6827ba6e
| 68,499
|
import hashlib
def hash_file(file_to_hash):
    """Compute the SHA-256 hex digest of the specified file.

    Reads in 64 KiB chunks so arbitrarily large files use constant
    memory.  The file handle is now closed deterministically via a
    context manager (the original never closed it).
    """
    print("Hashing " + file_to_hash + "...")
    hash_algorithm = hashlib.sha256()
    with open(file_to_hash, 'rb') as file:
        # iter(callable, sentinel) loops until read() returns b''.
        for contents in iter(lambda: file.read(65536), b''):
            hash_algorithm.update(contents)
    return hash_algorithm.hexdigest()
|
9d2a8d8ace226222121c969118e1cd2ec6dccac5
| 68,501
|
def process_value(value: str) -> str:
    """Strip one layer of surrounding double quotes from an environment
    variable value, if present; otherwise return it unchanged."""
    is_quoted = len(value) > 0 and value[0] == value[-1] == '"'
    return value[1:-1] if is_quoted else value
|
4429e9bacade0acbd91e112a752e44b7b8148d6f
| 68,502
|
def get_container_properties_from_inspect(inspect, host_name):
    """ Summarize a docker-inspect dict as a flat properties dict.
    :param inspect: The inspect object (dict) from docker
    :param host_name: The host name
    :return: dict of (Docker host, Docker image, Docker container id, Docker container name)
    """
    if 'Config' in inspect:
        image = inspect['Config'].get('Image', 'N/A')
    else:
        image = 'N/A'
    # 'Names' (list API) takes priority; fall back to 'Name' (inspect API).
    names = inspect.get('Names', [inspect.get('Name', 'N/A')])
    return {
        'Docker host': host_name,
        'Docker image': image,
        'Docker container id': inspect.get('Id', 'N/A'),
        'Docker container name': names[0],
    }
|
e77f7dca38d319f93d20f116b36646d8bfc11dd0
| 68,505
|
def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B):
    """
    Re-express a pose given in frame A in frame B.
    Args:
        pose_A: numpy array of shape (4,4), the homogeneous pose of a
            point C in frame A
        pose_A_in_B: numpy array of shape (4,4), the homogeneous pose of
            frame A in frame B
    Returns:
        numpy array of shape (4,4): the pose of C in frame B
    """
    # T_B^C = T_B^A · T_A^C : first map C into A's frame, then into B.
    return pose_A_in_B.dot(pose_A)
|
b2ad24ceaa70e71247aa34d55b01a4ded0c2f911
| 68,506
|
def maplist2dict(dlist):
    """ Build a dictionary from a list of (key, value) pairs.

    Only the first two elements of each entry are used.
    """
    return {pair[0]: pair[1] for pair in dlist}
|
2d6c03d09ad8cdb9c9d7878c5038b99a28992dde
| 68,510
|
def add_low_value_good(preferences:list, new_item:str)->list:
    """
    Given preferences on m items, add a new "low valued" item: each
    bundle is preferred with the new item over without it, and bundle
    order is otherwise preserved.
    :param preferences: a list of strings representing bundles of goods, in decreasing order of preference.
    :param new_item: the name of the new item to add.
    :return: the new preference-list.
    >>> add_low_value_good(["xyz","xy","xz",""], "o")
    ['xyzo', 'xyz', 'xyo', 'xy', 'xzo', 'xz', 'o', '']
    """
    expanded = []
    for bundle in preferences:
        expanded.extend((bundle + new_item, bundle))
    return expanded
|
8f73fc30307323361530d11658eae67927199351
| 68,516
|
def _has_gene_reaction_rule(reaction):
"""Check if the reaction has a gene reaction rule."""
rule = getattr(reaction, 'gene_reaction_rule', None)
return rule is not None and rule.strip() != ''
|
aaf45147495c96ce4a987fd5a11fd5613a1e00a4
| 68,518
|
def get_point_index(i_point, spacing, origin):
    """
    Convert the absolute position of a point into grid indices: each
    coordinate is offset by ``origin``, divided by ``spacing``, and
    truncated toward zero.
    """
    dims = range(len(i_point))
    return tuple(int((i_point[d] - origin[d]) / spacing[d]) for d in dims)
|
8a0edae134aa43a52499afd65c14fe15f678238d
| 68,522
|
def render_boolean(value, title, show_false=False):
    """Template context for rendering a True/False symbol snippet."""
    context = dict(
        boolean_value=value,
        title=title,
        show_false=show_false,
    )
    return context
|
fbc72c49f3a47e598634f16f2a27c8e2b30b960a
| 68,525
|
def alpha_cond_deph(lyambda_cond_dist, rho_cond_dist, mu_cond_dist, P_mass, n_pipe_deph, L_pipe_deph):
    """
    Calculates the coefficient of heat transfer (alpha) from steam to the pipe wall.
    Parameters
    ----------
    lyambda_cond_dist : float
        The thermal conductivity of the distillate condensate, [W / (m * deg C)]
    rho_cond_dist : float
        The density of the distillate condensate, [kg / m**3]
    mu_cond_dist : float
        The viscosity of the distillate condensate, [Pa * s]
    P_mass : float
        The mass flow rate of distillate, [kg/s]
    n_pipe_deph : float
        The number of pipes in the dephlegmator, [dimensionless]
    L_pipe_deph : float
        The length of the pipes, [m]
    Returns
    -------
    alpha_cond_deph : float
        The heat-transfer coefficient from steam to pipe wall, [W / (m**2 * deg C)]
    References
    ----------
    Dytnerskiy, formula 2.24, p. 53
    """
    base = lyambda_cond_dist * 3.78 * (
        (rho_cond_dist ** 2) * n_pipe_deph * L_pipe_deph
        / (mu_cond_dist * P_mass)) ** (1 / 3)
    if n_pipe_deph < 100:
        return base
    # The original used two strict inequalities (< 100 and > 100), so
    # exactly 100 pipes silently returned None; the 0.6 correction
    # branch now covers n_pipe_deph >= 100.
    return 0.6 * base
|
f7c8b717ec1ceef7a54311206551a6043c06f790
| 68,528
|
def is_silence(ann):
    """Check if the given annotation is a silence.
    :param ann: (sppasAnnotation)
    :returns: (bool)
    """
    stamp = ann.serialize_labels()
    # Silences are "#", "silence", or any stamp containing "gpf_".
    return stamp in ("#", "silence") or "gpf_" in stamp
|
947940d411feccb14f6cf15ef2677f892c460284
| 68,534
|
def cli(ctx, name, owner):
    """List the installable changeset revision hashes for a repository.
    Output:
        List of changeset revision hash strings, ordered oldest to
        newest (changelog order).
    """
    repositories = ctx.ti.repositories
    return repositories.get_ordered_installable_revisions(name, owner)
|
eb718c34e65ba38bb991c8e816d41312a5928ac6
| 68,537
|
def check_if_local_path(path):
    """
    Check if path is a local path, no matter file or directory.
    True:
        file:///home/admin/a.txt (standard path)
        /home/admin/a.txt (standard unix path)
        C:\\Users\\a.txt (standard windows path)
        C:/Users/a.txt (works as well)
        ./a.txt (relative path)
        a.txt (relative path)
    False:
        http://www.aliyun.com/a.txt
        oss://aliyun/a.txt
    Args:
        path (str):
    Returns:
        True if path is a local path.
    """
    # file:// URLs are local; any other "://" scheme marks a remote path.
    return path.startswith("file://") or "://" not in path
|
6f4125042883c5facd0af0f0559ac8197d7644c0
| 68,542
|
def _QuotaToCell(metric, is_integer=True):
"""Returns a function that can format the given quota as usage/limit."""
def QuotaToCell(region):
"""Formats the metric from the parent function."""
for quota in region.get('quotas', []):
if quota.get('metric') != metric:
continue
if is_integer:
return '{0:6}/{1}'.format(
int(quota.get('usage')),
int(quota.get('limit')))
else:
return '{0:7.2f}/{1:.2f}'.format(
quota.get('usage'),
quota.get('limit'))
return ''
return QuotaToCell
|
7633ac9fd7511f60cc19d6e00b20a2e2ea2e87d2
| 68,543
|
def dummy(scores, query):
    """Baseline scorer: ignores both inputs and always returns 0."""
    return 0
|
0ef805137e338436140e3c30563542369ee82312
| 68,544
|
def show(name):
    """
    Tap a pipeline: returns a function that prints ``name -> value`` and
    passes the value through unchanged.
    """
    def passthrough(x):
        print(name, '->', x)
        return x
    return passthrough
|
886b8edf1b597c290517630ab0c8811d2a69b9f2
| 68,547
|
def split_org_repo(in_str):
    """Split an 'org/repo' string into its parts.

    Returns the pair (org, repo); repo is None when missing or empty
    (e.g. 'org' or 'org/').  Anything after the first '/' belongs to
    the repo.
    """
    org, _, rest = in_str.partition('/')
    return org, (rest or None)
|
7e4fb5e5e0886acae030b4b1e3ce44a4913f36e1
| 68,549
|
def nearest_square(limit):
    """ Return the largest perfect square strictly smaller than ``limit``. """
    root = 0
    while (root + 1) ** 2 < limit:
        root += 1
    return root * root
|
119d2a3190ccbef039e53ca1eb0cd006f62b06ca
| 68,552
|
def isFinalState(state, goal):
    """
    Check if a state satisfies the goal.
    :param state: state to check
    :param goal: goal state
    :return: True when every goal property is present in the state
    """
    # goal ⊆ state  <=>  state ∩ goal == goal
    return goal.properties.issubset(state.properties)
|
b99b1dc18f7b3d14128f02086c94f28523ed6ccd
| 68,553
|
import base64
def base64_to_hex(base64_string: str) -> str:
    """Convert base64 string to hexadecimal string
    Args:
        base64_string (str): Base64 string, padded or not
    Returns:
        str: Hexadecimal representation
    """
    # Appending two '=' guarantees enough padding; b64decode tolerates
    # surplus padding at the end of the input.
    padded = f"{base64_string}=="
    return base64.b64decode(padded).hex()
|
da77607dae937e994fc1d10b1a4e8c37c056dc36
| 68,554
|
def poseFromROSTransformMsg(msg):
    """
    :param msg: A populated ROS Transform message.
    :return: (pos, quat) — pos as [x, y, z], quat as [w, x, y, z]
        (note the scalar-first quaternion convention).
    """
    t = msg.translation
    r = msg.rotation
    return [t.x, t.y, t.z], [r.w, r.x, r.y, r.z]
|
23792434f72cd798690f74e4b049a02907d9ca48
| 68,555
|
def height_to_resolution(height):
    """Convert a pixel height to a common resolution label.

    Falsy heights give '', known tiers give their marketing name, and
    anything below 720 falls back to '<height>p'.
    """
    if not height:
        return ''
    tiers = ((4320, '8K'), (2160, '4K'), (1080, '1080p'), (720, '720p'))
    for threshold, label in tiers:
        if height >= threshold:
            return label
    return f'{height}p'
|
f12169f7d1e2f110aef98261df788e5b0de02644
| 68,556
|
def closest_points_are_within_length(targets_distance, N, length):
    """
    :param targets_distance: iterable of [(lat, lon), dist] entries,
        assumed ordered by distance from the source
    :param N: strictly positive integer
    :param length: positive number
    :return: True when the N-th point's distance is <= ``length``
    """
    nth_entry = targets_distance[:N][-1]
    return nth_entry[1] <= length
|
1a797c7fc460b7a04d43cc582153f3a7491c81b1
| 68,561
|
def perf_index(X, base=100., axis=0, dtype=None):
    """ Compute performance of prices or index values along time axis.
    Parameters
    ----------
    X : np.ndarray[dtype, ndim=1 or 2]
        Time-series of prices or index values.
    base : float, optional
        Initial value for measure the performance, default is 100.
    axis : {0, 1}, optional
        Axis along wich the computation is done. Default is 0.
        (Previously accepted but silently ignored; now honored.)
    dtype : np.dtype, optional
        The type of the output array. If `dtype` is not given, infer the data
        type from `X` input.  (Previously accepted but silently ignored.)
    Returns
    -------
    np.ndarray[dtype, ndim=1 or 2]
        Performances along time axis.
    See Also
    --------
    perf_returns, perf_strat
    Examples
    --------
    >>> X = np.array([10., 12., 15., 14., 16., 18., 16.])
    >>> perf_index(X, base=100.)
    array([100., 120., 150., 140., 160., 180., 160.])
    """
    X = np.asarray(X, dtype=dtype)
    # Normalize by the first observation along the time axis; expand_dims
    # keeps the divisor broadcastable for any array rank.
    first = np.expand_dims(X.take(0, axis=axis), axis)
    return base * X / first
|
8f661c65808f79487e9f6c2a67fa9be0252e177f
| 68,562
|
def _get_optional_arg(node, default, l2tobj):
"""Helper that returns the `node` converted to text, or `default`
if the node is `None` (e.g. an optional argument that was not
specified)"""
if node is None:
return default
return l2tobj.nodelist_to_text([node])
|
387baff63c2af78e80fba8695afba59331b030ab
| 68,563
|
def scanReceiptPrivateMessage(update, context):
    """Ask the user (in a private chat) to send in their receipt.

    Returns the conversation-state key "waitingonpicprivate".
    """
    chat_id = update.effective_chat.id
    prompt = "Please send in the receipt to be scanned! Alternatively, to cancel please type /cancelreceipt"
    context.bot.send_message(chat_id=chat_id, text=prompt)
    return "waitingonpicprivate"
|
c814216884ef6c5443da84d1def479ab84a90db9
| 68,568
|
def dbfilter(rows, **kwargs):
    """
    Return all the `rows` that match every `key=value` condition, where
    keys are DB column names and values are compared against the row's
    values.  A row missing a key never matches.
    """
    def matches(row):
        return all(key in row and row[key] == value
                   for key, value in kwargs.items())

    return [row for row in rows if matches(row)]
|
f6532073d5a64bc1026223c23bb55e27db0e00c1
| 68,578
|
import uuid
def generate_csrf_token(controller):
    """ Return the session's CSRF token, generating and storing a new
    uuid4 on first use."""
    session = controller.session
    # setdefault stores the fresh token only when the key is absent.
    return session.setdefault('_csrf_token', uuid.uuid4())
|
eea2af90f8d7f616a4cfee3ae68c656719561b52
| 68,581
|
def _init(graph):
"""
Initialize nodes' distances and predecessors and return two dicts.
graph -- a networkx graph
Create three dicts, called dist, pred and finalized, mapping nodes to
distances, predecessors, and whether or not their distances are final
respectively, initialize all distances to infinity, all predecessors to
None, all nodes as not finalized, and return the tuple:
(dist, pred, finalized)
"""
dist, pred, finalized = dict(), dict(), dict()
inf = float('inf')
for node in graph.nodes_iter():
dist[node] = inf
pred[node] = None
finalized[node] = False
return dist, pred, finalized
|
66d2d52a35c7f1514f4058cd908af402e93d1265
| 68,586
|
import gzip
def read_wet_file(wet_file, max_lines=-1):
    """
    Read a gzip-compressed WET file into a list of stripped lines.

    Args:
        wet_file (str): path to input WET file (gz format).
        max_lines (int): maximum number of lines to read; any value <= 0
            means "read the whole file".

    Returns: WET file in the form of a list (at most ``max_lines`` entries
    when a positive limit is given).
    """
    output = []
    with gzip.open(wet_file, mode='rt', encoding='utf-8') as f:
        for i, line in enumerate(f):
            # Stop *before* appending line i == max_lines. The previous
            # `if i > max_lines` check ran after the append, so it
            # returned up to max_lines + 2 lines.
            if 0 < max_lines <= i:
                break
            output.append(line.strip())
    return output
|
c915b242100889ff4e4741514cda45cf1c0ea28f
| 68,594
|
def _compare_two_tensor_shapes(t1, t2):
"""Compare tensor shapes."""
if t1.shape.as_list() != t2.shape.as_list():
raise RuntimeError("Compare shape fail: base {} {} vs gc {} {}".format(t1.name, t1.shape.as_list(), t2.name, t2.shape.as_list()))
return True
|
69b27bcbc8524886b1709abb8eb17f030e3016a0
| 68,599
|
import logging
def get_logger(name="webstompy"):
    """Return a logger for this module, installing a default handler if needed.

    When no global logging is configured (no root handlers) and the logger's
    level is still unset, a StreamHandler with a sensible format is attached.

    Parameters
    ----------
    name: str
        Name of the logging hierarchy to use. Caller is responsible of
        generating the correct name (i.e. package.submodule.classname).

    Returns
    -------
    logger: :class:logging.logger
        The according logger.
    """
    logger = logging.getLogger(name)
    # Respect any existing global configuration: bail out early when root
    # handlers exist or an explicit level has already been set.
    if logging.root.handlers or logger.level != logging.NOTSET:
        return logger
    logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s - %(levelname)-8s - %(message)s "
            "[%(name)s/%(funcName)s | %(threadName)s (PID:%(process)d)]",
            datefmt="%H:%M:%S",
        )
    )
    logger.addHandler(stream_handler)
    return logger
|
ea27e269fa7b3af03155767f9e9e72234126d080
| 68,600
|
def save_counter(counter, filepath):
    """
    Method to save counter to a .tsv file

    parameters
    -----------
    :param counter: collections.Counter (or any mapping)
    :param filepath: str (path to .tsv file)
    :return: int
       Number of elements written
    """
    with open(filepath, 'w+', encoding="utf8") as f:
        written = 0
        # Iterating a Counter directly yields *keys only*, so the old
        # ``for k, v in counter`` either raised or silently mis-unpacked
        # two-character keys; .items() yields the intended (element, count)
        # pairs.
        for k, v in counter.items():
            # Skip whitespace-only keys and the literal double-quote.
            if k.strip() and k != '\"':
                f.write("{}\t{}\n".format(k, v))
                written += 1
    return written
|
783c3f1e5cc78e6d1bdba5211aa4c8a3e1aa788c
| 68,601
|
def predict_hypothesis_matrix(model, X):
    """Predict outputs for every training input with a linear model.

    Parameters
    ----------
    model: matrix (k_values, m_values)
        the optimal weight matrix that solves the linear regression
    X: 2D-array shape (N_values, m_values)
        the training inputs (Xi) of the data sample

    Returns
    -------
    hypothesis_matrix: matrix (N_values, k_values)
        the matrix form of the predicted values for the given X
    """
    predictions_k_by_n = model @ X.T
    # Transpose so each row corresponds to one input sample.
    return predictions_k_by_n.T
|
b332fc08378a5484bdce5750c5a337e6ed7529b9
| 68,604
|
def string_to_list(s):
    """Split a digit string into a list of ints, e.g. '124' --> [1, 2, 4]."""
    return [int(ch) for ch in s]
|
c4e8ea51f8c7bedf8c5d69e22f02d53bc9cab7f2
| 68,606
|
def device(request):
    """ Simple fixture returning string denoting the device [CPU | GPU] """
    return "CPU" if request.config.getoption("--cpu") else "GPU"
|
9cbccec19bc4471eabba3af6558353fce117a6e3
| 68,610
|
def need_csv_representation(url):
    """
    Checks if the url has a csv representation.

    All urls that need a csv representation needs to be added to
    csv_representations.

    Args:
        url: the url to check
    Returns:
        is_csv: True has csv representation
    """
    csv_representations = ("export/get",)
    return any(marker in url for marker in csv_representations)
|
1c8da39383128faba08ac7146fd896c97afc6916
| 68,614
|
import pickle
def loadobj(filename):
    """
    Load a pickled object from disk.

    :type filename: str
    :param filename: path of the pickle file to load
    """
    with open(filename, 'rb') as handle:
        obj = pickle.load(handle)
    return obj
|
1d2a1945e2458c5ffca663eb3219f1f2744fe290
| 68,615
|
def device_to_host(request_type):
    """ Check if the direction bit (0x80) of the request type says device to host """
    return bool(request_type & 0x80)
|
c59cd31a6fc48066017ef274c164c3ded191830c
| 68,616
|
import zipfile
def extract_zipfile(source_zip):
    """
    Unzip ``source_zip`` into a ./temp directory under the PWD.

    :param source_zip: path to a .zip file
    :return: the (closed) ZipFile object. Its metadata — e.g. ``namelist()``
        — remains readable after extraction; reopening members does not.
    """
    # Use a context manager so the archive handle is always closed, even
    # if extractall() raises — the original leaked the open handle.
    with zipfile.ZipFile(source_zip, 'r') as zip_ref:
        zip_ref.extractall('./temp')
    return zip_ref
|
13a9426771bb7209cda5344b36ce49cbe66590c2
| 68,626
|
def listType(l):
    """
    If the type of every element of the list l is the same, this function
    returns that type, else it returns None. If l is not a list, this function
    will raise a ValueError.
    """
    if not isinstance(l, list):
        raise ValueError("l is not a list.")
    if not l:
        return None
    first_type = type(l[0])
    # isinstance (not type equality) matches the original's semantics:
    # subclasses of the first element's type are accepted.
    if all(isinstance(element, first_type) for element in l):
        return first_type
    return None
|
40de67882bd331f4b4d15fbbd75473322c3132c9
| 68,628
|
def unpack_bitstring(string):
    """ Creates bit array out of a string

    :param string: The modbus data packet to decode

    example::

        bytes = 'bytes to decode'
        result = unpack_bitstring(bytes)
    """
    bits = []
    for raw in string:
        value = int(raw)
        # Emit the eight bits of each byte, least significant bit first.
        bits.extend(bool(value & (1 << position)) for position in range(8))
    return bits
|
caec5f2b5375c5c24bf7321f82c434466fd63010
| 68,634
|
def report_single_attribute_elements_coroutine(future_session, connection, report_id, attribute_id, offset=0, limit=200000):
    """Get elements of a specific attribute of a specific report.

    Args:
        future_session: session object used to issue the asynchronous GET.
        connection: MicroStrategy REST API connection object.
        report_id (str): Unique ID of the report you wish to extract information
            from.
        attribute_id (str): Unique ID of the attribute in the report.
        offset (int): Optional. Starting point within the collection of returned
            results. Default is 0.
        limit (int, optional): Used to control data extract behavior on datasets
            which have a large number of rows; larger limits mean fewer,
            bigger chunks.

    Returns:
        Future object wrapping the pending GET request.
    """
    endpoint = '/api/reports/' + report_id + '/attributes/' + attribute_id + '/elements'
    params = {'offset': offset, 'limit': limit}
    return future_session.get(connection.base_url + endpoint, params=params)
|
ed250f51e2f256d9ce8b1f381b919241fc914239
| 68,637
|
from typing import Union
def is_power_of_two(n: Union[int, float]):
    """
    Return True if number is a power of two, supports n>1 and n<1.

    Parameters
    ----------
    n
        number to check, can be float or int

    Returns
    -------
    bool
        number is power of two
    """
    if n <= 0:
        return False
    if n == 1:
        return True
    if n < 1:
        # 1/n is a power of two exactly when n is (e.g. 0.25 -> 4.0).
        return is_power_of_two(1 / n)
    # n > 1: must be integral with a single set bit.
    if n != int(n):
        return False
    as_int = int(n)
    return as_int & (as_int - 1) == 0
|
e10bc7c635ad72280aab3b61d3c38fb4d83b8ee5
| 68,639
|
def iphexval(ip):
    """
    Retrieve the hexadecimal representation of an IP address

    .. versionadded:: 2016.11.0

    CLI Example:

    .. code-block:: bash

        salt '*' network.iphexval 10.0.0.1
    """
    # Two uppercase hex digits per dotted-decimal octet, concatenated.
    return "".join("{:02X}".format(int(octet)) for octet in ip.split("."))
|
a4154fe3ebcabfe7cdba3fb1da1df5d14050e2a0
| 68,640
|
def check_url_kwargs(context, view_kwargs):
    """Return the kwargs that check_url() should be called with."""
    kwargs = {"url": context.proxied_url}
    kwargs["allow_all"] = view_kwargs["allow_all"]
    # Absent query param yields None, matching dict.get semantics.
    kwargs["blocked_for"] = context.query_params.get("via.blocked_for")
    return kwargs
|
f3960f4d8ebfc5c9ed13edd2371c53df91e88d76
| 68,641
|
import click
def validate_positive_non_zero_integer(ctx, param, value):  # pylint: disable=unused-argument,invalid-name
    """Validate that `value` is a positive integer."""
    if not isinstance(value, int):
        raise click.BadParameter('{} is not an integer'.format(value))
    if value > 0:
        return value
    raise click.BadParameter('{} is not a positive non-zero integer'.format(value))
|
85ff07f73228b6b9e37bb46b5dac3bfa8ad2dd44
| 68,648
|
def median(series, n):
    """
    Rolling median: the middle value of ``series`` over the last ``n`` periods.

    Notes:
        1. While fewer than ``n`` values are available, the result is NaN.
        2. After sorting the ``n`` window values: for odd ``n`` the
           (n + 1) / 2-th value is returned; for even ``n`` the mean of the
           (n / 2)-th and (n / 2 + 1)-th values is returned.

    Args:
        series (pandas.Series): input data series
        n (int): window length in periods

    Returns:
        pandas.Series: rolling median series

    Example::

        # If the last 3 closes are 2727, 2754, 2748, then
        # median(df["close"], 3) currently yields 2748.
        median3 = tafunc.median(df["close"], 3)
    """
    rolling_window = series.rolling(n)
    return rolling_window.median()
|
65bdcf08276b68a9bbe92d6f4dfefab5ec205e74
| 68,649
|
import hashlib
def get_hash(file_path):
    """Compute the SHA-256 digest of a file's contents.

    Args:
        file_path (path): A path to a file

    Returns:
        str: the hexadecimal SHA-256 digest of the file
    """
    digest = hashlib.sha256()
    with open(file_path, "rb") as f:
        # Read in fixed-size chunks so large files never load fully into memory.
        for chunk in iter(lambda: f.read(4096), b""):
            digest.update(chunk)
    return digest.hexdigest()
|
d8b3c5b82f4d7a4417f84073bd99c81e10591a0f
| 68,652
|
def sum_digits(number):
    """
    Takes a number as input and returns the sum of the absolute value of each of the number's decimal digits.

    :param number: an integer value.
    :return: sum of the absolute value of each of the number's decimal digits.
    """
    total = 0
    for digit_char in str(abs(number)):
        total += int(digit_char)
    return total
|
793dfe2da11a0c471ef0bf5dfbcb707c03857ea9
| 68,655
|
import logging
def log_level_to_constant(loglevel):
    """Convert a human readable log level name to its logging constant.

    Accepts the name case-insensitively ("info" and "INFO" both map to
    ``logging.INFO``); upper-casing also prevents accidentally resolving
    module-level *functions* such as ``logging.debug`` when a lowercase
    name is supplied.

    :param loglevel: level name, e.g. "DEBUG", "info"
    :return: the numeric logging level constant
    :raises AttributeError: if the name is not a known level
    """
    return getattr(logging, loglevel.upper())
|
68bd2365b404979d18261b290c18349050392f53
| 68,660
|
import torch
def _mean_plus_r_var(data: torch.Tensor, ratio: float = 0, **kwargs) -> float:
"""
Caclulates mean + ratio x standard_deviation of the provided tensor
and returns the larger of this value and the smallest element in
the tensor (can happen when ratio is negative).
Parameters
----------
data: torch.Tensor
Pytorch tensor containing the data on which the mean and stdv.
is evaluated.
ratio: float, optional
Value of the scaling factor in the value calculated by the
function.
Returns
-------
float
The result of the function.
"""
return max(data.min().item(), data.mean().item() + ratio * data.std().item() + 1e-8)
|
cca49a016dee94ca1d1b977322beb7400db0bf2e
| 68,664
|
def announce_lead_changes(score0, score1, last_leader=None):
    """A commentary function that announces when the leader has changed.

    >>> leader, message = announce_lead_changes(5, 0)
    >>> print(message)
    Player 0 takes the lead by 5
    >>> leader, message = announce_lead_changes(5, 12, leader)
    >>> print(message)
    Player 1 takes the lead by 7
    >>> leader, message = announce_lead_changes(8, 12, leader)
    >>> print(leader, message)
    1 None
    >>> leader, message = announce_lead_changes(8, 13, leader)
    >>> leader, message = announce_lead_changes(15, 13, leader)
    >>> print(message)
    Player 0 takes the lead by 2
    """
    # BEGIN PROBLEM 6
    if score0 == score1:
        leader = None
    else:
        leader = 0 if score0 > score1 else 1
    # Announce only an actual change of (non-tied) leadership.
    message = None
    if leader is not None and leader != last_leader:
        message = 'Player ' + str(leader) + ' takes the lead by ' + str(abs(score0 - score1))
    return leader, message
    # END PROBLEM 6
|
f6384821b8adb2ac996b9e670908d2b8ee586835
| 68,665
|
def any_public_tests(test_cases):
    """
    Returns whether any of the ``Test`` named tuples in ``test_cases`` are public tests.

    Args:
        test_cases (``list`` of ``Test``): list of test cases

    Returns:
        ``bool``: whether any of the tests are public
    """
    # "some test is public" is equivalent to "not every test is hidden".
    return not all(test.hidden for test in test_cases)
|
f9619fdddbf9e3257a572c81cfca7d857ec27e55
| 68,666
|
def gradients_for_var_group(var_groups, gradients, name):
    """Returns a slice of `gradients` belonging to the var group `name`."""
    # Groups are laid out contiguously in `gradients`, ordered by sorted
    # group name; walk the offsets until the requested group is reached.
    offset = 0
    for group_name in sorted(var_groups):
        group_size = len(var_groups[group_name])
        if group_name == name:
            return gradients[offset:offset + group_size]
        offset += group_size
    return []
|
2117a96692484720f696d2f52b90496d21414f06
| 68,667
|
import json
def load_json(message: str) -> dict:
    """
    Attempts to load the message as a JSON object.

    Returns the JSON object if successful, otherwise None.

    :param message: the input message
    :returns: The JSON object (dictionary) or None
    """
    try:
        return json.loads(message)
    except json.JSONDecodeError:
        # Honor the documented contract: malformed JSON yields None
        # instead of propagating the decode error to the caller.
        return None
|
a112a13c258a1c4914a87a9773c3fc8f41914472
| 68,673
|
def write_steadyst_notconv_msg(nMax):
    """Return the convergence status message for writing to file."""
    PrintMsg = f"\nSTATUS: NOT CONVERGED\nMAX. ITERATIONS={nMax}"
    # Echo to stdout followed by a blank line, as before.
    print(PrintMsg, end="\n\n")
    return PrintMsg
|
302e5b5ef09fd336a154e2896bb854582a78327c
| 68,675
|
def IsKillStep(chessboard: list, mv: list) -> bool:
    """
    chessboard: current chessboard info [[x, y, class], [], []...]
    mv: AI move info, [x_src, y_src, x_dst, y_dst]
    return:
        BOOL: true if a piece already occupies the destination square
        (a kill step), false for a normal step.
    """
    dst_x, dst_y = mv[2], mv[3]
    return any(piece[0] == dst_x and piece[1] == dst_y for piece in chessboard)
|
fe509decb980cda84dcba7eb757002dea6c87af1
| 68,679
|
def fib_memoized(n: int) -> int:
    """ Find the n-th fibonacci number with memoization"""
    # Grow the memo table until index n is available.
    fibs: list = [0, 1]
    while len(fibs) <= n:
        fibs.append(fibs[-1] + fibs[-2])
    return fibs[n]
|
b00cae69a53d9d435371e7f5179603a060c7283b
| 68,681
|
def hex_to_address(val):
    """Converts hex string to a clean Ethereum address.

    Accepts padded or unpadded values.

    Returns a 0x formatted address (string).
    """
    # An address is the low 20 bytes — the last 40 hex characters.
    return "0x" + val[-40:]
|
117cb55062e5c9cc6008f0ef35f6589d8c4be6e2
| 68,685
|
def fitch_score(tree, genotypes):
    """
    Returns the Fitch parsimony score for the specified set of genotypes.
    """
    # Leaf states: each sample starts with the singleton set of its allele.
    state = {
        sample: {allele}
        for allele, sample in zip(genotypes, tree.tree_sequence.samples())
    }
    score = 0
    for node in tree.nodes(order="postorder"):
        if not tree.is_internal(node):
            continue
        child_states = [state[child] for child in tree.children(node)]
        common = set.intersection(*child_states)
        if common:
            state[node] = common
        else:
            # No shared allele among the children: a mutation is required
            # somewhere below this node, so count one event and take the union.
            state[node] = set.union(*child_states)
            score += 1
    return score
|
147d7b3f189f82ea9051c756c94e8c9cb4d8a090
| 68,695
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.