content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def concat_to_address(ip, port):
    """Join an IP string and a port string into "ip:port" form.

    Both parts are stripped of surrounding whitespace before joining.

    :param ip: address string, e.g. "127.0.0.1", or None
    :param port: port string, e.g. "2379", or None
    :return: "ip:port" string, or None when either argument is None
    """
    if ip is None or port is None:
        return None
    return f"{ip.strip()}:{port.strip()}"
|
6300064966bbafe549c42df4a8507c861ba5acad
| 47,441
|
def split_function_name(fn):
"""
Given a method, return a tuple containing its fully-qualified
class name and the method name.
"""
qualname = fn.__qualname__
if '.' in qualname:
class_name, fn_name = qualname.rsplit('.', 1)
class_name = '%s.%s' % (fn.__module__, class_name)
else:
class_name = fn.__module__
fn_name = qualname
return (class_name, fn_name)
|
0f525d93afdf72269da303c13b69cc8f29aa0661
| 47,442
|
import array
def toarr(data):
    """Coerce *data* to an unsigned-byte ``array.array``.

    An input that is already an ``array.array`` is returned unchanged;
    anything else (bytes, iterable of small ints) is wrapped in a new
    array of type code 'B'.
    """
    if not isinstance(data, array.array):
        data = array.array('B', data)
    return data
|
78c386c5efb124b6bc28d7e6980004076d4c220f
| 47,443
|
import errno
def _GetUploadTrackerData(tracker_file_name, logger):
"""Reads tracker data from an upload tracker file if it exists.
Args:
tracker_file_name: Tracker file name for this upload.
logger: for outputting log messages.
Returns:
Serialization data if the tracker file already exists (resume existing
upload), None otherwise.
"""
tracker_file = None
# If we already have a matching tracker file, get the serialization data
# so that we can resume the upload.
try:
tracker_file = open(tracker_file_name, 'r')
tracker_data = tracker_file.read()
return tracker_data
except IOError as e:
# Ignore non-existent file (happens first time a upload is attempted on an
# object, or when re-starting an upload after a
# ResumableUploadStartOverException), but warn user for other errors.
if e.errno != errno.ENOENT:
logger.warn('Couldn\'t read upload tracker file (%s): %s. Restarting '
'upload from scratch.', tracker_file_name, e.strerror)
finally:
if tracker_file:
tracker_file.close()
|
ef769da5a2e27e5279e7519d622670cc5b7eaaf7
| 47,447
|
def names_from_results(response):
    """Return the "name" field of every entry in a JSON results payload.

    :param response: object exposing .json() (e.g. a requests.Response)
        whose payload holds a "results" list of dicts.
    :return: list of name strings, in result order
    """
    payload = response.json()
    return [entry["name"] for entry in payload["results"]]
|
c879b2cdb8f78150e50be3e115a5103992e93b79
| 47,450
|
import configparser
def _get_token(filename='token.cfg', key_ring='openweathermap'):
"""
read in API token
Parameters
----------
filename : str
local file with API token
key_ring : str
dictionary key, appearing within [] in token file
Returns
-------
str
API token
"""
parser = configparser.ConfigParser()
parser.read(filename)
return parser[key_ring]['token']
|
b5eabd3d222fa2cffae936e5fe38fa3cf312c30d
| 47,454
|
def _fits_indexhdus(hdulist):
"""
Helper function for fits I/O.
Args:
hdulist: a list of hdus
Returns:
dictionary of table names
"""
tablenames = {}
for i in range(len(hdulist)):
try:
tablenames[hdulist[i].header['EXTNAME']] = i
except(KeyError):
continue
return tablenames
|
031aed6610eacdaecc9215822746b7e70e083d92
| 47,455
|
from typing import List
import struct
def device_number_to_fields(device_number: int) -> List[int]:
    """
    Splits the device number (16 bits) into two bytes.

    Example with device number 1000:
      Full bits:    0b0000001111101000
      First 8 bits: 0b00000011 == 3
      Last 8 bits:  0b11101000 == 232
    Returns [232, 3] because the byte order is little endian
    (least significant byte first).

    :param device_number: value in range 0..65535 (struct raises
        `struct.error` outside that range)
    :return: two ints, least significant byte first
    """
    # Iterating a bytes object already yields ints; the original
    # `[byte for byte in ...]` comprehension was a redundant copy.
    return list(struct.pack('<H', device_number))
|
f22f277d9cb5ff8fadf6eaf15b5c8fb92e20b543
| 47,458
|
from typing import List
def is_using_stdin(paths: List[str]) -> bool:
    """Determine if we're going to read from stdin.

    :param paths:
        The paths that we're going to check.
    :returns:
        True if the stdin marker (-) appears among the paths,
        otherwise False.
    """
    return any(path == "-" for path in paths)
|
9551c149dabdf1ca2ead2d74ed534f57fc5ea4ab
| 47,459
|
def pass_quality_filter(s, cutoff):
    """
    Check if a sequence passes the quality filter cutoff.

    Arguments:
      s (str): sequence quality scores (PHRED+33 encoded)
      cutoff (int): minimum quality value; every score must be equal to
        or greater than this value for the filter to pass

    Returns:
      Boolean: True when all scores meet the cutoff (vacuously True for
      an empty string), False otherwise.
    """
    threshold = chr(cutoff + 33)
    return all(score >= threshold for score in s)
|
781a5e3bea1ed20fc0f28fe16f6aa90a57d3372a
| 47,460
|
def best_of_gen(population):
    """
    Syntactic sugar to select the best individual in a population.

    Fix: the original docstring documented a `context` parameter that
    does not exist, and its doctest depended on the third-party
    `leap_ec` package; both are removed.

    :param population: a non-empty list of individuals
    :return: the maximum individual according to the individuals' ordering
    :raises AssertionError: if the population is empty
    """
    assert len(population) > 0
    return max(population)
|
b7efcb8d6a961843d88fe1864d129a1dc502ea33
| 47,461
|
import string
import random
def generatePRandomPW(pwlen=16, mix_case=1):
    """Generate a pseudo-random password.

    Draws *pwlen* characters from letters and digits; letters are mixed
    case when *mix_case* is truthy, lowercase only otherwise.
    Warning: the randomness is not cryptographically very strong.
    """
    if mix_case:
        alphabet = string.ascii_letters + string.digits
    else:
        alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(pwlen))
|
bccc7b185f5e742d309a2e761a527c6b0bdccc6f
| 47,463
|
def default_sum_all_losses(dataset_name, batch, loss_terms):
    """
    Default loss: the sum of every term that defines a `loss` entry.

    Terms without a `loss` entry do not participate in optimization
    (e.g., embedding outputs kept only for inspection) and are skipped.
    """
    total = 0.0
    for term in loss_terms.values():
        value = term.get('loss')
        if value is not None:
            total += value
    return total
|
d12eaa926ae5adbbb023acf316b9bf7854cdbd84
| 47,465
|
def _compute_segseg_intersection(segment1, segment2):
"""Algorithm to compute a segment to segment intersection.
Based on this article:
https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
:param segment1: first segment (defined by two endpoints)
:type segment1: list
:param segment2: second segment (defined by two endpoints)
:type segment2: list
:return: intersection point (p_x, p_y), if it exists
:rtype: tuple or None
"""
(x1, y1), (x2, y2) = segment1
(x3, y3), (x4, y4) = segment2
# Check for parallel lines
denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
if denominator == 0:
return None
t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / denominator
u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / denominator
# Check for intersections outside of the segments
if (t < 0 or t > 1) or (u < 0 or u > 1):
return None
p_x = x1 + t * (x2 - x1)
p_y = y1 + t * (y2 - y1)
return (p_x, p_y)
|
e30f4227f499ce7eb3adab7674a46f2bdb05b0a5
| 47,466
|
def sortRemoveDupes(lst):
    """Sort the list, and remove duplicate symbols.

    Note: the input list is sorted in place as a side effect; a new list
    containing each symbol once is returned (the empty input is returned
    as-is).
    """
    if not lst:
        return lst
    lst.sort()
    deduped = [lst[0]]
    for item in lst[1:]:
        # On a sorted list duplicates are adjacent, so comparing with
        # the last kept element drops them all.
        if item != deduped[-1]:
            deduped.append(item)
    return deduped
|
8f50d6aeb706330302112064492761e97e2c1935
| 47,470
|
def mul_by_num(num):
    """
    Build a multiplier: return a one-argument function that multiplies
    its argument by *num*.

    >>> x = mul_by_num(5)
    >>> y = mul_by_num(2)
    >>> x(3)
    15
    >>> y(-4)
    -8
    """
    return lambda value: num * value
|
174859f0db6aabb0ece1bbd9b5a7fbe6e98b1253
| 47,471
|
import string
def remove_punctuation(input_string):
    """Return a str with ASCII punctuation chars stripped out.

    Uses a single str.translate pass instead of the original per-character
    loop with repeated str.replace calls (one full scan per punctuation
    character found).

    :param input_string: text to clean
    :return: input_string with every `string.punctuation` char removed
    """
    return input_string.translate(str.maketrans('', '', string.punctuation))
|
2efb60ca06ba61ff45d2ad45f554b4a0fc971947
| 47,472
|
def get_node_set(g1, g2, method="union"):
    """
    Return the list of nodes to consider when counting transitions of
    the Markov chains.

    :param g1: graph-like object exposing .nodes()
    :param g2: graph-like object exposing .nodes()
    :param method: "intersection" keeps only shared nodes; any other
        value (default "union") keeps nodes from either graph.
    """
    nodes1 = set(g1.nodes())
    nodes2 = set(g2.nodes())
    combined = nodes1 & nodes2 if method == "intersection" else nodes1 | nodes2
    return list(combined)
|
ccecc822cc72eccaf7bc73f4b3512d8fa79519d6
| 47,477
|
def _clean_listlike(string: str) -> list:
"""Removes commas and semicolons from SQL list-like things. i,e id, number --> ['id', 'number'] """
cols = []
for item in string:
# Check if item is in list, or if user adds ; to the end of the query
if item[-1] == ',' or item[-1] == ';' or item[-1] == '\n':
cols.append(item[:-1])
else:
cols.append(item)
return cols
|
b7c92177982f7656a9d96d03ba6892e2d71056ac
| 47,479
|
def to_seconds(hours, minutes, seconds):
    """Return the total number of seconds in the given hours, minutes, and seconds."""
    return seconds + 60 * minutes + 3600 * hours
|
bdfe64f2f261a70a4af8a63a2fb984c7b08127f1
| 47,489
|
import copy
def modify_tree_with_weights(tree, weights):
    """
    Scale branch lengths of a tree copy by per-node multipliers.

    :param tree: an ete3.Tree object
    :param weights: Dictionary where keys are names of nodes/tips in the tree,
        and values are weights by which branch lengths will be multiplied
    :return: A new ete3.Tree where branch lengths have been modified.
    :raises AttributeError: when a named branch is missing or ambiguous
    """
    scaled_tree = copy.deepcopy(tree)
    for name, multiplier in weights.items():
        # Every weight must resolve to exactly one branch in the tree.
        matches = scaled_tree.get_leaves_by_name(name)
        if len(matches) != 1:
            raise AttributeError('The branch {} either could not be found in your tree or was found more than once. '
                                 'Please verify your tree/weights dictionary and try again.'.format(name))
        matches[0].dist *= multiplier
    return scaled_tree
|
75da028d5f9fe8a55e94bd7b6074cc109ec2d79e
| 47,495
|
def rotate_voxel(xmin, ymin, xmax, ymax):
    """
    Given center position, rotate to the first quadrant
    Parameters
    ----------
    xmin: float
        low point X position, mm
    ymin: float
        low point Y position, mm
    xmax: float
        high point X position, mm
    ymax: float
        high point Y position, mm
    returns: floats
        properly rotated voxel in the first quadrant
    """
    # The voxel's center decides which quadrant it lies in.
    xc = 0.5 * (xmin + xmax)
    yc = 0.5 * (ymin + ymax)
    if xc >= 0.0 and yc >= 0.0: # no rotation
        return (xmin, ymin, xmax, ymax)
    # Each clockwise rotation below re-orders and negates the corner
    # coordinates so the returned tuple stays in (low-x, low-y, high-x,
    # high-y) form after the rotation.
    if xc < 0.0 and yc >= 0.0: # CW 90 rotation
        return (ymin, -xmax, ymax, -xmin)
    if xc < 0.0 and yc < 0.0: # CW 180 rotation
        return (-xmax, -ymax, -xmin, -ymin)
    # xc > 0.0 && yc < 0.0: # CW 270 rotation
    return (-ymax, xmin, -ymin, xmax)
|
5291043c5cd8447d44c846953a68d11488ae60dd
| 47,496
|
import attr
def attrib(*args, **kwargs):
    """Extend the attr.ib to include our metadata elements.

    ATM we support additional keyword args which are then stored within
    `metadata`:
    - `doc` for documentation to describe the attribute (e.g. in --help)
    Also, when the `default` argument of attr.ib is unspecified, set it to
    None.
    """
    # Pull our custom `doc` kwarg out before delegating to attr.ib,
    # which would reject unknown keyword arguments.
    doc = kwargs.pop('doc', None)
    # NOTE(review): when a caller passes `metadata`, that very dict is
    # mutated below ('doc' is inserted into it) — presumably intended,
    # but confirm callers do not share one metadata dict across attributes.
    metadata = kwargs.get('metadata', {})
    if doc:
        metadata['doc'] = doc
    if metadata:
        kwargs['metadata'] = metadata
    # Force `default` to None when unspecified (attr.ib's own default
    # marks the attribute as mandatory).
    return attr.ib(*args, default=kwargs.pop('default', None), **kwargs)
|
bb7f48919a666eb362860f8fe965030d0fc8bc0e
| 47,500
|
def pop(self, i=-1):
    """
    Remove the item at the given position in the wrapped list, and return it.

    If no index is specified, pop() removes and returns the last item.

    Fix: the docstring already promised an optional index defaulting to
    the last item, but `i` was a required parameter; it now defaults to
    -1 (backward compatible — existing callers passing an index are
    unaffected).

    :param i: index of the item to remove (default: last item)
    :return: the removed item
    """
    return self.list_output.pop(i)
|
b5c99ff7c0ec14fe39babd2a3e274a9355cffd38
| 47,501
|
import uuid
def new_aid() -> str:
    """Create a new, unique ari entry id (32 lowercase hex characters)."""
    return "%032x" % uuid.uuid4().int
|
d274c7616a525bda2062758622b3180f1a44b621
| 47,502
|
def are_all_0(lists, index):
    """Check if the values at the same index in different lists are all 0.

    :param list lists: a list of lists to check the value in.
    :param int index: the index of the values to check in the lists.
    :returns: True if all the values at the index are 0 (vacuously True
        for an empty `lists`), False if at least one of them is not 0.
    """
    return all(values[index] == 0 for values in lists)
|
1fe4f8777618eed459907b2995170691be639e5b
| 47,506
|
def modify(boxes, modifier_fns):
    """ Modifies boxes according to the modifier functions.

    Args:
        boxes (dict or list): Dictionary containing box objects per image
            ``{"image_id": [box, box, ...], ...}`` or list of bounding boxes
        modifier_fns (list): List of modifier functions that get applied

    Returns:
        (dict or list): boxes after modifications

    Warning:
        These modifier functions will mutate your bounding boxes and some of
        them can even remove bounding boxes. If you want to keep a copy of
        your original values, pass a copy of your bounding box dictionary:
        >>> import copy
        >>> import brambox.boxes as bbb
        >>>
        >>> new_boxes = bbb.modify(copy.deepcopy(boxes), [modfier_fns, ...])
    """
    def _apply_in_place(box_list):
        # Walk backwards so deleting an entry never skips the next one.
        for idx in range(len(box_list) - 1, -1, -1):
            for fn in modifier_fns:
                box_list[idx] = fn(box_list[idx])
                if box_list[idx] is None:
                    del box_list[idx]
                    break

    if isinstance(boxes, dict):
        for box_list in boxes.values():
            _apply_in_place(box_list)
    else:
        _apply_in_place(boxes)
    return boxes
|
386cb12b0b985a3702d0fe6a3dc7a38e712c7dc1
| 47,508
|
def remap_keys(key_func, d):
    """
    Create a new dictionary whose keys are the old dictionary's keys
    passed through *key_func*; values are kept as-is.
    """
    return {key_func(key): value for key, value in d.items()}
|
be7125b7bab735522e684d766c75b4745a8c11b3
| 47,510
|
import re
def calculatedNormalisedDataForLines(lines):
    """ Get normalised data for the lines of the file.

    Velocity data from the file are normalised to an absolute value of 1
    so the profile can later be replayed with an individual current
    factor. The example parser covers both HUDDSCOL.txt and NYCCCOL.txt:
    decimal separator is a dot, column separator is a tab.

    Returned structure (as required by measureProfile()):
    [{"time": 0, "value": 0.1}, {"time": 1, "value": 0.4}, ...]
    where `time` is the time point and `value` the reading at that time.

    Fixes: `!= None` replaced by `is not None`; division is skipped when
    the largest absolute value is 0 (the original raised
    ZeroDivisionError on all-zero data).

    :param lines: Array with the data lines of the file as string.
    :returns: Explained data structure.
    """
    maxValue = 0
    normalisedData = []
    seperatorRegex = re.compile(r"([0-9,.]+)[\W]+([0-9,.]+)")
    for line in lines:
        linematch = seperatorRegex.match(line)
        if linematch is not None:
            data = dict()
            data["time"] = float(linematch.group(1))
            value = float(linematch.group(2))
            # Track the value with the largest magnitude for scaling.
            if abs(value) > abs(maxValue):
                maxValue = value
            data["value"] = value
            normalisedData.append(data)
    # Normalisation to the biggest value, mapping results into [-1, 1].
    # Guard: with all-zero (or no) data there is nothing to scale by.
    if maxValue != 0:
        for data in normalisedData:
            data["value"] = data["value"] / abs(maxValue)
    return normalisedData
|
adbde503fa0152da6ffcd7eaec985f21636d4f6e
| 47,511
|
def is_time_invariant(ds):
    """Test if the dataset is time-invariant (has no time coordinate)

    Args:
        ds (xarray.Dataset or xarray.DataArray): Data

    Return:
        bool : True if no 'time' coordinate detected, False otherwise
    """
    return 'time' not in ds.coords
|
06254dc660171f34f911ab4e99b9d14e7a789751
| 47,524
|
def merge_dict(d1, d2):
    """Merge two dictionaries, i.e. {**d1, **d2} in Python 3.5 onwards.

    Neither input is mutated; on key collisions d2 wins.
    """
    return {**d1, **d2}
|
26c1e1700873c40bec46f5513c7eb5dbc0595325
| 47,526
|
def crystal_search(crystals, histogram_type):
    """Group histogram values by crystal centering type.

    Parameters
    ----------
    crystals : list
        A list of crystal dicts.
    histogram_type : unicode str (on py3)
        Type of histogram e.g. 'a', 'gamma'.

    Returns
    -------
    crystal_dict : dict
        key - centering type, value - list of histogram values
    """
    grouped = {}
    for entry in crystals:
        centering = entry['centering']
        if centering not in grouped:
            grouped[centering] = []
        grouped[centering].append(entry[histogram_type])
    return grouped
|
406ff81a3865a594e43cb95e5e92330617af48df
| 47,531
|
def mean(ls):
    """
    Return the arithmetic mean of a non-empty list of numbers as a float.
    """
    return sum(ls) / float(len(ls))
|
213632c6b905317175dbecbbf4f175392451af2e
| 47,532
|
def is_iscsi_uid(uid):
    """Validate the iSCSI initiator format.

    Only the 'iqn' prefix is checked, not the full structure.

    :param uid: format like iqn.yyyy-mm.naming-authority:unique
    :return: True when *uid* carries the 'iqn' prefix
    """
    return uid[:3] == 'iqn'
|
8670c7970e1ee5e077de3de02f2fb754fa4352aa
| 47,533
|
from typing import cast
from typing import Iterable
import itertools
def _test_for_equality_nestedly_and_block_implicit_bool_conversion(
    o1: object, o2: object
) -> bool:
    """test objects, or sequences, for equality. sequences are tested recursively. Block
    implicit conversion of values to bools.
    >>> import methodfinder
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion(1,1)
    True
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion(1,2)
    False
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion([1,2,3],[1,2,3])
    True
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion([1,2,3],[2,1,3])
    False
    >>> methodfinder._test_for_equality_nestedly_and_block_implicit_bool_conversion(1,True)
    False"""
    try:
        # if they have iterators, no exception will be thrown
        # take 100 elements from them. any user of methodfinder
        # will not be putting in more than 100 elements
        # if it's not an iterator, an exception will be thrown
        o1_iter = cast(Iterable[object], o1)
        o2_iter = cast(Iterable[object], o2)
        # zip_longest pads the shorter side with None, so sequences of
        # different lengths compare unequal instead of being truncated.
        for e1, e2 in itertools.zip_longest(
            itertools.islice(o1_iter, 100), itertools.islice(o2_iter, 100)
        ):
            if not _test_for_equality_nestedly_and_block_implicit_bool_conversion(
                e1, e2
            ):
                return False
        return True
    except:
        # since at least one of the objects does not have an iterator,
        # just test for equality normally.
        # test that the types are the same to suppress implicit
        # conversion of values to bools, which returns
        # way too many useless results for the purpose of methodfinder
        #
        # NOTE(review): this bare except appears load-bearing beyond
        # TypeError — a single-character string iterates to itself, so
        # the recursion above presumably ends in a RecursionError that
        # must land here too. Narrow with care; confirm before changing.
        return (type(o1) == type(o2)) and (o1 == o2)
|
351fb24ec20f8967559ecfff54d1608213c04f8b
| 47,539
|
def _get_duration_in_seconds(selected_duration):
"""
Converts hours/minutes to seconds
Args:
selected_duration (string): String with number followed by unit
(e.g. 3 hours, 2 minutes)
Returns:
int: duration in seconds
"""
num_time, num_unit = selected_duration.split(' ')
if num_unit == 'hours':
num_time = int(num_time) * 3600
elif num_unit == 'minutes':
num_time = int(num_time) * 60
return int(num_time)
|
0f17f3f4ed678dfb9fdf0d4ed819cb2308311981
| 47,541
|
def check_sentence_quality(left_match_right):
    """
    Take a tuple with the left and right side of the matched word and
    check a few conditions to determine whether it's a good example.

    Fix: an empty joined sentence previously raised ZeroDivisionError in
    the uppercase-proportion check; it now returns 0 (bad quality),
    which also subsumes the minimum-length rule.

    Args:
        left_match_right (tuple): a tuple of three strings: the left side
            of the NKJP match, the match itself (in [[baseform|match]]
            form) and the right side

    Returns:
        int: 0 for bad quality, 1 for good quality
    """
    joined_sentence = ''.join(left_match_right[:3])
    # Guard: empty text is trivially bad quality (and would divide by 0).
    if not joined_sentence:
        return 0
    # the proportion of upper case letters to all letters is too high
    allowed_uppercase_proportion = 0.1
    if sum(1 for c in joined_sentence if c.isupper())/len(joined_sentence) > allowed_uppercase_proportion:
        return 0
    # too many titlecase words
    allowed_titlecase_proportion = 0.4
    if sum(1 for c in joined_sentence.split() if c[0].isupper())/len(joined_sentence.split()) > allowed_titlecase_proportion:
        return 0
    # the sentence is too long or too short
    allowed_length = 200
    minimum_length = 60
    if len(joined_sentence) > allowed_length:
        return 0
    if len(joined_sentence) < minimum_length:
        return 0
    # there are too many newlines (most likely a list)
    allowed_newlines = 3
    if joined_sentence.count('\n') > allowed_newlines:
        return 0
    return 1
|
7a50e860d251e2f2ed2fd1f1c7e7ea406a7a4043
| 47,543
|
def toggle_legend_collapse(_, is_open):
    """Open or close legend view.

    :param _: unused; the toggle-legend button click event
    :param is_open: current visibility of the legend
    :return: new visibility for legend; opposite of ``is_open``
    :rtype: bool
    """
    return False if is_open else True
|
50aeccdeb79dde4a6941a94cdd05ee77494cd1b9
| 47,546
|
def multiples(m, n):
    """
    Build a list of the first m multiples of the number n.

    :param m: a positive integer count.
    :param n: the number to multiply.
    :return: the list [n*1, n*2, ..., n*m].
    """
    return [n * factor for factor in range(1, m + 1)]
|
3a182d95dafa1d56ce120ff1d04f108f9d9c5e37
| 47,547
|
import re
def get_version_string(init_file):
    """
    Read the __version__ string from an init file.

    :param init_file: path to a file containing `__version__ = "..."`
    :return: the version string
    :raises RuntimeError: when no __version__ assignment is found
    """
    with open(init_file, 'r') as fp:
        match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                          fp.read(), re.M)
    if match:
        return match.group(1)
    raise RuntimeError('Unable to find version string in %s.' % (init_file))
|
22a9faa0d6686106de7d5cb736762859cc9c483a
| 47,549
|
import re
def compute(sample: str, substr: str) -> list[int]:
    """
    Find every (possibly overlapping) occurrence of *substr* in *sample*.

    Positions are 1-based: the symbol at position i of s is the one with
    i-1 symbols to its left (e.g. all 'U' in "AUGCUUCAGAAAGGUCUUACG"
    occur at 2, 5, 6, 15, 17 and 18).

    :param sample: the string to scan.
    :param substr: a string no longer than *sample*.
    :return: 1-based start positions of each occurrence, in order.
    """
    # A lookahead pattern matches at every start position without
    # consuming text, so overlapping hits are all reported.
    pattern = re.compile(f'(?={substr})')
    return [match.start() + 1 for match in pattern.finditer(sample)]
|
ed3a157fa74e953e56d2ed1d13e0b757da23c135
| 47,552
|
def block_combine(arr, nrows, ncols):
    """Combine a stack of equally-sized blocks into an nrows x ncols matrix.

    Arguments:
        arr {3D np.array} -- A list of blocks in the format:
                             arr[# of block][block row size][block column size]
        nrows {int} -- The target row size after combination.
        ncols {int} -- The target column size after combination.

    Returns:
        2D np.array -- Combined matrix.

    Raise:
        ValueError -- The size of `arr` is not equal to `nrows * ncols`.
    """
    if arr.size != nrows * ncols:
        raise ValueError(f'The size of arr ({arr.size}) should be equal to '
                         f'nrows * ncols ({nrows} * {ncols})')
    _, block_nrows, block_ncols = arr.shape
    # Arrange blocks on a grid, then interleave block rows into full rows.
    grid = arr.reshape(nrows // block_nrows, -1, block_nrows, block_ncols)
    return grid.swapaxes(1, 2).reshape(nrows, ncols)
|
e6330606ea63eb16faee305d646b60b8327785ac
| 47,557
|
import math
def get_interactions_stats(S_edgelist, embedding, target_adjacency):
    """ Interactions are edges between chains that are connected in
    the source adjacency.
    Args:
        S_edgelist (iterable):
            An iterable of label pairs representing the edges in the source graph.
        embedding (dict):
            Mapping from source graph to target graph as a dict of form
            {s: {t, ...}, ...}, where s is a source-model variable and t
            is a target-model variable.
        target_adjacency (dict/:class:`networkx.Graph`):
            Adjacency of the target graph as a dict of form {t: Nt, ...}, where
            t is a variable in the target graph and Nt is its set of neighbours.
    Returns:
        *stats:
            Max, min, total, average, standard deviation of the per-edge
            interaction counts.
    """
    total = 0
    max_inters = 0
    # NOTE(review): averages below divide by the number of embedded
    # variables, not the number of source edges — confirm intended.
    N = len(embedding)
    min_inters = float('inf')
    # Get max min and total
    interactions_dict = {}
    for edge in S_edgelist:
        (u, v) = edge
        # All target edges joining u's chain to v's chain.
        available_interactions = {(s, t) for s in embedding[u] for t in embedding[v] if s in target_adjacency[t]}
        if not available_interactions:
            raise ValueError("no edges in target graph between source variables {}, {}".format(u, v))
        num_interactions = len(available_interactions)
        interactions_dict[(u,v)] = num_interactions
        total += num_interactions
        if num_interactions > max_inters:
            max_inters = num_interactions
        if num_interactions < min_inters:
            min_inters = num_interactions
    # Get avg and standard deviation (population std dev over edges).
    avg_inters = total/N
    sum_deviations = 0
    for (u, v), num_interactions in interactions_dict.items():
        deviation = (num_interactions - avg_inters)**2
        sum_deviations += deviation
    std_dev = math.sqrt(sum_deviations/N)
    return max_inters, min_inters, total, avg_inters, std_dev
|
69b6f818b25a2cb164cf3e2d6f24a80fc7a73efc
| 47,568
|
def fklist(self, kpoi="", lab="", **kwargs):
    """Lists the forces at keypoints.

    APDL Command: FKLIST

    Parameters
    ----------
    kpoi
        List forces at this keypoint. If ALL (default), list for all
        selected keypoints [KSEL]. If KPOI = P, graphical picking is
        enabled and all remaining command fields are ignored (valid only
        in the GUI). A component name may also be substituted for KPOI.
    lab
        Force label to be listed (defaults to ALL). See the DOFSEL
        command for labels.

    Notes
    -----
    Listing applies to the selected keypoints [KSEL] and the selected
    force labels [DOFSEL]. This command is valid in any processor.
    """
    return self.run(f"FKLIST,{kpoi},{lab}", **kwargs)
|
dfcb4547520d8840763ba11fd4f993ffee70279d
| 47,570
|
def taken(diff):
    """Convert a time diff to the absolute total number of microseconds.

    Bug fix: seconds and days were previously scaled by 1_000
    (i.e. milliseconds), not 1_000_000, so the result contradicted the
    docstring for any diff of a second or more.

    :param diff: a datetime.timedelta
    :return: non-negative int, total microseconds in the diff
    """
    microseconds = diff.seconds * 1_000_000 + diff.microseconds
    return abs(diff.days * 24 * 60 * 60 * 1_000_000 + microseconds)
|
64e8022bb0a80fcccc1a755fffb79121e61cad17
| 47,571
|
from typing import List
from typing import Counter
def remove_rare(sentences: List[List[str]]) -> List[List[str]]:
    """
    Remove rare words (those that appear at most once) from sentences.

    Parameters
    ----------
    sentences:
        List of tokenized sentences.
    """
    frequencies: Counter = Counter()
    for tokens in sentences:
        frequencies.update(tokens)
    return [[token for token in tokens if frequencies[token] > 1]
            for tokens in sentences]
|
1af60b7bb0393abf99db02abf6f4fea9d9529c15
| 47,572
|
def _general_direction(model_rxn1, model_rxn2):
"""
picks the more general of the two directions from reactions passed in
"""
r1d = model_rxn1.get_direction()
r2d = model_rxn2.get_direction()
if r1d == r2d:
return r1d
else:
return '<=>'
|
362ee4a6f033323328f869ef6e5650cc9fef9fa3
| 47,574
|
def read_table(filename, usecols=(0, 1), sep='\t', comment='#', encoding='utf-8', skip=0):
    """Parse data files from the data directory.

    Parameters
    ----------
    filename: string
        Full path to file
    usecols: list, default [0, 1]
        Two column indices: the first is used for the (lower-cased) keys
        and the second for the values.
    sep : string, default tab
        Field delimiter.
    comment : str, default '#'
        Lines starting with this single character are ignored.
    encoding : string, default 'utf-8'
        Encoding to use when reading the file.
    skip: int, default 0
        Number of lines to skip at the beginning of the file.

    Returns
    -------
    A dictionary with one entry per parsed line of `filename`.
    """
    # Bug fix: the `encoding` parameter was accepted but never passed to
    # open(), so the file was decoded with the platform default.
    with open(filename, 'r', encoding=encoding) as f:
        # skip initial lines
        for _ in range(skip):
            next(f)
        # filter comment lines
        lines = (line for line in f if not line.startswith(comment))
        d = dict()
        for line in lines:
            columns = line.split(sep)
            key = columns[usecols[0]].lower()
            value = columns[usecols[1]].rstrip('\n')
            d[key] = value
    return d
|
81e70a1db8530940d73cf8242b791c3cab473b9c
| 47,585
|
import time
def time_it(func, *args, **kwargs):
    """Benchmark *func*: run it, print its wall-clock time, return its result."""
    t0 = time.time()
    result = func(*args, **kwargs)
    elapsed = time.time() - t0
    print(f'{func.__name__} t: {elapsed:.{8}f} s')
    return result
|
07023c77f29ca03171ac8f725e41212f605bcdaf
| 47,586
|
def define_limit_offset(request):
    """Define limit and offset pagination values from request.args.

    Falls back to the defaults (12, 0) when args are absent, the keys
    are missing, or the values are not valid integers.

    Fixes: the bare `except:` (which also swallowed e.g.
    KeyboardInterrupt) is narrowed to the errors int()/lookup can raise,
    and the duplicated default literals are defined once.

    :param request: object with a dict-like `args` attribute
    :return: (limit, offset) tuple of ints
    """
    default = (12, 0)
    if not request.args:
        return default
    try:
        return (int(request.args['limit']), int(request.args['offset']))
    except (KeyError, ValueError, TypeError):
        return default
|
31ef7fbc70ec67c0d646024c580b591238cfecbf
| 47,591
|
def _dot(fqdn):
"""
Append a dot to a fully qualified domain name.
DNS and Designate expect FQDNs to end with a dot, but human's conventionally don't do that.
"""
return '{0}.'.format(fqdn)
|
53ee7c41dab6b88523a68fd1ee387031c0314eb1
| 47,594
|
def intersect1D(min1, max1, min2, max2):
    """
    Return the overlapping state on a 1 dimensional level.

    :param int/float min1:
    :param int/float max1:
    :param int/float min2:
    :param int/float max2:
    :return: Overlapping state (touching endpoints do not count)
    :rtype: bool
    """
    if min1 >= max2:
        return False
    return min2 < max1
|
2ad6b9926614b3785aab2a28ee2afb088170d2b9
| 47,597
|
def add(g, start, end):
    """
    Record an undirected edge in adjacency list g.

    @type: g, graph (2D array)
    @param: g, adjacency list
    @type: start, integer
    @param: start, start vertex point for edge
    @type: end, integer
    @param: end, end vertex point for edge
    """
    # An undirected edge appears in both endpoints' neighbour lists.
    for a, b in ((start, end), (end, start)):
        g[a].append(b)
    return g
|
30032f20717034dce57b1e5adfb09e45ce2614be
| 47,601
|
def file_size(file, unit):
    """
    Convert the size from bytes to other units like KB, MB or GB.

    Any other unit string leaves the byte count unchanged.

    Adapted from:
    https://thispointer.com/python-get-file-size-in-kb-mb-or-gb-human-readable-format/
    """
    exponents = {'KB': 1, 'MB': 2, 'GB': 3}
    if unit in exponents:
        size = file.size / 1024 ** exponents[unit]
    else:
        size = file.size
    return f'{round(size, 2)} {unit}'
|
cab5c01470489c126470c0c5179bb3da8d30072b
| 47,602
|
def read_mappings_from_dict(index_mapping):
    """
    Read event_class and event_type mappings from a python dict.

    Each of index_mapping['classes'] and index_mapping['types'] is a
    sequence of 3-element rows; the rows are transposed into three
    per-column lists (always three, even when the input is empty).
    """
    evclass = [[], [], []]
    evtype = [[], [], []]
    for columns, key in ((evclass, 'classes'), (evtype, 'types')):
        for row in index_mapping[key]:
            for col in range(3):
                columns[col].append(row[col])
    return evclass, evtype
|
cdf2706fed3cdf5786cc238e090401eeae99a8c2
| 47,608
|
import torch
def top_p_filter(
    logits: torch.Tensor,
    top_p: float,
    min_tokens_to_keep: int,
    is_probs: bool = False
) -> torch.Tensor:
    """Helper function for nucleus sampling decoding, aka. top-p decoding.

    Masks out every token outside the smallest descending-probability
    prefix whose cumulative mass exceeds ``top_p``, keeping at least
    ``min_tokens_to_keep`` tokens per row.

    :param logits: token scores to filter
    :param top_p: cumulative-probability threshold
    :param min_tokens_to_keep: lower bound on surviving tokens per row
    :param is_probs: when True, removed entries are set to 0.0 instead
        of -inf (suitable when `logits` already holds probabilities)
    :return: filtered scores, same shape as ``logits``
    """
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    # Cumulative mass in descending-score order; entries past the top_p
    # boundary are marked for removal.
    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
    sorted_indices_to_remove = cumulative_probs > top_p
    if min_tokens_to_keep > 1:
        # keep at least min tokens
        sorted_indices_to_remove[..., :min_tokens_to_keep - 1] = 0
    # Shift the indices to the right to keep also the first token above the threshold
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0
    # scatter sorted tensors to original indexing
    # NOTE(review): scatter uses dim=1, which presumes 2-D (batch, vocab)
    # logits — confirm callers never pass 1-D tensors.
    indices_to_remove = sorted_indices_to_remove.scatter(
        1, sorted_indices, sorted_indices_to_remove
    )
    if is_probs:
        scores = logits.masked_fill(indices_to_remove, 0.0)
    else:
        scores = logits.masked_fill(indices_to_remove, -float("Inf"))
    return scores
|
7e54e5cc87afa4eb90ca7316e48b947f9647f210
| 47,610
|
def table_to_report(table, measure):
    """Return a report built from `table` rows and a `measure` suffix.

    Args:
        table: an instance generated by c.fetchall(); a list of
            two-element tuples.
        measure: a unit of measure in string format, appended after the
            second tuple value.

    Returns:
        One line per tuple, of the form:
        'first value' - 'second value''measure'

    Example:
        John Smith - 1000 views
        Sun will also rise tomorrow - 10 clicks
    """
    lines = ['{} - {}{}\n'.format(label, value, measure)
             for label, value in table]
    return ''.join(lines)
|
e3417b159cd7b826c856697d48c34aecd1a81719
| 47,612
|
def has_code(line: str) -> bool:
    """
    Return True if there's code on the line
    (so it's not a comment or an empty line).

    Bug fix: the original boolean (`not startswith('#') or line == ""`)
    returned True for blank lines, contradicting this docstring; a line
    now counts as code only when it is non-blank and, once stripped,
    does not start with '#'.
    """
    stripped = line.strip()
    return bool(stripped) and not stripped.startswith("#")
|
ef0975ee21deda1206a1bfc728f47d1119132c70
| 47,614
|
def PairsFromGroups(groups):
    """Returns dict such that d[(i,j)] exists iff i and j share a group.

    groups must be a sequence of sequences, e.g a list of strings.
    """
    return {(i, j): None
            for group in groups
            for i in group
            for j in group}
|
f449612579e54e1365da459d936e633f38d0ceac
| 47,615
|
def set_focus_on_entry(entry):
    """Give keyboard focus to *entry* and halt further event handling."""
    entry.focus()
    # Returning "break" tells Tk to stop propagating the triggering event.
    return "break"
|
91657ec5613a95f11965f495b38248ca2ef9e23f
| 47,617
|
def _version_to_tuple(version):
"""Converts the version string ``major.minor`` to ``(major, minor)`` int tuple."""
major, minor = version.split('.')
return (int(major), int(minor))
|
921a3855fd23a597f13dab27f660cf4b0113926b
| 47,620
|
def parseExtn(extn=None):
    """
    Parse a string representing a qualified fits extension name as in the
    output of `parseFilename` and return a tuple ``(str(extname),
    int(extver))``, which can be passed to `astropy.io.fits` functions using
    the 'ext' kw.
    Default return is the first extension in a fits file.
    Examples
    --------
    ::
        >>> parseExtn('sci, 2')
        ('sci', 2)
        >>> parseExtn('2')
        ('', 2)
        >>> parseExtn('sci')
        ('sci', 1)
    """
    if not extn:
        return ('', 0)
    try:
        lext = extn.split(',')
    except AttributeError:
        # Fix: was a bare ``except:`` that swallowed everything; only a
        # non-string (no .split) can fail here, falling back to ext 1.
        return ('', 1)
    if len(lext) == 1 and lext[0].isdigit():
        # Bare extension number, e.g. '2'.
        return ("", int(lext[0]))
    elif len(lext) == 2:
        # 'name, ver' form; int() tolerates surrounding whitespace.
        return (lext[0], int(lext[1]))
    else:
        # Name only (or malformed with extra commas): default to version 1.
        return (lext[0], 1)
|
ddbc2c3e16161431ce458eb2ff441ab8a21145ee
| 47,622
|
def _report_body(*, image: str, repo: str, run: str, stacktrace: str) -> str:
"""Format the error report."""
return (
f"Repo: {repo}\n"
f"Run URL: {run}\n"
f"Image ID: {image}\n"
f"Stacktrace:\n```py\n{stacktrace}\n```\n"
)
|
a81ea924078f0225aba3ab441aef42c3393118cb
| 47,637
|
import importlib
import time
def time_algo(call_string, module_name):
    """
    Times the execution of a python call string.
    :param call_string: str string that calls a python module and executes an
    algorithm (the imported module is available to it as ``module``)
    :param module_name: str name of module from which function is called
    :return run_time: float time in seconds required to execute python call
    string

    SECURITY NOTE: ``call_string`` is executed with ``exec`` — only pass
    trusted strings, never user-controlled input.
    """
    module = importlib.import_module(module_name)  # noqa: F841 - exposed to exec
    # Fix: use perf_counter instead of time.time — it is monotonic and
    # high-resolution, so the measurement cannot go negative if the wall
    # clock is adjusted mid-run.
    start = time.perf_counter()
    exec(call_string)
    finish = time.perf_counter()
    run_time = finish - start
    return run_time
|
f24e708c04a765487b3c009b7ef5f9929e4c885b
| 47,638
|
def compute_columns(n_items, n_rows):
    """Compute the grid shape (rows, cols) needed to display n_items
    in an n_rows x n_cols grid.

    If there are fewer items than requested rows, use one column with
    one item per row; otherwise use ceil(n_items / n_rows) columns.
    """
    if n_rows > n_items:
        return n_items, 1
    # Ceiling division via negation trick (no math import needed).
    n_cols = -(-n_items // n_rows)
    return n_rows, n_cols
|
dbc4a87d0d335055ea8f8b6115289b94cb15a655
| 47,640
|
def resolve_from_path(path):
    """Resolve a module attribute from a dotted path of the form x.y.z."""
    module_path, _, attr_name = path.rpartition(".")
    # fromlist forces __import__ to return the leaf module, not the top package.
    module = __import__(module_path, fromlist=[attr_name])
    return getattr(module, attr_name)
|
fffda7518a78c72a441547116f2b33ed459adb05
| 47,642
|
import math
def svo_angle(mean_allocation_self, mean_allocation_other):
    """
    Calculate a person's social value orientation angle (based on the slider measure).
    params: A mean allocation to self and a mean allocation to other, based on the six primary items of the SVO slider
    returns: The person's social value orientation angle in degrees
    effects: None
    """
    # The slider's origin is (0, 0) but the circle is centred on (50, 50),
    # so shift both allocations before taking the angle from the centre.
    dx = mean_allocation_self - 50
    dy = mean_allocation_other - 50
    return math.degrees(math.atan2(dy, dx))
|
0b3f39309c44d6e3fee893debb54ef00786d321e
| 47,643
|
def get_hosts(path, url):
    """
    Creates windows host file config data
    :param path: project path, echoed in the comment line
    :param url: hostname mapped to 127.0.0.1
    :return: string
    """
    # Keep the exact template text (leading newline and indentation included);
    # substitute %%url%% first, then %%path%%, same order as before.
    template = """
    # host for %%path%%
    127.0.0.1\t%%url%%
    """
    return template.replace("%%url%%", url).replace("%%path%%", path)
|
5e606c6dd36706f5c0fe1e492143231c4bd229ee
| 47,648
|
def _normalized_import_cycle(cycle_as_list, sort_candidates):
"""Given an import cycle specified as a list, return a normalized form.
You represent a cycle as a list like so: [A, B, C, A]. This is
equivalent to [B, C, A, B]: they're both the same cycle. But they
don't look the same to python `==`. So we normalize this list to
a data structure where different representations of the cycle
*are* equal. We do this by rearranging the cycle so that a
canonical node comes first. We pick the node to be the node in
cycle_as_list that is also in sort_candidates. If there are
multiple such nodes, we take the one that's first alphabetically.
We assume a simple cycle (that is, one where each node has only
one incoming edge and one outgoing edge), which means that each
node only occurs here once, so the sort order is uniquely defined.
"""
sort_elts = [node for node in cycle_as_list if node in sort_candidates]
if not sort_elts: # probably impossible, but best to be safe
sort_elts = cycle_as_list
min_index = cycle_as_list.index(min(sort_elts))
# The weird "-1" here is because A occurs twice in the input
# cycle_as_list, but we want min_elt to occur twice in the output.
return tuple(cycle_as_list[min_index:-1] + cycle_as_list[:min_index + 1])
|
328046069999c8f3f960cc526b277a21c7daab5d
| 47,650
|
def expand_features_and_labels(x_feat, y_labels):
    """
    Take features and labels and expand them into labelled examples for the
    model: every segment of each feature becomes one example carrying its
    feature's label.
    """
    pairs = [(segment, label)
             for feature, label in zip(x_feat, y_labels)
             for segment in feature]
    x_expanded = [segment for segment, _ in pairs]
    y_expanded = [label for _, label in pairs]
    return x_expanded, y_expanded
|
5cd5dbe18285fdcbc633809cd95dde17e32dd82b
| 47,651
|
def add_extension(file_name, ext='py'):
    """
    adds an extension name to the file_name.
    :param file_name: <str> the file name to check for extension.
    :param ext: <str> add this extension to the file name.
    :return: <str> file name with valid extension.

    Bug fix: the old check ``file_name.endswith(ext)`` matched any name
    merely ending in the extension's letters (e.g. 'happy' "ends with"
    'py'), so such names never received the extension.  We now compare
    against '.<ext>' including the dot.
    """
    suffix = '.{}'.format(ext)
    if file_name.endswith(suffix):
        return file_name
    return file_name + suffix
|
123a8a01c70cd3bf98f189c9582796fcdfa97ee3
| 47,654
|
def get_url_with_query_params(request, location, **kwargs):
    """
    Returns link to given location with query params.
    Usage:
        get_url_with_query_params(request, location, query_param1=value1, query_param2=value2)
    returns:
        http(s)://host/location/?query_param1=value1&query_param2=value2

    Bug fix: the old implementation left a dangling '&' after the last
    parameter; the pairs are now joined so the URL ends cleanly.
    NOTE(review): values are interpolated verbatim (no URL-encoding),
    matching the previous behaviour — confirm callers pass safe values.
    """
    url = request.build_absolute_uri(location)
    if kwargs:
        query = "&".join(f"{key}={value}" for key, value in kwargs.items())
        url = f"{url}?{query}"
    return url
|
350003c0c86ff80db5f70ba16de6d86edadbf6e4
| 47,659
|
def is_hex_digit(char):
    """Checks whether a single character is a hexadecimal digit.

    Accepts 0-9, a-f and A-F.  Fix: uppercase hex digits were previously
    rejected even though they are valid hexadecimal; accepting them is a
    backward-compatible widening (everything accepted before still is).
    """
    return ('0' <= char <= '9'
            or 'a' <= char <= 'f'
            or 'A' <= char <= 'F')
|
172ae4a57bd77e7e33237bec77831417e961babd
| 47,660
|
def subtracttime(d1, d2):
    """Return the absolute difference between two datetimes in whole
    seconds (microseconds are ignored)."""
    delta = abs(d2 - d1)
    return delta.days * 86400 + delta.seconds
|
bae7668ef9b593c7ebe072d789e1b298f81cda3e
| 47,667
|
def removeWords(answer):
    """Removes specific words from input or answer, to allow for more leniency.

    The answer is lower-cased first, then each noise word is deleted.
    """
    cleaned = answer.lower()
    for noise in (' town', ' city', ' island', ' badge', 'professor ', 'team '):
        cleaned = cleaned.replace(noise, '')
    return cleaned
|
088debb26571dc31415591e5026972b057987229
| 47,671
|
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n
|
e7a46f8923fb57985e3f32a1130f34e703a58627
| 47,674
|
def mcf_to_boe(mcf=0, conversion_factor=6):
    """Convert mcf (thousand cubic feet of gas) to barrels of oil
    equivalent, using the standard 6 mcf/boe factor by default."""
    return mcf / conversion_factor
|
e7f7b984ec0e537512cf2b926c72c25c83a3507b
| 47,675
|
def check_won(grid):
    """Return True if a value >= 32 is found anywhere in the 4x4 grid;
    otherwise False."""
    return any(grid[row][col] >= 32
               for row in range(4)
               for col in range(4))
|
f93751aa8073bc3e1b3965bd598a17f0c98da967
| 47,680
|
import re
def any_char_matches(substring: str, mainString: str):
    """Scans the string for any matches a certain pattern.
    Parameters
    ----------
    substring : str
        The pattern searched for in `mainString` (note: it is treated as a
        regular expression by ``re.search``, not a literal substring).
    mainString : str
        The `mainstring` which contains the original string.
    Returns
    -------
    is_matching : bool
        Returns `True` if the `substring` matches with the `mainSting` else `False`.
    """
    return re.search(substring, mainString) is not None
|
b0db76f9f7ed34cd45ba118c758944afbb7b0090
| 47,681
|
def getDens(mCM):
    """
    Return the density for a multi-component material.

    mCM['dens'] may be a plain number (float/int) or a callable taking
    the whole mCM dictionary; callables are invoked to get the density.

    Parameters:
        mCM, multi-component material dictionary - see init
    """
    dens = mCM['dens']
    # Deliberately an exact type check (not isinstance), matching the
    # original behaviour for subclasses such as bool.
    if type(dens) not in (float, int):
        dens = dens(mCM)
    return dens
|
60e6baa70f5c6cd90cf4578bd9f348e947d18979
| 47,683
|
def parse_sqlplus_arg(database):
    """Parses an sqlplus connection string (user/passwd@host) unpacking the user, password and host.
    :param database: sqlplus-like connection string
    :return: (?user, ?password, ?host) — password is None when omitted
    :raises: ValueError
        when database is not of the form <user>/<?password>@<host>
    """
    try:
        # Both unpackings below raise ValueError on a malformed string
        # (missing '@', multiple '@', or multiple '/').
        credentials, host = database.split('@')
        if '/' in credentials:
            user, password = credentials.split('/')
        else:
            user, password = credentials, None
    except ValueError:
        raise ValueError('Invalid sqlplus connection string {}: expected <user>/?<pass>@<host>'.format(database))
    return (user, password, host)
|
51bb3304458d4b3d3a69c694b13066e1d713a272
| 47,685
|
def is_palindrome(input_string):
    """
    Checks if a string is a palindrome.
    :param input_string: str, any string
    :return: boolean, True if palindrome else False
    >>> is_palindrome("madam")
    True
    >>> is_palindrome("aabb")
    False
    >>> is_palindrome("race car")
    False
    >>> is_palindrome("")
    True
    """
    # None and the empty string are treated as palindromes, as before.
    if not input_string:
        return True
    return input_string == input_string[::-1]
|
165df98dd983a2d84ad30bafbb70168d9599bd8d
| 47,687
|
import codecs
import base64
def convert_r_hash_hex(r_hash_hex):
    """Convert a hex-encoded payment hash to its base64 text form.
    >>> convert_r_hash_hex("f9e328f584da6488e425a71c95be8b614a1cc1ad2aedc8153813dfff469c9584")
    '+eMo9YTaZIjkJacclb6LYUocwa0q7cgVOBPf/0aclYQ='
    """
    raw_bytes = bytes.fromhex(r_hash_hex)
    return base64.b64encode(raw_bytes).decode()
|
8980e43ff4f30e69e9cfc0ed7427f787ea97d701
| 47,688
|
import re
def contains_curse(sometext):
    """
    Checks a particular string to see if it contains any
    NSFW type words. curse words, things innapropriate
    that you might find in lyrics or quotes
    :param sometext: some text
    :type sometext: Str
    :returns: a boolean stating whether we have a curse word or not
    :rtype: Boolean

    Fix: previously this returned the ``re.Match`` object (or None)
    instead of the Boolean the docstring promises; it now returns a bool
    (truthiness for existing callers is unchanged).
    NOTE(review): the trailing ``*`` in each alternative means "zero or
    more of the preceding character", so e.g. 'hoe*' also matches plain
    'ho' — confirm whether that looseness is intended.
    """
    return re.search(r'hoe*|bitch*|fag*|puss*|nigg*|fuck*|cunt*|shit*|dick*|cock*', sometext.lower()) is not None
|
e979fec6ba2cd88191dde304445416129a7f5049
| 47,690
|
def drop_id_prefixes(item):
    """Rename keys ending in 'id', to just be 'id' for nested dicts.

    Lists are processed element-wise and dicts are rewritten recursively;
    anything else is returned untouched.  Note: ANY key whose text ends
    in 'id' collapses (e.g. 'user_id', but also 'valid').
    """
    if isinstance(item, dict):
        renamed = {}
        for key, value in item.items():
            new_key = 'id' if key.endswith('id') else key
            renamed[new_key] = drop_id_prefixes(value)
        return renamed
    if isinstance(item, list):
        return [drop_id_prefixes(element) for element in item]
    return item
|
6fc752fa49771a0fc6e7e28e889cf29941a95a10
| 47,696
|
def getDescendantsTopToBottom(node, **kwargs):
    """
    Return a list of all the descendants of a node,
    in hierarchical order, from top to bottom.
    Args:
        node (PyNode): A dag node with children
        **kwargs: Kwargs given to the listRelatives command

    Fix: now genuinely returns a list as documented — previously it
    returned a one-shot ``reversed`` iterator, which is exhausted after a
    single pass and supports neither len() nor indexing.
    """
    # listRelatives(ad=True) yields bottom-up; reverse for top-down order.
    return list(reversed(node.listRelatives(ad=True, **kwargs)))
|
bc7a7fb1ca1ab362943f024c3dd50ce40cfc0ab5
| 47,701
|
import string
import random
def random_string_generator(size=4, chars=string.ascii_lowercase + string.digits):
    """[Generates random string]
    Args:
        size (int, optional): [size of string to generate]. Defaults to 4.
        chars ([str], optional): [characters to use]. Defaults to string.ascii_lowercase+string.digits.
    Returns:
        [str]: [Generated random string]

    Note: uses ``random``, which is not cryptographically secure; use the
    ``secrets`` module for anything security-sensitive.
    """
    picks = (random.choice(chars) for _ in range(size))
    return ''.join(picks)
|
617e20acd54f218f65f98d89b976efc1bebc095a
| 47,703
|
import torch
def cross_entropy(targ, pred):
    """
    Take the element-wise binary cross-entropy between prediction and target.
    Args:
        targ (torch.Tensor): target (cast to float before the loss)
        pred (torch.Tensor): prediction — assumed to be probabilities in
            [0, 1] as required by BCELoss; TODO confirm against callers.
    Returns:
        diff (torch.Tensor): unreduced cross-entropy, same shape as inputs.
    """
    loss_fn = torch.nn.BCELoss(reduction='none')
    return loss_fn(pred, targ.to(torch.float))
|
22f06a7caf58710208131620bbc773d968e3f910
| 47,705
|
import torch
def tilted_loss(y_pred, y, q=0.5):
    """
    Pinball (tilted) loss used to obtain quantile `q`.
    Parameters:
    - y_pred: Predicted Value
    - y: Target
    - q: Quantile

    Under-predictions are weighted by q and over-predictions by (1 - q),
    so minimising this loss yields the q-th conditional quantile.
    """
    residual = y - y_pred
    under = torch.clamp_min(residual, 0)   # positive part: y > y_pred
    over = torch.clamp_min(-residual, 0)   # positive part: y < y_pred
    return q * under + (1 - q) * over
|
f43ebdee74ebe10776685634859628222a9bc9ce
| 47,712
|
def generate_panel_arrays(nx, ny, panel_size, indentation, offset_x, offset_y):
    """Generate a rectangular array of nx-by-ny panels of the same panel_size.

    nx, ny: int, how many panels you want in X-axis and Y-axis.
    panel_size: (int, int), dimension of the panels.
    offset_x, offset_y: int, translation applied to the whole array.
    indentation: int, per-row shift in X (wrapped modulo the panel width).

    Returns the bottom-left corner of every panel, ordered with the X
    index outermost (all j for i=0 first), matching the original order.
    """
    dx, dy = panel_size
    corners = []
    for i in range(nx):
        for j in range(ny):
            x = i * dx + (indentation * j) % dx + offset_x
            y = j * dy + offset_y
            corners.append((x, y))
    return corners
|
4613b6d038856aca927f33b1bf60e8a3f6449406
| 47,713
|
def data_to_category_counts(df):
    """
    Extracts opportunity category counts for each NAICS code.

    Expects `df` to have columns 'Opportunity_NAICS' and
    'Opportunity__r.Category__c' (Salesforce-style names) — TODO confirm
    against the caller.  Returns one row per NAICS code with one count
    column per category ('Commodity Count', 'Construction Count',
    'Personal Services Count'); missing combinations are filled with 0.
    """
    return (
        df.groupby(['Opportunity_NAICS', 'Opportunity__r.Category__c'])
        # count() tallies non-null values per column; keep just the first
        # column as the row count for each (NAICS, category) pair.
        .count()
        .iloc[:,0]
        .to_frame()
        .rename(columns={df.columns[0]: 'Count'})
        .reset_index()
        # Wide layout: one column per category, values are the counts.
        .pivot(index='Opportunity_NAICS', columns='Opportunity__r.Category__c', values='Count')
        .fillna(0)
        .rename(columns={
            'Commodity': 'Commodity Count',
            'Construction': 'Construction Count',
            'Personal Services': 'Personal Services Count'
        })
        .reset_index()
    )
|
fa57218d5a6f439789cb7e013892df09c8207a07
| 47,714
|
import re
def resolve_query_res(query):
    """
    Extracts resource name from ``query`` string.

    The resource name is everything before the first '(' (queries with
    arguments) or '{' (queries without).  If stripping leaves nothing,
    the full query is returned rather than an empty name.
    """
    # Fix: pass maxsplit by keyword — supplying it positionally to
    # re.split is deprecated (DeprecationWarning since Python 3.13).
    head = re.split(r'[({]', query, maxsplit=1)[0]
    # rather full query than empty resource name
    return head.strip() or query
|
9f5395c8dd416643c5d8460e0fbec2f83037e4fb
| 47,717
|
def calculate_plane_point(plane, point):
    """Calculates the point on the 3D plane for a point with one value missing.
    :param plane: Coefficients of the plane equation (a, b, c, d)
    :param point: 3D point with one value missing (e.g. [None, 1.0, 1.0])
    :return: Valid point on the plane

    The missing coordinate is solved from a*x + b*y + c*z + d = 0; if no
    coordinate is None the point is returned unchanged.
    """
    a, b, c, d = plane
    x, y, z = point
    if x is None:
        return [-(b * y + c * z + d) / a, y, z]
    if y is None:
        return [x, -(a * x + c * z + d) / b, z]
    if z is None:
        return [x, y, -(a * x + b * y + d) / c]
    return [x, y, z]
|
c086e5181a9d595af5a0ef493a221f2166e71423
| 47,718
|
def revert(vocab, indices):
    """Map word indices back to words; unknown indices become 'X'."""
    lookup = vocab.get
    return [lookup(index, 'X') for index in indices]
|
457831d28d26c68b19a07585f0d4de9fe31b0203
| 47,720
|
def getSnpIndicesWithinGeneWindow(snp_chrom,snp_pos,gene_chrom,gene_start,gene_stop=None,window=0):
    """
    computes for a given gene, a boolean vector indicating which SNPs are close to the gene
    input:
        snp_chrom  : chromosomal position [F]
        snp_pos    : position [F]
        gene_chrom : gene chromosome (scalar)
        gene_start : start of the gene (scalar)
        gene_stop  : end of the gene (scalar), if not given, gene_start is used
        window     : window around gene (scalar)
    """
    if gene_stop is None:
        gene_stop = gene_start
    on_chromosome = snp_chrom == gene_chrom
    at_or_after_start = gene_start - window <= snp_pos
    at_or_before_stop = snp_pos <= gene_stop + window
    # Element-wise AND of boolean masks via multiplication (same arithmetic
    # as the original chained *=).
    return on_chromosome * at_or_after_start * at_or_before_stop
|
1a87e20b8744c9d28e26bd189f7bdaa056238a2b
| 47,722
|
import math
def scalarToVector(magnitude, angle):
    """
    Converts a speed and vector into a vector array [x, y, z].
    X is horizontal magnitude, Y is vertical magnitude, and Z is the
    magnitude in/out of the screen (always 0 for now; supporting a second
    angle would be needed for full 3D).
    arg magnitude - the magnitude of the vector
    arg angle - the direction in radians of the vector
    Returns an array with the x, y, z vector
    """
    x_component = magnitude * math.cos(angle)
    y_component = magnitude * math.sin(angle)
    return [x_component, y_component, 0]
|
4d82caed233a3b4df07ff42536fcffa8643e94bf
| 47,724
|
from typing import Counter
def term_freq(tokens: list[str]) -> dict:
    """
    Takes in a list of tokens (str) and returns a dict mapping each token
    to its relative frequency (count / total number of tokens).
    """
    total = len(tokens)
    frequencies = {}
    for token, count in Counter(tokens).items():
        frequencies[token] = count / total
    return frequencies
|
0567a08e9050d030b8411f662e4afb8560e525be
| 47,736
|
def stag_temperature_ratio(M, gamma):
    """Stagnation temperature / static temperature ratio.
    Arguments:
        M (scalar): Mach number [units: dimensionless].
        gamma (scalar): Heat capacity ratio [units: dimensionless].
    Returns:
        scalar: the stagnation temperature ratio :math:`T_0 / T` [units: dimensionless].
    """
    compressibility_term = (gamma - 1) / 2 * M**2
    return 1 + compressibility_term
|
0677524c97a245d93f9eac33c6be885819ed14e5
| 47,742
|
def get_max_depth(es, *, index):
    """Find max depth of root lineage.

    Runs the stored 'max_nested_value' search template against *index*
    and extracts the aggregated maximum node_depth from the response.
    """
    body = {
        "id": "max_nested_value",
        "params": {"path": "lineage", "field": "node_depth"},
    }
    response = es.search_template(index=index, body=body)
    return response["aggregations"]["depths"]["max_depth"]["value"]
|
33eb318bcf20fb656b84edb349ff840ebefe07b0
| 47,743
|
def compute_gender(last_gender: str,
                   top_gender_male: int,
                   top_gender_female: int,
                   top_gender_7_days: str):
    """Computes the gender type of a visitor using a majority voting rule
    over three signals: the last seen gender, the overall top gender
    (derived from the male/female counts), and the 7-day top gender."""
    overall_top = 'M' if top_gender_male > top_gender_female else 'F'
    votes = [last_gender, overall_top, top_gender_7_days]
    # Ties fall back to whichever candidate max() over the set sees first,
    # matching the original behaviour.
    return max(set(votes), key=votes.count)
|
387f04fae4b593de54894eeac017b7fe124706c9
| 47,744
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.