content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def type_id(id):
    """Return the type portion of an OCD identifier (the text after the last ':')."""
    # Split once from the right; raises IndexError if no ':' is present.
    prefix_and_type = id.rsplit(':', 1)
    return prefix_and_type[1]
|
14e2d42b7f3cf2fa8b2a749e07fa0f8ab57a3aaa
| 70,771
|
def parse(sentence):
    """
    Strip a sentence down to ASCII letters and spaces, then lowercase it.

    :param sentence: the sentence to parse
    :return: the parsed sentence
    """
    allowed = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    kept = [ch for ch in sentence if ch in allowed]
    return ''.join(kept).lower()
|
112bfd1048e9df97fcdcf400b261c260f2d76881
| 70,772
|
from typing import Dict
from typing import Set
def build_dependants(dependencies: Dict[str, Set[str]]) -> Dict[str, Set[str]]:
    """
    Given a dependencies mapping (child -> set of parents), return the
    reversed dependants dictionary (parent -> set of children).

    Fix: a parent that only appears inside a value set (and is not itself a
    key of `dependencies`) previously raised KeyError; it now gets its own
    entry in the result.
    """
    dependants: Dict[str, Set[str]] = {name: set() for name in dependencies}
    for child, parents in dependencies.items():
        for parent in parents:
            # setdefault covers parents that are not keys of `dependencies`.
            dependants.setdefault(parent, set()).add(child)
    return dependants
|
054860b29e73980860fcd244ec50b34fdd322a09
| 70,773
|
def process_csv(csv):
    """
    Normalize and select useful columns.

    NOTE(review): `fillna('Age', 0)` and `select_columns(...)` are not valid
    pandas DataFrame calls (pandas `fillna` takes a fill value, and has no
    `select_columns` method), so `csv` is presumably a custom frame type or
    another library's frame -- confirm against the caller. If `csv` is a
    pandas DataFrame, `fillna('Age', 0)` fills every NaN with the string
    'Age' rather than filling the 'Age' column.

    ** return (Tuple (numpy array, numpy array, numpy array)) **
    *train_features.shape : (n, 5)
    *train_targets.shape : (n)
    *test_features.shape : (n, 5)
    With n the numbers of rows
    """
    # Replace None value by 0
    csv = csv.fillna('Age', 0).fillna('Fare', 0)
    # Translate sex values with binary value (0 or 1)
    csv["sex"] = csv["Sex"].apply(lambda v : 1 if v == "male" else 0)
    # Normalize the age column (z-score)
    csv["age_normalize"] = (csv["Age"] - csv["Age"].mean()) /csv["Age"].std()
    # Normalize the fare column (z-score)
    csv["fare"] = (csv["Fare"] - csv["Fare"].mean()) /csv["Fare"].std()
    # We select the Survived column and export it as a numpy array
    train_targets = csv[csv["type"] == "train"].select_columns(['Survived']).to_numpy()
    # We do the same for the features. For training features and for testing features
    train_features = csv[csv["type"] == "train"].select_columns(['sex', 'age_normalize', 'fare', 'SibSp', 'Parch']).to_numpy()
    test_features = csv[csv["type"] == "test"].select_columns(['sex', 'age_normalize', 'fare', 'SibSp', 'Parch']).to_numpy()
    return train_features, train_targets[:], test_features
|
bf748eedf916392f3c5889fee73a254dd3e9c038
| 70,774
|
def _parameters_exists_in_docstring(docstring: str) -> bool:
"""
Get boolean of whether Parater part exists in docstring
or not.
Parameters
----------
docstring : str
Docstring to be checked.
Returns
-------
result_bool : bool
If exists, True will be set.
"""
is_in: bool = 'Parameters\n ---' in docstring
if not is_in:
return False
return True
|
d8aa9a6851218282b700e3d37921bf17a27ba5d7
| 70,778
|
def cats_game(board):
    """Return True when no cell of the 3x3 board still holds its starting number (1-9)."""
    initial = (
        (0, 0, 1), (0, 1, 2), (0, 2, 3),
        (1, 0, 4), (1, 1, 5), (1, 2, 6),
        (2, 0, 7), (2, 1, 8), (2, 2, 9),
    )
    return not any(board[row][col] == value for row, col, value in initial)
|
a1b029fea65d05c6c3b26c6c2cce151ca7aa4517
| 70,779
|
def new_mdecl_wrapper_t__getitem__(self, index):
    """Provide access to declaration.

    A standard int/slice index addresses the contained declarations directly;
    any other index is forwarded to the contained decls' own __getitem__.
    """
    if not isinstance(index, (int, slice)):
        return self.__getattr__("__getitem__")(index)
    return self.declarations[index]
|
134b22dcc18362d4564ea3895492baa51e9d6405
| 70,784
|
def get_batch_inds(batch_size, idx, N):
    """
    Split `idx` into consecutive chunks of `batch_size` elements.

    :param batch_size: the size of training batches
    :param idx: data to split into batches
    :param N: maximum number of elements to cover
    :return: list of slices of `idx`; the final chunk may be shorter
    """
    batches = []
    start = 0
    while True:
        end = start + batch_size
        if end >= N:
            # Final (possibly short) chunk, clamped to N.
            batches.append(idx[start:N])
            return batches
        batches.append(idx[start:end])
        start = end
|
780ec20e98ec30141eaace50191983d610482545
| 70,790
|
def format_cdr_properties(
    geometries,
    densities,
    id_col_name="tower_id",
    density_col_name="density"
):
    """
    Converts the format of additional CDR properties from being in a DataFrame
    to being in a dictionary indexed by the tower node id
    Args:
        geometries (dict): object where the keys are the node id and the value
            is an object with the geometry and the area.
        densities (Pandas.DataFrame): DataFrame with column with node id and
            corresponding density of visitors at that node
        id_col_name (string): Name of the densities DataFrame column for node id
        density_col_name (string): Name of the densities DataFrame column for
            density.
    Returns:
        (dict): object with a key for each tower node id and a value that is an
            object with values for area and density of that node
    """
    props = {}
    ids = densities[id_col_name].unique()
    for tower_id in ids:
        area = None
        # Tower ids are normalized to the string form of their integer value.
        tower_id = str(int(tower_id))
        if tower_id in geometries:
            area = geometries[tower_id]['area']
        density = None
        # NOTE(review): this rewrites the id column of the shared `densities`
        # frame on every loop iteration (same result each time, but it is a
        # visible side effect on the caller's DataFrame).
        densities[id_col_name] = densities[id_col_name].apply(int).apply(str)
        if area is not None and area > 0:
            # NOTE(review): these two assignments rescale the *entire*
            # density column inside the per-tower loop, so the division by
            # `area` and the max-normalization are applied cumulatively once
            # per tower with a positive area -- this looks like a bug
            # (per-tower scaling applied globally); confirm the intent
            # before relying on the returned densities.
            densities[density_col_name] = densities[density_col_name] / area
            maximum = densities[density_col_name].max()
            densities[density_col_name] = densities[density_col_name] / maximum
        first_match = densities[densities[id_col_name] == tower_id].head(1)
        if not first_match.empty:
            density = first_match[density_col_name].iloc[0]
        props[tower_id] = {'area': area, 'density': density}
    return props
|
23c92dacd6eb8c329167429946c2d6550027e08c
| 70,791
|
def simplify_edge_attribute_name(G, key, name_list, simple_name):
    """
    Merge an arbitrary list of values of an edge attribute into a single one.

    Parameters
    ----------
    G : networkx Graph/DiGraph/MultiGraph/MultiDiGraph/...
        Graph where we want to simplify an attribute.
    key : str
        Name of the edges' attribute.
    name_list : list
        Attribute values that should be merged.
    simple_name : str
        Replacement value used for every match.

    Returns
    -------
    networkx graph
        A copy of `G` with the attribute rewritten; `G` itself is untouched.
    """
    simplified = G.copy()
    for edge in simplified.edges:
        attributes = simplified.edges[edge]
        if key in attributes and attributes[key] in name_list:
            attributes[key] = simple_name
    return simplified
|
0e5a95c7c39d300eb51434eb66148331c4037a66
| 70,792
|
def myfuncPrimeSieve(n):
    """
    Return the primes p with 2 <= p < n, via trial-division sieving.

    Fixes over the original:
    * no longer mutates the candidate list while iterating over it (which
      silently skipped elements);
    * returns [] for n < 3 instead of wrongly returning [2];
    * candidates are filtered with a list comprehension instead of repeated
      O(n) `list.remove` calls.

    Example of usage:
        getAns = myfuncPrimeSieve( 20 )
        print(getAns)
    """
    if n < 3:
        return []
    primes = [2]
    candidates = list(range(3, n, 2))  # odd numbers starting from 3
    while candidates:
        p = candidates.pop(0)
        primes.append(p)
        if p * p >= n:
            # Every remaining candidate < n has no prime factor <= sqrt(n),
            # so all of them are prime.
            primes.extend(candidates)
            break
        candidates = [c for c in candidates if c % p]
    return primes
|
abef8a3d3f3c921840caa17bdf9b900cc230a617
| 70,793
|
def create_regular_grid(area_defn, tile_size, stride=None):
    """
    Defines a regular grid of (overlapping) tiles.
    :param area_defn: dictionary, defines one or multiple rectangularly-shaped geographic regions from which
                      DSM patches will be sampled. The dictionary is composed of the following key-value pairs:
                      x_extent:     list of n tuples, where n denotes the number of rectangular regions (stripes).
                                    Each tuple defines the upper-left and lower-right x-coordinate of a rectangular
                                    region (stripe).
                      y_extent:     list of n tuples, where n denotes the number of rectangular regions (stripes).
                                    Each tuple defines the upper-left and lower-right y-coordinate of a rectangular
                                    region (stripe).
                      Assumption:   The i.th tuple of x_extent and i.th tuple of y_extent define a geographically
                                    rectangular region (stripe).
    :param tile_size: int, tile size in pixels
    :param stride: int, stride in pixels (if None: stride equals tile_size)
    :return tile_position: list of tuples, i.th tuple (uly, ulx) specifies the upper-left image coordinates of the
                           i.th tile (w.r.t. the full raster)
    :return region_wo_overlap: list of tuples, i.th tuple (uly, ulx, lry, lrx) specifies the pixels of the i.th tile
                               that do not overlap with any other tile
    """
    if stride is None:
        stride = tile_size
    tile_position = []
    region_wo_overlap = []
    # Number of regions specified in area_defn
    num_regions = len(area_defn['x_extent'])
    # Iterate over each region
    for i in range(num_regions):
        # Extent of the i.th region
        x = area_defn['x_extent'][i]
        y = area_defn['y_extent'][i]
        # Upper-left coordinates of the tile (w.r.t. full raster)
        uly = y[0]
        lry = y[0]
        # Initialization: for the first row, the non-overlapping part starts at
        # the tile top and spans one stride.
        border_uly = 0
        border_lry = stride - 1
        # Split the i.th region into a grid of regular tiles, sliding the tile
        # window down the region in steps of `stride` (y direction).
        while lry < y[1]:
            # Initialization (per row, x direction)
            ulx = x[0]
            lrx = x[0]
            border_ulx = 0
            border_lrx = stride - 1
            # Compute the lower-right y-coordinate of the tile
            lry = uly + tile_size - 1
            # Check if the tile overlaps the region (in y-direction): if yes, shift the tile upwards such that its
            # lower border coincides with the lower border of the region
            if lry >= y[1]:
                # The shifted tile overlaps the previous row by the shift
                # amount, which is added to the non-overlap top border.
                border_uly += lry - y[1]
                lry = y[1]
                uly = y[1] - tile_size + 1
                border_lry = tile_size - 1
            while lrx < x[1]:
                # Compute lower-right x-coordinate of the tile
                lrx = ulx + tile_size - 1
                # Check if the tile overlaps the area (in x-direction): if yes, shift the tile to the left such that
                # its right border coincides with the right border of the region
                if lrx >= x[1]:
                    border_ulx += lrx - x[1]
                    lrx = x[1]
                    ulx = x[1] - tile_size + 1
                    border_lrx = tile_size - 1
                # Save the upper-left corner coordinates of the tile
                tile_position.append((int(uly), int(ulx)))
                # Save the pixels of the tile that do not overlap with any other tile
                region_wo_overlap.append((int(border_uly), int(border_ulx), int(border_lry), int(border_lrx)))
                ulx += stride
                # From the second column on, the first tile_size - stride
                # pixels overlap the previous tile.
                border_ulx = tile_size - stride
            uly += stride
            border_uly = tile_size - stride
    return tile_position, region_wo_overlap
|
9ca35a48b882243ba50e68c2bf0c9e50099fb2ef
| 70,796
|
import torch
def sum_hessian(loss, hidden):
    """Sum the per-example Hessians of scalar `loss` w.r.t. the [b, d] batch of
    activations `hidden`, producing a [d, d] tensor.

    Relies on grad^2(f) = grad(grad(f)) and linearity of the gradient, so the
    batch-summed Hessian is computed row by row as grad(sum(grad(f))).
    """
    dim = hidden.size(1)
    first_grad = torch.autograd.grad(
        loss, hidden, retain_graph=True, create_graph=True)[0]
    grad_total = first_grad.sum(dim=0)
    hessian = hidden.new_zeros(dim, dim)
    for row in range(dim):
        second_grad = torch.autograd.grad(
            grad_total[row], hidden, retain_graph=True)[0]
        hessian[row, :] = second_grad.sum(dim=0)
    return hessian
|
3e8ec30561568652cf403ab10ec2d353708370c2
| 70,798
|
def standard_codec_name(name: str) -> str:
    """
    Map a codec name to the preferred standardized version.

    The preferred names were taken from this list published by IANA:
    U{http://www.iana.org/assignments/character-sets/character-sets.xhtml}

    @param name:
        Text encoding name, in lower case.
    """
    # The iso8859 family is handled by rewriting the prefix; everything else
    # goes through a fixed translation table.
    if name.startswith("iso8859"):
        return "iso-8859" + name[7:]
    preferred = {
        "ascii": "us-ascii",
        "euc_jp": "euc-jp",
        "euc_kr": "euc-kr",
        "iso2022_jp": "iso-2022-jp",
        "iso2022_jp_2": "iso-2022-jp-2",
        "iso2022_kr": "iso-2022-kr",
    }
    return preferred.get(name, name)
|
e9f30c9cb9da065f300900923e9b9ce5bac255e9
| 70,801
|
def price_change_color_red_green(val: str) -> str:
    """Wrap a price-change cell in color tags: green for positive, red otherwise.

    Parameters
    ----------
    val : str
        Price change cell; the numeric value is its first space-separated token.

    Returns
    -------
    str
        Price change cell with color tags.
    """
    change = float(val.split(" ")[0])
    color = "green" if change > 0 else "red"
    return f"[{color}]{val}[/{color}]"
|
dedfaff32d88141af69d70e636c84191ebf24707
| 70,803
|
def minimize_attachment(attachment, doc_id):
    """
    Strip an experiment-document attachment down to its 'md5sum' and 'href'
    fields; the document @id (`doc_id`) is prepended to the attachment href.
    """
    minimized = {}
    if 'md5sum' in attachment:
        minimized['md5sum'] = attachment['md5sum']
    if 'href' in attachment:
        minimized['href'] = doc_id + attachment['href']
    return minimized
|
7e6b8dfbf53f840e6b46a2aa8acb1b7e1017fa46
| 70,804
|
def is_container(obj):
    """Return True when `obj` is a list or a tuple, otherwise False."""
    return isinstance(obj, list) or isinstance(obj, tuple)
|
09db5b1d136b6db1969097d19f7d63d1750971ab
| 70,805
|
import re
def is_uid(string):
    """Return a truthy match object when `string` is a valid DHIS2 UID
    (one letter followed by exactly 10 alphanumerics), else None."""
    uid_pattern = re.compile(r'^[A-Za-z][A-Za-z0-9]{10}$')
    return uid_pattern.match(string)
|
9e1c1d8dc2697a8535018b15662a69edfdbc8eaf
| 70,821
|
def split_list_with_len(lst: list, length: int):
    """
    Chunk `lst` into consecutive sublists of `length` items each;
    the final chunk may be shorter.
    """
    chunks = []
    for start in range(0, len(lst), length):
        chunks.append(lst[start:start + length])
    return chunks
|
1133a7948e0ff37e3c1652ddeb266a9e8eb94f79
| 70,822
|
import json
def read_json(json_file="json_file_not_specified.json"):
    """
    Read a json file and return its object representation. No extra checks
    are performed, so any I/O or decoding exception reaches the caller.

    :param json_file: path to the file in json format to read
    :return: an object representation of the data in the json file
    """
    with open(json_file) as handle:
        data = json.load(handle)
    return data
|
a75a97bac175ff05d648f9c32cd066529be6906c
| 70,828
|
from typing import List
import re
def board_is_nrf(content: List[str]) -> bool:
    """Check if a board is nRF based.

    Args:
        content: DT file content as list of lines.

    Returns:
        True if any #include line references an nrf header, False otherwise.
    """
    nrf_include = re.compile(r'^#include\s+(?:"|<).*nrf.*(?:>|").*')
    return any(nrf_include.match(line) is not None for line in content)
|
c8b83127d142c5512b4e8129beecd6410f6bbce3
| 70,829
|
def get_jvm_class(cl):
    """Build the JVM class name from a Python class by dropping the leading
    'py' from its module path and prefixing 'org.apache.'."""
    module_path = cl.__module__[2:]
    return f'org.apache.{module_path}.{cl.__name__}'
|
7569f5be6768f87a347c7b84fb358f3e7fd9ef38
| 70,838
|
def client(app, db):
    """Return a Web client, used for testing, bound to a DB session.

    :param app: application object exposing ``test_client()`` (presumably a
        Flask-style app fixture -- confirm against the test suite).
    :param db: database session fixture; not used directly here, accepted so
        the client's lifetime is tied to the session.
    """
    return app.test_client()
|
da1265c0a693316e9892a098be1ebb1043860902
| 70,839
|
def index(item, seq):
    """Return ``seq.index(item)``, or -1 when `item` is not present."""
    try:
        return seq.index(item)
    except ValueError:
        return -1
|
85b50e1e66c050f5072ce834672b130c7a7f0a46
| 70,840
|
from typing import Dict
import requests
def get_json(url: str) -> Dict:
    """Fetch `url` over HTTP and return the decoded JSON payload."""
    return requests.get(url).json()
|
7a62fd2417a1b80979dc8eccebce41dc4d9cc2e1
| 70,843
|
def get_span(tag):
    """Return the (start, end) extent of the given extent tag as a tuple."""
    attributes = tag.attrib
    return attributes['start'], attributes['end']
|
06f16e78b60b98065012cf3fdc19bfccf3625fb8
| 70,847
|
import getpass
def host_username() -> str:
    """Get the current username from the OS.

    Delegates to :func:`getpass.getuser`, which consults the LOGNAME / USER /
    LNAME / USERNAME environment variables before falling back to the system
    password database.
    """
    return getpass.getuser()
|
cc2eac760d30f61832349568eec27e339602f093
| 70,849
|
def borehole_thermal_resistance(pipe, m_flow_borehole, cp_f):
    """
    Evaluate the effective borehole thermal resistance.

    Parameters
    ----------
    pipe : pipe object
        Model for pipes inside the borehole.
    m_flow_borehole : float
        Fluid mass flow rate (in kg/s) into the borehole.
    cp_f : float
        Fluid specific isobaric heat capacity (in J/kg.K)

    Returns
    -------
    R_b : float
        Effective borehole thermal resistance (m.K/W).
    """
    # a_out: coefficient in T_{f,out} = a_out*T_{f,in} + [b_out]*[T_b]
    a_out = pipe.coefficients_outlet_temperature(
        m_flow_borehole, cp_f, nSegments=1)[0].item()
    # a_Q: coefficient in Q_b = [a_Q]*T_{f,in} + [b_Q]*[T_b]
    a_Q = pipe.coefficients_borehole_heat_extraction_rate(
        m_flow_borehole, cp_f, nSegments=1)[0].item()
    # Borehole length
    H = pipe.b.H
    # Effective borehole thermal resistance
    return -0.5 * H * (1. + a_out) / a_Q
|
36d3c0c484d892843ccd56750c0d7fa95468abe0
| 70,852
|
import re
def getidtype(identifier):
    """Try to determine the type of chemical identifier given.

    Recognizes InChIKey, InChI, CAS RN, ChEMBL and DSSTOX identifiers;
    anything else is classified as "other".
    """
    # Ordered (pattern, type) dispatch table; first match wins.
    patterns = (
        ('^[A-Z]{14}-[A-Z]{10}-[A-Z]$', "inchikey"),
        ('^InChI=1S?/', "inchi"),
        ('^[0-9]{2,7}-[0-9]{2}-[0-9]$', "casrn"),
        ('^CHEMBL[0-9]+$', "chemblid"),
        ('^DTXSID[0-9]+$', "dsstox"),
    )
    for pattern, id_type in patterns:
        if re.search(pattern, identifier):
            return id_type
    return "other"
|
f853d512029b0034e9657a7260b233f3b9a9303e
| 70,855
|
def compute_metrics(outputs, targets, metrics, ids=None):
    """
    Compute evaluation metrics for paired lists of output and target images.

    :param outputs: list of output images
    :param targets: list of gold standard images
    :param metrics: dict mapping {metric_name: metric_function}; each function
        is called as ``metric_function(output, target)``.
    :param ids: (Optional) list of image identifiers; defaults to the sample
        index as a string.
    :return: list of per-sample dicts, each with an 'id' key plus one key per
        metric.
    """
    assert len(outputs) == len(targets), 'len(outputs):{} != len(targets):{}'.format(len(outputs), len(targets))
    assert isinstance(metrics, dict), 'Provide metrics as a dictionary containing {metric_name: metric_function}'
    results = []
    for sample_idx, (output, target) in enumerate(zip(outputs, targets)):
        entry = {'id': ids[sample_idx] if ids is not None else str(sample_idx)}
        for metric_name, metric_func in metrics.items():
            entry[metric_name] = metric_func(output, target)
        results.append(entry)
    return results
|
348b26b71e36701cbd319a18db5bc2d1597beee7
| 70,856
|
def get_file_contents_as_string(filepath):
    """
    Read the file at `filepath` and return its entire contents as one string.
    """
    # file.read() already returns the whole file in one string; the original
    # accumulated readlines() into a list and joined, to the same effect.
    with open(filepath, 'r') as infile:
        return infile.read()
|
9370958576a10cb4c2cd0efd98045bd2d7b156bc
| 70,857
|
def mkpad(items):
    """
    Return the length of the longest element of `items` (as a string) plus two.

    An empty input yields 2, matching the original behavior. Replaces the
    original O(n^2) scan (which re-searched the list with `.index()` on every
    element) with a single `max()` pass.
    """
    return max((len(str(element)) for element in items), default=0) + 2
|
8c9d69431116827d2bd0eaac72d1c54a3997c890
| 70,858
|
def DFToXY(dataframe):
    """
    Split a dataframe produced by the BuildDataFrames method into features
    and labels.

    Parameters
    ----------
    dataframe : pandas DataFrame
        dataframe with features and a "Label" column

    Returns
    -------
    x : pandas DataFrame
        dataframe of features (everything except "Label").
    y : pandas Series
        the labels.
    """
    labels = dataframe["Label"]
    features = dataframe.drop("Label", axis=1)
    return features, labels
|
f74680438f2e31c1ad581b3ae31fd713cbfce0aa
| 70,861
|
def get_assignment_detail(assignments):
    """
    Build the list of assignment elements from the response's assignment
    details, defaulting every missing field to ''.

    :param assignments: assignments detail from response.
    :return: list of assignment elements.
    """
    details = []
    for assignment in assignments:
        details.append({
            'ID': assignment.get('id', ''),
            'FirstName': assignment.get('firstName', ''),
            'LastName': assignment.get('lastName', ''),
            'ReceiveEmails': assignment.get('receiveEmails', ''),
            'Email': assignment.get('email', ''),
            'Username': assignment.get('username', ''),
        })
    return details
|
a0e61908386c3972cae84912d5eb3c181b6b7ed5
| 70,862
|
import hashlib
def make_password(service_provider_id, service_provider_password,
                  timestamp):
    """
    Build a time-sensitive password for a request.

    Accepts ``str`` or ``bytes`` arguments; str values are UTF-8 encoded
    before hashing, since ``hashlib`` requires bytes on Python 3 (the
    original str concatenation raised TypeError there).

    NOTE: MD5 is kept for wire-protocol compatibility; it is not suitable
    for security-sensitive hashing.
    """
    digest = hashlib.md5()
    for part in (service_provider_id, service_provider_password, timestamp):
        digest.update(part.encode('utf-8') if isinstance(part, str) else part)
    return digest.hexdigest()
|
eb68df2bdc5aac51f84341df085bf9320057eebb
| 70,864
|
def PyMapping_Items(space, w_obj):
    """On success, return a list of the items in object o, where each item is a
    tuple containing a key-value pair. On failure, return NULL. This is
    equivalent to the Python expression o.items()."""
    w_items = space.call_method(w_obj, "items")
    return space.call_function(space.w_list, w_items)
|
dec1501c5a1632e49bb7cddafa6a8cbe4802b3d9
| 70,871
|
def is_present(marks, text):
    """
    Return True when every mark in `marks` occurs in `text`.
    """
    for mark in marks:
        if mark not in text:
            return False
    return True
|
0ea198520b1e46f1c9b26a9533e0785140348776
| 70,873
|
import csv
def read_csv(f, Model):
    """
    Read a headered CSV file through the `Model` row constructor.

    Returns a generator yielding ``Model(row)`` for each data row; the header
    row is discarded.
    """
    rows = csv.reader(f, dialect="excel")
    next(rows, None)  # discard the header row
    return (Model(record) for record in rows)
|
e7b247947aa882ba5feb317898d4629b9a420752
| 70,875
|
def is_2d(fs):
    """Return True when the function space's mesh has geometric dimension 2.

    `fs` is presumably a Firedrake/FEniCS-style function space exposing
    ``mesh().geometric_dimension()`` -- confirm against the callers.
    """
    return fs.mesh().geometric_dimension() == 2
|
24dadb7ff6f6810a0ee9cfbc496bf41050270425
| 70,879
|
def indent_lines(s, indent=4, skip=0):
    """Indent the lines of a string.

    Args:
        s: the string
        indent (4): the number of spaces to indent
        skip (0): the number of leading lines left unindented

    Returns:
        the indented string
    """
    lines = s.split("\n")
    head = lines[:skip]
    tail = lines[skip:]
    parts = []
    if head:
        parts.append("\n".join(head))
    if tail:
        parts.append("\n".join((" " * indent) + line for line in tail))
    if parts:
        return "\n".join(parts)
    return s
|
f21d1cbff97c1d45a5ab46e6521a403539225440
| 70,880
|
import numbers
def discrete_signal(signal, step_size):
    """Round `signal` to the nearest multiple of `step_size` and clip to [-1, 1].

    Parameters
    ----------
    signal: pd.Series or scalar number
        Betting-size signals ranged [-1, 1]
    step_size: float
        Discrete step size

    Returns
    -------
    pd.Series or scalar, matching the input type
    """
    if isinstance(signal, numbers.Number):
        quantized = round(signal / step_size) * step_size
        return max(-1, min(1, quantized))
    quantized = (signal / step_size).round() * step_size
    quantized[quantized > 1] = 1
    quantized[quantized < -1] = -1
    return quantized
|
183af7cf6ca30daaebb44b0d41c860b001109136
| 70,887
|
def compute_strati_ratio(T, S, alpha, beta, grid):
    """
    Compute the stratification ratio
    R = (alpha * dT/dz + beta * dS/dz) / (alpha * dT/dz - beta * dS/dz)
    """
    # Negate the derivatives so z points in the right (upward) direction.
    dT_dz = -grid.derivative(T, axis="Z", boundary="extend")
    dS_dz = -grid.derivative(S, axis="Z", boundary="extend")
    numerator = alpha * dT_dz + beta * dS_dz
    denominator = alpha * dT_dz - beta * dS_dz
    return numerator / denominator
|
ead5554c467a9dd876aa15914f806993c888d06f
| 70,889
|
def readfile(openedfile):
    """Rewind an already-open file object and return its full contents as a string."""
    openedfile.seek(0)
    contents = openedfile.read()
    return str(contents)
|
c8be0033794c5316381045a492c666fb90fdc378
| 70,891
|
import itertools
def Flatten(x: list) -> list:
    """Flatten one level of nesting from a list of iterables."""
    return [item for inner in x for item in inner]
|
e7504a7f85eed2b81609c7fbfd46c45555ce7118
| 70,892
|
def dotproduct(X, Y):
    """Return the sum of the element-wise product of vectors X and Y."""
    total = 0
    for a, b in zip(X, Y):
        total += a * b
    return total
|
8c2f515c63cae018e4065c2913b610045c9e0f00
| 70,898
|
def seconds_to_MMSS_colon_notation(sec):
    """Format integer seconds as zero-padded 'MM:SS'. If sec=121, return '02:01'."""
    assert isinstance(sec, int) and 0 <= sec <= 99*60 + 59
    minutes, seconds = divmod(sec, 60)
    return '%02d:%02d' % (minutes, seconds)
|
6716dea4c3c9885fc056bf61d2d3366f7e13c7d3
| 70,906
|
def parse_fasta(fh):
    """Parse a RefSeq fasta file.

    Fasta headers are expected to be like:
    >NR_165790.1 Tardibacter chloracetimidivorans strain JJ-A5 16S ribosomal RNA, partial sequence

    Args:
        fh: filehandle (or iterable of lines) of the fasta file

    Returns:
        A fasta_dict like {'name': ['seq1', 'seq2'], etc. } — a header that
        appears more than once gets one sequence entry per occurrence.
    """
    sequences = {}
    header = ''
    for raw_line in fh:
        stripped = raw_line.strip()
        if stripped.startswith('>'):
            header = stripped[1:]
            # Start a fresh sequence slot for this header occurrence.
            sequences.setdefault(header, []).append('')
        else:
            # Sequence line: extend the most recent slot for this header.
            sequences[header][-1] += stripped
    return sequences
|
22340a0c526085ef3280d460f654d0914f08b7b2
| 70,907
|
def max_available_power_rule(mod, prj, tmp):
    """
    **Constraint Name**: GenVarStorHyb_Max_Available_Power_Constraint
    **Enforced Over**: GEN_VAR_STOR_HYB_OPR_TMPS

    Power provision plus upward services must not exceed the available
    power (available capacity times capacity factor) plus the net power
    drawn from storage (discharge minus charge).
    """
    provided = (
        mod.GenVarStorHyb_Provide_Power_MW[prj, tmp]
        + mod.GenVarStorHyb_Upward_Reserves_MW[prj, tmp]
    )
    available = (
        mod.GenVarStorHyb_Available_Power_MW[prj, tmp]
        + mod.GenVarStorHyb_Discharge_MW[prj, tmp]
        - mod.GenVarStorHyb_Charge_MW[prj, tmp]
    )
    return provided <= available
|
17f6c9ff570b15bafc49272599cfbe5832b8e62d
| 70,910
|
def reward(total_reward, percentage):
    """
    Weighted reward for a problem with `total_reward`, completed to
    `percentage` (given in [0, 100]).

    Because early test cases are generally easy, the payout is 20:80 rather
    than linear: the first 50% of progress earns 20% of the reward
    (0.4 * fraction), the remaining 50% earns the other 80%
    (1.6 * fraction - 0.6).
    """
    fraction = percentage / 100
    if fraction <= 0.5:
        weighted = 0.4 * fraction
    else:
        weighted = 1.6 * fraction - 0.6
    return round(total_reward * weighted)
|
957b0248d44c8033ceaf852e2780224f3fb379f9
| 70,918
|
def load_dat(filename: str) -> list:
    """
    Load the DAT file at `filename` and return its contents as a list with
    one element per line.

    Fix: the original ignored `filename` and opened the literal path
    '(unknown).dat'.
    """
    with open(filename, mode='r') as dat_file:
        return dat_file.read().split('\n')
|
25f13fedb8bb0d2927e794b20691c648b8502bc5
| 70,920
|
def bps_mbps(val: float) -> float:
    """
    Convert bits per second (bps) into megabits per second (mbps),
    rounded to two decimal places.

    Args:
        val (float): The value in bits per second to convert.

    Returns:
        float: `val` expressed in megabits per second.

    Examples:
        >>> bps_mbps(1000000)
        1.0
        >>> bps_mbps(1129000)
        1.13
    """
    megabits = float(val) / 1000000
    return round(megabits, 2)
|
13520e00c393a647ccdc2ba0021445970e169fd2
| 70,921
|
def check_consistency_names_grounding_dict(grounding_dict, names_map):
    """Check that a grounding dict and names map cover the same names.

    Returns True when the set of non-'ungrounded' groundings equals the
    key set of `names_map`.
    """
    grounded = set()
    for grounding_map in grounding_dict.values():
        for grounding in grounding_map.values():
            if grounding != 'ungrounded':
                grounded.add(grounding)
    return grounded == set(names_map)
|
a406225485d4d121da86eada198a913724d22bec
| 70,923
|
def checkNoneOption(value):
    """
    Translate the literal string 'None' (or the one-element list ['None'])
    into None; every other value — including None itself — passes through
    unchanged.
    """
    if value is None:
        return None
    if value == 'None':
        return None
    if isinstance(value, list) and value == ['None']:
        return None
    return value
|
5ba1c9da12ecdb3f6d9562bac4017934c66393a3
| 70,925
|
import re
def get_jinja_variables(text):
    """Extract jinja variable expressions from a string.

    Args:
        text (string): text to scan for ``{{ ... }}`` expressions
            (the original docstring named this parameter "line").

    Returns:
        list: the inner text of each jinja variable, in order of appearance.
    """
    # The original had a duplicated assignment (`regex_pattern = regex_pattern = ...`).
    pattern = re.compile("(\\{{)((.|\n)*?)(\\}})", re.MULTILINE)
    return [match.group(2) for match in pattern.finditer(text)]
|
2a68851ff318a2de8072466e14305caf64d7d65b
| 70,928
|
def true_s2l(value: float) -> float:
    """Convert an sRGB gamma-corrected component to linear light."""
    linear_cutoff = 0.04045
    if value > linear_cutoff:
        return ((value + 0.055) / 1.055) ** 2.4
    return value / 12.92
|
0e0a4123589397a0ee246b1bc12ccaaf550aaea4
| 70,931
|
def freeze(obj):
    """Recursively convert a state (dicts/lists) into hashable
    frozensets/tuples, for memoizing."""
    if isinstance(obj, list):
        return tuple(freeze(item) for item in obj)
    if isinstance(obj, dict):
        return frozenset((key, freeze(val)) for key, val in obj.items())
    return obj
|
0cd224d292dbf2f5835b4154d894755ca652be13
| 70,933
|
def get_item(dictionary, index):
    """Look up `index` in `dictionary`; missing keys yield None."""
    value = dictionary.get(index)
    return value
|
5134ab38a3d33f995a9f6358aa1949865c0c820c
| 70,935
|
def dist(p1, p2):
    """Absolute distance between the scalar points p1 and p2."""
    delta = p2 - p1
    return abs(delta)
|
6b59fd1354b1f01cfe67bf73ca1d9db3aa7689af
| 70,939
|
import torch
def _convert_boxes_to_roi_format(boxes):
"""
Convert rois into the torchvision format.
:param boxes: The roi boxes as a native tensor[B, K, 4].
:return: The roi boxes in the format that roi pooling and roi align in torchvision require. Native tensor[B*K, 5].
"""
concat_boxes = boxes.view((-1, 4))
ids = torch.full_like(boxes[:, :, :1], 0)
for i in range(boxes.shape[0]):
ids[i, :, :] = i
ids = ids.view((-1, 1))
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
|
72ce30f5d7a92b3a09eb692c88f610da26b1edeb
| 70,941
|
async def get(spfy, session, url):
    """
    Makes a request to the Spotify API with the appropriate
    OAuth headers.

    :param spfy: client object providing ``_auth_headers()`` with the OAuth
        authorization fields
    :param session: async HTTP client session (presumably aiohttp -- its
        ``get`` returns an async context manager; confirm)
    :param url: absolute API URL to fetch
    :return: the json object for the response
    """
    # Merge the OAuth headers with the JSON content-type header.
    headers = {
        **spfy._auth_headers(),
        "Content-type": "application/json"
    }
    async with session.get(url, headers=headers) as resp:
        response = await resp.json()
        return response
|
e56486f0a85ada9c090439bd7a30503e47d33488
| 70,942
|
def _get_favorites(request):
"""
Gets the favorite articles' ids from request.user
Input: request
@return: <list> List of article ids
"""
# rows = [{'id': 2}, {'id': 4} ... ] (A list of article ids)
rows = request.user.favorite_article.values('id')
# favorites = [2, 4, ...] using list comprehension
favorites = [row['id'] for row in rows]
return favorites
|
42a8025aeb0c5b2847344c21c66a17f3c386ab70
| 70,949
|
def read_igor_J_gene_parameters(params_file_name):
    """Load raw genJ from file.

    genJ is a list of genomic J information. Each element is a list of three
    elements. The first is the name of the J allele, the second is the genomic
    sequence trimmed to the CDR3 region for productive sequences, and the last
    is the full germline sequence. For this 'raw genJ' the middle element is an
    empty string to be filled in later.

    Parameters
    ----------
    params_file_name : str
        File name for a IGOR parameter file.

    Returns
    -------
    genJ : list
        List of genomic J information, ordered by each allele's index as
        recorded in the parameter file.
    """
    with open(params_file_name, 'r') as params_file:
        J_gene_info = {}
        in_J_gene_sec = False
        for line in params_file:
            # The J-gene section starts at this IGOR section header.
            if line.startswith('#GeneChoice;J_gene;'):
                in_J_gene_sec = True
            elif in_J_gene_sec:
                # Allele lines look like '%name;sequence;index' -- map
                # name -> [sequence, index].
                if line[0] == '%':
                    split_line = line[1:].split(';')
                    J_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
                else:
                    # First non-'%' line ends the J-gene section.
                    break
    # Pre-size the output; every slot is reassigned below, so the shared
    # empty-list aliasing of [[]]*n is harmless here.
    genJ = [[]]*len(list(J_gene_info.keys()))
    for J_gene in list(J_gene_info.keys()):
        # Place each allele at its recorded index: [name, '', germline seq].
        genJ[J_gene_info[J_gene][1]] = [J_gene, '', J_gene_info[J_gene][0]]
    return genJ
|
b853081955db529c8f7b7db9491f7631902deb23
| 70,951
|
def import_class(model_name):
    """
    Import a predefined detection class from sahi.model by name.

    Args:
        model_name: str
            Name of the detection model class (example: "MmdetDetectionModel")

    Returns:
        class_: the class object with the given name
    """
    sahi_model_module = __import__("sahi.model", fromlist=[model_name])
    return getattr(sahi_model_module, model_name)
|
cd4a8d4fd29e8977e3800d64487dabd0158363d5
| 70,952
|
def _join_codes(fg, bg):
"""Join `fg` and `bg` with ; and surround with correct esc sequence."""
colors = ';'.join(filter(lambda c: len(c) > 0, (fg, bg)))
if colors:
return '\x1b[' + colors + 'm'
return ''
|
88e8bc886f100113dcf2221b5827f0acf5c76845
| 70,954
|
import math
def identityProbability(gs: int, mr: float = 1e-08) -> float:
    """Probability that a sequence stays unchanged over `gs` generations of
    evolution.

    **Keyword arguments:**
    gs -- number of generations
    mr -- per-generation mutation rate (default is 1e-08)
    """
    survival_per_generation = 1 - mr
    return math.pow(survival_per_generation, gs)
|
c8be5ef9c459e709482b69c0f4d7ace64364b639
| 70,958
|
def RegionPack(region_id, center=None, verts=None):
    """Pack a region_id and the constructor overrides for that Region into a
    tuple. Only `center` or `verts` can be overridden currently, and only one
    of those per Region. The resulting tuple looks like:
    (region_id, {region_constructor_args})

    Fix: identity (`is not None`) comparisons replace `!= None`, which breaks
    for array-like overrides whose __eq__ is elementwise (e.g. numpy arrays).
    """
    region_args = {}
    if center is not None:
        assert verts is None
        region_args['center'] = center
    if verts is not None:
        assert center is None
        region_args['verts'] = verts
    return (region_id, region_args)
|
cdf023f41b877c1ab213552a3bc8c7a38bc1a7ec
| 70,959
|
from typing import Callable
def pushcall(call: Callable, from_other_thread: bool = False) -> None:
    """pushcall(call: Callable, from_other_thread: bool = False) -> None

    Schedule ``call`` to run on the event loop during the next cycle.

    Category: General Utility Functions

    Handy for calls that are disallowed from within other callbacks, etc.

    Expects to be used in the game thread and automatically saves and
    restores the ba.Context to behave seamlessly. To push a call from
    outside the game thread, pass ``from_other_thread=True``; the call
    then always runs in the UI context on the game thread.

    NOTE(review): this body is a stub that always returns None --
    presumably a docs/binding placeholder replaced at runtime; confirm.
    """
    return None
|
b56b8d63bd265010e5daf94ee46e59bd194c13bf
| 70,961
|
def splitIntoSingleMutations(mutation):
    """
    Split a multiple-mutation string into a list of single mutations,
    e.g. 'A:N25R/A:N181D' -> ['A:N25R', 'A:N181D'].

    Args:
        mutation (str): '/'-separated mutation string.

    Returns:
        list[str]: the individual mutation strings. A string with no '/'
        yields a one-element list; '' yields [''] (matching the original
        manual-scan behavior).
    """
    # str.split does exactly what the original index-scanning loop did:
    # cut on every '/' and keep empty segments.
    return mutation.split('/')
|
33edaa51b99b50d7b8b74c7f55157a4389db4a2f
| 70,969
|
import re
def freeze_vars(variables, pattern):
    """Drop variables whose ``.name`` matches ``pattern`` (anchored at start).

    Args:
        variables: all the variables in training (objects with a ``name``).
        pattern: a reg expression such as ".*(efficientnet|fpn_cells).*";
            a falsy pattern means "keep everything".

    Returns:
        var_list: a list containing variables for training.
    """
    if not pattern:
        return variables
    matcher = re.compile(pattern)
    return [var for var in variables if not matcher.match(var.name)]
|
23de336ea09e2b054930c575f5c99af0fbcd82b6
| 70,970
|
from typing import AsyncGenerator
from typing import Optional
from typing import Callable
from typing import List
async def async_list(
        gen: AsyncGenerator,
        filter: Optional[Callable] = None) -> List:
    """Drain an async generator into a concrete list, optionally keeping
    only items accepted by ``filter``."""
    collected = []
    async for item in gen:
        # Keep the item when no filter is given, or the filter accepts it
        # (De Morgan of the original "skip when filter rejects" form).
        if not filter or filter(item):
            collected.append(item)
    return collected
|
f9b984fd27fc33fa907738c8cd9dfd7c2d86c05c
| 70,978
|
def scale_census_variables(census):
    """
    Scale the Men, Women, Citizen and Employed variables in ``census``
    by TotalPop to get a percentage.

    Input: dataframe of census data (the columns are mutated in place).
    Output: the same dataframe, with those columns as percentages.
    """
    for column in ("Men", "Women", "Citizen", "Employed"):
        census[column] = 100 * census[column] / census.TotalPop
    return census
|
39dae100f2121732fe5f0d91f47a598dbfef092c
| 70,980
|
def make_tag_dict(tag_list):
    """Build a name -> value mapping from a list of tag dicts.

    Args:
        tag_list (list): a list of dicts with "Key" and "Value" entries.

    Returns:
        dict: tag names as keys, tag values as values.
    """
    result = {}
    for tag in tag_list:
        result[tag["Key"]] = tag["Value"]
    return result
|
3432db43ac28c19d3abb0b8de03e0ad229a761a0
| 70,981
|
def create_translation_fieldname(translation):
    """
    Description:
        Build a unique field name for a given Translation instance.
    Args:
        translation (Translation): instance providing ``item.field.name``,
            ``language.name`` and ``id``.
    Returns:
        str: e.g. "title in French (id=7)".
    """
    return "{} in {} (id={})".format(
        translation.item.field.name,
        translation.language.name,
        translation.id,
    )
|
de567834d710469c5b05f7427b8c6cc678f463ee
| 70,982
|
def shift_within_range(value, min, max, reverse=False):
    """Step ``value`` forward (or backward with ``reverse``) inside
    [min, max], wrapping around at either end.

    Raises ValueError when ``value`` is not inside the range.
    """
    candidates = range(min, max + 1)
    step = -1 if reverse else 1
    position = candidates.index(value) + step
    return candidates[position % len(candidates)]
|
a8ffb3abfa7fb598e28359b5e38ac56859e3b551
| 70,983
|
def default_error_handler(exc: Exception) -> str:
    """Fallback exception formatter: the exception's own string form."""
    return "{}".format(exc)
|
89df06d69055fdf9704ff573f141aba40dc39d58
| 70,984
|
def tcp_encode(string: str) -> bytes:
    """Encode ``string`` as a CRLF-terminated TCP message (UTF-8)."""
    return (string + '\r\n').encode()
|
66715f6547b883394fbfea80edfde6b48b060825
| 70,989
|
def buildNameCountry(cfile):
    """
    Take an open .csv file (rows of [country, username]) and build a
    dictionary indexed by username with value country.
    """
    # Later rows win on duplicate usernames, same as the original loop.
    return {username: country for country, username in cfile}
|
dbaf6b343b896c405aeb36b958cd2cdcad64c712
| 70,990
|
def number_split(num):
    """
    Format a number with thousands separators: 12345678 => 12,345,678.
    :param num: the number (or numeric string) to format
    :return: the comma-grouped string
    """
    return f"{int(num):,}"
|
a7cc03733d6ef9ae031d4fa8f2094c04260bd94c
| 70,991
|
def param_undefined_info(param_name, default_value):
    """
    Build an info message warning about an undefined hyper-parameter.
    """
    template = ("Parameter warning: the hyper-parameter "
                "'{}' is not specified, will use default value '{}'")
    return template.format(param_name, default_value)
|
15334d816ad91f4a2b3a5a8c40cb4db8895d62ce
| 70,993
|
def greater_than(ds, value):
    """Return a boolean array with True where elements > value
    Parameters:
    -----------
    ds: xarray Dataset
        The array to mask
    value: float, xarray Dataset
        The value(s) to use to mask ds
    """
    comparison = ds > value
    return comparison
|
9d634bb70fb5f4f6b6bdf13d56f9279e56e337ae
| 70,997
|
def has_next(seq, index):
    """
    True when the sequence has at least one more item after ``index``.
    """
    return index + 1 < len(seq)
|
607efe35930efaa2d37f0148629c65ec0d6f4fce
| 71,000
|
def mm2m(da):
    """Convert mm to m.
    Args:
        da (xarray.DataArray): Data in mm.
    Returns:
        xarray.DataArray: Data in m.
    """
    mm_per_m = 1000.0
    return da / mm_per_m
|
c7e0c8a9e0c5194a38f7e5e58e4fdb07355b3b23
| 71,009
|
def binary_printable(str):
    """Return a "hex" representation of the bytes/characters in STR.

    Each character is rendered as its two-digit lowercase hex code point,
    e.g. "AZ" -> "415a".

    Note: the original 256-entry lookup table only covered U+0000..U+00FF
    (and raised KeyError beyond that); this version renders any code
    point's hex, which is a backward-compatible generalization.
    """
    # format(ord(ch), '02x') replaces the hand-written \x00..\xff table;
    # ''.join avoids the quadratic repeated string concatenation.
    return "".join(format(ord(ch), "02x") for ch in str)
|
e90727463e91d15974c1e0ca6064a9d5f0cd15c7
| 71,011
|
def reverse_dotted_decimals(ipaddress):
    """
    Reverse the order of the decimals in the specified IP-address, e.g.
    "192.168.10" becomes "10.168.192".
    """
    octets = ipaddress.split('.')
    octets.reverse()
    return '.'.join(octets)
|
b86779437e92aa1c85a83ea9c6f9906e1a22e351
| 71,013
|
def get_Zb(k, l):
    """Return Z for the k-l length suffix.

    2**(k-l), minus one except in the degenerate l == k case.
    """
    assert 0 < k and 0 <= l <= k
    correction = 1 if l < k else 0
    return 2 ** (k - l) - correction
|
e90d4cc4fb98f9aa2580e11b93cef3624465e88a
| 71,016
|
import csv
def input_performances(name):
    """
    Generates from a .csv file the list of actions and the performance dictionary.

    :param name: Name of the .csv file which must contain on the first line the
        names of the actions, on the second line the names of the criteria and
        on the following lines the performance of each action against each
        criterion.
    :return A: List of strings corresponding to the names of the actions in the
        order in which they are given in the csv file.
    :return P: Dictionary in which the keys are the names of the actions and the
        values are sub-dictionaries mapping criterion name -> float performance.
    """
    A = []
    C = []
    P = {}
    with open(name, 'r', newline='') as P_csv:
        reader = csv.reader(P_csv, delimiter=',')
        # enumerate() replaces the manual line_count bookkeeping; the
        # side-effect list comprehensions are replaced by list()/dict
        # comprehension (comprehensions should build values, not mutate).
        for line_number, row in enumerate(reader):
            if line_number == 0:
                A = list(row)
            elif line_number == 1:
                C = list(row)
            else:
                # Row i (i >= 2) holds the performances of action A[i-2]
                # across the criteria, in column order.
                P[A[line_number - 2]] = {
                    criterion: float(value)
                    for criterion, value in zip(C, row)
                }
    return A, P
|
366afa5736d700f717706db86037e2f400b70442
| 71,018
|
def get_selected_text(self):
    """
    Return the text of the first selected region in the view.

    Parameters
    ----------
    self : object exposing a ``view`` with ``sel()`` and ``substr()``
        (presumably a Sublime Text command/view wrapper -- confirm)

    Returns
    ----------
    str
        Text of the first selection
    """
    first_region = self.view.sel()[0]
    return self.view.substr(first_region)
|
4b4ea7c7d90ffc42baa9655a3f888a1795c990e9
| 71,023
|
def apply_filtering(tag, text):
    """Alter certain metadata elements according to the specified rules."""
    if tag == 'PubNumber':
        return text + '.pdf'
    if tag == 'Keyword':
        return text.title()
    if tag == 'DegreeCode':
        return "Dissertation" if text == "Ph.D." else text
    # NOTE(review): unmatched tags fall through and return None (not `text`)
    # -- confirm callers expect that.
|
28a4fb49f4d1c83d27c8d3459e4415a9c1b773a3
| 71,026
|
def card_to_flds(card):
    """Create the fields string for the given card: word, info,
    transcription and an optional sound reference, joined by \\x1f."""
    sound_field = '[sound:%s]' % card.sound if card.sound else ''
    fields = (card.word, card.info, card.transcription, sound_field)
    return '\x1f'.join(fields)
|
0b78487bcb230cb9aa20720683a56dc57116ac10
| 71,035
|
import math
def calc_norm(k):
    """
    Calculate the Euclidean norm of a vector in an n-dimensional space Rn.

    Args:
        k: sequence of numeric components.

    Returns:
        float: sqrt of the sum of squared components (0.0 for empty input).
    """
    # sum() over a generator replaces the `for i in range(len(k))` index
    # loop with the idiomatic form; the arithmetic is unchanged.
    return math.sqrt(sum(component ** 2 for component in k))
|
006a3b863c8f4909dfa6fe47cdf0eda33958e051
| 71,036
|
import random
def random_ip(pattern=None):
    """
    Takes a pattern as a string in the format of #.#.#.# where a # is an
    integer, and a # can be substituted with an * to produce a random
    octet (1-255). pattern = "127.0.0.*" returns a random string between
    127.0.0.1 and 127.0.0.255. With no pattern, all four octets are random.
    """
    if pattern is None:
        pattern = '*.*.*.*'
    # Replace each '*' left-to-right with its own random octet, drawing
    # one randint per asterisk exactly like the original.
    result = pattern
    while '*' in result:
        result = result.replace('*', str(random.randint(1, 255)), 1)
    return result
|
8246f38df9ce2c6ed71c1c088e8f27f888c33f4d
| 71,042
|
def invert_brackets(lhs: str) -> str:
    """
    Helper that mirrors brackets and parentheses in a string: each opener
    becomes its closer and vice versa; other characters pass through.
    """
    pairs = ["()", "[]", "{}", "<>", "/\\"]
    swap = {}
    for opener, closer in pairs:
        swap[opener] = closer
        swap[closer] = opener
    return "".join(swap.get(char, char) for char in lhs)
|
a264b24c083e3fec641d9dbb03dd2c8f743b76aa
| 71,045
|
import random
def recommend_random(query, movies, k=10):
    """
    Recommend a list of k random movie ids, drawn (with replacement)
    from the movies not already present in ``query``.
    """
    remaining = movies.drop(index=query.keys())
    return random.choices(remaining.index, k=k)
|
96874c0177cb397c0725b1c837143536451a603e
| 71,050
|
def get_factors(x):
    """Return the factors of ``x`` in ascending order.

    Args:
        x (int): positive integer to factor.

    Returns:
        list[int]: every i in [1, x] with x % i == 0.
    """
    # Comprehension replaces the append loop; dead commented-out prints
    # removed.
    return [i for i in range(1, x + 1) if x % i == 0]
|
3156c5a48ccfdb4491f4043d26541d965e60c577
| 71,052
|
def get_exp_label(val) -> str:
    """
    :param val: numeric label to format
    :return: label formatted in scientific notation using LaTeX math font,
        e.g. 10000 -> 10^4, 2000 -> 2·10^3.
    """
    # Strip trailing decimal zeros to find the power of ten and the
    # remaining mantissa.
    mantissa = int(val)
    exponent = 0
    while mantissa > 0 and mantissa % 10 == 0:
        mantissa //= 10
        exponent += 1
    if mantissa > 1:
        return r"$\mathdefault{" + str(mantissa) + r"\!·\!{10}^" + str(exponent) + r"}$"
    return r"$\mathdefault{" + r"{10}^" + str(exponent) + r"}$"
|
189d36550feeb7389e9c60f8a512d96f09703cf8
| 71,054
|
def name_splitter(name: str):
    """Extract gig title & start time from a name string.

    Names generally contain the start time and title split by "//" or "/";
    some names carry no start time at all.

    Args:
        name (str): name item from ParisCat GetStack
    Returns:
        (start_time, title) -- either element may be None; an empty name
        yields None (implicit fall-through, preserved from the original).
    """
    pieces = [part.strip('/').strip() for part in name.split('/', 1) if part != '']
    if len(pieces) == 2:
        # time comes first, then the title
        return pieces[0], pieces[1]
    if len(pieces) == 1:
        return None, pieces[0]
|
07ab1e30ed1dce61c4188766578327f7ff940828
| 71,056
|
def get_type_from_path(path):
    """Get the "file type" from a path.

    This is just the text after the last ".", by definition; a path with
    no "." yields the whole path unchanged.
    """
    return path.rpartition('.')[2]
|
ecf59c13bc5bd5dc3a1749de0ff3ff01f40ad037
| 71,064
|
import torch
def masked_accuracy(scores,targets,tgt_pad_index = 0):
    """
    Token accuracy for scores vs. targets, ignoring padding positions.
    @params scores(Tensor): output scores - shape(batch, tgt_seq_len-1, tgt_vocab_size)
    @params targets(Tensor): gold target - shape(batch, tgt_seq_len); the
        first time step is discarded before comparison
    @params tgt_pad_index(int): target pad index excluded from the count
    @returns accuracy(Tensor): scalar accuracy over non-pad positions
    """
    with torch.no_grad():
        gold = targets[:, 1:]
        valid = gold != tgt_pad_index
        total = valid.sum().float()
        predictions = torch.argmax(scores, dim=-1)
        correct = (predictions == gold) * valid
        return correct.sum() / total
|
8975b4c0b6e7dfed7a5aefae38bb81e9acc4eb6c
| 71,065
|
def process_STATS_ASCII_data_line(line,nchan):
    """Process one line of a STATS data file.
    @param line : string
        One line from an ASCII STATS data file: seconds, milliseconds,
        then four values (mean, rms, kurtosis, skew) per channel.
    @param nchan : number of signal channels in the file
    @return: tuple
        (sec, ms, means, rmses, kurtoses, skews) -- the per-channel
        statistics lists for one sampling time.
    """
    fields = line.split()
    sec = int(fields[0])
    ms = int(fields[1])
    means, rmses, kurtoses, skews = [], [], [], []
    for channel in range(nchan):
        base = 2 + 4 * channel
        means.append(float(fields[base]))
        rmses.append(float(fields[base + 1]))
        kurtoses.append(float(fields[base + 2]))
        skews.append(float(fields[base + 3]))
    return (sec, ms, means, rmses, kurtoses, skews)
|
ee3f46ac18efc5ff66f97ba14900437e44d90697
| 71,070
|
def ord_to_str(ord_list):
    """
    Convert a list of character ordinals (or a single ordinal) to a string.

    :param ord_list: list of character ordinals, or a single ordinal
    :type ord_list: int or list[int]
    :return: corresponding string
    :rtype: str
    """
    if isinstance(ord_list, int):
        ord_list = [ord_list]
    # ''.join(map(chr, ...)) replaces the quadratic `s += chr(o)` loop.
    return ''.join(map(chr, ord_list))
|
3141f1130d7f105e93e1cce14d4e31a585d23c40
| 71,071
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.