| content (string, lengths 39–14.9k) | sha1 (string, length 40) | id (int64, 0–710k) |
|---|---|---|
def get_parent_list(topic_type):
    """Get list of topic type's super types, including topic type itself."""
    __result_types = []
    __result_types.append(topic_type)
    __t = topic_type
    while __t.parent is not None:
        __result_types.insert(0, __t.parent)
        __t = __t.parent
    return __result_types | f8285d95674043d47063f2c8eb5885a2fc61d319 | 105,986 |
def modifies(g, n, modifiers):
    """
    Tests whether any of the modifiers of node n are in
    any of the categories listed in 'modifiers'
    """
    pred = list(g.predecessors(n))  # materialize: predecessors() may return an iterator, which is always truthy
    if not pred:
        return False
    pcats = []
    for p in pred:
        pcats.extend(p.getCategory())
    return bool(set(pcats).intersection([m.lower() for m in modifiers])) | 9c5ae2be81d74b822100ed93c091bafca79e6d4d | 105,987 |
def choose_poi_and_neighbors(gdf, placekey, neighbor_radius, projection='EPSG:3857'):
    """
    Identifies the "target" POI and its nearby neighbors, based on the `neighbor_radius` parameter (by default expressed in meters).
    """
    # classify Neighbors v. Target POI
    gdf['POI'] = 'Neighbor'
    gdf.loc[gdf['placekey'] == placekey, 'POI'] = 'Target'
    # transform to a projected coordinate reference system
    gdf_proj = gdf.to_crs(projection)
    # get the buffer for filtering neighbors
    target = gdf_proj.loc[gdf_proj['POI'] == 'Target']
    target_buffer = target.geometry.buffer(neighbor_radius)
    # find the neighbors
    output_proj = gdf_proj.loc[gdf_proj.intersects(target_buffer.unary_union)]
    # transform back to original coordinate reference system
    output = output_proj.to_crs(gdf.crs)
    return output | 1f6b945023ef480e7f6433979d6b6b7b723c34c3 | 105,989 |
def format_txt(signature):
    """
    Remove excess spaces and newline characters.
    """
    # str.split() with no argument already splits on any whitespace, including newlines
    return ' '.join(signature.split()) | c0a8251219f4c9454666e159f695ffe92ff9e476 | 105,990 |
import random
def weighted_choice(items):
    """
    Chooses a random element from items, where items is a list of tuples in
    the form (item, weight). weight determines the probability of choosing its
    respective item.
    """
    weight_total = sum(item[1] for item in items)
    n = random.uniform(0, weight_total)
    for item, weight in items:
        if weight > n:
            return item
        n -= weight
    return items[-1][0] | 3e346de08e8d1aa9aa2ae3363f773250eb5105a0 | 105,993 |
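A quick usage sketch (the item names and weights below are made up): with these weights, 'b' should come back roughly three times as often as 'a'.

items = [('a', 1.0), ('b', 3.0)]
counts = {'a': 0, 'b': 0}
for _ in range(10000):
    counts[weighted_choice(items)] += 1
# expect counts['b'] to be roughly 3x counts['a'] (about 7500 vs 2500)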
def filter_green_square(words: tuple, letter: str, offset: int) -> tuple:
    """
    Given a letter and an offset, return the words in the list
    that have that letter in that position.
    """
    return tuple(word for word in words if word[offset] == letter) | f785c7bd18eb25e39fca4bcc4ce126c4a9b479f7 | 106,000 |
def aes_pad_(data):
    """Adds padding to the data such that the size of the data is a multiple of
    16 bytes
    data: the data string
    Returns a tuple: (pad_len, data). pad_len denotes the number of bytes added
    as padding; data is the new data string with padded bytes at the end
    """
    pad_len = len(data) % 16
    if pad_len != 0:
        pad_len = 16 - pad_len
        # str() on a bytearray would produce its repr, not NUL bytes; append NUL characters directly
        data += '\0' * pad_len
    return (pad_len, data) | 55915b3d06f6000a2541362cc1731ecd18cb7365 | 106,001 |
def add_optional(user_event, data):
    """Adds an optional field if it exists."""
    if user_event.location:
        data.append(('location', user_event.location))
    if user_event.content:
        data.append(('description', user_event.content))
    return data | b8206a2d71d0087dba92aa28ea10361286341dbc | 106,002 |
def _parse_text(value):
    """Default coercion function which assumes text is UTF-8 encoded"""
    return value.decode('utf-8') | b43d8b36017db1cacf502395cd7f0ac5967757fe | 106,005 |
def check_uniqueness(str_):
    """Check if given string has all unique characters and return True
    :param str_: Given string
    :returns: True, if all characters are unique. Else, False.
    """
    char_dict = {}
    for c in str_:
        if char_dict.get(c) is None:
            char_dict[c] = 1
        else:
            print("Duplicate character: %s" % c)
            return False
    return True | 1c144e1102732907dea5a07648872accc7919691 | 106,008 |
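For reference, a set-based check is shorter; this sketch drops the duplicate-reporting print, so it is not a strict drop-in replacement.

def check_uniqueness_set(str_):
    # all characters are unique iff deduplication does not shrink the string
    return len(set(str_)) == len(str_)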
def _rename_indexes(left_df, right_df, lsuffix, rsuffix):
    """
    Helper method for other sjoin methods.
    Renames indexes to numeric for compatibility with rtree.
    Returns renamed DataFrames as well as old indexes and their names.
    Parameters
    ----------
    left_df, right_df : GeoDataFrame
        The GeoDataFrames being joined.
    lsuffix : string, default 'left'
        Suffix to apply to overlapping column names (left GeoDataFrame).
    rsuffix : string, default 'right'
        Suffix to apply to overlapping column names (right GeoDataFrame).
    Returns
    -------
    left_df, right_df : GeoDataFrame
        The GeoDataFrames being joined, with indexes renamed.
    left_index_name : string
        Original index name for left_df, to be restored by _join_results.
    right_index_name : string
        Original index name for right_df, to be restored by _join_results.
    index_left : string
        Original index for left_df, to be restored by _join_results.
    index_right : string
        Original index for right_df, to be restored by _join_results.
    """
    # store index names
    index_left = "index_%s" % lsuffix
    index_right = "index_%s" % rsuffix
    # due to GH 352
    if any(left_df.columns.isin([index_left, index_right])) or any(
        right_df.columns.isin([index_left, index_right])
    ):
        raise ValueError(
            "'{0}' and '{1}' cannot be names in the frames being"
            " joined".format(index_left, index_right)
        )
    # the rtree spatial index only allows limited (numeric) index types, but an
    # index in geopandas may be any arbitrary dtype. so reset both indices now
    # and store references to the original indices, to be reaffixed later.
    # GH 352
    left_df = left_df.copy(deep=True)
    try:
        left_index_name = left_df.index.name
        left_df.index = left_df.index.rename(index_left)
    except TypeError:
        index_left = [
            "index_%s" % lsuffix + str(i) for i, ix in enumerate(left_df.index.names)
        ]
        left_index_name = left_df.index.names
        left_df.index = left_df.index.rename(index_left)
    left_df = left_df.reset_index()
    right_df = right_df.copy(deep=True)
    try:
        right_index_name = right_df.index.name
        right_df.index = right_df.index.rename(index_right)
    except TypeError:
        index_right = [
            "index_%s" % rsuffix + str(i) for i, ix in enumerate(right_df.index.names)
        ]
        right_index_name = right_df.index.names
        right_df.index = right_df.index.rename(index_right)
    right_df = right_df.reset_index()
    return left_df, right_df, left_index_name, right_index_name, index_left, index_right | 1eabe0f979f2638cf73bac05c336ec86eed450eb | 106,011 |
from typing import Dict
from typing import Any
def make_keydict(key_to_type: Dict[str, Any]) -> str:
    """
    Returns the python code for declaring a dictionary that
    changes the returned strings to their correct types
    Parameters
    ----------
    key_to_type: dict
        keys are the field names in the returned structure, values
        are the names of the functions to call to cast to the correct type
    Returns
    -------
    keydict_declare: str
        String to paste into stub function to create the desired dictionary
    """
    accum = "{"
    for key, val in key_to_type.items():
        # no need to cast str to str
        if val != "str":
            accum += f"'{key}':{val},\n"
    accum += "}"
    return f"keydict : Dict[str,Any] = {accum}" | 51f8f022f5208e88e884e2c0b7fbed9ccf436453 | 106,016 |
def _convert_labels_dict_to_list(parent):
    """Convert "labels" from dictionary into list of "name", "value" pairs.
    This makes the resulting BigQuery schema more consistent when the json
    object has arbitrary user supplied fields.
    Args:
        parent: dict object.
    Returns:
        The modified dict object.
    """
    labels_dict = parent['labels']
    labels_list = [{'name': key, 'value': val}
                   for (key, val) in labels_dict.items()]
    parent['labels'] = labels_list
    return parent | 5158995008b19d156687d634559a2e3c6cea0275 | 106,017 |
import re
def parse_joint_latex(tex_str):
    """
    Parse a .tex string into separate blog posts.
    Each section is considered one blog post.
    Return [(section, content)] where section = section string and
    content is all the remaining string until the next section or end of doc.
    """
    # clean up tex. Remove unnecessary commands at the end of the string.
    tex_str = re.sub(r'\\'+'end{document}', '', tex_str)
    tex_str = re.sub(r'\\'+'bibliography{.+?}', '', tex_str)
    tex_str = re.sub(r'\\'+'bibliographystyle{.+?}', '', tex_str)
    list_posts = []
    # character index
    prev_end_sec_ind = -1
    sec_content_start = -1
    sec_title = None
    for m in re.finditer(r'\\'+'section{(.+?)}\n\n', tex_str, re.DOTALL):
        prev_end_sec_ind = m.start()
        if sec_title is not None:
            # add to the list
            assert sec_content_start >= 0
            sec_content = tex_str[sec_content_start:prev_end_sec_ind]
            list_posts.append((sec_title, sec_content))
        # beginning of a section
        sec_title = m.group(1)
        sec_content_start = m.end()
    # add the last post
    sec_content = tex_str[sec_content_start:]
    list_posts.append((sec_title, sec_content))
    return list_posts | b757243ee1f0070fb455ee3fe3df7dd59c8c8246 | 106,021 |
import re
def camel_snake_converter(string: str, snake_to_camel: bool = False):
    """
    Converts camel case strings to snake case strings, and vice-versa.
    Parameters:
        `string`: The string to be converted
        `snake_to_camel`: Determines which way to convert. \
            Default value will convert camel case to snake case.
    Returns:
        The converted string.
    """
    if snake_to_camel:
        snakes: list[str] = re.findall(r"_[a-z]", string)
        replacements = [x.upper()[1] for x in snakes]
        for s, r in zip(snakes, replacements):
            string = string.replace(s, r)
        return string
    else:
        capitals: list[str] = re.findall(r"[A-Z]", string)
        replacements = [f"_{x.lower()}" for x in capitals]
        for c, r in zip(capitals, replacements):
            string = string.replace(c, r)
        return string | 9d11521f865f9a52905280655e7c81b478e66279 | 106,022 |
import torch
def _compute_spsa_gradient(loss_fn, x, delta, samples, iters):
    """
    Approximately compute the gradient of `loss_fn` at `x` using SPSA with the
    given parameters. The gradient is approximated by evaluating `iters` batches
    of `samples` size each.
    """
    assert len(x) == 1
    num_dims = len(x.size())
    x_batch = x.expand(samples, *([-1] * (num_dims - 1)))
    grad_list = []
    for _ in range(iters):
        delta_x = delta * torch.sign(torch.rand_like(x_batch) - 0.5)
        delta_x = torch.cat([delta_x, -delta_x])
        with torch.no_grad():
            loss_vals = loss_fn(x + delta_x)
        while len(loss_vals.size()) < num_dims:
            loss_vals = loss_vals.unsqueeze(-1)
        avg_grad = (
            torch.mean(loss_vals * torch.sign(delta_x), dim=0, keepdim=True) / delta
        )
        grad_list.append(avg_grad)
    return torch.mean(torch.cat(grad_list), dim=0, keepdim=True) | a75444750378d50650d8eadc7538e397b905743e | 106,024 |
import copy
def create_model_diag_conf(rose_conf, templ_conf, model, diag, index):
    """
    Add model and diag sections to configuration
    rose_conf : original rose configuration
    templ_conf : template configuration, will be copied
    model : model name
    diag : diag name
    index : processor id
    returns a configuration
    """
    conf = copy.deepcopy(templ_conf)
    mname = 'namelist:models(' + model + ')'
    dname = 'namelist:diags(' + diag + ')'
    conf[mname] = rose_conf[mname]
    conf[dname] = rose_conf[dname]
    conf['general']['clear_netcdf_cache'] = 'false'
    return conf | 916ae66a9e558f8ac2ffda62207607fbebf65014 | 106,028 |
def remove_non_deployable_operators(operators: list):
    """Removes operators that are not part of the deployment pipeline.
    If the non-deployable operator is dependent on another operator, it will be
    removed from that operator's dependency list.
    Args:
        operators (list): original pipeline operators.
    Returns:
        A list of all deployable operators.
    """
    deployable_operators = [operator for operator in operators if operator["notebookPath"]]
    non_deployable_operators = list()
    for operator in operators:
        if operator["notebookPath"] is None:
            # checks if the non-deployable operator has dependencies
            if operator["dependencies"]:
                dependency = operator["dependencies"]
                # looks for who has the non-deployable operator as a dependency
                # and assigns the dependency of the non-deployable operator to this operator
                for op in deployable_operators:
                    if operator["operatorId"] in op["dependencies"]:
                        op["dependencies"] = dependency
            non_deployable_operators.append(operator["operatorId"])
    for operator in deployable_operators:
        dependencies = set(operator["dependencies"])
        operator["dependencies"] = list(dependencies - set(non_deployable_operators))
    return deployable_operators | f073ba72f555a6231c334291eaddc2e2a7f48496 | 106,032 |
def contact_helper(current, points):
    """
    return True if at least one point contacts the cup, return False otherwise
    Parameter current: the cup
    Precondition: current is a list
    Parameter points: information about the points
    Precondition: points is a list
    """
    for point in points:
        if ((point[0]-current[0])**2 + (point[1]-current[1])**2)**0.5 <= 15:
            return True
    return False | 1226a1753ed62a8b5def45249fe576c6e22573db | 106,033 |
def bitmask_from_text(mask, text):
    """Initialize a bitmask from text.
    Builds an integer value from text containing bit names that should be set. The
    complement of :func:`decode_bitmask`. For example::
        >>> COLORS = define_bitmask('COLORS','Primary colors',RED=0,BLUE=1,GREEN=4)
        >>> '{0:b}'.format(bitmask_from_text(COLORS,'GREEN|BLUE'))
        '10010'
    Args:
        mask: A bitmask type, normally created with :func:`create_bitmask`, that defines
            the symbolic bit names that are allowed.
        text: A list of bit names separated by '|'.
    Returns:
        int: Integer with bits set for each bit name appearing in the text.
    Raises:
        ValueError: invalid text specification.
    """
    if not hasattr(mask, '__dict__'):
        raise ValueError('Invalid bitmask.')
    value = int(0)
    for bit_name in text.split('|'):
        if bit_name not in mask.__dict__:
            raise ValueError('Invalid bit name: {0}.'.format(bit_name))
        value = value | mask.__dict__[bit_name]
    return value | 3035a69f357f7e6c61152120f9215b9fee54b1a9 | 106,035 |
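A minimal usage sketch, assuming the mask argument can be any object exposing bit values via attributes (a types.SimpleNamespace stands in here for whatever create_bitmask returns; the bit values are made up):

from types import SimpleNamespace
COLORS = SimpleNamespace(RED=1, BLUE=2, GREEN=16)  # hypothetical bit values
assert bitmask_from_text(COLORS, 'GREEN|BLUE') == 0b10010  # == 18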
def format_geo(geography_id):
    """
    Format geography id for use within an SDF
    :param geography_id: string; corresponds to DV360 geography id
    :return: string; formatted for SDF usage
    """
    return '{};'.format(geography_id) | 63d56fd91419ce150452acb2b35dc0f6b53388b7 | 106,038 |
def get_next_token(tokens, match_token):
    """
    Get the next token after the match_token in the list of tokens.
    """
    # The original returned the match_token itself, because it checked the flag
    # in the same iteration that set it; check the flag first instead.
    found = False
    for token in tokens:
        if found:
            return token
        if token == match_token:
            found = True
    return '' | 026ca5728e9b37026559c25cea049e79905e9a51 | 106,039 |
import math
def getCenterFrequency(filter_band):
    """
    Intermediate computation used by the mfcc function.
    Compute the center frequency (fc) of the specified filter band (l).
    This is where the mel-frequency scaling occurs. Filters are specified so that their
    center frequencies are equally spaced on the mel scale.
    """
    if filter_band == 0:
        center_frequency = 0
    elif filter_band >= 1 and filter_band <= 14:
        center_frequency = (200.0 * filter_band) / 3.0
    else:
        exponent = filter_band - 14
        center_frequency = math.pow(1.0711703, exponent)
        center_frequency = center_frequency * 1073.4
    return center_frequency | 8110aa5fb8c22ae35d96698c28816c22818aa691 | 106,045 |
def get_bool_from_response(response):
    """
    Given a valid bool response, returns the response in boolean form.
    str -> bool
    """
    return response == 'y' | 8705b2de1cf1e95484f301138fa29656c9e084ae | 106,046 |
import socket
def check_port(host, port):
    """Return True if the port is free (nothing accepted the connection),
    False if something is already listening on it."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        conn = sock.connect_ex((host, port))
        if conn == 0:
            # a connection succeeded, so the port is taken
            return False
        return True | 88999cb4ff18dd65861381084942e2cc5339e36c | 106,047 |
def format_tags(tags):
    """ Reformats tags
    :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3})
    :returns: list of dicts (e.g. [{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}])
    """
    return [dict(key=k, value=v) for k, v in tags.items()] | 8f928b7bd9cdf3bb4dcad502af3a1f6cc318de6b | 106,051 |
def ask(question, *validators):
    """
    Asks a single question and validates it against a list of validators.
    When an answer fails validation, the complaint is printed and the question
    is asked again.
    :param question: The question to ask.
    :param validators: The list of validators that the answer must pass.
    :return: The answer, converted to its final form by the validators.
    """
    answer = None
    while answer is None:
        answer = input(question)
        for validator in validators:
            answer, complaint = validator(answer)
            if answer is None:
                print(complaint)
                break
    return answer | 8a0d71d1a966640cbed2320a575aa27ec73cbd47 | 106,055 |
def tipo_edicao_Q(edicao):
    """
    Determines whether the article's edition is ordinary or extra.
    """
    return 'Extra' if len(edicao.split('-')) > 1 else 'Ordinária' | bf12358b954431d71a7ef30239c51421cfe71536 | 106,057 |
from pathlib import Path
def cif_dic_path(top_dir: Path) -> Path:
    """Return path to minimized CIF-Core dictionary."""
    return top_dir / "tests" / "dic2owl" / "static" / "cif_core_minimized.dic" | e311122b78a01a34f3567947b6be0c80bb857eba | 106,060 |
def decode_to_str(bytes):
    """Decode byte list `bytes` to a unicode string trying utf-8 encoding first then latin-1.
    """
    if bytes is None:
        return None
    try:
        return bytes.decode('utf-8')
    except UnicodeDecodeError:
        return bytes.decode('latin-1') | 89af403f78fe2b4944c9a597ae58b4149f170263 | 106,065 |
def get_prefix_from_ns_name(ns_name):
    """Parses prefix from prefix-identifier
    :param ns_name: The name of a namespace
    :returns: The prefix ending with a '-' or None if there is no '-'
    """
    dash_index = ns_name.find('-')
    if dash_index >= 0:
        return ns_name[:dash_index + 1] | 47706474abda4291cfa9eb8295669d45314bab87 | 106,067 |
import json
def read_spatial_params(body_part_json):
    """
    Reads from a JSON formatted file the body spatial parameters.
    Args:
        body_part_json (json): a JSON formatted file specifying a
            list of dicts, where each dict consists of a body anchor and
            parts, and displacement is the sum of displacements from
            each anchor to each part
    Returns:
        a list of dicts: each dict consists of an anchor and a list of
        parts, where the displacement from the anchor to each part would
        be computed and summed
    """
    with open(body_part_json) as data_file:
        spatial_params_list = json.load(data_file)['spatial']
    return spatial_params_list | 98600b4e7c08c35d47633d6cc19a5c37f4f837f0 | 106,069 |
import re
def rm_tags(t: str) -> str:
    """
    Remove html tags.
    e.g. rm_tags("<i>Hello</i> <b>World</b>!") -> "Hello World!".
    """
    return re.sub('<([^>]+)>', '', t) | b6cbe7d0db4f0b75c65b98df3b510ed69f68e6d7 | 106,071 |
def validate_fields(*fields):
    """
    Checks if any of the provided fields are empty
    Returns: True if any of the fields is empty
    """
    return not all(fields) | c055b0cd62bba90cf5261781e331a3e93620a656 | 106,072 |
def get_first_gallery(entry):
    """Returns the first gallery plugin of a blog Entry."""
    for plugin in entry.content.get_plugins():
        if plugin.get_plugin_name().title() == 'Folder':
            return plugin
    return False | 5af3622430bf7a9b87719cf6cf8c13f3f846f070 | 106,073 |
def FindMapping(mappings, addr):
    """Find the mapping given addr.
    Returns the mapping that contains addr.
    Returns None if there is no such mapping.
    """
    # binary search; lo/hi avoid shadowing the min/max builtins
    lo = 0
    hi = len(mappings) - 1
    while True:
        if hi < lo:
            return None
        mid = (lo + hi) // 2
        if mappings[mid].end <= addr:
            lo = mid + 1
        elif mappings[mid].start > addr:
            hi = mid - 1
        else:
            return mappings[mid] | a01fdd905a20e87c7cb474ee4a552ebf6980d742 | 106,077 |
def max_distributions(cdf, m):
    """ Computes the CDF of `m` i.i.d. r.v.'s X distributed according to `cdf`.
    Parameters
    ----------
    cdf : NumPy array
        Cumulative distribution such that cdf[x] = Pr(X <= x).
    m : int
        Number of random variables X_1, ... X_m to take maximum of.
    Returns
    -------
    NumPy array of the same length as `cdf`.
        Cumulative distribution function of Z = max{X_1, X_2, ... X_m}.
    """
    return cdf ** m | fa00d2986a280261434c25247571c59e86e0b31c | 106,078 |
def dayfinish(day):
    """Takes an integer day and returns the correct finish for it
    1 = 'st', 2 = 'nd', 3 = 'rd', 4-10 = 'th' etc....
    """
    if day > 3 and day < 21:
        return 'th'  # special cases
    daystr = str(day)
    if len(daystr) > 1:
        daystr = daystr[-1]
    if daystr == '1':
        return 'st'
    elif daystr == '2':
        return 'nd'
    elif daystr == '3':
        return 'rd'
    else:
        return 'th' | 70427dcf9af5be27d74615fa738f97fd90ada05b | 106,081 |
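For example:

for day in (1, 2, 3, 4, 11, 21, 22, 23, 31):
    print(str(day) + dayfinish(day))
# 1st 2nd 3rd 4th 11th 21st 22nd 23rd 31st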
def generate_param_getter(param_name: str, param_type: str) -> str:
    """Generates Python code for a parameter getter.
    Args:
        param_name: The name of the parameter
        param_type: The type of the parameter.
    Returns:
        str: Python code for the parameter getter.
    """
    return "    @property\n" \
           "    def " + param_name + "(self) -> '" + param_type + "':\n" \
           "        return self._" + param_name + "\n\n" | df89a867becbd37b9d988ee9faf1593e613cc486 | 106,082 |
def timestamps_from_paths(paths, ignore_fn=None):
    """
    From a list of paths, recursively return the timestamps
    from all non-hidden and non-ignored paths
    args:
        A list of paths to recursively explore
        ignore_fn: Ignore paths where this callback returns True
    returns:
        a list of timestamps
    """
    if not isinstance(paths, list):
        paths = [paths]
    def keep(f):
        return f.name[0] != "." and not (ignore_fn is not None and ignore_fn(f))
    def recurse_times(path):
        times = []
        if path.exists() and keep(path):
            if path.is_dir():
                # assumes a pathlib-like path object whose .list() yields child paths
                for f in path.list():
                    times += recurse_times(f)
            else:
                times += [(path, path.stat().st_mtime)]
        return times
    times = []
    for path in paths:
        times += recurse_times(path)
    return sorted(times, key=lambda i: i[0]) | dd48e3fd5026880cd47ed7d7fa3ef82d1e6f34c2 | 106,088 |
def _all_in(smaller, larger):
    """Check that all items in the smaller iterable are in the larger iterable.
    """
    for item in smaller:
        if item not in larger:
            return False
    return True | edbad13f2c3a70ff1620146d7ba9fdf0b460dd1e | 106,093 |
def node_inputs_in_expected_order(model):
    """Verifies that the node inputs are ordered in the way that FINN expects
    them. When a node has a mixture of static (= constant, initialized) inputs
    and dynamic inputs, the dynamic input should come first, followed by the
    static one. Only verifiable for a small subset of op_types for now.
    Returns {"node_inputs_in_expected_order": Bool}."""
    op_types = ["MatMul", "Conv", "Add", "Mul"]
    nodes = filter(lambda x: x.op_type in op_types, model.graph.node)
    all_OK = True
    for n in nodes:
        all_OK = all_OK and len(list(n.input)) == 2
        # input 0 should be dynamic, no initializer
        all_OK = all_OK and (model.get_initializer(n.input[0]) is None)
        # input 1 should be static (unless eltwise add)
        if n.op_type != "Add":
            all_OK = all_OK and (model.get_initializer(n.input[1]) is not None)
    return {"node_inputs_in_expected_order": all_OK} | 5bc026aeffefbfca000fd8db7c7d3b0aaa7c3b5a | 106,095 |
import re
def validate_student_id(student_id, is_year1=False):
    """
    Get and verify student id
    :param student_id:
    :param is_year1: whether the student is a year 1 student
    :return: a boolean
    """
    if not is_year1:
        return bool(re.match(r'\d{8}', student_id))
    return bool(re.match(r'Year 1-.*-\d{2}.*', student_id)) | 0ab87e41811ff6d1e8c54fe8c58c30c86aa9eeac | 106,098 |
import six
import math
def __get_target_length(size):
    """
    Figures out the increased size of the string per
    IBM Globalization Design Guideline A3: UI Expansion.
    https://www-01.ibm.com/software/globalization/guidelines/a3.html
    :param size: Current size of the string.
    :returns: The desired increased size.
    """
    target_lengths = {
        six.moves.range(1, 11): 3,
        six.moves.range(11, 21): 2,
        six.moves.range(21, 31): 1.8,
        six.moves.range(31, 51): 1.6,
        six.moves.range(51, 71): 1.4,
    }
    target_length = 0
    if size > 70:
        target_length = int(math.ceil(size * 1.3))
    else:
        for r, v in target_lengths.items():
            if size in r:
                target_length = int(math.ceil(size * v))
    return target_length | 8c1c246e2b4f972d4588fa3ab44d34ffe3f184d8 | 106,101 |
import hashlib
def url_keygen(prod_id: int) -> str:
    """
    Generates a key that MTGJSON will use for redirection
    :param prod_id: Seed
    :return: URL Key
    """
    return hashlib.sha256(str(prod_id).encode()).hexdigest()[:16] | 6f8e828683518b1ff05f325b9d203974139d68e0 | 106,103 |
import re
def clean_lyrics(lyrics: str) -> str:
    """Perform some simple operations to clean the lyrics."""
    lyrics = lyrics.strip()
    # remove unwanted characters
    lyrics = re.sub(r"[^\w\[\]()/ \"',\.:\-\n?!]+", "", lyrics)
    # reduce to one space only
    lyrics = re.sub(r" +", " ", lyrics)
    # reduce to max 2 new lines in a row
    lyrics = re.sub(r"\n{2,}", "\n\n", lyrics)
    # remove space before newline
    lyrics = re.sub(r" +?\n", "\n", lyrics)
    return lyrics | e7b2f75fa71a9e6223c9074c06c2425c829f23d0 | 106,105 |
def get_opcodes(traces, lineno, op_list):
    """Get all opcodes in op_list on a given line."""
    return [x for x in traces[lineno] if x[0] in op_list] | 3c01e275634f8da9cf9add4651643f9c83d68848 | 106,106 |
import math
def distanceBetweenCoords(lat1, lon1, lat2, lon2):
    """
    This uses the haversine formula to calculate the great-circle distance
    between two points.
    Parameters
    ----------
    lat1 : float
        The latitude of the first point
    lon1 : float
        The longitude of the first point
    lat2 : float
        The latitude of the second point
    lon2 : float
        The longitude of the second point
    """
    earthRadius = 6371.0  # earth's radius in km
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    deltaPhi = math.radians(lat2 - lat1)
    deltaLambda = math.radians(lon2 - lon1)
    a = math.sin(deltaPhi/2.0)**2 + \
        math.cos(phi1)*math.cos(phi2)*(math.sin(deltaLambda/2.0)**2)
    c = 2.0*math.atan2(math.sqrt(a), math.sqrt(1 - a))
    d = earthRadius*c
    return d | 41f9f59bc125d5415a2fc1de8a0639d45c7093ef | 106,108 |
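A quick sanity check using two well-known coordinates (central London and central Paris); the great-circle distance between them is commonly quoted as roughly 344 km:

d = distanceBetweenCoords(51.5074, -0.1278, 48.8566, 2.3522)
print(round(d))  # expect a value around 344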
def load_voxel_params(param):
    """
    Based on the lidar range and resolution of voxel, calculate the anchor box
    and target resolution.
    Parameters
    ----------
    param : dict
        Original loaded parameter dictionary.
    Returns
    -------
    param : dict
        Modified parameter dictionary with new attributes `anchor_args[W][H][D]`
    """
    anchor_args = param['postprocess']['anchor_args']
    cav_lidar_range = anchor_args['cav_lidar_range']
    voxel_size = param['preprocess']['args']['voxel_size']
    vw = voxel_size[0]
    vh = voxel_size[1]
    vd = voxel_size[2]
    anchor_args['vw'] = vw
    anchor_args['vh'] = vh
    anchor_args['vd'] = vd
    anchor_args['W'] = int((cav_lidar_range[3] - cav_lidar_range[0]) / vw)
    anchor_args['H'] = int((cav_lidar_range[4] - cav_lidar_range[1]) / vh)
    anchor_args['D'] = int((cav_lidar_range[5] - cav_lidar_range[2]) / vd)
    param['postprocess'].update({'anchor_args': anchor_args})
    # sometimes we just want to visualize the data without running the model
    if 'model' in param:
        param['model']['args']['W'] = anchor_args['W']
        param['model']['args']['H'] = anchor_args['H']
        param['model']['args']['D'] = anchor_args['D']
    return param | 6e364e23ecc450ed6d12879ba50217897adcfc4d | 106,110 |
def ZigZagDecode(value):
    """Inverse of ZigZagEncode()."""
    if not value & 0x1:
        return value >> 1
    return (value >> 1) ^ (~0) | cfa93e215cdc98968651a7c7e9e3344ff9e3f16e | 106,111 |
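The matching encoder is not part of this record; a sketch that round-trips with the decoder above maps n >= 0 to 2n and n < 0 to -2n - 1:

def ZigZagEncode(value):
    # 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
    if value >= 0:
        return value << 1
    return ((-value) << 1) - 1

assert all(ZigZagDecode(ZigZagEncode(n)) == n for n in range(-100, 100))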
def get_class_globals(klass):
    """Returns a set of the globals defined in the given class.
    Globals are identified to be uppercase and do not start with an underscore.
    Arguments:
        klass -- The class whose globals need to be fetched.
    Returns:
        set -- of the class globals.
    """
    return {getattr(klass, attribute) for attribute in dir(klass) if not attribute.startswith('_') and attribute.isupper()} | 8167da9903ad55885f2cdb89a4886019ae939ee5 | 106,119 |
def to_binary(string: str) -> str:
    """
    Convert String to numerical binary numbers
    returns numerical binary string
    :param string: String to convert
    Example:
        >>> to_binary("test")
        '1110100 1100101 1110011 1110100'
    """
    bin_conv = []
    for c in string:
        ascii_val = ord(c)
        binary_val = bin(ascii_val)
        bin_conv.append(binary_val[2:])
    return ' '.join(bin_conv) | d5a1de81da94840345a86ba1ee298d792668f79a | 106,128 |
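The inverse conversion, a minimal sketch assuming the same space-separated format:

def from_binary(bits: str) -> str:
    return ''.join(chr(int(b, 2)) for b in bits.split())

assert from_binary(to_binary("test")) == "test"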
def spline_type_to_id(spline_type):
    """ spline_type_to_id(spline_type)
    Method to map a spline name to an integer ID. This is used so that
    set_cubic_spline_coefs() can be efficient.
    The spline_type can also be a number between -1 and 1, representing
    the tension for a Cardinal spline.
    """
    # Handle tension given for Cardinal spline
    tension = 0.0
    if isinstance(spline_type, (float, int)):
        if spline_type >= -1 and spline_type <= 1:
            tension = float(spline_type)
            spline_type = 'Cardinal'
        else:
            raise ValueError('Tension parameter must be between -1 and 1.')
    # Get id
    if spline_type.lower() in ['c', 'card', 'cardinal', 'catmull–rom']:
        return tension  # For catmull-rom, we use default tension 0
    elif spline_type.lower() in ['b', 'basis', 'basic']:
        return 2.0
    elif spline_type.lower() in ['herm', 'hermite']:
        return 3.0
    elif spline_type.lower() in ['lag', 'lagrange']:
        return 4.0
    elif spline_type.lower() in ['lanc', 'lanczos']:
        return 5.0
    elif spline_type.lower() in ['near', 'nearest']:
        return 97.0
    elif spline_type.lower() in ['lin', 'linear']:
        return 98.0
    elif spline_type.lower() in ['quad', 'quadratic']:
        return 99.0
    else:
        raise ValueError('Unknown spline type: ' + str(spline_type)) | 355e091d7cdfbae4ad0531d0f2e31f2533641b3c | 106,131 |
def createTable(values):
    """Creates a table from a list of one dimensional dicts. Keys of the first dict will be used as headings.
    :param values: list with dicts that only have one layer of dicts (subdicts lead to ugly results)
        and each dict needs to have the same keys or they won't be displayed or an error will be raised
    :returns: HTML table content that can be put into a HTML table tag
    """
    # table body
    tableMain = """
    <tr>
        %(heading)s
    </tr>
    %(content)s
    """
    # get all the keys from the first dict
    keys = [x for x in values[0].keys()]
    headings = ""
    for key in keys:
        headings += "<th>%s</th>" % key
    content = ""
    try:
        for dataset in values:
            content += "<tr>\n"
            for key in keys:
                if key == "bbox":
                    content += "    <td class='bbox'>%s</td>\n" % dataset[key]
                else:
                    content += "    <td>%s</td>\n" % dataset[key]
            content += "</tr>\n"
    except KeyError as e:
        raise KeyError("dicts must have the same keys") from e
    table = tableMain % {"heading": headings, "content": content}
    return table | 71bc2265363fc93a54928683a1d106c13f9efe1e | 106,132 |
def _wrap_apply(series_or_dataframe, func, **kwargs):
    """Wrap a pandas.Series or pandas.DataFrame apply(func, **kwargs) call."""
    return series_or_dataframe.apply(func, **kwargs) | 876c4fe6e65da3d7676b0df77d885448d0f91237 | 106,134 |
from typing import Any
def any(iterable: Any, /) -> bool:
    """Return True if bool(x) is True for any x in the iterable.
    If the iterable is empty, return False.
    """
    for item in iterable:
        if item:
            return True
    return False | 2ebb6ebfbdb78f73f07fc7cf196439b655292983 | 106,135 |
def transition_2x3() -> dict:
    """
    A manually specified transition dict
    for comparison with the "create_dict_of_adjacent_tile_indices" above
    """
    transition_dic = {}
    transition_dic[1] = [2, 4]
    transition_dic[2] = [1, 5, 3]
    transition_dic[3] = [2, 6]
    transition_dic[4] = [1, 5]
    transition_dic[5] = [4, 2, 6]
    transition_dic[6] = [3, 5]
    return transition_dic | d4140690f28e0527ff312fd938be2949e2cdaaca | 106,136 |
def get_pool(module, system):
    """Return Pool or None"""
    try:
        return system.pools.get(name=module.params['pool'])
    except Exception:
        # a bare except would also swallow SystemExit/KeyboardInterrupt
        return None | 0ba4809487ac207968e6923e5b7a0c04dcc7759f | 106,138 |
import itertools
def generate_all_combinations(d):
    """All permutations of dict elements.
    Source:
    https://stackoverflow.com/questions/38721847/how-to-generate-all-combination-from-values-in-dict-of-lists-in-python
    """
    keys, values = zip(*d.items())
    return [dict(zip(keys, v)) for v in itertools.product(*values)] | 8871abb9b94a38a71e45a253953a64e6ce20a2ea | 106,140 |
def getTfidfFeat(words, dictionary, tfidf_model):
    """ Generate tf-idf feature in VW format."""
    # bow feats
    bow = dictionary.doc2bow(words)
    # tf-idf feats
    tfidf = tfidf_model[bow]
    feat = ""
    if len(tfidf) > 0:
        for f in tfidf:
            feat += "%s:%s " % (f[0], f[1])
        feat = feat[:-1]
    return feat | 8fadf15e9a658eb7edfa179bbe1341a7c1879dc7 | 106,147 |
def job_config(job):
    """Extract config dictionary from GraphQL result"""
    return {x["key"]: x["value"] for x in job["config"]} | d9500a460c661b4714f0f4b744b4ee1a2c58701d | 106,157 |
import string
def isHexString(data):
    """
    Test if a string contains only hex digits.
    """
    return all(c in string.hexdigits for c in data) | 03636cabb1db0a95563204d7c52d1df04666e4c9 | 106,159 |
def ensure(assertion, message=None):
    """
    Checks an assertion argument for truth-ness. Will return ``True`` or
    explicitly raise ``AssertionError``. This is to deal with environments
    using ``python -O`` or ``PYTHONOPTIMIZE=``.
    :param assertion: some value to evaluate for truth-ness
    :param message: optional message used for raising AssertionError
    """
    message = message or assertion
    if not assertion:
        raise AssertionError(message)
    return True | 91164c8b278b5aa761aa48d81850c27a2d2e86e2 | 106,160 |
def emulator_type(emulator):
    """Identifies the type of emulator."""
    return emulator["emulator_type"] | db33730c5f06ff1e50b592dc23050d9c7049c7b0 | 106,162 |
def first_key(dictionary):
    """Get the first key."""
    keys = list(dictionary.keys())
    if len(keys) > 0:
        return keys[0]
    return "" | ddb74a662f74d0ad81910187bf02e3ede3239c4f | 106,165 |
def build_edges(nodes):
    """Build the forward and backward edges on the given list of ParsedNodes
    and return them as two separate dictionaries, each mapping unique IDs to
    lists of edges.
    """
    backward_edges = {}
    # pre-populate the forward edge dict for simplicity
    forward_edges = {node.unique_id: [] for node in nodes}
    for node in nodes:
        backward_edges[node.unique_id] = node.depends_on_nodes[:]
        for unique_id in node.depends_on_nodes:
            forward_edges[unique_id].append(node.unique_id)
    return forward_edges, backward_edges | c5b8af5004cde8deae9c21705abdba30c5aff8a0 | 106,167 |
def get_atts(card):
    """Return dict with name, set, and price of card"""
    card_name = card["name"]
    set_name = card["set_name"]
    prices = card["prices"]
    card_atts = {
        "name": card_name,
        "set": set_name,
        "price_normal": prices["usd"],
        "price_foil": prices["usd_foil"]
    }
    print("Card Name: {}\nSet: {}".format(card_name, set_name))
    print("Normal: {}\nFoil: {}".format(prices["usd"], prices["usd_foil"]))
    return card_atts | 75ca5ad53398a6b177023bc8aff5ff91ffff7342 | 106,172 |
def merge(values_1, values_2, labels, join='inner'):
    """Merge two dictionaries. The resulting dictionary will map key values to
    dictionaries. Each nested dictionary has two elements, representing the
    values from the respective merged dictionary. The labels for these elements
    are defined by the labels argument.
    The join method allows for four types of merging:
    - inner: Keep only those keys that are in the intersection of both
      dictionaries.
    - outer: Keep all keys from the union of both dictionaries.
    - left-outer: Keep all keys from the first dictionary.
    - right-outer: Keep all keys from the second dictionary.
    Raises a ValueError if the number of given labels is not two or if an
    invalid join method is specified.
    Parameters
    ----------
    values_1: dict
        Left side of the join.
    values_2: dict
        Right side of the join.
    labels: list or tuple of length two
        Element labels for the left and right values in the result.
    join: enum['inner', 'outer', 'left-outer', 'right-outer'], default='inner'
        Join method identifier.
    Returns
    -------
    dict
    Raises
    ------
    ValueError
    """
    if len(labels) != 2:
        raise ValueError('invalid label list {}'.format(labels))
    label_1, label_2 = labels
    result = dict()
    if join == 'inner':
        for key, value in values_1.items():
            if key in values_2:
                result[key] = {label_1: value, label_2: values_2[key]}
    elif join == 'outer':
        for key, value in values_1.items():
            result[key] = {label_1: value, label_2: values_2.get(key)}
        # Add elements in the second dictionary that are not part of the
        # result yet.
        for key, value in values_2.items():
            if key not in result:
                result[key] = {label_1: None, label_2: value}
    elif join == 'left-outer':
        for key, value in values_1.items():
            result[key] = {label_1: value, label_2: values_2.get(key)}
    elif join == 'right-outer':  # the original repeated 'outer' here, leaving this branch unreachable
        for key, value in values_2.items():
            result[key] = {label_1: values_1.get(key), label_2: value}
    else:
        raise ValueError('invalid join method {}'.format(join))
    return result | 282b358c44a53a0f88afab5af8beb639c45b90d0 | 106,174 |
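For example, with two small made-up dictionaries:

a = {'x': 1, 'y': 2}
b = {'y': 20, 'z': 30}
merge(a, b, ['left', 'right'])
# {'y': {'left': 2, 'right': 20}}
merge(a, b, ['left', 'right'], join='outer')
# {'x': {'left': 1, 'right': None}, 'y': {'left': 2, 'right': 20}, 'z': {'left': None, 'right': 30}}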
def levenshtein_distance(str1, str2):
    """Returns the Levenshtein Distance and the ratio as compared to the entire sequence
    source: https://rosettacode.org/wiki/Levenshtein_distance#Python
    """
    m = len(str1)
    n = len(str2)
    lensum = float(m + n)
    d = []
    for i in range(m+1):
        d.append([i])
    del d[0][0]
    for j in range(n+1):
        d[0].append(j)
    for j in range(1, n+1):
        for i in range(1, m+1):
            if str1[i-1] == str2[j-1]:
                d[i].insert(j, d[i-1][j-1])
            else:
                minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2)
                d[i].insert(j, minimum)
    ldist = d[-1][-1]
    ratio = (lensum - ldist)/lensum
    return {'distance': ldist, 'ratio': ratio} | b839636df1614a6cffbf45d6066173194f3db429 | 106,175 |
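Note that this variant charges 2 for a substitution (a deletion plus an insertion), so its distances run higher than the common unit-cost version; for example:

result = levenshtein_distance("kitten", "sitting")
# two substitutions (cost 4) plus one insertion (cost 1):
# result == {'distance': 5, 'ratio': (13 - 5) / 13}  # ratio ~0.615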
from datetime import datetime, timedelta
def nextday_datestr(datestr):
    """
    Return the next day's datestr of the given datestr.
    20171101 will be returned if 20171031 was given, for example.
    """
    nextday = datetime.strptime(datestr, '%Y%m%d') + timedelta(days=1)
    return nextday.strftime('%Y%m%d') | 0412649a515cac936deb637d8b0266e6dcab1bc8 | 106,178 |
import math
def y_values_between_points(average, left_pt, right_pt):
    """Return the list of values between left_pt and right_pt."""
    start = int(math.floor(left_pt.x))
    end = int(math.ceil(right_pt.x)) + 1
    return [average[i] for i in range(start, end)] | cbfcd56137f5bc670e8fa61e8a731ca26dba8e25 | 106,180 |
def split_docstring(doc):
    """Split docstring into first line (header) and full body."""
    return (doc.split("\n", 1)[0], doc) if doc is not None else ("", "") | 5684a5d43eabd39762b9515b3ac965fc2a0ba953 | 106,184 |
import importlib
def import_serializer_class(location):
    """
    Resolves a dot-notation string to serializer class.
    <app>.<SerializerName> will automatically be interpreted as:
    <app>.serializers.<SerializerName>
    """
    if not isinstance(location, str):
        return location
    pieces = location.split('.')
    class_name = pieces.pop()
    if not pieces:
        raise ValueError('Please ensure class string is fully qualified with its containing module')
    if pieces[-1] != 'serializers':
        pieces.append('serializers')
    module = importlib.import_module('.'.join(pieces))
    return getattr(module, class_name) | a08ecfed7e49567107e5a3deb8d51582204a9e29 | 106,185 |
def remove_stopwords(df, file, path='', column="Word"):
    """ Remove stopwords from a dataframe choosing
    a specific column in which to remove those words
    Parameters:
    -----------
    df : pandas dataframe
        Dataframe of counts per word per user
    file : string
        Name of file that contains the stopwords
    path : string, default ''
        Path of the file that contains the stopwords
    column : string, default 'Word'
        Column to clean
    Returns:
    --------
    df : pandas dataframe
        Dataframe of counts per word per user
        excluding the stopwords
    """
    # Remove stopwords
    with open(path + file) as stopwords:
        stopwords = stopwords.readlines()
    stopwords = [word[:-1] for word in stopwords]
    df = df[~df[column].isin(stopwords)]
    return df | 001cd22a8d62c53ea074ae0a290ce8974d857343 | 106,188 |
def cppn_rem(
    template,
    grid: list,
) -> list:
    """Obtain the list of elements to be removed according to the generated CPPN
    Parameters
    ----------
    template
        The unit template parameters
    grid : list
        The grid generated by the CPPN
    Returns
    -------
    list
        The list of element IDs to be removed
    """
    # Initialisations
    rem = []
    # Create a copy of the grid
    g_temp = grid[:]
    # Reverse the order of the grid
    g_temp = g_temp[::-1]
    # Calculate the element ID offset according to the boundary thickness
    offset = template.x_e*template.b + template.b + 1
    # Loop through rows of the grid
    for i in g_temp:
        # Loop through the elements in the current row
        for j in range(0, len(i)):
            # Check if the current element needs to be removed
            if i[j] == 0:
                # Add the element ID to the list of element IDs to be removed
                rem.append(j + offset)
        # Increment the offset
        offset += template.x_e
    return rem | 2fd1eab71b3d1e448e38f15ae0ddbb5071f6b95e | 106,191 |
from typing import Dict
def make_attrib(name: str, value: str, ns: str) -> Dict[str, str]:
    """
    Creates a dict entry to be added in the attribs of an `etree.Element`
    :param name:
    :param value:
    :param ns:
    :return:
    """
    return {f'{{{ns}}}{name}': value} | a0073d2689f332e4c4308a79448f5d43b4fc4cae | 106,194 |
def target_confirmation(participants_list, silent=False):
    """
    Target participants to be contacted for confirmation of session slots
    """
    participants_confirmed = participants_list[2]
    participants = participants_confirmed[
        participants_confirmed["Timeslots Confirmed"] == "No"
    ]
    if silent is False:
        print(
            f"{len(participants)}"
            + " participants to seek confirmation of session slots."
        )
    return participants | f8b2b0ab9e33e24d728283b2ac3a05a894a6a2b8 | 106,199 |
def is_file(label):
    """
    Checks if the given label is a file.
    """
    return label.endswith(".py") or label.endswith(".md") | 5410f33380eda4e6b109a0d627210de23f0450e9 | 106,208 |
import copy
def extract_data(dataset, queries):
    """
    From [dataset], extract antecedent annotations for [queries].
    Returns (new_data, num_queries) where:
    - [new_data] is a subset of [dataset] which contains annotations only for spans in [queries]
    - [num_queries] is number of annotated spans in [new_data]
    """
    stop = len(queries)
    new_data = []
    num_queries = 0
    # iterate over [dataset] with stopping condition for faster search
    for doc in dataset:
        if stop <= 0:
            # already queried labels for all sampled spans
            break
        doc_key = doc['doc_key']
        if doc_key in queries:
            # at least one span in doc was queried
            gold_map = doc['antecedent_map']
            # get most recent antecedent for queried spans
            sub_map = {}
            for span in queries.get(doc_key, []):
                if span in gold_map:
                    ante = gold_map[span]
                    if isinstance(ante, list):
                        # grab most recent antecedent if not '0' cluster
                        ante = [max(ante)]
                    sub_map[span] = ante
                else:
                    sub_map[span] = None
                num_queries += 1
            if len(sub_map) > 0:
                # create new copy of doc with new sub_map
                new_doc = copy.copy(doc)
                new_doc['antecedent_map'] = sub_map
                new_data.append(new_doc)
            stop -= 1
    return new_data, num_queries | debbc07eacfd561df82848fe6fd1d78057965336 | 106,215 |
def needs_review(revision):
    """
    Returns bool if revision needs review.
    If autolabel is empty, assume true.
    """
    return revision['autolabel'].get('needs_review', True) | a8c550ba7c65201f462489d0257c1406ca51d924 | 106,217 |
def test_for_small_float(value: float, precision: int) -> bool:
    """
    Returns True if 'value' is a float whose rounded str representation
    has fewer significant figures than the number in 'precision'.
    Return False otherwise.
    """
    if not isinstance(value, float):
        return False
    if value == 0:
        return False
    value_as_str = str(round(abs(value), precision))
    if "e" in str(value):
        return True
    if "." in value_as_str:
        left, *_right = value_as_str.split(".")
        if left != "0":
            return False
    if (
        round(value, precision) != round(value, precision + 1)
        or str(abs(round(value, precision))).replace("0", "").replace(".", "")
        == str(abs(round(value, precision + 1))).replace("0", "").replace(".", "")
        == ""
    ):
        return True
    else:
        return False | 05c5a8d4202db24c52307b26675120205961b5ef | 106,221 |
from typing import List
def get_IM_comp_count(comp: List[str]):
    """Counts the components; geom is considered 0.5 + 1 for each of
    the two components required to calculate it. I.e. if it's only geom
    then 2.5.
    Ellipsis means everything, so just 3.5.
    Other components are just counted as one.
    E.g. if geom is specified together with 000 (used for geom calculation),
    the result is still 2.5, as the other geom component (090) would
    still be calculated.
    """
    if "ellipsis" in comp:
        return 3.5
    # Count geom as 2.5
    if len(comp) == 1 and comp[0] == "geom":
        return 2.5
    count = len(comp)
    if "geom" in comp:
        # Add count for geom components not specified explicitly
        count = (
            count
            - 0.5
            + len([1 for geom_comp in ["000", "090"] if geom_comp not in comp])
        )
    return count | 11b9ea7ffc0b0d1a00fd6dbd50e8b09a9dbafce8 | 106,225 |
import re
def single_line(x: str):
    """Convert a sql command into a single line (remove newlines)."""
    return re.sub(r'\s*\n+\s*', ' ', x) | 582ae8817bbbf521053a58f124024ef84df9ca14 | 106,227 |
def integer_ceil(a, b):
    """Return the ceil integer of a div b."""
    quanta, mod = divmod(a, b)
    if mod:
        quanta += 1
    return quanta | 0e5bc7da5d9ee4d0f13a30cb552adce6a1dfcfd8 | 106,228 |
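The same result can be computed without divmod via negated floor division:

def integer_ceil_alt(a, b):
    return -(-a // b)

assert integer_ceil(7, 2) == integer_ceil_alt(7, 2) == 4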
def get_airports_dict(airports_df):
    """
    :param airports_df: dataframe of airports, columns: ['country', 'fullname', 'iata', 'latitude', 'longitude']
    :return: dictionary of airports where column "iata" is the key and value is list ['country', 'fullname', 'latitude', 'longitude']
    """
    return airports_df.set_index('iata').transpose().to_dict('list') | d00ab55f50812e1407a7e0cfff78d0ac15cd3506 | 106,229 |
def calcGeometricMean(theList):
    """Calculates the geometric mean of a list of values.
    :param theList: The list of which the geometric mean is calculated
    :type theList: list
    :return: The geometric mean of the values of theList
    :rtype: float
    """
    product = 1
    root = 0
    for value in theList:
        product *= value
        root += 1
    return product ** (1 / root) | 923ca02c2bb39e65fd7f25eaf0becaae03bdeb18 | 106,231 |
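For example, the geometric mean of [2, 8] is (2 * 8) ** (1 / 2):

assert calcGeometricMean([2, 8]) == 4.0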
def select_csv_columns(line: str, ncols: int = 8) -> str:
    """Select first ncols in a line from a csv.
    Parameters
    ----------
    ncols: int
        Number of columns to select.
    Returns
    -------
    selected_cols: str
        Selected ncols of csv.
    """
    return ",".join(line.split(",")[:ncols]) | 833428e9da3bfdf1dcb5ecc214e6e176af9cfa61 | 106,236 |
import hashlib
def hash_string(text: str) -> bytes:
    """Calculate a hash of a string"""
    btext = bytes(text, "utf-8")
    h = hashlib.sha256()
    h.update(btext)
    d = h.digest()
    return d | 2cc47354024c422d26a7fd05a7761e1c6a42d3a7 | 106,237 |
from bs4 import BeautifulSoup
def parse_html(content):
    """ This step is included in Step 2: this will use beautiful soup to
    modify certain tags of the returned content
    Parameters
    ----------
    content : returned from the notebook export
    Returns
    ------
    soup (parsed html content)
    """
    soup = BeautifulSoup(content, 'html.parser')
    if soup.table:
        for tag in soup.find_all('table'):
            tag['class'] = 'table-responsive table-striped'
            tag['border'] = '0'
    return soup | 629e1f21d7b918884256c74893c248ebcffc1c8c | 106,238 |
def sanitize_code(code):
    """ Make a canonical text version of a code - a 5 digit, 0-padded string
    """
    try:
        code = "%05i" % code
    except TypeError:
        code = str(code)
    return code | 9292740a8c17633b6e3f4afcbd3798595e95b0bc | 106,239 |
def get_resource_id(event):
    """
    Returns the CFN Resource ID with format <cluster_name>_aws-auth.
    :param event: the CFN event
    :return: the CFN resource id
    """
    cluster_name = event["ResourceProperties"]['ClusterName']
    resource_id = cluster_name + "_aws-auth"
    return resource_id | e0d5c2e0bd14e5aa78c925814e039437ecfea788 | 106,240 |
def less_than(val1, val2):
    """
    Simple function that returns True if val1 <= val2 and False otherwise.
    :param val1: first value of interest
    :param val2: second value of interest
    :return: bool: True if val1 <= val2 and False otherwise
    """
    # only care to the 3rd position after the decimal point
    return round(val1, 3) <= round(val2, 3) | bce2d238c9399c7e3de957c66aaa9656e17c2a44 | 106,241 |
def get_diff(old: dict, new: dict, value: str, statistics=False):
    """ Get the difference between old and new osu! user data. """
    if not new or not old:
        return 0
    if statistics:
        new_value = float(new["statistics"][value]) if new["statistics"][value] else 0.0
        old_value = float(old["statistics"][value]) if old["statistics"][value] else 0.0
    else:
        new_value = float(new[value]) if new[value] else 0.0
        old_value = float(old[value]) if old[value] else 0.0
    return new_value - old_value | 3354fc212916bea4596d1a78a46a6a2aff2d465d | 106,245 |
def maybe_job_id(value: str) -> bool:
    """Check whether the string looks like a job id"""
    return value.startswith("job-") | 74818d62490c39b0ceee2a4b867b352ab8c600f1 | 106,247 |
import uuid
def get_random_dataset_name() -> str:
    """Get a random dataset name.
    Returns:
        A random dataset name.
    """
    return f"test{uuid.uuid4().hex}" | 048973a6f620e1e549b29690ffda56d4cbbce7cf | 106,252 |
import torch
def calculate_rms(samples):
    """
    Calculates the root mean square.
    Based on https://github.com/iver56/audiomentations/blob/master/audiomentations/core/utils.py
    """
    return torch.sqrt(torch.mean(torch.square(samples), -1, keepdim=True)) | cee3522fa7da5cd71cbe5403f48e5b6f7f4dae3a | 106,253 |
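For example, the samples [3, 4] have RMS sqrt((9 + 16) / 2) = sqrt(12.5) ≈ 3.5355:

samples = torch.tensor([3.0, 4.0])
print(calculate_rms(samples))  # tensor([3.5355])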
def with_pattern(pattern):
    r"""Attach a regular expression pattern matcher to a custom type converter
    function.
    This annotates the type converter with the :attr:`pattern` attribute.
    EXAMPLE:
        >>> import parse
        >>> @parse.with_pattern(r"\d+")
        ... def parse_number(text):
        ...     return int(text)
    is equivalent to:
        >>> def parse_number(text):
        ...     return int(text)
        >>> parse_number.pattern = r"\d+"
    :param pattern: regular expression pattern (as text)
    :return: wrapped function
    """
    def decorator(func):
        func.pattern = pattern
        return func
    return decorator | 806ecb7dfcaee2581b75243d48c96405de3a9807 | 106,255 |
from typing import Callable
from typing import Any
import inspect
def get_func_args(func: Callable[[Any], Any]):
    """Returns a list of arguments for the function"""
    sig = inspect.signature(func)
    return [
        arg_name
        for arg_name, param in sig.parameters.items()
        if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ] | 8b7f3f06b5667b9fa0498ce013f8294bd24dc186 | 106,256 |
def get_arr_item(arr, index):
    """
    Purpose: get the value of an n-dimensional array at the given index.
    Parameters:
        arr: an n-dimensional array
        index: an index into the n-dimensional array, given as a 1-D list;
            for example [2, 3, 4] means arr[2][3][4]
    Returns: the value of the array at that index, i.e. arr[index]
    """
    # number of components in the index
    count = len(index)
    # walk dimension by dimension to obtain arr[index]
    a = arr[index[0]]
    for i in range(1, count):
        a = a[index[i]]
    return a | 0dfe0d924b34cac859b44a6dfd583490686cb56f | 106,264 |
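A quick usage check; for NumPy arrays the same lookup is simply arr[tuple(index)]:

nested = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
assert get_arr_item(nested, [1, 0, 1]) == 5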
def shipping_cost(num_copy):
    """Finds total shipping cost for a given number of copies
    """
    return 3 + 0.75 * (num_copy - 1) | d1589035464097d8cc96d91efeceec4c3a2cffc1 | 106,266 |