content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def resposta_tamanho(res):
    """Return the number of elements in the answer *res*.

    resposta_tamanho: answer --> non-negative integer
    """
    return len(res)
import random
import string
def generate_admin_id(db) -> str:
    """Generate a unique admin id of the form ``adm_`` + 16 alphanumerics.

    Args:
        db: db connection object exposing ``find_one`` (pymongo-style).
    Returns:
        str: an id not currently present in ``db`` under ``user_id``.
    """
    # Iterate instead of recursing: the original recursed on every
    # collision, which can exhaust the stack on a densely-populated table.
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    while True:
        candidate = "adm_" + "".join(random.choice(alphabet) for _ in range(16))
        if not db.find_one({"user_id": candidate}):
            return candidate
def NB_calc(TP, FP, POP, w):
    """Calculate Net Benefit (NB).

    :param TP: true positives (int)
    :param FP: false positives (int)
    :param POP: population / total number of samples (int)
    :param w: weight (float)
    :return: NB as float, or the string "None" if it cannot be computed
    """
    try:
        return (TP - w * FP) / POP
    except (ZeroDivisionError, TypeError):
        # String sentinel kept deliberately: callers expect "None", not None.
        return "None"
def get_vehicle_urls_with_prefix(url_prefix, url_link_):
    """Prepend *url_prefix* to the (possibly relative) *url_link_*.

    :param url_prefix: str, prefix for an incomplete vehicle url,
        e.g. ``https://abc.africa/ng/``
    :param url_link_: str, a url link
    :return: the prefixed url link
    """
    return "".join((url_prefix, url_link_))
def version_update_scale(old_version_string, new_version_string):
    """Compare two dotted version strings and classify the update scale.

    Returns "major" (1.x -> 2.x), "minor" (1.4 -> 1.7), "micro"
    (1.4.3 -> 1.4.4), or None when the versions are identical.
    """
    old_parts = old_version_string.split('.')
    new_parts = new_version_string.split('.')
    if old_parts[0] != new_parts[0]:
        return "major"
    if old_parts[1] != new_parts[1]:
        return "minor"
    # Major and minor match: either identical, or a micro-level change.
    return None if old_parts == new_parts else "micro"
def _WithoutSuffix(string, suffix):
""" Returns a copy of string 'string', but with suffix 'suffix' removed.
Raises ValueError if string does not end with suffix. """
if not string.endswith(suffix):
raise ValueError('_WithoutSuffix: string %s does not end with suffix %s' % (
string, suffix))
return string[:-len(suffix)] | f4393b5722275f39c295da2d9e763ff54e050af1 | 103,270 |
def get_text(answer: list) -> str:
    """Extract only the text (first element) from the answers.text column.

    Args:
        answer: the answer list; its first entry is the text.
    """
    first_entry = answer[0]
    return first_entry
def exclusion_payload(passed_keywords: dict) -> dict:
    """Create a properly formatted exclusion payload.

    {
        "comment": "string",
        "groups": ["string"],
        "value": "string"
    }

    Only keys with truthy values are copied into the payload.
    """
    returned_payload = {}
    for key in ("comment", "groups", "value"):
        candidate = passed_keywords.get(key, None)
        if candidate:
            returned_payload[key] = candidate
    return returned_payload
def covariance(x, y):
    """Compute the per-row covariance between two 2D tensors.

    Parameters
    ----------
    x : torch.Tensor of shape `(n_samples, horizon)`
    y : torch.Tensor of shape `(n_samples, horizon)`

    Returns
    -------
    torch.Tensor of shape `(n_samples,)`.
    """
    _, horizon = x.shape
    # Center each row, then average the elementwise products (biased estimator).
    centered_x = x - x.mean(dim=1, keepdim=True)
    centered_y = y - y.mean(dim=1, keepdim=True)
    return (centered_x * centered_y).sum(dim=1) / horizon
def extract_three_floats(elem):
    """Return three floats parsed from the text of *elem*, or None.

    Returns None when the text does not split into exactly three tokens.
    Parsing errors are reported and re-raised.
    """
    # Narrow except: the bare `except:` also caught KeyboardInterrupt etc.
    # AttributeError covers elem.text being None; ValueError covers bad floats.
    try:
        vals = elem.text.split()
        if len(vals) == 3:
            return [float(x) for x in vals]
    except (AttributeError, ValueError, TypeError):
        print("There was an error parsing CDL values")
        raise
    return None
from typing import Optional
from typing import Callable
def get_custom_attribute(item: dict, attribute_code: str, coerce_as: Optional[Callable] = None):
    """
    Get a custom attribute from an item given its code.

    For example:
        >>> get_custom_attribute(..., "my_custom_attribute")
        "0"
        >>> get_custom_attribute(..., "my_custom_attribute", bool)
        False

    :param item: Magento item dict with a "custom_attributes" list.
    :param attribute_code: code of the attribute to look up.
    :param coerce_as: optional callable applied to the attribute value if set.
        Useful because Magento stores all attribute values as strings.
    :return: attribute value or None when the code is not present.
    """
    for attribute in item.get("custom_attributes", []):
        if attribute["attribute_code"] != attribute_code:
            continue
        value = attribute["value"]
        if coerce_as is None:
            return value
        if coerce_as is bool:
            # "0" -> False / "1" -> True (bool("0") alone would be True)
            return bool(int(value))
        return coerce_as(value)
def count_nb_fov(recipe):
    """Count the number of different fields of view defined by the recipe.

    Parameters
    ----------
    recipe : dict
        Maps images according to field of view, round, channel and spatial
        dimensions. May only contain the keys 'pattern', 'fov', 'r', 'c',
        'z', 'ext' or 'opt'.

    Returns
    -------
    nb_fov : int
        Number of different fields of view in the recipe.

    Raises
    ------
    TypeError
        If *recipe* is not a dict (TypeError is still an Exception subclass,
        so existing `except Exception` callers keep working).
    ValueError
        If recipe["fov"] is neither a str nor a list.
    """
    if not isinstance(recipe, dict):
        raise TypeError("The recipe is not valid. It should be a dictionary.")
    # No 'fov' key means a single (implicit) field of view.
    if "fov" not in recipe:
        return 1
    fov = recipe["fov"]
    if isinstance(fov, str):
        return 1
    if isinstance(fov, list):
        return len(fov)
    raise ValueError("'fov' should be a List or a str, not {0}".format(type(fov)))
def clean_header(df):
    """Normalize column names: strip, lower-case, spaces to underscores,
    and drop parentheses. Mutates *df* in place and returns it.
    """
    # regex=False: '(' and ')' are regex metacharacters, so letting pandas
    # treat them as patterns (the pre-2.0 default) raises re.error.
    df.columns = (
        df.columns.str.strip()
        .str.lower()
        .str.replace(' ', '_', regex=False)
        .str.replace('(', '', regex=False)
        .str.replace(')', '', regex=False)
    )
    return df
from datetime import datetime
def get_start_date(product):
    """Return the start date of a product version.

    Parameters
    ----------
    product : str
        Product specification.

    Returns
    -------
    datetime.datetime
        Timestamp of the start date (raises KeyError for unknown products).
    """
    start_dates = {
        'M2T1NXLND.5.12.4': datetime(1980, 1, 1),
    }
    return start_dates[product]
def organization_current_get(request):
    """Return the current organization slug, or None if none is chosen.

    Required user level: None. All operations are performed as this
    organization. Example return value: "inter-actief".
    """
    organization = request.organization
    return organization.slug if organization else None
async def ping():
    """Liveness-probe endpoint: always answers with a fixed payload."""
    payload = {"ping": "I'm alive!"}
    return payload
def sort_all_slice_files_acquisition_time(files):
    """Sort DICOM datasets by their AcquisitionTime attribute.

    Args:
        files (list): pydicom datasets of the DICOM files.

    Returns:
        list: datasets sorted by acquisition time; datasets without an
        AcquisitionTime attribute are skipped (and counted on stdout).
    """
    with_time = []
    skipped = 0
    for dataset in files:
        if hasattr(dataset, 'AcquisitionTime'):
            with_time.append(dataset)
        else:
            skipped += 1
            print("skipped, no AcquisitionTime: {}".format(skipped))
    return sorted(with_time, key=lambda ds: ds.AcquisitionTime)
def get_args(parameters):
    """Generate parameter lists to be passed to rendering templates.

    Args:
        parameters (dict): pipeline parameters as {<name>: (<type>, <value>)}

    Returns (dict):
        - 'parameters_names': parameter names, sorted, as a list
        - 'parameters_types': parameter types as a list (same order)
        - 'parameters_values': parameter default values as a list (same order)
    """
    ordered = dict(sorted(parameters.items()))
    return {
        'parameters_names': list(ordered),
        'parameters_types': [spec[0] for spec in ordered.values()],
        'parameters_values': [spec[1] for spec in ordered.values()],
    }
import math
def get_coord_sphere(long, lat, r) -> tuple:
    """Convert spherical coordinates to Cartesian (x, y, z).

    Let r = radius, omega = longitude angle, phi = latitude angle.
    omega = 0 at London's longitude; phi = 0 at the equator.
    r varies from 0 to R (Earth's radius) to R + h for height h above the
    surface.

    As implemented (docstring previously had x and z swapped):
        z = r cos(phi)cos(omega),
        x = r cos(phi)sin(omega),
        y = r sin(phi)

    >>> get_coord_sphere(0,90,20)
    (0.0, 20.0, 1.2246467991473533e-15)
    >>> get_coord_sphere(45,45,20)
    (10.0, 14.14213562373095, 10.000000000000002)
    >>> get_coord_sphere(-80,20,20)
    (-18.50833156796647, 6.840402866513374, 3.2635182233306983)
    >>>
    """
    omega = math.radians(long)
    phi = math.radians(lat)
    z = r * math.cos(phi) * math.cos(omega)
    x = r * math.cos(phi) * math.sin(omega)
    y = r * math.sin(phi)
    return x, y, z
import re
def snake_case(arg: str):
    """Convert a string to snake_case.

    Non-alphanumeric characters become '_', CamelCase humps become
    '_lowercase', and a leading capital is lower-cased.
    """
    # Non-alphanumeric characters -> '_'
    result = re.sub(r'\W', '_', arg)
    # Internal hump: aXa -> a_xa
    result = re.sub(
        r'(?<=[a-z])[A-Z](?=[a-z])',
        lambda m: '_' + m.group(0).lower(),
        result,
    )
    # Leading capital followed by a lowercase letter -> lowercase
    return re.sub(
        r'^[A-Z](?=[a-z])',
        lambda m: m.group(0).lower(),
        result,
    )
def create_result_path(prefix, path, nr):
    """Build the output path for a cropped image.

    *prefix* is e.g. '64/test/', *path* is e.g. '000000/000000144.png',
    *nr* is the crop number; the original 4-char extension is replaced by
    '_<nr:04d>.png'.
    """
    return f"{prefix}{path[:-4]}_{nr:04d}.png"
def turn_off_empty_axes(n_plots_y, n_plots_x, ax):
    """Turn off empty axes in an `n_plots_y` x `n_plots_x` grid of plots.

    Args:
        n_plots_y (int): number of rows in the grid.
        n_plots_x (int): number of columns in the grid.
        ax (matplotlib.axes): 2-D grid of axes objects.

    Returns:
        The (mutated) grid of axes.
    """
    for row in range(n_plots_y):
        for col in range(n_plots_x):
            axes = ax[row, col]
            # An axes nothing was plotted on carries `inf` in its data
            # limits, so use that as the "empty" marker and hide it.
            if "inf" in str(axes.dataLim):
                axes.axis("off")
    return ax
def is_prime(num):
    """Return True if *num* is prime.

    Trial division only needs to test divisors up to sqrt(num): any factor
    above the square root pairs with one below it.
    """
    if num < 2:
        return False
    for x in range(2, int(num ** 0.5) + 1):
        if num % x == 0:
            return False
    return True
import platform
def win_safe_path(path):
    """
    Remove a leading slash from Windows paths, which should be relative or
    begin with a drive letter, not a slash.

    Returns None for None or empty input; non-Windows paths pass through.
    """
    if path is None or path == '':
        return None
    # Windows paths sometimes arrive as '/C:/foo/bar' --- strip the slash.
    on_windows = platform.system().lower() == "windows"
    if on_windows and path[0] in ("/", "\\"):
        return path[1:]
    return path
def GetNotebookRoot(panes, notebook_id):
    """
    Return the L{AuiPaneInfo} whose notebook id equals `notebook_id`.

    :param `panes`: a list of L{AuiPaneInfo} instances;
    :param `notebook_id`: the target notebook id.
    :returns: the matching pane, or None when no pane matches.
    """
    return next(
        (pane for pane in panes
         if pane.IsNotebookControl() and pane.notebook_id == notebook_id),
        None,
    )
def is_boundary_edge(a, b, bdy_edges):
    """
    Check whether the directed edge (a, b) is in the list of boundary edges.
    """
    return any(a == a0 and b == b0 for a0, b0 in bdy_edges)
def IsJsFile(ref):
    """Return True if *ref* names a JavaScript file (ends with '.js')."""
    js_suffix = '.js'
    return ref.endswith(js_suffix)
def format_number(x):
    """
    Format a nonnegative number as an integer if it rounds to at least ten,
    otherwise as a decimal rounded to two significant digits.
    """
    if x == 0:
        return '0'
    rounded = int(round(x))
    if rounded >= 10:
        return str(rounded)
    # Count how many decimal shifts bring x to >= 10; that count is the
    # number of fractional digits giving two significant digits.
    scaled, digits = x, 0
    while scaled < 10:
        scaled *= 10
        digits += 1
    return ('{:.' + str(digits) + 'f}').format(x)
def colon_what(colon_word, words):
    """Re-join a :<word> command with its arguments for error reporting."""
    if not words:
        return colon_word
    return '{} {}'.format(colon_word, ' '.join(words))
def get_min_maf(mafStr):
    """Choose the minimum MAF from ':'-concatenated MAF scores.

    The result is capped at 1.0 (the starting value), matching the
    original accumulator behavior.
    """
    lowest = 1.0
    for token in mafStr.split(':'):
        lowest = min(lowest, float(token))
    return lowest
import torch
def dual_complete(u, v, s, alpha, beta, eps):
    """
    min_{u>=0, v<=0} d(u, v)
    = E_xy [ u(x)alpha(x) + v(y)beta(y) + Softplus(1/eps)(s-u-v) ]

    With eps == 0 the softplus degenerates to a ReLU.
    """
    # Reshape duals into a column (u) and a row (v) so s - u - v broadcasts.
    u = torch.as_tensor(u, device=s.device).reshape((-1, 1))
    v = torch.as_tensor(v, device=s.device).reshape((1, -1))
    slack = s - u - v
    if eps > 0:
        penalty = torch.nn.Softplus(1. / eps)(slack)
    else:
        penalty = torch.nn.ReLU()(slack)
    return (u * alpha).mean() + (v * beta).mean() + penalty.mean()
def nth_fibonacci(number: int) -> int:
    """Return the value of the nth Fibonacci sequence element.

    Args:
        number (int): sequential number (1-based; nth_fibonacci(1) == 0)

    Examples:
        >>> assert nth_fibonacci(4) == 2
    """
    previous, current = 0, 1
    for _ in range(number - 1):
        previous, current = current, previous + current
    return previous
def __copy__(self) :
"""Return a copy of self"""
return type(self)(self); | 2179c5b5f5ccc86621af4c0ea16164731fd8e9fe | 103,353 |
import fnmatch
def match_extension(name, extensions=None):
    """
    Determine whether a file name matches any of the given patterns.

    :param name: filename to be examined
    :param extensions: list of fnmatch file patterns (defaults to ["*.*"])
    :return: True if any pattern matches
    """
    if extensions is None:
        extensions = ["*.*"]
    # any() short-circuits on the first match; the original built a full
    # list of booleans and then scanned it with `True in state`.
    return any(fnmatch.fnmatch(name, pattern) for pattern in extensions)
def replace_by_index(stretch, seq):
    """
    Replace a stretch of *seq* with the letter N.

    :param stretch: start and end index in the format 'start:end'
    :param seq: sequence to change
    :return: sequence with Ns over the specified stretch
    """
    start_str, end_str = stretch.split(':')
    start, end = int(start_str), int(end_str)
    return seq[:start] + 'N' * (end - start) + seq[end:]
import torch
def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100):
    """Concatenate two tensors on axis 0, padding axis 1 with
    *padding_index* if their widths differ."""
    # Fast path: 1-D tensors or matching second dimensions concatenate directly.
    if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]:
        return torch.cat((tensor1, tensor2), dim=0)
    rows = tensor1.shape[0] + tensor2.shape[0]
    cols = max(tensor1.shape[1], tensor2.shape[1])
    merged = tensor1.new_full((rows, cols) + tensor1.shape[2:], padding_index)
    merged[: tensor1.shape[0], : tensor1.shape[1]] = tensor1
    merged[tensor1.shape[0]:, : tensor2.shape[1]] = tensor2
    return merged
def get_logfc_mmm_matrix(df, alpha=0.05):
    """
    For each gene, get the logfc mean absolute deviation (MAD), max, and
    median values across treatments (rows with padj <= alpha only).
    """
    def mad(series):
        # Mean absolute deviation around the mean — the semantics of the
        # former Series.mad(), whose string alias was removed in pandas 2.0.
        # Named `mad` so the aggregated column keeps its original label.
        return (series - series.mean()).abs().mean()

    # Significance filter
    sig = df["padj"] <= alpha
    # Filter by significance, group by gene, and aggregate log2foldchange
    mmm_df = (
        df[sig]
        .groupby(["gene"])
        .agg({"log2foldchange": [mad, "max", "median"]})
    )
    return mmm_df
import random
def generate_individual(a, b, n):
    """Generate a list of *n* uniform random parameters in [a, b]."""
    return [random.uniform(a, b) for _ in range(n)]
from pathlib import Path
import hashlib
def hash_dir(vocab_folder_path: Path) -> str:
    """
    Generate a SHA-256 digest from the *.csv contents of a directory.

    :param vocab_folder_path: Path to the directory
    :returns: hex SHA-256 digest

    Files are processed in sorted order so the digest is deterministic;
    glob() order is filesystem-dependent. (Docstring previously claimed
    MD5 — the algorithm is and remains SHA-256.)
    """
    hash_obj = hashlib.sha256()
    for vocab_file in sorted(vocab_folder_path.glob('*.csv')):
        with vocab_file.open('rb') as fp:
            hash_obj.update(fp.read())
    return hash_obj.hexdigest()
def trim(s: str) -> str:
    """Strip leading and trailing whitespace from *s* and return the result."""
    stripped = s.strip()
    return stripped
def rvs_from_distribution(*params, batch_size, distribution, size=None, random_state=None):
    """Wrap the rvs method of a scipy-like distribution as an ELFI operation.

    Parameters
    ----------
    params :
        Parameters for the distribution.
    batch_size : int
        Number of samples.
    distribution :
        scipy-like distribution object.
    size : tuple, optional
        Size of a single datum from the distribution.
    random_state : RandomState or None

    Returns
    -------
    Random variates from the distribution, with leading batch dimension.

    Notes
    -----
    Used internally by RandomVariable to wrap distributions for the framework.
    """
    full_size = (batch_size,) if size is None else (batch_size,) + size
    return distribution.rvs(*params, size=full_size, random_state=random_state)
def my_formatwarning(message, category, filename, lineno):
    """Return a warning message formatted for Visual Studio's error parser.

    Note: *category* is accepted for API compatibility but not shown.
    """
    return f"{filename}({lineno:d}) : warning: {message}"
def _scores_to_labels(scores, classes):
"""Convert a list of scores into a sorted list of (label,score) for which the score is positive
If no score is positive, return the largest.
The returned list is sorted in decreasing order, so the first entry is the class label to be assigned
in non-multilabel case
"""
if max(scores) <= 0:
# return [(classes[numpy.argmax(scores)], max(scores))] # Return the highest class
return [("others", 1.0)] # Return "others" when there is no positive score
else:
result = sorted([(classes[idx], value) for idx, value in enumerate(scores) if value > 0],
key=lambda x: x[1], reverse=True) # Return only positive scores
# result = sorted([(classes[idx], value) for idx, value in enumerate(scores)],
# key=lambda x: x[1], reverse=True) # Return all scores
return result | 30f427ab6d07ae74abb2a9189dc572725c1f1101 | 103,389 |
def _reorder_bounds(bounds):
"""Reorder bounds from shapely/fiona/rasterio order to the one
expected by Overpass, e.g: `(x_min, y_min, x_max, y_max)` to
`(lat_min, lon_min, lat_max, lon_max)`.
"""
lon_min, lat_min, lon_max, lat_max = bounds
return lat_min, lon_min, lat_max, lon_max | 183563a72f3e24c2bd367fab87647d638821063d | 103,390 |
def approx2step(val, x0, dx):
    """Round *val* up to the first step of size *dx* (starting at *x0*)
    that strictly exceeds it."""
    while x0 <= val:
        x0 += dx
    return x0
def is_valid_id(var):
    """Check that *var* is an integer id.

    bool is a subclass of int, so it is explicitly rejected — True/False
    are not valid ids.
    """
    return isinstance(var, int) and not isinstance(var, bool)
def replaceMultiple(mainString, toBeReplaces, newString):
    """
    Replace each of several substrings in *mainString* with *newString*.
    """
    for old in toBeReplaces:
        # Membership check kept from the original; str.replace is already a
        # no-op for absent substrings.
        if old in mainString:
            mainString = mainString.replace(old, newString)
    return mainString
def get_net_uuid(neutron_client, net_name):
    """Look up the UUID of the first network matching *net_name*.

    (Docstring previously described an unrelated "deprecated external
    network mode" check — a copy-paste error.)

    :param neutron_client: Authenticated neutronclient
    :type neutron_client: neutronclient.Client object
    :param net_name: Network name
    :type net_name: string
    :returns: Network ID
    :rtype: string
    :raises IndexError: if no network with that name exists
    """
    network = neutron_client.list_networks(name=net_name)['networks'][0]
    return network['id']
from typing import Callable
def is_default_resolver(func: Callable) -> bool:
    """Check whether *func* is a default resolver (marked via the
    `_is_default` attribute) rather than a user-provided one."""
    marker = getattr(func, "_is_default", False)
    return marker
def internal_handler(filename: str):
    """ skip handling filenames with a leading _ """
    # NOTE(review): the body ignores `filename` and always declines, so
    # *every* filename is skipped, not just those with a leading '_'.
    # Looks like a placeholder — confirm intended behavior before relying
    # on the docstring.
    return False  # Just about all other handler checks
def remove_full_rowspans(rows):
    """Remove rows in which all cells carry the same text."""
    kept = []
    for row in rows:
        # A row with more than one distinct value is a real data row.
        if len(set(row)) > 1:
            kept.append(row)
    return kept
def _getEntry(root):
"""
Get the node in the xml data that contains the result from the query to save. If multiple entries are found
in the query result, this function raises an error.
:param root: Root of the XML data from the arXiv query
:raises: ValueError is the entry cannot be extracted from the XML data
:returns: Node that contains the results from the query
"""
entry = None
for i in root:
if i.tag.endswith('entry'):
if entry is not None:
raise ValueError('Multiple entries in result')
entry = i
return entry | aba900d39c1a52b63409ff267be865d0eba7247b | 103,418 |
def union(list_a: list, list_b: list) -> list:
    """Return the set union of two lists (order unspecified).

    A None argument is treated as the single-element list [None].
    """
    left = [None] if list_a is None else list_a
    right = [None] if list_b is None else list_b
    return list(set(left) | set(right))
def crcl_schwartz(height: float, creatinine: float) -> float:
    """Return creatinine clearance using the Schwartz method.

    Parameters
    ----------
    height
        height in cm
    creatinine
        serum creatinine value in umol/L

    Returns
    -------
    float
        creatinine clearance
    """
    schwartz_k = 0.413
    # 88.42 converts serum creatinine from umol/L to mg/dL.
    creatinine_mgdl = creatinine / 88.42
    return schwartz_k * height / creatinine_mgdl
def _append_spc_date_to_storm_ids(primary_id_strings, spc_date_string):
"""Appends SPC date to each storm ID.
N = number of storm objects
:param primary_id_strings: length-N list of primary IDs.
:param spc_date_string: SPC date (format "yyyymmdd").
:return: primary_id_strings: Same as input but with new IDs.
"""
return [
'{0:s}-{1:s}'.format(p, spc_date_string) for p in primary_id_strings
] | de5d54dfb322bdbf4ab7e261526b6e295ea1900c | 103,427 |
def check_dead(left_hp: int, right_hp: int) -> bool:
    """
    Check whether either player is dead.

    :param left_hp: hit points of the priority player
    :param right_hp: hit points of the other player
    :return: True if either player's HP is at or below zero
    """
    # Direct boolean expression replaces the if/elif/else returning literals.
    return left_hp <= 0 or right_hp <= 0
import pickle
def build_save_route_by_stop(stop_times_file, FOLDER):
    """
    Build and pickle a dictionary mapping each stop_id to the routes
    passing through it.

    Args:
        stop_times_file (pandas.DataFrame): stop_times.txt file in GTFS.
        FOLDER (str): path to network folder (under ./dict_builder/).

    Returns:
        dict: keys: stop_id, values: list of route_ids passing through the
        stop. Format-> dict[stop_id] = [route_id]
    """
    print("building routes_by_stop")
    stops_by_route = stop_times_file.drop_duplicates(subset=['route_id', 'stop_sequence'])[
        ['stop_id', 'route_id']].groupby('stop_id')
    # Renamed the loop variable: the original shadowed the builtin `id`.
    route_by_stop_dict_new = {stop: list(routes.route_id) for stop, routes in stops_by_route}
    with open(f'./dict_builder/{FOLDER}/routes_by_stop.pkl', 'wb') as pickle_file:
        pickle.dump(route_by_stop_dict_new, pickle_file)
    print("routes_by_stop done")
    return route_by_stop_dict_new
def get_listed_buffers(nvim_):
    """Return the numbers of buffers that are still listed.

    `nvim.buffers` includes buffers that have had `:bdelete` called on them
    and aren't in the buffer list, so filter through `buflisted()`.
    """
    listed = set()
    for buf in nvim_.buffers:
        if nvim_.eval("buflisted(%d)" % buf.number):
            listed.add(buf.number)
    return listed
def smooth_array(input, smooth):
    """Smooth a series using an exponential moving average.

    Args:
        input: input data (pandas Series/DataFrame with `.ewm`)
        smooth: smoothing factor, 0 <= smooth < 1 (0 means no smoothing)

    Returns:
        The smoothed data.
    """
    alpha = 1 - smooth
    return input.ewm(alpha=alpha).mean()
import hashlib
def get_hash_string(text):
    """
    Return the hex MD5 digest of *text*.

    Args:
        text (str): text to hash
    Returns:
        str: hex digest

    NOTE: MD5 is not collision-resistant — fine for cache keys /
    fingerprints, do not use for anything security-sensitive.
    """
    # Dropped the dead `ret = ''` pre-assignment and intermediate object.
    return hashlib.md5(text.encode()).hexdigest()
def UsersInvolvedInHotlists(hotlists):
    """Return the set of all users holding any role in the given hotlists."""
    involved = set()
    for hotlist in hotlists:
        for id_list in (hotlist.owner_ids, hotlist.editor_ids, hotlist.follower_ids):
            involved.update(id_list)
    return involved
def _read_netcdf_grid_shape(root):
    """Read the grid shape from a GEBCO NetCDF file.
    Parameters
    ----------
    root : netcdf_file
        A NetCDF file.
    Returns
    -------
    tuple of int
        The shape of the grid as number of rows, then columns.
    """
    # Reads the full contents of the "dimension" variable. NOTE(review):
    # the actual return is whatever the NetCDF library yields for `[:]`
    # (typically an array-like, not a Python tuple) — the "tuple of int"
    # claim above holds only element-wise; confirm callers don't rely on
    # tuple identity. Row/column ordering also depends on how the file
    # stores "dimension" — not verifiable from this code alone.
    return root.variables["dimension"][:]
def mu_CRRA(cons, gamma):
    """
    Return the marginal utility of the CRRA utility function.

    :param cons: consumption.
    :param gamma: relative risk aversion.
    :return: marginal utility, cons ** (-gamma).
    """
    return cons ** (-gamma)
def deduplicate_edges(edges):
    """
    Build a set of edges with no reverse duplicates.

    :param edges: iterable of (u, v) pairs
    :return: set of unique edges; when both (u, v) and (v, u) appear, only
        the first-seen orientation is kept
    """
    unique = set()
    for source, target in edges:
        if (target, source) not in unique:
            unique.add((source, target))
    return unique
def load_names(filename):
    """Load names from a text file, one per line, stripping whitespace."""
    with open(filename, 'r') as handle:
        return [line.strip() for line in handle]
def preproc_meta(metadata):
    """
    Convert the metadata list into a dict keyed by each entry's "_id".

    Entries without an "_id" key are skipped; duplicate ids keep the
    last entry seen.
    """
    indexed = {}
    for entry in metadata:
        try:
            key = entry["_id"]
        except KeyError:
            continue
        indexed[key] = entry
    return indexed
def get_fd_gradients(mp, deltas, multi_state_elt=None):
    """Calculate centered finite difference gradients for each of the
    parameters of the model parameterisation mp.
    "deltas" must be a sequence of the same length as the parameter list, and
    contains the step size for the difference calculations for each parameter.
    "multi_state_elt" selects a particular state for use when mp is a multi-
    state parameterisation.
    """
    # NOTE: mutates mp's parameters in place during the sweep and restores
    # them at the end — the exact set/get ordering below is load-bearing.
    p_vals = mp.get_param_vals()
    assert len(deltas) == len(p_vals)
    fd_grad = []
    for i in range(len(deltas)):
        # Remember the unperturbed value so it can be restored afterwards.
        val = p_vals[i]
        # Backward half-step: evaluate the state at p_i - delta/2.
        p_vals[i] -= deltas[i] / 2.0
        mp.set_param_vals(p_vals)
        if multi_state_elt is None:
            rev_state = mp.get_state()
        else:
            rev_state = mp.get_state(multi_state_elt=multi_state_elt)
        # Forward half-step: add the full delta (now at p_i + delta/2).
        p_vals[i] += deltas[i]
        mp.set_param_vals(p_vals)
        if multi_state_elt is None:
            fwd_state = mp.get_state()
        else:
            fwd_state = mp.get_state(multi_state_elt=multi_state_elt)
        # Central difference: (f(p+d/2) - f(p-d/2)) / d.
        fd_grad.append((fwd_state - rev_state) / deltas[i])
        p_vals[i] = val
    # return to the initial state
    mp.set_param_vals(p_vals)
    return fd_grad
def read_list(data, delims="[]", split=",", strip=" \n\t'"):
    """Read a formatted string and return a list of strings.

    The list text must start at delims[0] and end at delims[1], with
    elements separated by *split*. Characters in *strip* are removed from
    the beginning and end of each element. The standard list format is
    '[array[0], array[1],..., array[n]]'; other delimiter choices cover
    tuples and dictionaries.

    Args:
        data: The string to be read in.
        delims: Two characters: the opening and closing delimiter ('[]').
        split: The separator between elements (',').
        strip: Characters stripped from each element (" \\n\\t'").
    Raises:
        ValueError: If the delimiters cannot be located in *data*.
    Returns:
        A list of strings (empty for an empty list literal).
    """
    try:
        begin = data.index(delims[0])
        end = data.index(delims[1])
    except ValueError:
        raise ValueError("Error in list syntax: could not locate delimiters")
    items = [element.strip(strip) for element in data[begin + 1:end].split(split)]
    # An empty literal like '[]' splits into [''] — normalize to [].
    if items == [""]:
        items = []
    return items
def truncate_labels(labels, min_doc_count):
    """
    Remove labels that occur in fewer than *min_doc_count* documents.

    Sample input:
        [['foo','bar','baz'], ['foo','quux']]
    Sample output (for min_doc_count=2):
        [['foo'], ['foo']]

    :param labels: list of lists of strings OR a pandas.Series column with
        strings (comma-separated tags)
    :param min_doc_count: integer
    :return: list of lists of strings (the input object itself when
        min_doc_count == 0)
    """
    from collections import Counter

    if min_doc_count == 0:
        return labels
    # Count occurrences once, keep the frequent labels in a set: the
    # original scanned a list per label, giving O(rows * labels) work.
    counts = Counter(label for label_row in labels for label in label_row)
    frequent = {label for label, doc_count in counts.items()
                if doc_count >= min_doc_count}
    return [[label for label in label_row if label in frequent]
            for label_row in labels]
def map_keys(batch):
    """Rename dataset keys in place: 'image' -> 'inputs', and copy
    'targets' to 'label' (leaving 'targets' present)."""
    batch['inputs'] = batch.pop('image')
    batch['label'] = batch['targets']
    return batch
def copy(a, order='K'):
    """Create a copy of the given array on the current device.

    The new array is allocated on the current device; if *a* lives on a
    different device, its contents are copied across devices by the
    underlying ``ndarray.copy``.

    Args:
        a (clpy.ndarray): The source array.
        order ({'C', 'F', 'A', 'K'}): Memory layout of the copy. 'A' means
            'F' if *a* is column-major, otherwise 'C'; 'K' keeps strides as
            closely as possible.

    Returns:
        clpy.ndarray: The copy of ``a`` on the current device.

    See: :func:`numpy.copy`, :meth:`clpy.ndarray.copy`
    """
    return a.copy(order=order)
def fizzbuzz(n: int) -> str:
    """
    Return 'fizz' if *n* is divisible by 5, 'buzz' if divisible by 7,
    'fizzbuzz' if divisible by both, otherwise ''.

    Example:
        >>> fizzbuzz(35)
        'fizzbuzz'
        >>> fizzbuzz(36)
        ''

    :param n: non-negative integer
    :raises TypeError: if *n* is not an int (bool is rejected explicitly,
        since bool subclasses int)
    :raises ValueError: if *n* is negative
    """
    if not isinstance(n, int) or isinstance(n, bool):
        raise TypeError(f'int expected as input, got {type(n)}')
    if n < 0:
        # The original used an f-string with no placeholder here.
        raise ValueError('Positive input expected')
    return f"{'fizz' if n % 5 == 0 else ''}{'buzz' if n % 7 == 0 else ''}"
import copy
def merge_facts(orig, new, additive_facts_to_overwrite):
    """ Recursively merge facts dicts
        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
        Returns:
            dict: the merged facts

        NOTE(review): as written, keys present only in ``orig`` are not
        carried over, and for non-dict, non-additive keys present in both,
        the ``orig`` value wins — confirm this is the intended merge policy.
    """
    additive_facts = ['named_certificates']
    facts = dict()
    # Py3 fix: dict.iteritems() no longer exists; items() behaves the same
    # for this read-only iteration (and also works on Python 2).
    for key, value in orig.items():
        if key in new:
            if isinstance(value, dict) and isinstance(new[key], dict):
                relevant_additive_facts = []
                # Keep additive_facts_to_overwrite if key matches
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts)
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                # Fact is additive so we'll combine orig and new.
                if isinstance(value, list) and isinstance(new[key], list):
                    # Deduplicate while preserving first-seen order.
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.copy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
                else:
                    facts[key] = copy.copy(new[key])
            else:
                facts[key] = copy.deepcopy(value)
    # Keys that exist only in `new` are copied in verbatim.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        facts[key] = copy.deepcopy(new[key])
    return facts
def two_number_sum(arr, target):
    """Takes an array and target sum and return a pair of elements from that array
    that sum to the target.
    Args:
        arr (list): array of elements (may be empty)
        target (int): target sum
    Returns:
        list: [element, complement] for the first completing element found,
        or [] when no such pair exists.
    """
    # Set of elements seen so far; O(1) membership instead of a dict-as-set.
    seen = set()
    for element in arr:
        complement = target - element
        if complement in seen:
            return [element, complement]
        seen.add(element)
    # Also handles empty input, which previously crashed (arr[0] seed).
    return []
def _match_version_number(text, regex):
    """! Extract the version component captured by `regex` from `text`
    @param [in] `text` Filename with version string 'MAJOR.MINOR.BUILD'
    @param [in] `regex` Compiled pattern whose first group captures the component
    @return The version component as integer
    """
    # A non-matching pattern raises AttributeError (match is None), as before.
    found = regex.search(text)
    return int(found.group(1))
def prompt_yes_no(question, default=True):
    """Prompt the user through the console for yes or no.

    Keeps asking until a recognised answer is given; an empty answer
    returns *default*.
    """
    yes_answers = ('yes', 'y')
    no_answers = ('no', 'n')
    while True:
        answer = input(question).lower()
        # Empty input (just Enter) accepts the default.
        if not answer:
            return default
        if answer in yes_answers:
            return True
        if answer in no_answers:
            return False
def shift_bytes(key: int, byte: int) -> int:
    """Shift *byte* down by *key*, i.e. return ``byte - key``."""
    shifted = byte - key
    return shifted
import time
def get_response_time(chatbot, statement='Hello'):
    """
    Measure how long a chat bot takes to produce a response.

    :param chatbot: A chat bot instance.
    :type chatbot: ChatBot
    :param statement: Input passed to ``get_response``.
    :returns: The response time in seconds.
    :rtype: float
    """
    started = time.time()
    chatbot.get_response(statement)
    elapsed = time.time() - started
    return elapsed
def _update_cache_value_node_references(cache_value_nodes, traverser):
    """Update value node references in the cache.

    Each value node is replaced by ``traverser.visit_value_node``'s result;
    a falsy cache (None or empty) is returned unchanged.
    """
    if not cache_value_nodes:
        return cache_value_nodes
    return {
        key: traverser.visit_value_node(node)
        for key, node in cache_value_nodes.items()
    }
def getIDs(items):
    """Given a list of Spotify objects with IDs, return the IDs.

    :param items: iterable of dicts, each containing an 'id' entry
    :return: list of the 'id' values, in input order
    """
    # Iterate the items directly instead of indexing via range(len(...)).
    return [item['id'] for item in items]
def tableToDicts(header, entries):
    """Converts a tuple of header names, and a list of entry tuples, to a list of dictionaries
    """
    # One dict per row, pairing header names with the row's values.
    return [dict(zip(header, row)) for row in entries]
def zscore(input_x, dim=0):
    """Standardize *input_x* (zero mean, unit std) along dimension *dim*.
    Parameters
    ----------
    input_x: Tensor
        Input tensor on which zscore will be applied
    dim: int, optional
        Dimension along which to operate. Default is 0.
    Returns
    -------
    Tensor
        A standardized tensor with the same shape as input_x
    """
    # Move the target dimension to the front, normalize, then move it back.
    moved = input_x.transpose(0, dim)
    moved = (moved - moved.mean(0)) / moved.std(0)
    return moved.transpose(dim, 0)
def fromname(self, name):
    """Return the particle data for the given PDG name. Returns None if not found."""
    # First id whose particle record carries the requested name, if any.
    matching_id = next(
        (pid for pid in self.ids() if self[pid].name == name),
        None,
    )
    return None if matching_id is None else self.get(matching_id)
def remove_sorting_column(player_data_list):
    """Remove sorting qualifier column.
    Args:
        player_data_list: list with player data
    Returns:
        player data list with the last (sorting) column removed from each row
    """
    trimmed = []
    for row in player_data_list:
        trimmed.append(row[:-1])
    return trimmed
from typing import OrderedDict
def flatten(d, parent_key='', separator='__'):
    """
    Flatten a nested dictionary.
    Parameters
    ----------
    d: dict_like
        Dictionary to flatten.
    parent_key: string, optional
        Concatenated names of the parent keys.
    separator: string, optional
        Separator between the names of the each key.
        The default separator is '__'.
    Examples
    --------
    >>> d = {'alpha': 1, 'beta': {'a': 10, 'b': 42}}
    >>> flatten(d) == {'alpha': 1, 'beta__a': 10, 'beta__b': 42}
    True
    >>> flatten(d, separator='.') == {'alpha': 1, 'beta.a': 10, 'beta.b': 42}
    True
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + separator + k if parent_key else k
        # OrderedDict is a dict subclass, so a plain dict check covers both.
        if isinstance(v, dict):
            items.extend(flatten(v, new_key, separator).items())
        else:
            items.append((new_key, v))
    return OrderedDict(items)
def get_position(maze, element):
    """Get the position of an element in the maze (pony, domokun, or end-point).

    Returns the element's cell index as an int, or None for an unrecognised
    element name. A recognised name missing from *maze* raises KeyError,
    matching the previous per-branch behaviour.
    """
    # Single lookup replaces the three duplicated if/elif branches.
    if element in ('pony', 'domokun', 'end-point'):
        return int(maze[element][0])
    return None
def rosen(x):
    """Generalized n-dimensional version of the Rosenbrock function.

    Expects an array-like with vectorized slicing/arithmetic (e.g. a numpy
    array); the global minimum value 0 is at x = (1, ..., 1).
    """
    head = x[:-1]
    tail = x[1:]
    return sum(100 * (tail - head ** 2.0) ** 2.0 + (1 - head) ** 2.0)
def strip_name(nickname: str) -> str:
    """
    Return *nickname* with any IRC tag removed.

    An IRC tag is anything starting with "`[`"; the first `[` and
    everything after it are truncated.

    Args:
        nickname (str): raw nickname to strip tags

    Returns:
        str: nickname stripped of tags
    """
    # partition() keeps everything before the first '[' (or the whole
    # string when no '[' is present).
    stripped, _, _ = nickname.partition("[")
    return stripped
def get_field(key_value_pair_list, key):
    """
    Given a list of key-value pairs (dicts with keys 'key' and 'value'),
    find the entry that has the provided `key` and return its value.
    If no `key` is found, return None.
    Keys are assumed unique in `key_value_pair_list`, so the first
    appearance is returned.
    """
    matches = [pair for pair in key_value_pair_list if pair['key'] == key]
    if not matches:
        return None
    return matches[0]['value']
def functional_border_sizes(border_size):
    """Calculate border sizing used in process to gen user specified border size
    If border_size is negative then a stand-in border size is used to allow better keypoint tracking (i think... ?);
    negative border is then applied at end.
    :param border_size: user supplied border size
    :return: (border_size, neg_border_size) tuple of functional border sizes
    >>> functional_border_sizes(100)
    (100, 0)
    >>> functional_border_sizes(-10)
    (100, 110)
    """
    # Non-negative borders are used as-is; nothing to compensate afterwards.
    if border_size >= 0:
        return border_size, 0
    # Negative border: work with a 100px stand-in and record how much to
    # trim at the end (100 plus the requested shrink).
    return 100, 100 + abs(border_size)
def hamming(str1, str2):
    """Calculates the Hamming distance between 2 strings.

    Positions beyond the shorter string's length are ignored (zip stops at
    the shorter input), as in the original.
    """
    distance = 0
    for left, right in zip(str1, str2):
        if left != right:
            distance += 1
    return distance
import torch
def ravel_parameters(para_dict):
    """
    Concatenate all learnable parameters into one flat, detached tensor.
    parameters: learnable variables only
    para_dict = dict(model.named_parameters())
    """
    flat_pieces = [
        param.detach().view(-1)
        for name, param in para_dict.items()
        if param.requires_grad
    ]
    return torch.cat(flat_pieces)
import requests
def get_call(vars, url, header):
    """Generic GET call
    :param vars: Imported variables; must contain a "base_url" entry
    :type vars: Python dict
    :param url: GET call URL (appended to vars["base_url"])
    :type url: String
    :param header: GET call headers
    :type header: Python dict
    :return: GET call response JSON
    :rtype: Python dict
    """
    response = requests.get(vars["base_url"] + url, headers=header)
    return response.json()
def extract_model_configs(full_entry):
    """ Given a full entry, extract model configurations and put into a dict. """
    model = ["ico_encoder", "article_encoder", "attn", "cond_attn",
             "tokenwise_attention", "pretrain_attention",
             "tune_embeddings", "no_pretrained_word_embeddings"]
    problem = ["article_sections", "data_config"]
    # Model keys first, then problem keys — same insertion order as before.
    return {key: full_entry[key] for key in model + problem}
def count_csv_files(files_string):
    """Count number of DataStream CSV files in census tract
    Parses a string of CSV file paths to determine how many
    individual files there are.
    Args:
        files_string (string): single string of file paths separated by
            commas; may also be a float NaN for missing data
    Returns:
        counts (int): number of CSV files (0 when the value is missing)
    """
    text = str(files_string)
    # Missing values arrive as float NaN, whose str() is exactly "nan".
    # len(split(",")) == count(",") + 1, i.e. one more file than commas.
    return 0 if text == "nan" else len(text.split(","))
def cf_to_dec(cf):
    """Compute decimal form of a continued fraction.
    Parameters
    ----------
    cf : array-like
        coefficients of continued fraction.
    Returns
    -------
    x : float
        floating point representation of cf
    """
    # Evaluate from the innermost coefficient outwards instead of recursing;
    # the arithmetic performed is identical to the recursive form.
    value = cf[-1]
    for coefficient in reversed(cf[:-1]):
        value = coefficient + 1 / value
    return value
def contextOf(sentence, r, n):
    """Return the r-radius context of the n-th token of the sentence.

    Args:
        sentence (list of dicts): List of tokens.
        r (int): Radius of context.
        n (int): Position of the center of the context.

    Returns:
        dict: ``{"center": <token n>, "context": [...]}`` where each context
        token is a copy annotated with a "__position" key holding its signed
        offset from the center. Left neighbours are listed closest-first
        with positions -1, -2, ...; right neighbours follow with 1, 2, ...

        Example for r=3 and n=2 on [a, b, c, d, e, f, g]:
            center: c
            context: b(-1), a(-2), d(1), e(2), f(3)

        "__position" enumerates the context like this:
            center is: C
            numerals: A B |C| D E F
            sentence: -2 -1 0 1 2 3
    """
    start = n - r if n - r >= 0 else 0
    # Left neighbours, mirrored (closest first) and negatively enumerated.
    left = [
        {**token, "__position": -offset}
        for offset, token in enumerate(reversed(sentence[start:n]), start=1)
    ]
    # Right neighbours, positively enumerated.
    right = [
        {**token, "__position": offset}
        for offset, token in enumerate(sentence[n + 1:n + r + 1], start=1)
    ]
    return {
        "context": left + right,
        "center": sentence[n],
    }
def out_first_order(triples):
    """
    Sort a list of triples so outward (true) edges appear first.

    Relies on ``sorted`` being stable: triples with ``inverted`` False sort
    ahead of those with ``inverted`` True, preserving relative order within
    each group.
    """
    return sorted(triples, key=lambda triple: triple.inverted)
def upload_as_child(drive, filename, folder_id):
    """
    Upload a file into a parent Drive folder.

    Args:
        drive (GoogleDrive object): Access to Google Drive
        filename (str): Name of file to be uploaded
        folder_id (str): Parent folder drive ID

    Returns:
        GoogleDriveFile: Uploaded file
    """
    uploaded = drive.CreateFile({'parents': [{'id': folder_id}]})
    uploaded.SetContentFile(filename)
    uploaded.Upload()
    return uploaded
def flip_bbox_xy(bbox):
    """Swap the x and y coordinates of both corners of a bounding box."""
    first_a, first_b = bbox[0], bbox[1]
    second_a, second_b = bbox[2], bbox[3]
    return (first_b, first_a, second_b, second_a)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.