| content (string, 39-9.28k chars) | sha1 (string, 40 chars) | id (int64, 8-710k) |
|---|---|---|
import six
def FormatTags(tags_dict):
"""Format a dict of tags into arguments.
Args:
tags_dict: Tags to be formatted.
Returns:
    A string containing the comma-separated formatted tags, e.g. 'k1=v1,k2=v2'.
"""
return ','.join(
'{0}={1}'.format(k, v) for k, v in sorted(six.iteritems(tags_dict)))
|
34f041f64f913bfe7537d71a3997bb6efe21bbdf
| 274,360
|
def get_push_size(op):
"""Get push side increment for given instruction or register."""
ins = op.lower()
if ins == 'pushq':
return 8
elif ins == 'pushl':
return 4
else:
raise RuntimeError("push size not known for instruction '%s'" % (ins))
|
3381f917647cb5799fb66fb7803e95a570e02b5c
| 653,753
|
def compare_float(expected, actual, relTol=None, absTol=None):
"""
Fail if the floating point values are not close enough, with
the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
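A quick usage sketch (hypothetical values) showing both tolerance modes; the function returns None on success and a diff report string on failure:

# Hypothetical usage of compare_float
assert compare_float(1.0, 1.0005, relTol=1e-3) is None  # within 0.1% relative tolerance
report = compare_float(1.0, 1.1, absTol=0.05)           # fails: |1.0 - 1.1| = 0.1 > 0.05
print(report)  # multi-line report of expected/actual/diff/tolerance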
|
14f0bba7bd00f7579188f0402341549f879b7ff7
| 213,548
|
def key_klifs_residues(numbering):
"""
Retrieve a list of PDB residue indices relevant to key kinase conformations mapped via KLIFS.
Define indices of the residues relevant to a list of 12 collective variables relevant to
kinase conformational changes. These variables include: angle between aC and aE helices,
the key K-E salt bridge, DFG-Phe conformation (two distances), X-DFG-Phi, X-DFG-Psi,
DFG-Asp-Phi, DFG-Asp-Psi, DFG-Phe-Phi, DFG-Phe-Psi, DFG-Phe-Chi1, and the FRET L-S distance.
All features are under the current numbering of the structure provided.
Parameters
----------
numbering : list of int
numbering[klifs_index] is the residue number for the given PDB file corresponding to KLIFS residue index 'klifs_index'
Returns
-------
key_res : list of int
Key residue indices
"""
key_res = []
# angle between aC and aE helices
key_res.append(numbering[20]) # residue 21 (res1 in aC)
key_res.append(numbering[28]) # res29 (res2 in aC)
key_res.append(numbering[60]) # res61 (res1 in aE)
key_res.append(numbering[62]) # res63 (res2 in aE)
# key salt bridge
key_res.append(numbering[16]) # res17 (K in beta3)
key_res.append(numbering[23]) # res24 (E in aC)
# DFG conformation and Phe conformation
key_res.append(numbering[27]) # res28 (ExxxX)
key_res.append(numbering[81]) # res82 (DFG-Phe)
# X-DFG Phi/Psi
key_res.append(numbering[79]) # res80 (X-DFG)
# DFG-Asp Phi/Psi
key_res.append(numbering[80]) # res81 (DFG-Asp)
# FRET distance
    # not in the list of 85 (equivalent to Aura "S284"); use the 100% conserved beta-III K as a reference
    key_res.append(numbering[16] + 120)
    # not in the list of 85 (equivalent to Aura "L225"); use the 100% conserved beta-III K as a reference
    key_res.append(numbering[16] + 61)
return key_res
|
40babdaf3a4aa6182ef2eba0866e1e7e85216321
| 113,877
|
def percent_bias(series1, series2):
"""Percent bias (PBIAS) measures the average tendency
of the simulated values to be larger or smaller than their observed ones.
    https://rdrr.io/cran/hydroGOF/man/pbias.html
Args:
series1 (Series): can be considered as model
series2 (Series): can be considered as observed or target
Returns:
float : percent bias
"""
return 100*((series1.sum()-series2.sum())/series2.sum())
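A small worked example (with assumed pandas Series); a positive PBIAS means the modelled total exceeds the observed total:

import pandas as pd
model = pd.Series([10, 12, 14])     # sum = 36
observed = pd.Series([10, 10, 10])  # sum = 30
print(percent_bias(model, observed))  # 100 * (36 - 30) / 30 = 20.0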
|
e271e0d2753bc7396fe5d959eaab422d2963e802
| 599,794
|
def __level_has_fm_terms(df, fm_column_names):
"""Check if a df has non null values for a list of column name
:param df: dataframe
:type df: pandas.DataFrame
:param fm_column_names: column names used
:type fm_column_names: list
:return: True is any column has at least one non null value
:rtype: bool
"""
return df.loc[:, set(fm_column_names).intersection(df.columns)].any().any()
|
cb4087881083aa76e8de85da0c856fd1c6c66837
| 308,984
|
import re
def match(regex, line):
"""Match *entire* line to regex converting all spaces into '\s+' and allowing trailing spaces."""
regex = regex.replace(" ", r"\s+")
regex += r"\s*\Z"
return re.match(regex, line)
|
2ba01ee264ac1c20eb5923743655db487d7f76fc
| 78,987
|
def total_callback(read):
"""
callback for counting total reads
"""
return not read.is_supplementary and not read.is_secondary
|
406263667348cbbe1ba7e55328c17c1f3d8c8535
| 142,651
|
def AB2Jy(ABmag):
"""Convert AB magnitudes to Jansky"""
return 10.**(-0.4*(ABmag+48.60))/1e-23
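A sanity check: by the definition of the AB zero point, magnitude 0 corresponds to about 3631 Jy:

print(AB2Jy(0.0))  # ~3630.78 Jy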
|
a55b70df44f56461d935c8e5aa8aff50df26a982
| 9,280
|
from typing import Dict
from typing import Any
def _get_fp_len(fp_params: Dict[str, Any]) -> int:
"""
Return the length of the fingerprint with the given parameters.
Parameters
----------
fp_params : Dict[str, Any]
Parameters to get the fingerprint length from
Returns
-------
int
The fingerprint length belonging to the given fingerprint parameters
"""
    return fp_params.get('nBits', fp_params.get('fpSize', 166))
|
944e952ad07fa0fa5ea11d5bff8e46b98c1ab87e
| 32,078
|
def decorate_table(table_text, convert_fun, d_cols=" & ", d_rows="\\\\\n"):
"""Transforms text of the table by applying converter function to each element of this table.
:param table_text: (str) text of the table.
:param convert_fun: (str => str) a function to be applied to each element of the table.
:param d_cols: (str) delimiter between columns.
:param d_rows: (str) delimiter between rows.
:return: (str) text of the converted table.
"""
def process_cell(s):
return str(convert_fun(s))
if d_cols not in table_text:
return table_text # delimiter was not present
splitted = table_text.split(d_cols)
new_text = ""
for i in range(0, len(splitted)):
s = splitted[i]
last_in_row = d_rows in s
if last_in_row:
two_elems = s.split(d_rows)
decorated = process_cell(two_elems[0]) + d_rows
if len(two_elems) > 1 and two_elems[1] != '':
decorated += process_cell(two_elems[1])
else:
            decorated = process_cell(s)
new_text += decorated
if i < len(splitted)-1:
new_text += d_cols
return new_text
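A brief usage sketch on a two-row LaTeX-style table, wrapping every cell in \textbf{}:

table = "1 & 2\\\\\n3 & 4"
out = decorate_table(table, lambda s: "\\textbf{" + s + "}")
# out == "\\textbf{1} & \\textbf{2}\\\\\n\\textbf{3} & \\textbf{4}"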
|
55788a8ffb853702b81b38dc446ca9951371f9c9
| 18,105
|
def qualname(cls):
"""
    Return the fully qualified 'module.name' of a class, as a rough substitute
    for the Python 3.3+ __qualname__ attribute. Unlike __qualname__, class
    nesting (e.g. 'C.D') is not reflected; the result is always
    '<module>.<name>'.
"""
return '%s.%s' % (cls.__module__, cls.__name__)
|
4807b88f7e16279ee5e8105a98e82fcf25351593
| 566,740
|
from typing import Tuple
from typing import Optional
def _non_digit_cloud(cloud: str) -> Tuple[Optional[str], str]:
"""Returns cloud type and altitude for non-digit TOPS BASES cloud elements"""
if cloud.endswith("FT"):
return None, cloud[:-4]
return cloud[:3], cloud[3:]
|
a6e019da25a5200e753a3d682c4ddd1e0e15e4f9
| 258,162
|
def to_list(x, allow_tuple=False):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
# Arguments
x: target object to be normalized.
allow_tuple: If False and x is a tuple,
it will be converted into a list
with a single element (the tuple).
Else converts the tuple to a list.
# Returns
A list.
"""
if isinstance(x, list):
return x
if allow_tuple and isinstance(x, tuple):
return list(x)
return [x]
|
8e11ed5e80ad0d40c560ad40b8041bf24012659b
| 544,096
|
def search_dico(a, x):
"""
    Binary search: returns an index of x in the sorted list a if present, None otherwise.
"""
mn = 0
mx = len(a) - 1
i = 0
while mx - mn > 1:
i = (mn + mx) // 2
if a[i] == x:
return i
elif x > a[i]:
mn = i
else:
mx = i
if a[mn] == x:
return mn
elif a[mx] == x:
return mx
else:
return None
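A short usage sketch; note the input list must already be sorted in ascending order:

sorted_list = [1, 3, 5, 7, 9]
print(search_dico(sorted_list, 7))  # 3
print(search_dico(sorted_list, 4))  # None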
|
0335c5918ff6363e173814f95a2e99b79ea740d5
| 469,998
|
def parse_curie(s: str):
"""
Takes a CURIE formatted string and returns the namespace and identifier in a tuple.
If multiple colons appear in the string, the first is taken to be the delimiter.
Does not allow empty namespace elements (s starts with colon).
"""
if ":" in s:
cidx = s.index(":")
if cidx == 0:
raise RuntimeError("CURIE namespace element is empty")
ns = s[:cidx]
value = s[cidx+1:]
return (ns, value)
else:
raise RuntimeError("Could not parse CURIE: " + s)
|
42378dc777811966801dc186725e336e0f6a172e
| 493,527
|
from contextlib import suppress
def rename_aesthetics(obj):
"""
Rename aesthetics in obj
Parameters
----------
obj : dict or list
Object that contains aesthetics names
Returns
-------
obj : dict or list
        Object with British aesthetic names renamed to the American spellings
"""
lookup = {
'colour': 'color',
'outlier_colour': 'outlier_color',
}
if isinstance(obj, dict):
for eng, us in lookup.items():
with suppress(KeyError):
obj[us] = obj.pop(eng)
else:
obj = [lookup[x] if x in lookup else x for x in obj]
return obj
|
0c1310f72b6e7b1c8ec5b064f8daf2c3c31afbe7
| 409,528
|
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
|
8c1858689146c9ffd1507fbe01a244e2aec42638
| 429,659
|
def parse_bbmap_log_file(log_file):
"""
parses a bbmap log file (paired, single end or both (bbrwap))
returns number of used and mapped reads.
    This is the sum of SE + R1 + R2 reads
"""
N_results = 0
mapped = 0
used = 0
with open(log_file) as f:
        for line in f:
            if line.startswith("mapped:"):
                # mapped: 85.4123% 27804399 85.4133% 2774727540
                try:
                    mapped_reads = int(line.strip().split(":")[1].split()[1])
                except (IndexError, ValueError) as e:
                    raise Exception(f"Error parsing line:\n{line}\n") from e
                mapped += mapped_reads
            elif line.startswith("Reads Used:"):
                # Reads Used: 65106274 (6496839447 bases)
                try:
                    used_reads = int(line.strip().split(":")[1].split()[0])
                except (IndexError, ValueError) as e:
                    raise Exception(f"Error parsing line:\n{line}\n") from e
                used += used_reads
            elif "Results" in line:
                N_results += 1
                assert N_results <= 2, "you have more than one library in this log file"
if used == 0:
raise IOError(
f"I couldn't parse the log file, probably the job didn't finish properly: {log_file}"
)
assert (
used >= mapped
), "something is wrong, you have more than 100% mapped reads?"
return used, mapped
|
594268b30275b476d0593fee55ed13eaded2f20d
| 191,892
|
def validate_subsequence_for_loop(arr, seq):
"""
>>> arr = [5, 1, 22, 25, 6, -1, 8, 10]
>>> seq = [1, 6, -1, 10]
>>> validate_subsequence_for_loop(arr, seq)
True
>>> arr = [5, 1, 22, 25, 6, -1, 8, 10]
>>> seq = [1, 6, 2, 10]
>>> validate_subsequence_for_loop(arr, seq)
False
"""
seq_idx = 0
for item in arr:
if seq_idx == len(seq):
break
if seq[seq_idx] == item:
seq_idx += 1
return seq_idx == len(seq)
|
68b8d59d5d6a28962d43a31edad0955270e16e8b
| 376,720
|
def meta_attr(obj, key):
"""Returns an attribute of an objects _meta class"""
return getattr(obj._meta, key, "")
|
6a3058a9bc45be66fb193b929c8b6f83921c6fbf
| 254,215
|
def get_pairs(val):
"""Given val, returns the set of pairs 1 <= {i, j} <= val, s.t. order does not matter"""
pairs = []
for i in range(val):
for j in range(i, val):
pairs.append([i, j])
return pairs
|
1606809214fc8d1a8fb46b5a2ce702b940c2f8b7
| 290,339
|
def _shape_array_to_matrix(shape):
""" From [x1 y1 x2 y2 ... xp yp]
to [[x1 y1]
[x2 y2]
.
.
.
[xp yp]]
"""
assert len(shape.shape) == 1
temp_shape = shape.reshape((-1, 2))
return temp_shape
|
efc352a57ab1fee7b2cf470e14911eedabd07767
| 410,687
|
import re
def make_file_extension_match(extension):
"""Return a function that searches for filename extensions.
"""
    def is_file_match(filename):
        return bool(re.search(r"\." + extension + r"$", filename))
return is_file_match
|
4b63e0546f6e4535cac5250f6f215d0cd65a0676
| 405,571
|
def setup_abort_cmd(parser):
"""
Sets up an 'abort' command for a strategy command parser.
ex: sw-manager patch-strategy abort <some args>
    :param parser: the strategy parser to add the abort command to.
"""
abort_cmd = parser.add_parser('abort',
help='Abort a strategy')
abort_cmd.set_defaults(cmd='abort')
abort_cmd.add_argument('--stage-id',
help='stage identifier to abort')
return abort_cmd
|
ba618632e40b2cd96b16797416f624df38fb21b9
| 285,898
|
def compute_sea_level(altitude: float, atmospheric: float) -> float:
"""
Calculates the pressure at sea level (in hPa) from the specified altitude
(in meters), and atmospheric pressure (in hPa).
# Equation taken from BMP180 datasheet (page 17):
# http://www.adafruit.com/datasheets/BST-BMP180-DS000-09.pdf
Args:
altitude : Altitude in meters
atmospheric : Atmospheric pressure in hPa
    Return:
        float: The equivalent sea-level pressure in hPa
"""
return atmospheric / pow(1.0 - (altitude / 44330.0), 5.255)
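A worked example, assuming a station at 1000 m reading 900 hPa:

print(compute_sea_level(1000.0, 900.0))  # ~1014.7 hPa equivalent at sea level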
|
cd4cfb761dfba2fcbd1fc7913b8a332c84b707f6
| 372,100
|
import random
def random_number(bits):
"""Generate a random integer that will cleanly fit in a number of bits."""
    lo, hi = 2**(bits - 1), 2**bits - 1  # avoid shadowing the min/max builtins
    return random.randint(lo, hi)
|
89e7f167bc7af35b7193a7c148f863fd1e185a74
| 40,251
|
import typing
import fnmatch
def filter_by_patterns(
name: str,
include_patterns: typing.Optional[typing.List[str]] = None,
exclude_patterns: typing.Optional[typing.List[str]] = None,
):
"""
Decide if a name should be included, given a set of include/exclude patterns. These are unix-like patterns,
implemented with Python's fnmatch https://docs.python.org/3/library/fnmatch.html.
Exclude patterns override include ones.
:param name: The name to be filtered
:param include_patterns: A list of wildcard patterns to match the name you want to include
:param exclude_patterns: A list of wildcard patterns to match the name you want to exclude
:return: True if it should be included, False otherwise
"""
def _is_a_pattern(patterns: typing.List[str]):
for pattern in patterns:
if fnmatch.fnmatch(name, pattern):
return True
return False
is_included = True
is_excluded = False
if include_patterns:
is_included = _is_a_pattern(include_patterns)
if exclude_patterns:
is_excluded = _is_a_pattern(exclude_patterns)
if is_excluded or not is_included:
return False
return True
|
e6bd7ddb05e93f395b054e95a9ec28a70f7af919
| 173,244
|
def children(tree):
"""
The acorn AST is tricky. This returns all the children of an element
:param ast tree:
:rtype: list[ast]
"""
    assert isinstance(tree, dict)
    ret = []
    for k, v in tree.items():
        if isinstance(v, dict) and v.get('type'):
            ret.append(v)
        if isinstance(v, list):
ret += filter(None, v)
return ret
|
740e13b4f8a92da196cacaf9dbea1f5daa4f44f3
| 332,804
|
def get_smallest_divisible_number_brute_force(max_factor):
"""
Get the smallest divisible number by all [1..max_factor] numbers by brute force.
"""
number_i = max_factor
while True:
divisible = True
for factor_i in range(1, max_factor+1):
if number_i % factor_i > 0:
divisible = False
break
if divisible:
return number_i
number_i += 1
|
f30159375bf852e77da2fcee21f8c5e407042b95
| 690,310
|
def subdict(dic, keys):
"""
Returns a new dictionary dic2 such that
dic2[i] = dic[i] for all i in keys
dic -- a dictionary
keys -- a list of keys
"""
dic2 = {}
for key in keys:
if key in dic:
dic2[key] = dic[key]
return dic2
|
fd9dbb7b38834a6ccfbacb666cc8f7a0badaa1ac
| 621,080
|
import itertools
def combine_bucket(
parts: list,
threshold: int,
truncate: bool = False,
keep_remain: bool = False) -> list:
"""
Convert parts to buckets with given length(threshold).
Parameters
----------
parts: the given parts.
threshold: bucket length.
truncate: whether to truncate those whose length is bigger than threshold.
keep_remain: when truncate=True, whether to keep the remain parts.
    Returns
    -------
    out: list of buckets.
"""
def deal_long_part(part: str) -> list:
result = []
if truncate:
if keep_remain:
len_subparts = len(part) // threshold + 1
for i in range(len_subparts):
sub_part = part[i*threshold: (i+1)*threshold]
if sub_part:
result.append(sub_part)
else:
result.append(part[:threshold])
else:
result.append(part)
return result
buckets = []
while parts:
part = parts.pop(0)
# directly add to buckets when a part is longer than threshold
if len(part) > threshold:
sub_parts = deal_long_part(part)
buckets.append(sub_parts)
else:
while parts and len(part) < threshold:
another = parts[0]
if len(part + another) > threshold:
break
else:
part += parts.pop(0)
buckets.append([part])
result = list(itertools.chain(*buckets))
return result
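A usage sketch showing the greedy packing and the truncate/keep_remain behaviour:

print(combine_bucket(['ab', 'cd', 'efghij'], 4))  # ['abcd', 'efghij'] (long part kept whole)
print(combine_bucket(['ab', 'cd', 'efghij'], 4, truncate=True, keep_remain=True))  # ['abcd', 'efgh', 'ij']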
|
c994a6cd19a538ea572f24ed17b100db68877f89
| 626,341
|
def ppmv2pa(x, p):
"""Convert ppmv to Pa
Parameters
----------
    x : float
        Gas mixing ratio [ppmv]
    p : float
        Total air pressure [Pa]
    Returns
    -------
    float
        Partial pressure of the gas [Pa]
"""
return x * p / (1e6 + x)
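A worked example, assuming roughly 400 ppmv of CO2 at standard surface pressure:

print(ppmv2pa(400, 101325))  # 400 * 101325 / (1e6 + 400) ~ 40.5 Pa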
|
974d79d022a7fb655040c7c2900988cd4a10f064
| 5,767
|
def zot_getKeys(zot_library):
"""
Extracts the keys from the complete Zotero Library
"""
keys = []
for item in zot_library:
        keys.append(item['data']['key'])
return keys
|
1a9d33067d6223feab2334d9da880bb3160ff08b
| 315,126
|
def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:
"""
Return True if two floats are almost equal
"""
return abs(value_1 - value_2) <= delta
|
ff1c29c57434a169824fe76e451053f3edc6e519
| 696,006
|
def str_to_int(i):
"""
Converts a string into integer. Returns 0 if a string cannot be
converted.
Args:
i (str): string representation of an integer
Returns:
int: conversion of the string
"""
try:
return int(i)
except ValueError:
return 0
|
c4fe93a15228e7f35c69dd0b10b82856a13507fc
| 461,324
|
def filter_output_fields(configs):
"""Remove fields that are not required by CloudWatch agent config file."""
desired_keys = ["log_stream_name", "file_path", "timestamp_format", "log_group_name"]
return [{desired_key: config[desired_key] for desired_key in desired_keys} for config in configs]
|
0ac5f68799f4170becf88049f4608da02bda6a56
| 75,269
|
def receive_message(socket, name):
"""
Receive a message of size 1000 from socket.
"""
try:
response = socket.recv(1000).decode('ascii')
except Exception as e:
print('[' + name + '_ERROR] Did not receive anything in request')
return False
return response
|
6f065517525778586caa7fdfa6aa8a0b314603e4
| 289,443
|
import string
import random
def generate_random_string(
string_length=88,
chars=string.ascii_lowercase + string.ascii_uppercase + string.digits,
remove_confusing_digits=False):
"""
Generate a random string.
:param string_length:
:param chars:
    :param remove_confusing_digits: exclude characters that are often confused with one another (0/o/O and 1/l)
:return:
"""
if remove_confusing_digits:
chars = chars.replace("0", "")
chars = chars.replace("o", "")
chars = chars.replace("O", "")
chars = chars.replace("1", "")
chars = chars.replace("l", "")
return ''.join(random.SystemRandom().choice(chars) for _ in range(string_length))
|
d2ba6bfa407cbe71a8fc65d3350956b1ab51b35a
| 641,786
|
def check_experiment_existence(topic_model):
"""
Checks if topic_model has experiment.
Parameters
----------
topic_model : TopicModel
topic model
Returns
-------
bool
True if experiment exists, in other case False.
"""
    return topic_model.experiment is not None
|
98d16718f3106b5f07d7449feff45d6b19bac18a
| 325,447
|
import torch
def gradient_wrt_input(model, target_weights, initial_guess, n_iter=100, mask=None, lr=1e-1, verbose=True, device=None,
dtype=None):
"""Find input tensor such that the model produces an allocation close to the target one.
Parameters
----------
model : torch.Module
Network that predicts weight allocation given feature tensor.
target_weights : torch.Tensor
Vector of targeted asset weights of shape `(n_assets,)`.
initial_guess : torch.Tensor
Initial feature tensor serving as the starting point for the optimization. The shape is
`(n_channels, lookback, n_assets)` - the sample dimension is not included.
    n_iter : int
        Number of iterations of the gradient descent (or other) algorithm.
    mask : None or torch.Tensor
        If specified, a boolean ``torch.Tensor`` of the same shape as `initial_guess` that
        lets one choose elementwise which parts of the input to optimize (True) and which
        to keep fixed at the initial guess (False).
lr : float
Learning rate for the optimizer.
verbose : bool
If True, then verbosity activated.
dtype : None or torch.dtype
Dtype to be used. If specified, casts all used tensors.
device : None or torch.device
Device to be used. If specified, casts all used tensors.
Returns
-------
result : torch.Tensor
Feature tensor of the same shape as `initial_guess` that is mapped by the network (hopefully)
close to `target_weights`.
hist : list
List of losses per iteration.
"""
device = device or torch.device('cpu')
dtype = dtype or torch.float32
x = initial_guess.clone().to(device=device, dtype=dtype)
x.requires_grad = True
if mask is None:
mask = torch.ones_like(x)
elif torch.is_tensor(mask):
if mask.shape != x.shape:
raise ValueError('Inconsistent shape of the mask.')
else:
raise TypeError('Incorrect type of the mask, either None or torch.Tensor.')
# casting
mask = mask.to(dtype=torch.bool, device=device)
model.to(device=device, dtype=dtype)
target_weights = target_weights.to(device=device, dtype=dtype)
optimizer = torch.optim.Adam([x], lr=lr)
model.train()
hist = []
for i in range(n_iter):
if i % 50 == 0 and verbose:
msg = '{}-th iteration, loss: {:.4f}'.format(i, hist[-1]) if i != 0 else 'Starting optimization'
print(msg)
loss_per_asset = (model((x * mask)[None, ...])[0] - target_weights) ** 2
loss = loss_per_asset.mean()
hist.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if verbose:
print('Optimization done, final loss: {:.4f}'.format(hist[-1]))
return x, hist
|
c645ae9389a9d44adcf006d6a772a35f4c42507d
| 694,353
|
def header_status(header):
"""Parse HTTP status line, return status (int) and reason."""
status_line = header[:header.find('\r')]
# 'HTTP/1.1 200 OK' -> (200, 'OK')
    fields = status_line.split(None, 2)
    # a status line without a reason phrase, e.g. 'HTTP/1.1 200', yields only two fields
    reason = fields[2] if len(fields) > 2 else ''
    return int(fields[1]), reason
|
31f0874de7fc0b259da65baad5c75d3f181d61b9
| 242,988
|
def find_node_input_name(node, name):
"""
Finds a node input by its name.
:param node: onnx node
    :param name: input name to look for
:return: input index
"""
for i, inode in enumerate(node.input.node):
if inode.name == name:
return i
return -1
|
32ec5f35a6a4c7811931cfcfc2213ca7667a1ba6
| 216,928
|
from typing import Dict
def truecase(word: str, case_counter: Dict[str, int]):
"""
Truecase a word using a Truecase dictionary
:param word: a word
:param case_counter: A counter; a dictionary of words/tokens and their relative frequency counts
:return: the truecased word
>>> case_counts ={"caesar": 1, "Caesar": 99}
>>> truecase('CAESAR', case_counts)
'Caesar'
"""
lcount = case_counter.get(word.lower(), 0)
ucount = case_counter.get(word.upper(), 0)
tcount = case_counter.get(word.title(), 0)
if lcount == 0 and ucount == 0 and tcount == 0:
return word #: we don't have enough information to change the case
if tcount > ucount and tcount > lcount:
return word.title()
if lcount > tcount and lcount > ucount:
return word.lower()
if ucount > tcount and ucount > lcount:
return word.upper()
return word
|
afc7d5de89ffb5c84ab43b129a88a3c43ce4b98a
| 373,773
|
import random
def sample_coal_count(a, t, n):
"""
    Sample the number of lineages remaining after 't' generations, starting
    with 'a' lineages and population size 'n'
"""
t2 = 0.0
b = a
while b > 1:
rate = b * (b - 1) / 2.0 / n
t2 += random.expovariate(rate)
if t2 < t:
b -= 1
else:
break
return b
|
f972a2d2e6002d73bd3f89ddead06e8991edf820
| 357,978
|
import struct
def read_plain_byte_array(file_obj, count):
"""Read `count` byte arrays using the plain encoding."""
    return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0]) for _ in range(count)]
|
f300d205fda9b1b92ebd505f676b1f76122f994d
| 707,522
|
def str2bool(value):
"""Translate a string to a boolean value."""
return str(value).lower() in ("yes", "true", "y", "1")
|
30618d1589d1c9ad21f527ffa523df7dc2aa38f2
| 209,522
|
from typing import List
from typing import Dict
def parse_csl_author(authors: List[Dict[str, str]]) -> List[str]:
"""Parse the author list in csl json."""
parsed_authors = []
for author in authors:
        family_name = author.get("family", "")
        given_name = author.get("given", "")
        parsed_author = f"{family_name}, {given_name}"
parsed_authors.append(parsed_author)
return parsed_authors
|
8522f42b97758c9ba1378cda3c44c34632c183ee
| 191,378
|
def string2number(i):
""" Convert a string to a number
Input: string (big-endian)
Output: long or integer
"""
return int(i.encode('hex'), 16)
|
552a40383bbed73f8cba668a38b3770431e740a3
| 575,571
|
def sum_weighted_completion_times(jobs):
"""Return sum of weighted completion times."""
total = 0
time_so_far = 0
    for j in jobs:
        time_so_far += j[1]          # j[1] is the job's processing time
        total += j[0] * time_so_far  # j[0] is the job's weight
return total
|
f3aefdc64a3cce9cf17e5f8782c54338f4ef19f8
| 186,623
|
def geo_cellsize(raster_geo, x_only=False):
"""Return pixel width & pixel height of geo-transform
Args:
raster_geo (tuple): :class:`gdal.Geotransform` object
x_only (bool): If True, only return cell width
    Returns:
        float or tuple: cell width only (if x_only), else (cell width, cell height)
"""
if x_only:
return raster_geo[1]
else:
return (raster_geo[1], raster_geo[5])
|
1705a941ab2a16582cbb6de9700724523700f274
| 210,314
|
import torch
def sequence_mask(lengths, max_seq_length=None, dtype=torch.bool):
"""
Returns a mask tensor representing the first N positions of each cell.
lengths: integer tensor, all its values <= maxlen.
max_seq_length: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in lengths.
dtype: output type of the resulting tensor.
return: A mask tensor of shape lengths.shape + (maxlen,), cast to specified dtype.
sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
"""
if max_seq_length is None:
max_seq_length = lengths.max()
matrix = torch.unsqueeze(lengths, dim=-1)
row_vector = torch.arange(0, max_seq_length, 1).to(matrix.device)
mask = row_vector < matrix
    # Tensor.type() returns a new tensor, so the result of the cast must be returned
    return mask.type(dtype)
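The docstring examples use plain lists for brevity; in practice `lengths` must be a tensor. A minimal sketch:

lengths = torch.tensor([1, 3, 2])
print(sequence_mask(lengths, 5))
# tensor([[ True, False, False, False, False],
#         [ True,  True,  True, False, False],
#         [ True,  True, False, False, False]])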
|
2f6261c37c2a3ccc32d615e314e3251333f258c9
| 632,756
|
def _chunks(data, chunk_size):
"""Returns generator that yields chunks of chunk size of data"""
return (data[i:i + chunk_size] for i in range(0, len(data), chunk_size))
|
84affc3abe78345e0d282e3c789f75415ae28a8c
| 132,716
|
import pytest
@pytest.fixture
def storage_path_fixture(tmpdir_factory):
    """Fixture providing a readinglist pickle file location"""
return tmpdir_factory.mktemp("data").join("readinglist.pickle")
|
5ba907fb1f530873b50752fb62f3063ceaae5a08
| 474,154
|
def get_percentage(new_price, old_price):
"""
    Returns the percentage increase/decrease of new_price relative to old_price.
"""
diff = new_price - old_price
return (diff / old_price) * 100
|
f5f6237ae3611c59307f7c7eeb1867a75cf86e5f
| 170,500
|
def strhash(string):
"""
    Old python string hash function as described in PEP 456 (excluding prefix and suffix), masked to 32 bits.
:param string: string to hash
:return: hash
"""
if string == "":
return 0
x = ord(string[0]) << 7
for c in string[1:]:
        x = ((1000003 * x) ^ ord(c)) & ((1 << 32) - 1)
x = (x ^ len(string))
return x
|
27e9324c8d39d76a5cc3fc0338aeb184716dea5b
| 226,384
|
import requests
def get_resource(url):
""" Function to download a web resource at the specified url.
Example: website, image e.t.c
It returns a requests object
"""
res = requests.get(url)
try:
res.raise_for_status()
except Exception as e:
raise e
return res
|
9c3b419cc278083d1aa6b2d63dd2aa1d4d2abff7
| 199,954
|
def get_ticket_detail(tickets):
"""
Iterate over ticket details from response.
:param tickets: ticket details from the response.
:return: List of ticket details which include required fields from resp.
"""
return [{
'TicketNumber': ticket.get('ticketNumber', ''),
'TicketStatus': ticket.get('ticketStatus', ''),
'DeepLink': ticket.get('deepLink', ''),
'Type': ticket.get('type', ''),
'ConnectorName': ticket.get('connectorName', ''),
'DetailedStatus': ticket.get('detailedStatus', '')
} for ticket in tickets]
|
6c908863086b4f51d913fa6cee76c22ba9fb3a01
| 629,517
|
def indent(text, level):
"""
Indent each line in a string by prepending whitespace
"""
return "\n".join([" " * (4 * level) + line for line in text.split("\n")])
|
16ce4b765a01a9001b092947f19a812260d08096
| 473,046
|
import collections
def getExptSum(dq,mip,rqi):
"""Return a dictionary of experiment uids keyed on MIPs, from list of request items (used in makeTables)"""
cc = collections.defaultdict( set )
for i in rqi:
es = dq.inx.uid[i.esid]
if es._h.label == 'experiment':
cc[es.mip].add(es.uid)
elif es._h.label in ['exptgroup','mip']:
if 'experiment' in dq.inx.iref_by_sect[i.esid].a:
                for eid in dq.inx.iref_by_sect[i.esid].a['experiment']:
                    ex = dq.inx.uid[eid]
                    cc[ex.mip].add(eid)
return cc
|
21d8642894602c64aef6b6b12e6a29593a284072
| 368,778
|
def _LogFilters(name, task_name):
"""Returns filters for log fetcher to use.
Args:
name: string id of the entity.
task_name: String name of task.
Returns:
A list of filters to be passed to the logging API.
"""
filters = [
'resource.type="ml_job"', 'resource.labels.job_id="{0}"'.format(name)
]
if task_name:
filters.append('resource.labels.task_name="{0}"'.format(task_name))
return filters
|
d2aafc13bb350f2233fb81a6448cbe5c7acfa805
| 171,355
|
def power(a,b):
"""
    Computes a to the power of b (a non-negative integer) using recursion
"""
if b == 0:
return 1
if b == 1:
return a
return a * power(a,b-1)
|
454082b59a0f9beb4f641eb1d300d03025a9bc11
| 488,007
|
def calculate_bmi(height, weight, system='metric'):
"""
    Return the body mass index (BMI) for the given weight and height.
    With system='metric', weight is in kg and height in metres; otherwise
    weight is in pounds and height in inches (hence the 703 factor).
"""
if system == 'metric':
bmi = (weight / (height ** 2))
else:
bmi = 703 * (weight / (height ** 2))
return bmi
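A worked example in both unit systems:

print(calculate_bmi(1.80, 75))                    # metric: 75 / 1.80**2 ~ 23.1
print(calculate_bmi(71, 165, system='imperial'))  # 703 * 165 / 71**2 ~ 23.0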
|
3923aaf54ccd3d17bd6b117dbe008cde9ca976bd
| 359,094
|
def pluck(iterable, key, missing_value="N/A"):
"""In an iterable of dicts, return a list of
values from one key
"""
    result = list()
    for item in iterable:
        result.append(dict(item).get(key, missing_value))
    return result
|
219d1cc906d59a07b4623e476524a32c2c17efcd
| 427,112
|
def UrnToFlowId(urn):
"""Converts given URN string to a flow id string."""
components = urn.split("/")
return components[-1]
|
f98b777f9439dcf5c7e72445019e87d9d92f989c
| 52,169
|
def check_exceptions(playername):
"""
Fix exceptions for a player name between Comunio and Comuniazo.
:param playername: Name of the football player.
:return: Corrected name.
"""
exceptions = {'Banega': 'Ever Banega', 'Mikel': u'Mikel González', u'Isma López': u'Ismael López'}
return exceptions.get(playername, playername)
|
4b646a409315578168fa9daae29e1a9e48f94467
| 471,574
|
def get_layers(data_raw, width, height):
"""
Takes raw input data string and splits it into layers, based
on the width and height of the image received.
Returns a list with each entry being a single layer of the
image.
"""
layer_list = []
image_size = width * height
for i in range(0, len(data_raw), image_size):
layer_list.append(data_raw[i:i+image_size])
return layer_list
|
2e7b2d0f23ba0008f974e65b7941066534ed4c41
| 486,087
|
def clean_schema(lst):
"""This method cleans the list items so that they can be compared.
- Strips space
- Remove trailing/leading spaces
- convert to lower case
Args:
lst (list): List to be cleaned
Returns:
list : Cleaned list
"""
schema=[]
for col in lst:
col=col.lower().strip()
col=" ".join(col.split())
schema.append(col)
return schema
|
314d05aaf0dd3cd43cf480b6c3f06298dbdb2785
| 437,234
|
from typing import Iterable
def contains_substring(s_in: str, subs: Iterable[str]) -> bool:
"""
Determine if any of the given substrings is in the given string.
Parameters
----------
s_in: str
Input string to check for given substrings.
subs: iterable of str
Substrings to check for in str
Examples
--------
>>> from glo.helpers import contains_substring
>>> contains_substring("this is a test", ["hey", "there"])
False
>>> contains_substring("this is another test", ["test", "hey", "there"])
True
>>> contains_substring("this is another test", ["this", "is", "another"])
True
>>> contains_substring("THIS IS ANOTHER TEST", ["this", "is", "another"])
False
"""
return any(sub_str in s_in for sub_str in subs)
|
932d131e384be3e0fcb81cc3b975e6adf17812b3
| 284,675
|
import math
def isinf(x):
"""Return True if the real or the imaginary part of x is positive or negative infinity."""
return math.isinf(x.real) or math.isinf(x.imag)
|
3f399fd95f52b35ebb3eb82aa5e7d570a4fe8416
| 79,201
|
def GetMappingKeyName(run, site, verification):
"""Returns a str used to uniquely identify a mapping."""
return 'RunSiteMap_%s_%s_%s' % (run.key().name(),
site.key().name(),
verification.key().name())
|
2e552e2ca2680d05512c4e25b40ea15305cb01be
| 239,819
|
def mass_spectrum_to_string_cols(mass_spectrum):
"""
    Build a row-wise csv string for the mass spectrum.
Parameters
----------
mass_spectrum: Classes.MassSpectrum
Returns
-------
ms_string: str
"""
ms_string = "m/z,relative abundance\n"
    for mz, abundance in zip(mass_spectrum.mz, mass_spectrum.relative_abundances):
        ms_string += f"{mz},{abundance}\n"
return ms_string
|
ef79057daa5b298d926108685f5f5ff2eb621cb8
| 282,688
|
from pathlib import Path
from typing import List
def git_https_packages_to_uninstall(env_path: Path) -> List[str]:
"""
Look for lines of the form "git+https...#egg=name" and return a list of such "name" values.
"""
to_uninstall = []
with env_path.open() as inp:
        for line in inp:
            line = line.strip()
            if line.startswith("#"):  # skip comment lines
                continue
            if "git+https" in line and "#egg=" in line:
                name = line.split("#egg=")[-1]
                to_uninstall.append(name)
    return to_uninstall
|
07298cf92901f297a65510d1146682d7c2024512
| 486,515
|
def get_repos(organization, repositories):
"""
Returns a list of GitHub repos
:param organization: a GitHub organization
:param repositories: a list of repositories to get from the organization
"""
if repositories == "all":
return organization.get_repos()
return [organization.get_repo(repo) for repo in repositories]
|
4351fbc519377c7d774dc6ed0c79111f0a2ecfcc
| 526,372
|
def make_csv(headers, data):
"""
Creates a CSV given a set of headers and a list of database query results
    :param headers: A list containing the first row of the CSV
:param data: The list of query results from the Database
:returns: A str containing a csv of the query results
"""
# Create a list where each entry is one row of the CSV file, starting
# with the headers
csvRows =[','.join(headers),]
# Iterate through the provided data and create the rest of the CSV's rows
for datum in data:
currentRow = ''
for header in headers:
# Get this rows value for the given header
val = getattr(datum, header)
            if type(val) is str:
                # Quote strings, escaping any embedded double quotes
                currentRow += '"' + val.replace('"', '""') + '",'
            elif isinstance(val, (int, float)):
                # Numbers are written unquoted
                currentRow += str(val) + ','
else:
# If it is empty and a place holder
currentRow += ','
csvRows.append(currentRow[:-1])
    # Combine all of the rows into a single string and return it.
    return "\n".join(csvRows)
|
5101d53de8dd09d8ebe743d77d71bff9aeb26334
| 709,721
|
def qc_curve_group_well(well, tests, keys=None, alias=None):
"""
Run tests on a cohort of curves.
Args:
well (welly.well.Well): Well object.
tests (dict): a dictionary of tests, mapping mnemonics to lists of
tests. Two special keys, `all` and `each` map tests to the set
of all curves, and to each curve in the well, respectively.
You only need `all` if the test involves multiple inputs, e.g.
comparing one curve to another. See example in tests/test_quality.py
keys (list): a list of the mnemonics to run the tests against.
alias (dict): an alias dictionary, mapping mnemonics to lists of
mnemonics. e.g. {'density': ['DEN', 'DENS']}
Returns:
dict. Test results for all the curves.
{curve_name0: {test0: test_result0, ...}, ...}
"""
keys = well._get_curve_mnemonics(keys, alias=alias)
if not keys:
return {}
all_tests = tests.get('all', tests.get('All', tests.get('ALL', [])))
data = {test.__name__: test(well, keys, alias) for test in all_tests}
results = {}
for i, key in enumerate(keys):
this = {}
for test, result in data.items():
this[test] = result[i]
results[key] = this
return results
|
ca745890714e4d58efd9ac8549b43f1fdc346db6
| 574,871
|
def parse_custom_types(types):
"""
    Parses custom types format as sent through the service.
    :param types: custom types JSON
:type types: list
:return: custom types dictionary
:rtype: dict
"""
model_types = {}
for typ in types:
name = typ['name']
model_types[name] = [
x.lower().strip() for x in typ['keywords'].split(',')
]
return model_types
|
127d6ebafd2b06f02ac07f827c6bd5c1ff2bdb32
| 217,373
|
def add(a, b=0):
"""Simple addition function
arguments:
a: value for which addition is defined
b: value for which addition is defined (optional, defaults to 0)
returns:
a + b
"""
return a + b
|
e925943aff77e39a815015e8f7d94f767e033894
| 68,959
|
def format_universe_repo_content_type(universe_version):
""" Formats a universe repo content-type of version `universe-version`
:param universe_version: Universe content type version: "v3" or "v4"
:type universe_version: str
:return: content-type of the universe repo version `universe_version`
:rtype: str
"""
content_type = "application/" \
"vnd.dcos.universe.repo+json;" \
"charset=utf-8;version=" \
+ universe_version
return content_type
|
1314e1b04e9e4773b5fa65a4e1787ed0ee53ac89
| 165,914
|
def noop(x):
"""Does nothing, just returns the input."""
return x
|
22218449a468bda627ffaf1751c6729cb9b0d10a
| 193,391
|
def run(c, *a, **kw):
"""A Context.run or Connection.run with better defaults"""
kw.setdefault("echo", True)
kw.setdefault("pty", True)
kw.setdefault("replace_env", False)
return c.run(*a, **kw)
|
59baf0c45828f81560d9365515c26d4142fa0d10
| 127,769
|
import re
def __build_plural_types(plurals):
"""
Returns the given list of plurals in a format that can be consumed by the template generator
Input is plurals, a dictionary in the following form:
{'one': 'one topping', 'other': '${ice_cream_toppings} toppings', 'zero': 'no toppings'}
Where the keys match one of the CLDR plural categories ('zero', 'one', 'two', 'few', 'many' or 'other')
The return for an input as the above would be:
```
[
{
'plural_name': 'one'
'plural_value': 'one topping'
},
{
'plural_name': 'other'
'plural_value': '%d toppings'
},
{
'plural_name': 'zero'
'plural_value': 'no toppings'
}
]
```
"""
return [{
"plural_name": key,
"plural_value": re.sub('\${.+}', "%d", value) # Substitutes the variable value for the string '%d' because that's what the plist needs.
} for key, value in plurals.items() if value is not None]
|
022425f0e1d7098bcabe8e8adbc3e51464189e42
| 345,639
|
def parsedcommand(obj):
"""
Decorator to flag documented expression commands available to users.
Used with the usage() method.
"""
obj.parsedcommand = True
return obj
|
6bd6e06c61cd2f6443bfc9bf4e176c83691eae46
| 33,064
|
def apply_stats_template(template, top_stats):
"""Apply statistics template. Available keywords are:
* :attr:`POST-COUNT` : The cumulative sum of posts.
* :attr:`POST-REACTION-COUNT` : The cumulative sum of post reactions.
* :attr:`BEST-POST-REACTION` : The posts with the highest reactions.
* :attr:`COMMENT-COUNT` : The cumulative sum of comments.
* :attr:`COMMENT-REACTION-COUNT` : The cumulative sum of comments reactions.
* :attr:`BEST-COMMENT-REACTION` : The comments with the highest reactions.
* :attr:`REPLY-COUNT` : The cumulative sum of replies.
* :attr:`REPLY-REACTION-COUNT` : The cumulative sum of replies reactions.
* :attr:`BEST-REPLY-REACTION` : The replies with the highest reactions.
* :attr:`COMMENT-REPLY-COUNT` : The cumulative sum of replies and comments.
* :attr:`REACTION-COUNT` : The cumulative sum of reactions.
* :attr:`REACTION-AHAH` : The cumulative sum of "AHAH" reactions.
* :attr:`REACTION-LOVE` : The cumulative sum of "LOVE" reactions.
* :attr:`REACTION-CARE` : The cumulative sum of "CARE" reactions.
* :attr:`REACTION-WOW` : The cumulative sum of "WOW" reactions.
* :attr:`REACTION-SAD` : The cumulative sum of "SAD" reactions.
* :attr:`REACTION-ANGER` : The cumulative sum of "ANGER" reactions.
* :attr:`REACTION-LIKE` : The cumulative sum of "LIKE" reactions.
Args:
template (str): The template containing keywords.
top_stats (dict): The top statistics, generated with ``get_top_stats()`` function.
Returns:
str
Example:
>>> template = "The best post is: <<TOP1-BEST-POST-REACTION>>, the second best comment is: <<TOP2-BEST-COMMENT-REACTION>>"
>>> stats = get_top_stats(posts)
>>> apply_stats_template(template, stats)
'The best post is: Léa Ricot, the second best comment is: Jean Neymar'
"""
for stat_category in top_stats.keys():
for i in range(0, 3):
            try:
                stat = top_stats[stat_category][i]["user"]
            except (KeyError, IndexError):
                stat = "None"
template = template.replace(f"<<TOP{i+1}-{stat_category}>>", stat)
return template
|
bd963275a9f5c9f839d0e38daaf7613f70509426
| 327,368
|
def slack_escape(text):
"""
Escapes special characters for Slack API.
https://api.slack.com/docs/message-formatting#how_to_escape_characters
"""
text = text.replace('&', '&')
text = text.replace('<', '<')
text = text.replace('>', '>')
return text
|
9d3d6bc86894f69365973394d0037bdd3eb23ece
| 648,097
|
def get_client_ip_address(request):
"""
Get the client IP Address.
"""
return request.META['REMOTE_ADDR']
|
e1ac554e30a1f95d075807d2073c79fb5489eac4
| 180,014
|
def cronbachs_alpha(data):
"""Cronbach's Alpha
Cronbach's alpha is one of the most widely adopted estimates of a scale's inter-item reliability.
Parameters
----------
data : pandas.DataFrame
df containing the item responses for the scale
"""
k = data.shape[1]
sum_item_var = data.var().sum()
    scale_var = data.sum(axis=1).var()
    return (k / (k - 1)) * (1 - (sum_item_var / scale_var))
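A small sketch with made-up item responses; an alpha near 1 indicates highly consistent items:

import pandas as pd
items = pd.DataFrame({'q1': [2, 4, 3, 5], 'q2': [2, 5, 3, 4], 'q3': [3, 4, 3, 5]})
print(cronbachs_alpha(items))  # ~0.92, since the three items move together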
|
994f8a710ef3949a050ae65edd73fcb04d8539df
| 515,235
|
def is_blank(string):
"""Checks if string is either empty or just whitespace."""
    return not string or string.isspace()
|
51ebb96a68a69f5d1f6dcc66774c9b874ebcd61c
| 262,713
|
def calculate_rate(df, numerator, denominator, rate_per=1000):
"""Creates a rate column for a dataframe with a numerator and denominator column.
Args:
df: measures dataframe
numerator: numerator for rate
denominator: denominator for rate
rate_per: unit for calculated rate
Returns:
Input dataframe with additional rate column
"""
rate = df[numerator]/(df[denominator]/rate_per)
df['rate'] = rate
return df
|
3576750f524678f8870659c42bec48f6d8f898d3
| 357,887
|
def evaluate_training_result(env, agent):
"""
Evaluates the performance of the current DQN agent by using it to play a
    number of episodes (100) of the game and then calculates the average reward it gets.
The higher the average reward is the better the DQN agent performs.
:param env: the game environment
:param agent: the DQN agent
:return: average reward across episodes
"""
total_reward = 0.0
episodes_to_play = 100
for i in range(episodes_to_play):
trajectories, _ = env.run(is_training=True)
# calculate reward
episode_reward = 0.0
for ts in trajectories[0]:
# print(
# 'State: {}, Action: {}, Reward: {}, Next State: {}, Done: {}'.
# format(ts[0], ts[1], ts[2], ts[3], ts[4]))
episode_reward += ts[2]
total_reward += episode_reward
average_reward = total_reward / episodes_to_play
return average_reward
|
4492d7af64174483d63af610e19ac45c6c1c63a5
| 687,270
|
def get_metrics(metrics, loss, losses=None):
"""Structure the metric results
PARAMETERS
----------
metrics: object
Contains statistics recorded during inference
loss: tensor
Loss value
losses: list
List of loss values
RETURNS
-------
metrics_values: dict
"""
metrics_values = dict()
metrics_values['loss'] = loss.item()
if isinstance(losses, list):
metrics_values['loss_ce'] = losses[0].item()
metrics_values['loss_dice'] = losses[1].item()
acc, acc_by_class = metrics.get_pixel_acc_class() # harmonic_mean=True)
prec, prec_by_class = metrics.get_pixel_prec_class()
recall, recall_by_class = metrics.get_pixel_recall_class() # harmonic_mean=True)
miou, miou_by_class = metrics.get_miou_class() # harmonic_mean=True)
dice, dice_by_class = metrics.get_dice_class()
metrics_values['acc'] = acc
metrics_values['acc_by_class'] = acc_by_class.tolist()
metrics_values['prec'] = prec
metrics_values['prec_by_class'] = prec_by_class.tolist()
metrics_values['recall'] = recall
metrics_values['recall_by_class'] = recall_by_class.tolist()
metrics_values['miou'] = miou
metrics_values['miou_by_class'] = miou_by_class.tolist()
metrics_values['dice'] = dice
metrics_values['dice_by_class'] = dice_by_class.tolist()
return metrics_values
|
3b6fff4fc9dcdc9eaf2e10d859f97f63fa9ea6cb
| 98,308
|
def byteLength(n: int) -> int:
"""Returns minimal amount of bytes to write given number
Parameters:
n: int
Returns:
result: int
minimal amount of bytes to write n
"""
return (n.bit_length() + 7) // 8
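Quick checks at the byte boundaries:

print(byteLength(255))  # 1 (fits in one byte)
print(byteLength(256))  # 2 (needs a second byte)
print(byteLength(0))    # 0 (no bits set)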
|
99bf3337763ffdd10af54644aeaf1f58817fc049
| 477,225
|
def get_eccF(r1_norm, r2_norm, c_norm):
"""
Computes the eccentricity component along the chord. This value is kept
constant for all the problem as long as the boundary conditons are not
changed.
Parameters
----------
r1_norm: float
Norm of the initial vector position.
r2_norm: float
Norm of the final vector position.
c_norm: float
Norm of the chord vector.
Returns
-------
ecc_F: float
Eccentricity component along the chord direction.
Notes
-----
Equation (3) from Avanzini's report [1].
"""
ecc_F = (r1_norm - r2_norm) / c_norm
return ecc_F
|
97d5e8215c65104aa7030e653967fda6e62a1c85
| 414,552
|
def eh_menor_que_essa_quantidade_de_caracters(palavra: str, quantidade: int) -> bool:
"""
Função para verificar se a string é menor que a quantidade de caracters informados
@param palavra: A palavra a ser verificada
@param quantidade: A quantidade de caracters que deseja verificar
@return: Retorna True em caso da palavra seja menor que a quantidade de caracters e False em caso negativo
"""
tamanho = len(palavra)
eh_menor = False
if tamanho < quantidade:
eh_menor = True
return eh_menor
|
827469606b0b93b78b63686465decbbbc63b9673
| 3,535
|
def validate_root_domain(items, root_domains):
""" Filters a list of potential FQDN's by cross checking the domain with IANA's list of valid root domains.
:param items: List of FQDN strings
:param root_domains: Path to the root domains file.
:return: A filtered List of FQDN strings
"""
valid = []
with open(root_domains, 'r') as f:
roots = [i.lower().strip('\n') for i in f]
for hostname in items:
root = hostname.split('.')[-1]
if root in roots:
valid.append(hostname)
del roots
del items
return valid
|
c59ed3964e388798861975ca0b1b978da0ae670d
| 231,513
|
def ensure_iterable(obj):
"""
Ensures that the object provided is a list or tuple and wraps it if not.
"""
if not isinstance(obj, (list, tuple)):
obj = (obj,)
return obj
|
f26dce0b19f5428179e0b8af058e675f81540edd
| 685,508
|
def _split_last(s, sub):
"""Splits string `s` at the last occurrence of substring `sub` and returns a
tuple of the form (left, right)."""
return (sub.join(s.split(sub)[:-1]), s.split(sub)[-1])
|
54bdcc8793138c6a7bbbb23ec5f7bc954192ca86
| 148,558
|
from pathlib import Path
def format_relative_dirname(directory: Path, base_directory: Path) -> str:
"""Formats a relative directory path in a way that's compatible with the
presigned POST URLs.
Parameters
----------
directory : `pathlib.Path`
The directory to compute a relative path/name for.
base_directory : `pathlib.Path`
The base directory.
Returns
-------
name : `str`
The relative directory name.
Examples:
- ``"base/`` relative to ``"/base/"`` is ``"/"``.
- ``"base/a/`` relative to ``"/base/"`` is ``"a/"``.
- ``"base/a/b`` relative to ``"/base/"`` is ``"a/b/"``.
"""
name = str(directory.relative_to(base_directory))
if name == ".":
return "/"
elif not name.endswith("/"):
return name + "/"
else:
return name
|
bdc998a87ed995aecd32ebf4062b6da33df4e095
| 486,403
|
def hmsm_to_days(hour=0, min=0, sec=0, micro=0):
"""
Convert hours, minutes, seconds, and microseconds to fractional days.
"""
    days = sec + (micro / 1.e6)  # seconds, including fractional microseconds
    days = min + (days / 60.)    # minutes
    days = hour + (days / 60.)   # hours
    return days / 24.            # fraction of a day
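A couple of worked values: noon is half a day, and 6 h 30 min is 6.5 / 24:

print(hmsm_to_days(hour=12))         # 0.5
print(hmsm_to_days(hour=6, min=30))  # 0.2708333... (= 6.5 / 24)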
|
127cc131b6a1ee31f411fc9493601dc6c09fe786
| 661,041
|
def delta_percent(decimals=1):
"""A delta formatter to display the delta as a float with a given number of decimals.
Args:
decimals: The number of decimals to display.
Returns:
A delta formatter function (f(a,b)) returning (b-a)/a displayed as a percentage.
"""
return (lambda a, b: '{:+.{prec}f}%'.format(100.0 * (b - a) / a, prec=decimals))
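A short usage sketch; the returned formatter takes (old, new) and renders the signed relative change:

fmt = delta_percent(decimals=1)
print(fmt(100, 110))  # '+10.0%'
print(fmt(80, 60))    # '-25.0%'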
|
a8c04264ab6f6289fce8642822254afb8155054a
| 645,771
|