content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
|---|---|---|
def bts(boolean, y="Y", n="N"):
"""
Converts a boolean value to a string
:param boolean: The boolean to be converted
:param y: [string] the value to be returned if boolean is True
:param n: [string] the value to be returned if boolean is False
:return [string]:
"""
if boolean:
return y
return n
|
80ed95dba181779dbd8fa3d2536d6dd091100a8b
| 519,160
|
def get_base_worker_instance_name(experiment):
"""GCE will create instances for this group in the format
"w-|experiment|-$UNIQUE_ID". 'w' is short for "worker"."""
return 'w-' + experiment
|
81ecd22a33608e1c2aafb97905684311c6b33241
| 686,047
|
def _update_allow(allow_set, value):
"""
Updates the given set of "allow" values. The first time an update to the
set occurs, the value(s) are added. Thereafter, since all filters are
implicitly AND'd, the given values are intersected with the existing allow
set, which may remove values. At the end, it may even wind up empty.
Args:
allow_set: The allow set, or None
value: The value(s) to add (single value, or iterable of values)
Returns:
The updated allow set (not None)
"""
adding_seq = hasattr(value, "__iter__") and \
not isinstance(value, str)
if allow_set is None:
allow_set = set()
if adding_seq:
allow_set.update(value)
else:
allow_set.add(value)
else:
# strangely, the "&=" operator requires a set on the RHS
# whereas the method allows any iterable.
if adding_seq:
allow_set.intersection_update(value)
else:
allow_set.intersection_update({value})
return allow_set
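# Usage sketch (illustrative): the first call seeds the allow set; later
# calls intersect with it and may shrink it.
allow = _update_allow(None, ["a", "b", "c"])
allow = _update_allow(allow, ["b", "c", "d"])
assert allow == {"b", "c"}
allow = _update_allow(allow, "b")
assert allow == {"b"}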
|
f4535847f4190b590de36efd23827ef2df1baca3
| 413,375
|
from pathlib import Path
def add_suffix(name: str, suffix: str) -> str:
"""Add suffix to app."""
app = Path(name)
return f"{app.stem}{suffix}{app.suffix}"
|
af11fa342185daa96af4ac2c36511f6a6a16678e
| 540,111
|
def build_shorthand_dict(sh_list, deck):
"""creates a dictionary for conversion from shorthand to standard notation"""
sh_dict = {}
for i in range(52):
sh_dict[sh_list[i]] = deck[i]
return sh_dict
|
adbe31654fb9765e45cadf7122e15bfe5147989b
| 207,401
|
def _str2rng(rngstr, keeporder=True, rebin=None):
""" simple utility to convert a generic string representing a
compact list of scans to a sorted list of integers
Parameters
----------
rngstr : string with given syntax (see Example below)
keeporder : boolean [True], to keep the original order
keeporder=False turns the result into a sorted list
rebin : integer [None], force rebinning of the final range
Example
-------
> _str2rng('100, 7:9, 130:140:5, 14, 16:18:1')
> [100, 7, 8, 9, 130, 135, 140, 14, 16, 17, 18]
> _str2rng('100, 7:9, 130:140:5, 14, 16:18:1', keeporder=False)
> [7, 8, 9, 14, 16, 17, 18, 100, 130, 135, 140]
"""
_rng = []
for _r in rngstr.split(', '): #the space is important!
if (len(_r.split(',')) > 1):
raise NameError("Space after comma(s) is missing in '{0}'".format(_r))
_rsplit2 = _r.split(':')
if (len(_rsplit2) == 1):
_rng.append(_r)
elif (len(_rsplit2) == 2 or len(_rsplit2) == 3):
if len(_rsplit2) == 2 :
_rsplit2.append('1')
if (_rsplit2[0] == _rsplit2[1]):
raise NameError("Wrong range '{0}' in string '{1}'".format(_r, rngstr))
if (int(_rsplit2[0]) > int(_rsplit2[1])):
raise NameError("Wrong range '{0}' in string '{1}'".format(_r, rngstr))
_rng.extend(range(int(_rsplit2[0]), int(_rsplit2[1])+1, int(_rsplit2[2])))
else:
raise NameError('Too many colons in {0}'.format(_r))
#create the list and return it (removing the duplicates)
_rngout = [int(x) for x in _rng]
if rebin is not None:
try:
_rngout = _rngout[::int(rebin)]
except (ValueError, TypeError):
raise NameError("Wrong rebin={0}".format(rebin))
def uniquify(seq):
# Order preserving uniquifier by Dave Kirby
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
if keeporder:
return uniquify(_rngout)
else:
return list(set(_rngout))
|
acd12f8ebed2fb37942b6835f636f962e461d8f5
| 354,173
|
def to_uid(atom):
"""Return a unique identifier for an Atom."""
return atom.id_string()
|
31549cf036f78ce2b73cb526e0857d9d5afdb49a
| 516,255
|
def plural(length, label, suffix='s'):
"""Return a label with an optional plural suffix"""
return '%d %s%s' % (length, label, suffix if length != 1 else '')
|
4387cf1a5162be4d96910931365b039718177510
| 228,934
|
import json
def is_json_compat(value):
"""
Check that the value is either a JSON decodable string or a dict
that can be encoded into a JSON.
Raises ValueError when validation fails.
"""
try:
value = json.loads(value)
except ValueError as e:
raise ValueError('JSON decoding error: ' + str(e))
except TypeError:
# Check that the value can be serialized back into json.
try:
json.dumps(value)
except TypeError as e:
raise ValueError(
'must be a JSON serializable object: ' + str(e))
if not isinstance(value, dict):
raise ValueError(
'must be specified as a JSON serializable dict or a '
'JSON deserializable string'
)
return True
|
405ca22c414f52bb50abc9e4cb76e5b0d5df753a
| 358,731
|
def teacher(i, K, categories, output_activations):
"""
Function that calculates the feedback in learning, which is supplied in the form of teacher values (equation 4b in
[Krus92]_)
Parameters
----------
i : int
Stimulus ID or stimulus number
K : int
Category number
categories : list
This is the list that indicates which stimulus belongs to which category. For example, out of 5 stimuli, if
stimuli #0, #2, #4 belongs to category 0 and the rest belongs to category 1, then
categories_Idx = [[0,2,4],[1,3]]
output_activations : list
This is the list containing the activations of each node in the output category layer
Returns
-------
float
Feedback of the model's performance to the current stimulus in the form of a value that is used in the learning
phase.
"""
num_categories = len(categories)
correct_category = 0
for k in range(num_categories):
if i in categories[k]:
correct_category = k
if correct_category == K:
return max(1, output_activations[K])
else:
return min(-1, output_activations[K])
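# Usage sketch (illustrative values): stimulus 0 belongs to category 0, so
# the "humble teacher" value is at least +1 for the correct category node
# and at most -1 for the other one.
example_categories = [[0, 2, 4], [1, 3]]
example_activations = [0.3, -0.2]
assert teacher(0, 0, example_categories, example_activations) == 1
assert teacher(0, 1, example_categories, example_activations) == -1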
|
b468b6015428190d8a9eb0998a25065e386dd722
| 472,653
|
def steps_recursive(n, steps = None, line = ""):
"""Return a list of "n" progression steps, using recursion."""
if steps is None:
steps = []
if len(steps) == n:
return steps
if len(line) == n:
steps.append(line)
return steps_recursive(n, steps)
line += "#" if len(line) <= len(steps) else " "
return steps_recursive(n, steps, line)
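# Usage sketch (illustrative): each step is an n-character string whose
# leading run of '#' grows by one per step, padded with spaces.
assert steps_recursive(4) == ["#   ", "##  ", "### ", "####"]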
|
5a6b68c73b083d35a3ffebe7a046f0cde77df9d3
| 388,282
|
from typing import Union
from pathlib import Path
from typing import List
def get_related_files(
tested_configuration_file: Union[str, Path], suffix_filter: str
) -> List[Path]:
"""Return all the file related to a test conf file endind with a suffix."""
conf_path = Path(tested_configuration_file)
return [
p
for p in conf_path.parent.iterdir()
if str(p.stem).startswith(conf_path.stem) and str(p).endswith(suffix_filter)
]
|
e630d21ac72cda00dc1b25f9ee7e835bcc78cc76
| 370,092
|
import torch
def quaternion_to_axis_angle(quaternions):
"""
Convert rotations given as quaternions to axis/angle.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
half_angles = torch.atan2(norms, quaternions[..., :1])
angles = 2 * half_angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
return quaternions[..., 1:] / sin_half_angles_over_angles
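# Usage sketch (illustrative): a 90-degree rotation about the z axis, given
# as the quaternion (cos(pi/4), 0, 0, sin(pi/4)), maps to the axis-angle
# vector (0, 0, pi/2).
import math
q = torch.tensor([math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)])
aa = quaternion_to_axis_angle(q)
assert torch.allclose(aa, torch.tensor([0.0, 0.0, math.pi / 2]), atol=1e-4)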
|
acb770283d63e45c9a53b8ed04d7400200cefd60
| 222,764
|
def split(l, counts):
"""
>>> split("hello world", [])
['hello world']
>>> split("hello world", [1])
['h', 'ello world']
>>> split("hello world", [2])
['he', 'llo world']
>>> split("hello world", [2,3])
['he', 'llo', ' world']
>>> split("hello world", [2,3,0])
['he', 'llo', ' wo', 'rld']
>>> split("hello world", [2,-1,3])
['he', 'llo world']
"""
res = []
saved_count = len(l) # count to use when encoutering a zero
for count in counts:
if not l:
break
if count == -1:
break
if count == 0:
while l:
res.append(l[:saved_count])
l = l[saved_count:]
break
res.append(l[:count])
l = l[count:]
saved_count = count
if l:
res.append(l)
return res
|
2703b713569e8628021b142ff1bbef2bb66c283a
| 97,260
|
def _datetime(value):
"""Return datetime object."""
return value
|
c4b29cb1f4b719eb695f3ef54b55bb81b56a4abe
| 437,970
|
def remove_duplicate_char(input_string):
"""
returns an unordered string without duplicate characters
"""
return "".join(set(input_string))
|
34cb224afdc9d98d16a419eb2782b19217ddc011
| 200,351
|
from collections import OrderedDict
def combine(**kwargs):
"""Generate list of combinations based on keyword arguments.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = combine(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
return [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
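# Usage sketch (illustrative): list-valued options are expanded into the
# cross product; scalar options are treated as single-element lists.
combos = combine(mode=["a", "b"], size=2)
assert len(combos) == 2
assert combos[0] == OrderedDict([("mode", "a"), ("size", 2)])
assert combos[1] == OrderedDict([("mode", "b"), ("size", 2)])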
|
92449d1fa7f0ca0666495487aec260243c9e1b7f
| 232,758
|
def strip_whitespace(tokens):
"""Remove whitespace at the beggining and end of a token list.
Whitespace tokens in-between other tokens in the list are preserved.
:param tokens:
A list of :class:`~.token_data.Token` or
:class:`~.token_data.ContainerToken`.
:return:
A new sub-sequence of the list.
"""
for i, token in enumerate(tokens):
if token.type != 'S':
break
else:
return [] # only whitespace
tokens = tokens[i:]
while tokens and tokens[-1].type == 'S':
tokens.pop()
return tokens
|
df5c9665cbfa48d80cb6cde87619d04bebe12435
| 215,629
|
import torch
def compute_cross_attention(x, y, sim):
"""Compute cross attention.
x_i attend to y_j:
a_{i->j} = exp(sim(x_i, y_j)) / sum_j exp(sim(x_i, y_j))
y_j attend to x_i:
a_{j->i} = exp(sim(x_i, y_j)) / sum_i exp(sim(x_i, y_j))
attention_x = sum_j a_{i->j} y_j
attention_y = sum_i a_{j->i} x_i
Args:
x: NxD float tensor.
y: MxD float tensor.
sim: a (x, y) -> similarity function.
Returns:
attention_x: NxD float tensor.
attention_y: NxD float tensor.
"""
a = sim(x, y)
a_x = torch.softmax(a, dim=1) # i->j
a_y = torch.softmax(a, dim=0) # j->i
attention_x = torch.mm(a_x, y)
attention_y = torch.mm(torch.transpose(a_y, 1, 0), x)
return attention_x, attention_y
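# Usage sketch (illustrative): with a dot-product similarity, each row of
# attention_x is a convex combination of the rows of y, and vice versa.
x_feat = torch.randn(4, 8)
y_feat = torch.randn(6, 8)
att_x, att_y = compute_cross_attention(x_feat, y_feat, lambda a, b: a @ b.t())
assert att_x.shape == (4, 8) and att_y.shape == (6, 8)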
|
1f51664ead660e1af882b5434ce3671c8b930829
| 249,403
|
def update_sequence(s, n, x):
"""Return a tuple copy of s with the nth element replaced by x."""
t = tuple(s)
if -len(t) <= n < len(t):
return t[0:n] + (x,) + t[n + 1 : 0 if n == -1 else None]
else:
raise IndexError("sequence index out of range")
|
fa0ac3308e2a73ec2bd6bbe39a4a689b7d68c65e
| 461,139
|
import re
def get_skipped_reason(data):
"""Return test case skip reason from report string.
Args:
data(str): test case report
Returns:
str: skip reason or None
"""
try:
reason_rules = re.compile("Skipped:(.*?)..$")
return ('\n'.join(reason_rules.findall(data))).strip()
except TypeError:
return None
|
fc20f07b80477ca3252adaee2f5e8269ffda74ca
| 677,157
|
def _bom_free_version(encoding: str) -> str:
""" Given an utf encoding, returns a BOM-free version of it (little-endian version) """
def utf_aliases(num):
return {fmt % num for fmt in ['u%d', 'utf%d', 'utf-%d', 'utf_%d']}
encoding = encoding.lower()
if encoding in utf_aliases(16):
return 'utf_16_le'
if encoding in utf_aliases(32):
return 'utf_32_le'
return encoding
|
f3a7392b61a3dde1b1b33a6af067edf528471036
| 176,206
|
def in_cell_neighborhood(tau,cycle,cycle_eq):
"""
Returns True if cycle_eq_val lies in the neighborhood of loop characteristic
cell tau projected onto the cycle directions and False otherwise.
:param tau: Cell object
:param cycle: tuple defining a cycle. The cycle is defined by cycle[0]->cycle[1] ->...->cycle[n]->cycle[0]
:param cycle_eq: tuple of the form (eq_val,stable)
:return: bool
"""
eq_val = cycle_eq[0]
for j in range(len(cycle)):
if j < len(cycle) - 1:
jplus1 = j + 1
else:
jplus1 = 0
left = tau.theta_rho_minus(cycle[j])
right = tau.theta_rho_plus(cycle[j])
if eq_val[j] < left or eq_val[j] > right:
return False
return True
|
40cf7145ec6f6433881c341e3587ac080329191c
| 389,193
|
def shaveLF(inLF, border=(3, 3)):
"""
Shave the input light field in terms of a given border.
:param inLF: input light field of size: [H, W, S, T, C]
:param border: border values
:return: shaved light field
"""
h_border, w_border = border
if (h_border != 0) and (w_border != 0):
shavedLF = inLF[h_border:-h_border, w_border:-w_border, ...]
elif (h_border != 0) and (w_border == 0):
shavedLF = inLF[h_border:-h_border, :, ...]
elif (h_border == 0) and (w_border != 0):
shavedLF = inLF[:, w_border:-w_border, ...]
else:
shavedLF = inLF
return shavedLF
|
55e4e763c2f11ce81852884c0af7f25fa07422a2
| 481,874
|
def one_hot(length, current):
"""
Standard one hot encoding.
>>> one_hot(length=3,current=1)
[0, 1, 0]
"""
assert length > current
assert current > -1
code = [0] * length
code[current] = 1
return code
|
e0a9867decc6809335a6ac8760364aebdbc49a84
| 309,121
|
def subst_string(s,j,ch):
""" substitutes string 'ch' for jth element in string """
res = ''
ls = list(s)
for i in range(len(s)):
if i == j:
res = res + ch
else:
res = res + ls[i]
return res
|
049be9c19e090ecfdfc1a01c71f3b06deaeb74b6
| 210,838
|
def V(x):
"""
potential energy function
use units such that m = 1 and omega_0 = 1
"""
return 0.5 * pow(x, 2.0)
|
1969edd5447657096353eda96cb281da68383e8f
| 670,635
|
def run_mean(arr, n=10, dim="time", center=True, **kwargs):
"""Simple running average along a dimension."""
return arr.rolling(**{dim: n}, center=center, **kwargs).mean().dropna(dim)
|
c7b2ef778ec2b51d17069b6cf96b93189f4fa279
| 346,314
|
def _double_quotes(unquoted):
"""
Display String like redis-cli.
escape inner double quotes.
add outer double quotes.
:param unquoted: list, or str
"""
if isinstance(unquoted, str):
# escape double quote
escaped = unquoted.replace('"', '\\"')
return f'"{escaped}"' # add outter double quotes
elif isinstance(unquoted, list):
return [_double_quotes(item) for item in unquoted]
|
3475f7290a1f94c6b92f260d415e397d77cd7a69
| 269,151
|
def _non_framed_body_length(header, plaintext_length):
"""Calculates the length of a non-framed message body, given a complete header.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int plaintext_length: Length of plaintext in bytes
:rtype: int
"""
body_length = header.algorithm.iv_len # IV
body_length += 8 # Encrypted Content Length
body_length += plaintext_length # Encrypted Content
body_length += header.algorithm.auth_len # Authentication Tag
return body_length
|
f7126c65152272a7a6c822eb3102861f97450b17
| 461,801
|
def get_cate(model, dataset, k=10):
"""Compute the CATE for k-fold validation
Arguments
---------
dataset: BaseDataSet
Dataset to train and validate on.
model: BaseModel
Model to train and validate with.
k: int
Number of folds
Returns
-------
results: list[tuple]
A set of results for each of the folds, where one result consists of
a list of cate estimates and a list of indices to which the results
belong, which together form a tuple.
"""
results = []
for train_data, test_data in dataset.kfolds(k=k):
model.train(model.preprocess(train_data.standard_df))
cur_cate = model.predict_cate(test_data.standard_df)
results.append((cur_cate, test_data.idx))
return results
|
99bfa1b5f895e2b484bcfcce3646cd4cdb94cfd9
| 291,885
|
def derenv_square(t, t_pulse, t_start=0, **not_needed_kwargs):
"""
Time derivative of the envelope for a square pulse.
Trivial, for consistency with other derivative functions only.
See Also
--------
envelope_square
"""
return 0
|
86a05e730a4dd7de119c8520d6db34668ab82797
| 301,635
|
def split(slicable, fraction):
"""
splits data into test-train set or dev-validation set; does not shuffle.
params: slicable - an object that responds to len() and [], works on dataframes
fraction - a value between 0 and 1
returns: (x, y) - where x has (1-fraction) percent entries and y has the rest
"""
partition = int(len(slicable) * (1.0 - fraction))
return (slicable[:partition], slicable[partition:])
|
0ce9981b58c1a5f3958e3ab31bf4d61247f74168
| 317,200
|
def format_error_report(shift_errors):
"""
Format the error report for Slack
"""
error_report = ''
for shift in shift_errors:
error_report += 'Shift: {}\n'.format(shift['shift']['start_dt'])
for error in shift['errors']:
error_report += ' {}\n'.format(error)
for warning in shift['warnings']:
error_report += ' {}\n'.format(warning)
error_report += '\n'
return error_report
|
8b2684b748165b78791937521cd05f588a2dffb5
| 320,385
|
import re
def replace_spaces(some_string):
""" Substitute spaces with underscores"""
return re.sub(" ", "_", some_string)
|
23f6bb9079ec92afc97f1da1aa0c0eec8af512c8
| 665,548
|
def _filter_data_by_labels(X, y, label_filters):
"""
Filter out features and labels based on label
:param X: Features
:param y: Labels
:param label_filters: Labels to use
:return: Features and labels that have been a label in label_filters
"""
assert len(y) == len(X)
x_ys = zip(X, y)
return zip(*[x_y for x_y in x_ys if x_y[1] in label_filters])
|
732d9e643fcf97d33b64823b2f8bcbdab5e4d581
| 591,191
|
def fizz_buzz(number):
"""Returns 'Fizz' if a number is divisible by 3, 'Buzz' if a number is divisible by 5
'FizzBuzz' if a number is divisible by both 3 and 5
otherwise it returns the number itself
"""
if number % 3 == 0 and number % 5 == 0:
return "FizzBuzz"
elif number % 3 == 0:
return "Fizz"
elif number % 5 == 0:
return "Buzz"
else:
return number
|
d6ec257c38dbb841d13be1917cec1da2a6bd627f
| 592,078
|
def get_string_after_n_space(text, n):
"""
Method to return string after the nth space
Input --> 'test1 test2 test3', 1
Output --> test2 test3
Input --> 'test1 test2 test3', 2
Output --> test3
:param text:
:param n:
:return: string after nth space
"""
return text.split(' ', n)[-1].strip()
|
99abae9dc2f24d1b26999d6e401b7fc56ec584ad
| 42,579
|
import functools
def prepost_method(method_name, *method_args, **method_kwargs):
"""Decorators that call an instance method before and after another."""
def prepost_method_decorator(instance_method):
"""Call an instance method before and after the decorated method."""
@functools.wraps(instance_method)
def wrapper(self, *args, **kwargs):
getattr(self, method_name)(*method_args, **method_kwargs)
output = instance_method(self, *args, **kwargs)
getattr(self, method_name)(*method_args, **method_kwargs)
return output
return wrapper
return prepost_method_decorator
|
5dc541f80eb9f4150e07763c83683c4979585020
| 142,980
|
def get_absolute_uri_no_query(request):
"""
Returns the absolute URI of the request with no query parameters.
:param request: The request
:return: The URI of the request page
"""
return request.build_absolute_uri(request.path)
|
59fb6554f971924c3ed3e4013d06cc993abd3f06
| 413,989
|
def cree_matrice(taille_matrice,defaut=0):
"""
Function that returns a taille_matrice[0]×taille_matrice[1] matrix filled with a default value
"""
matrice=[]
for k in range(taille_matrice[0]):
matrice.append([])
for _ in range(taille_matrice[1]):
matrice[k].append(defaut)
return matrice
|
b92365838f563d2a1b6adbb0ab873ee54f5b66fb
| 305,606
|
import json
def load_result(json_file):
"""Load contents of a json file."""
with open(json_file, "rb") as f:
return json.load(f)
|
c281a61d78a47befd38079e4696e3add77d7e289
| 488,250
|
def rgb2rgba(rgb):
"""Take a row of RGB bytes, and convert to a row of RGBA bytes."""
rgba = []
for i in range(0, len(rgb), 3):
rgba += rgb[i:i+3]
rgba.append(255)
return rgba
|
5a8107649a2442fb3a13e7e167a5cecc0ee57e29
| 136,006
|
import struct
def floating_point(value):
"""Encode a floating point value.
:param float value: Value to encode
:rtype: bytes
"""
if not isinstance(value, float):
raise TypeError('float type required')
return struct.pack('>f', value)
|
188885c4c1f6d5e80b168d64cfada56debaee365
| 342,308
|
from typing import List
def check_if_duplicates(listOfElems:List[str]) -> bool:
"""Checks if list has any duplicates
Args:
listOfElems (List[str]): List of strings
Returns:
bool: True = duplicates present, False = no duplicates
"""
if len(listOfElems) == len(set(listOfElems)):
return False
else:
return True
|
bfc4a6aa438296c6f65589ad557e5869cb585746
| 301,104
|
def proportion_common_words(topics, topk=10):
"""
compute the proportion of common (non-unique) words across topics
Parameters
----------
topics: a list of lists of words
topk: top k words on which the topic diversity will be computed
Returns
-------
pcw : proportion of common words
"""
if topk > len(topics[0]):
raise Exception('Words in topics are less than '+str(topk))
else:
unique_words = set()
for topic in topics:
unique_words = unique_words.union(set(topic[:topk]))
pcw = 1 - (len(unique_words) / (topk * len(topics)))
return pcw
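# Usage sketch (illustrative): the two topics share 2 of their top-3 words,
# so a third of the word slots are common (repeated) across topics.
example_topics = [["cat", "dog", "fish"], ["cat", "dog", "bird"]]
assert abs(proportion_common_words(example_topics, topk=3) - 1 / 3) < 1e-9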
|
ea93dfcd14ed963855184c9b0356c01a359b31d6
| 537,130
|
from typing import Tuple
def split_namespace(s: str) -> Tuple[str, str]:
""" Splits a namespace and name into their parts
Parameters
----------
s: str
string to be split
Returns
-------
(tuple)
namespace: str
name: str
"""
split = s.split('.')
name = split.pop()
namespace = '.'.join(split)
return (namespace, name)
|
118f48dfa3ef560c766efa81bf042762245a6218
| 641,890
|
def rate(hit, num):
"""Return the fraction of `hit`/`num`, as a string."""
if num == 0:
return "1"
else:
return "%.4g" % (float(hit) / num)
|
d3a502fe286257971fbc1d00605b6c65659c5745
| 587,057
|
def bind_to_parent_array(t, a):
"""Binds tree to array a, returning result in list.
Takes as input tree t with _leaf_index set.
Returns list of (node_row, parent_row) such that node_row points to the
row of a that corresponds to the current row, and parent_row points to
the row of the parent.
Order will be preorder traversal, i.e. for propagating attributes from
the root to the tip.
Typical usage of this function is to set up an array structure for many
preorder traversals on the same tree, especially where you plan to change
the data between traversals.
"""
result = []
for n in t.traverse(self_before=True, self_after=False):
if n is not t:
result.append([a[n._leaf_index], a[n.Parent._leaf_index]])
return result
|
7b689a5115e63b9244e788ec923269e9b64bb742
| 357,879
|
def num2coords(i, gridwidth=10):
"""
Coordinates of variable i in a Gibbs grid
Parameters
----------
i : int
Position of variable in ordering
gridwidth : int
Width of the grid
Returns
-------
length_coord, width_coord (coordinates)
"""
length_coord = i % gridwidth
width_coord = i // gridwidth
return int(length_coord), int(width_coord)
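# Usage sketch (illustrative): variable 23 on a 10-wide grid sits at
# position 3 along the row and in row 2.
assert num2coords(23, gridwidth=10) == (3, 2)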
|
5c245cd8f041f60d46aadae96d3a9b313162b21e
| 325,572
|
from typing import Union
import ast
def is_relative_import(node: Union[ast.Import, ast.ImportFrom]) -> bool:
"""Return `True` if the given import is a relative import."""
if not isinstance(node, ast.ImportFrom):
return False
return node.level != 0
|
d449485e664080e069702f6ee3183afc6e632391
| 359,749
|
def screenRegion(gfx, region=(0.0, 0.0, 1.0, 1.0)):
"""(gfx, 4-tuple of floats) -> (4-tuple of ints)
Determine the absolute coordinates of a screen region from its
relative coordinates (coordinates from 0.0 to 1.0)
"""
w, h = gfx.getSize()
x1 = (w - 1) * region[0]
y1 = (h - 1) * region[1]
x2 = (w - 1) * region[2]
y2 = (h - 1) * region[3]
if type(w) == type(1):
x1 = int(x1 + 0.5)
y1 = int(y1 + 0.5)
x2 = int(x2 + 0.5)
y2 = int(y2 + 0.5)
return (x1, y1, x2, y2)
|
35ef5e208bc1cd6279adaf2fef6b7dfc74830dfe
| 41,119
|
def show_notice(notice):
"""Shows a notice given by the string 'notice'"""
return { 'notice': notice }
|
3baea9757a19c21a1f757c504af9c99c6c0c7353
| 341,461
|
def quote(string):
""" string -> 'string' """
return "'" + str(string) + "'"
|
69aed8494786db1ee012b3f9af4db7827df0c70f
| 519,245
|
def old(sym):
""" Return the "old" version of symbol "sym", that is, the one
representing "sym" in the pre_state.
"""
return sym.prefix('old_')
|
4cc24bd0448195ee1c373106679177ce428e5937
| 699,139
|
def tostring(barray):
"""Convert a byte array to string in utf-8 noop if already a string."""
if isinstance(barray, str):
return barray
if isinstance(barray, bytes):
# return str(barray, 'utf-8', errors='replace') # Python 3
return barray.decode(encoding="utf-8", errors="replace")
|
91b45a715184653499ae5054d6e9d1e1a8563d85
| 609,965
|
def ideal_buy_and_sell_days(arr):
"""
given an array representing stock prices on a daily basis,
determine the best days to buy and sell
:param arr: an array representing a series of stock prices
:return: a list containing the index of an ideal buy day and the index of an ideal selling day
"""
# the first element is always the lowest price at the start
trailing_low_index = 0
best_buy_index = 0
best_profit = 0
best_sell_index = None
for i in range(len(arr)):
# found a new low price, obviously not a buying day
if arr[i] < arr[trailing_low_index]:
trailing_low_index = i
continue
# never sell at a loss
if arr[i] < arr[trailing_low_index]:
continue
# new best trading days found
test_profit = arr[i] - arr[trailing_low_index]
if test_profit > best_profit:
best_buy_index = trailing_low_index
best_sell_index = i
best_profit = test_profit
return [best_buy_index, best_sell_index]
|
2fb40d5016ca3473682ad9ba2d5943c7de561869
| 279,224
|
import json
def format_charts(data):
"""Form chart POST data for JSON usage within db.
Args:
data (dict): The request.form data to format.
Returns:
modules (list): A list of json-decoded dictionaries.
"""
modules = []
for item in data:
if item.startswith('module_'):
val_json = json.loads(data[item])
modules.append(val_json)
return modules
|
79f7bc9430d292fcb071f29d8d34e9988a26b467
| 556,161
|
def align_origin(origin, w, h, align=('left','top')):
"""Calculates size of text box and returns an origin as if aligned
from the given origin.
Accepted alignments are:
left, center, right
top, center, bottom"""
if align==('left','top'):
return origin
aligns = {'left': 0, 'center': .5, 'right': 1, 'top': 0, 'bottom': 1}
ox, oy = origin
x_align, y_align = align
return (ox - (w * aligns[x_align]), oy + (h * aligns[y_align]))
|
2fbc673a309c73177251107016e7e2c8a42f469e
| 634,815
|
def get_de_genes_to_plot(markers_df, lfc_thresh=1, padj_thresh=0.1, n_to_plot=5):
"""
Top DiffExp Genes.
Return as dict for easy plotting with sc.pl.dotplot.
"""
markers_df = markers_df[
(markers_df['logfoldchanges']>=lfc_thresh) &
(markers_df['pvals_adj']<=padj_thresh)
].groupby("group").head(n_to_plot)
return markers_df.groupby("group").agg(list)['names'].to_dict()
|
3f69a0e6560d50132d37b431caefa8af2672ba0a
| 235,893
|
def scaleFromRedshift(redshift):
"""
Converts a redshift to a scale factor.
:param redshift: redshift of the object
:type redshift: float or ndarray
:return: scale factor
:rtype: float or ndarray
"""
return 1. / (redshift + 1.)
|
99501af0f5a2005ed952e8d01ad7533d02bfdf22
| 645,961
|
def remove_the_last_person(queue):
"""Remove the person in the last index from the queue and return their name.
:param queue: list - names in the queue.
:return: str - name that has been removed from the end of the queue.
"""
return queue.pop()
|
a40b2ca7232d4807073df0d9cb891aa13daa9f63
| 561,519
|
def dss_is_geo(dss_schema):
"""
Check if the input dataset contains a geopoint (DSS storage type) column
If so, specific processing will be applied later on
:param dss_schema: schema of a dss dataset
>>> dss_schema = {"columns": [{"name": "customer_id", "type": "bigint"}]}
:return Boolean{input dataset contains at least one geopoint column}
"""
for column in dss_schema['columns']:
if column['type'] == 'geopoint':
return True
return False
|
ae90babcbba8f7815485ce78ea6e97e531249057
| 614,257
|
import math
def dist(cosv,sinv,cordv):
"""Intermidiate scalar value for calculating sinus and cosinus of vectors
Parameter
---------
cosv, sinv : The vectors returned from pairCS(num)
cordv : Vector to operate on. All inputs must have same length
and is expected to be "flat" (horisontal) numpy ndarrays
Returns
-------
Scalar value to assist in other calculations"""
return math.sqrt((cosv @ cordv.reshape(-1,1))**2+(sinv @ cordv.reshape(-1,1))**2)
|
1f7dba5df1806fd184f6b5517a0da2b713044663
| 444,392
|
def create_description(name, args=None):
"""Create description from name and args.
Parameters
----------
name : str
Name of the experiment.
args : None or argparse.Namespace
Information of the experiment.
Returns
-------
description : str
Entire description of the experiment.
"""
description = f"experiment_name: {name} \n"
if args is not None:
for k, v in vars(args).items():
description += f"{k:<32}: {v} \n"
return description
|
968526c3f08cae2891358b92684b9cbf7e36866c
| 502,267
|
def crop_img_arr(img_arr, bbox):
"""Crop bounding box from image.
Parameters
----------
img_arr
Image in array format
bbox
Coordinates of bounding box to crop
Returns
-------
img_arr
Cropped image
"""
return img_arr[bbox[0] : bbox[1], bbox[2] : bbox[3], :]
|
7c0b3b2e894b4e43d24e196f5d11eba6da4a83b5
| 70,373
|
def match_with_batchsize(lim, batchsize):
"""
Function used by modify_datasets below to return the largest integer not exceeding lim
that is a multiple of batchsize, i.e., lim % batchsize == 0.
"""
if lim % batchsize == 0:
return lim
else:
return lim - lim % batchsize
|
c37226946c51144df6192adeaf265326ee3bb701
| 17,376
|
import decimal
def create_decimal(x, digits, rounding=decimal.ROUND_HALF_UP):
"""Create `Decimal` object from `float` with desired significant figures.
Parameters
----------
x : float
Value to convert to decimal.
digits : int
Number of significant figures to keep in `x`, must be >= 1.
rounding : str
Rounding mode, must be one of the rounding modes accepted as in
`decimal.Context.rounding`.
Returns
-------
y : Decimal
Conversion of `x` to `Decimal`.
"""
assert digits >= 1 # Makes no sense otherwise
with decimal.localcontext() as ctx:
ctx.prec = digits
ctx.rounding = rounding
y = +decimal.Decimal(x)
return y
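# Usage sketch (illustrative): keep the stated number of significant figures,
# rounding half up; float noise well below the kept digits does not matter.
assert str(create_decimal(3.14159, 3)) == "3.14"
assert str(create_decimal(0.0012345, 2)) == "0.0012"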
|
dcc2bb5ecffa4678daab2e0569652a90a86bca9d
| 180,973
|
def clean_row(row):
"""Helper method that cleans whitespaces and newlines in text returned from BeautifulSoup
Parameters
----------
row
text returned from BeautifulSoup find method
Returns
-------
list of elements
"""
return [r for r in row.text.strip().split("\n") if r not in ["", " "]]
|
029d1164ff67b0aca05f915f05638fbf13dcd406
| 209,895
|
from pathlib import Path
def get_immediate_directories(directory_path):
"""Gets the immediate sub-directories within a directory
Args:
directory_path (str): path to the directory
Returns:
list(str): list of sub-directories
"""
p = Path(directory_path)
return [f for f in p.iterdir() if f.is_dir() and not f.name.startswith(".")]
|
8aabf6279367733bc6b25aec2cf69fb2e8f1c0a0
| 670,595
|
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
from typing import MutableMapping
def _kwargs_from_call(param_names: List[str], kwdefaults: Dict[str, Any], args: Tuple[Any, ...],
kwargs: Dict[str, Any]) -> MutableMapping[str, Any]:
"""
Inspect the input values received at the wrapper for the actual function call.
:param param_names: parameter (*i.e.* argument) names of the original (decorated) function
:param kwdefaults: default argument values of the original function
:param args: arguments supplied to the call
:param kwargs: keyword arguments supplied to the call
:return: resolved arguments as they would be passed to the function
"""
# pylint: disable=too-many-arguments
resolved_kwargs = dict() # type: MutableMapping[str, Any]
# Set the default argument values as condition parameters.
for param_name, param_value in kwdefaults.items():
resolved_kwargs[param_name] = param_value
# Override the defaults with the values actually supplied to the function.
for i, func_arg in enumerate(args):
if i < len(param_names):
resolved_kwargs[param_names[i]] = func_arg
else:
# Silently ignore call arguments that were not specified in the function.
# This way we let the underlying decorated function raise the exception
# instead of frankensteining the exception here.
pass
for key, val in kwargs.items():
resolved_kwargs[key] = val
return resolved_kwargs
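# Usage sketch (illustrative): resolving the call f(1, z=3) against a
# function defined as `def f(x, y=2, z=0)`; defaults apply first, then
# positional and keyword arguments override them.
resolved = _kwargs_from_call(
param_names=["x", "y", "z"], kwdefaults={"y": 2, "z": 0}, args=(1,), kwargs={"z": 3}
)
assert resolved == {"x": 1, "y": 2, "z": 3}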
|
996714eaa88a88bb52d2a45d82195f9a7e9b1755
| 103,851
|
def output_component(graph, edge_stack, u, v):
"""Helper function to pop edges off the stack and produce a list of them."""
edge_list = []
while len(edge_stack) > 0:
edge_id = edge_stack.popleft()
edge_list.append(edge_id)
edge = graph.get_edge(edge_id)
tpl_a = (u, v)
tpl_b = (v, u)
if tpl_a == edge['vertices'] or tpl_b == edge['vertices']:
break
return edge_list
|
05aa5eed7025d2f77925eacdacbc9275f72ec87a
| 286,290
|
from datetime import datetime
def datetime_to_year(dt: datetime) -> float:
"""
Convert a DateTime instance to decimal year
For example, 1/7/2010 would be approximately 2010.5
:param dt: The datetime instance to convert
:return: Equivalent decimal year
"""
# By Luke Davis from https://stackoverflow.com/a/42424261
year_part = dt - datetime(year=dt.year, month=1, day=1)
year_length = datetime(year=dt.year + 1, month=1, day=1) - datetime(year=dt.year, month=1, day=1)
return dt.year + year_part / year_length
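# Usage sketch (illustrative): 1 July 2010 falls roughly halfway through a
# non-leap year.
assert 2010.49 < datetime_to_year(datetime(2010, 7, 1)) < 2010.50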
|
5f4ae29d57d13a344e70016ab59dbc0a619db4d8
| 701,802
|
def try_with_lazy_context(error_context, f, *args, **kwargs):
"""
Call an arbitrary function with arbitrary args / kwargs, wrapping
in an exception handler that attaches a prefix to the exception
message and then raises (the original stack trace is preserved).
The `error_context` argument should be a lambda taking no
arguments and returning a message which gets prepended to
any errors.
"""
try:
return f(*args, **kwargs)
except Exception as e:
msg = error_context()
e.args = tuple(
["%s:\n%s" % (msg, e.args[0])] + [a for a in e.args[1:]]
)
raise
|
7e2a4cfee7b4acf5a449b4b07f8c56baca5a63d3
| 701,165
|
def remove_empty_entries(dicts):
"""Drop keys from dicts in a list of dicts if key is falsey"""
reduced = []
for d in dicts:
new_d = {}
for key in d:
if d[key]:
new_d[key] = d[key]
reduced.append(new_d)
return reduced
|
526e12b188f16cc93440c59e27d81775bfcaf80e
| 450,211
|
def grompp_em(job):
"""Run GROMACS grompp for the energy minimization step."""
em_mdp_path = "em.mdp"
msg = f"gmx grompp -f {em_mdp_path} -o em.tpr -c init.gro -p init.top --maxwarn 1"
return msg
|
be611c0e5fdac9a03596f6796c4b7bcae45ec06e
| 405,433
|
def get_offer_title(html_parser):
"""
This method returns the offer title.
:param html_parser: a BeautifulSoup object
:rtype: string
:return: The offer title
"""
title = html_parser.find("meta", attrs={"property": "og:title"})["content"]
return title
|
c3ed4327b40449b0200f803e074b6c90f0b26f17
| 450,506
|
def read_file(path):
"""Read file."""
with open(path) as _file:
return _file.read()
|
bed1e255478c6d43d84240e1c1969aa3c1bc21f3
| 27,813
|
import string
def makeValidMapKey(name):
"""Turns the given string into a valid key for use as a colour map
or lookup table identifier.
"""
valid = string.ascii_lowercase + string.digits + '_-'
key = name.lower().replace(' ', '_')
key = ''.join([c for c in key if c in valid])
return key
|
2cef3b5def09a076c69ea2982b16ee5bed490fdd
| 153,364
|
def copresence(acc, w1, w2):
"""Results 1 if a pair of figures is on stage at the same time, and 0
otherwise."""
return int(acc + w1 + w2 > 0)
|
3056b25df4a59bc421a3aec3d33e25db8ccb98bd
| 700,068
|
def simulate_one(chain, state={}, config={}):
"""
Simulate a chain once
Args:
- chain: an iterable with functions of type
f(state, config, log) -> new_state
- state (default: {}): initial state to apply chain to
- config (default: {}): pass configuration parameters in a dictionary
Returns:
Dictionary with the log
"""
log = {}
for link in chain:
# might want to just update in place;
# not return new_state but overwrite state
state = link(state, config, log)
return log
|
c9b1254e383708d3c2e6f7b701265e6df45acc78
| 285,914
|
def initial(array):
"""Return all but the last element of `array`.
Args:
array (list): List to process.
Returns:
list: Initial part of `array`.
Example:
>>> initial([1, 2, 3, 4])
[1, 2, 3]
.. versionadded:: 1.0.0
"""
return array[:-1]
|
fc579be391ffdf5c3445b5f4420a0c28cb572cad
| 77,807
|
import asyncio
def default_loop(loop):
"""
Return the specified loop or the default.
"""
if not loop:
return asyncio.get_event_loop()
return loop
|
1e1600e2d66ba97af12eb7fce43733e82446d6eb
| 338,834
|
def unnp(dicary):
"""Return `dicary` with any ndarray values replaced by lists."""
ndicary = {}
for k, v in dicary.items():
try:
v.shape
except AttributeError:
ndicary[k] = v
else:
ndicary[k] = v.tolist()
return ndicary
|
88d43d36de93883d9332d78c02880eeb344d6c70
| 69,902
|
def read_metadata_fei(filename):
"""Read the metadata from a TIFF produced by an FEI electron microscope.
This metadata is included as ASCII text at the end of the file.
Parameters
----------
filename : str
The input filename.
Returns
-------
metadata : dict
Dictionary of metadata.
"""
md = {'root': {}}
current_tag = 'root'
reading_metadata = False
with open(filename, 'rb') as fin:
for line in fin:
if not reading_metadata:
if not line.startswith(b'Date='):
continue
else:
reading_metadata = True
line = line.rstrip().decode()
if line.startswith('['):
current_tag = line.lstrip('[').rstrip(']')
md[current_tag] = {}
else:
if line and line != '\x00': # ignore blank lines
key, val = line.split('=')
md[current_tag][key] = val
if not md['root'] and len(md) == 1:
raise ValueError('Input file %s contains no FEI metadata.' % filename)
return md
|
8763a931e86602580b7acb854882941c744be7a3
| 227,600
|
def snitch_last_contained(metadata):
"""Return the frame when snitch was last contained."""
last_contain = 0
for _, movements in metadata['movements'].items():
contain_start = False
for movement in movements:
if movement[0] == '_contain' and movement[1] == 'Spl_0':
# Should not be containing anything already
contain_start = True
elif contain_start and movement[0] == '_pick_place':
last_contain = max(movement[-2], last_contain)
contain_start = False
if contain_start:
# It never ended
last_contain = 300 # Max frames
return last_contain
|
139719b3c4cd38aeab01e52078547da7501161cf
| 678,831
|
import hashlib
def get_hash(filename):
"""Calculate SHA256 checksum for file"""
buffer_size = 65536
sha256 = hashlib.sha256()
with open(filename, "rb") as file:
while True:
chunk = file.read(buffer_size)
if not chunk:
break
sha256.update(chunk)
return sha256.hexdigest()
|
737a0e528eb5407cfb82932562a6dbcb81d76cad
| 436,182
|
import pickle
def read_pickle(file_name=None):
"""
Read a pickle file and return the dictionary
:param file_name:
:return: dictionary
"""
if file_name is not None:
with open(file_name, "rb") as loaded_file:
features = pickle.load(loaded_file, encoding="UTF8")
return features
return None
|
adc5bb46f6c9a500a68ee651cdc3d47479ef4914
| 589,090
|
def _normalize_handler_method(method):
"""Transforms an HTTP method into a valid Python identifier."""
return method.lower().replace("-", "_")
|
aad23dba304ba39708e4415de40019479ccf0195
| 709,205
|
def txtToDict(txtfile):
"""Read vertices from a text file of (Lon, Lat) coords.
Input file represents a single polygon with a single line list of comma-separated
vertices, e.g.: "<lon>, <lat>, <lon>, <lat>, ...".
"""
with open(txtfile) as f:
polygon = f.readline()
if not polygon:
return None
vertices = [float(val) for val in polygon.strip().split(",")]
d = {}
d["Lon"] = vertices[::2]
d["Lat"] = vertices[1::2]
if len(d["Lon"]) != len(d["Lat"]):
raise RuntimeError("Invalid input.")
return d
|
33351ac5d0e4f6e14ac65036d2bbc43291d67ab2
| 315,426
|
from typing import Optional
from typing import Any
def null_str(value: Optional[Any]) -> str:
"""
Return NULL if the value is None, otherwise str(value).
"""
if value is None:
return "NULL"
return str(value)
|
8bd4e30bfdb17ca5e637769bcd5f72ac10647a7e
| 368,847
|
def may_view_public_identity(app, identity, model, permission):
""" Even anonymous may view the public metadata of the instance. """
return True
|
296ad3584ca6768ec8944044673e4afb5ed54f0f
| 217,014
|
import re
def is_heading(this_line):
"""Determine whether a given line is a section header
that describes subsequent lines of a report.
"""
cattle_clue = '(BRED?|COW?|HEIFER?|BULL?|HEIFERETTE?|PAIRS?|CALF?)'
has_cattle = re.search(cattle_clue, str(this_line), re.IGNORECASE)
is_succinct = len(this_line) < 3
return bool(has_cattle and is_succinct)
|
24a306a29706c038fe314cefa2434807fbe3fe99
| 537,484
|
def cli(ctx, common_name=""):
"""Get all organisms
Output:
Organism information
"""
return ctx.gi.organisms.get_organisms(common_name=common_name)
|
f3d4dc637f779289960a33e31644f4aeb4af9e4a
| 281,054
|
def func_tower_weight_d2h(
diameter: float, height: float, coeff_a: float, coeff_b: float
) -> float:
"""
Returns tower mass, in kg, based on tower diameter and height.
:param diameter: tower diameter (m)
:param height: tower height (m)
:param coeff_a: coefficient
:param coeff_b: coefficient
:return: tower mass (in kg)
"""
tower_mass = coeff_a * diameter ** 2 * height + coeff_b
return 1e3 * tower_mass
|
6f185eea95a808e24b8cb3250acb14d6d1ba2096
| 498,570
|
def remove_low_std(X,std_val=0.01):
"""
Remove features below this standard deviation
Parameters
----------
X : pd.DataFrame
analytes with their features (i.e. molecular descriptors)
std_val : float
float value to cut-off removing features
Returns
-------
list
list with features to remove
"""
rem_f = []
std_dist = X.std(axis=0)
rem_f.extend(list(std_dist.index[std_dist<std_val]))
return(rem_f)
|
7a8216ba72d26171ca54851da40a2ee4064b6dc6
| 370,143
|
def run_bq_query(client, query, timeout):
""" Returns the results of a BigQuery query
Args:
client: BigQuery-Python bigquery client
query: String query
timeout: Query timeout time in seconds
Returns:
List of dicts, one per record;
dict keys are table field names and values are entries
"""
job_id, _results = client.query(query, timeout=timeout)
complete, row_count = client.check_job(job_id)
if complete:
results = client.get_query_rows(job_id)
print('Got %s records' %row_count)
else:
raise RuntimeError('Query not complete')
return(results)
|
1336b884b32d15e7bcb5b97ef8b2b6922d775e77
| 690,533
|
def _manage_strictness(col, eia860_ytd):
"""
Manage the strictness level for each column.
Args:
col (str): name of column
eia860_ytd (boolean): if True, the etl run is attempting to include
year-to-date updated from EIA 860M.
"""
strictness_default = .7
# the longitude column is very different in the ytd 860M data (it appears
# to have an additional decimal point) bc it shows up in the generator
# table but it is a plant level data point, it mucks up the consistency
strictness_cols = {
'plant_name_eia': 0,
'utility_name_eia': 0,
'longitude': 0 if eia860_ytd else .7
}
return strictness_cols.get(col, strictness_default)
|
3894f8f35fc18b6326abcdeb1caa988627099d79
| 508,488
|
def reverse_order_str(order_str):
"""Given some ordering, possibly already negative, reverse it."""
# Historical note on why we abstract orderings as strings:
# NDB makes it really hard to reverse an arbitrary ordering.
# Specifically, Property.__neg__() returns a PropertyOrder, not
# a Property, so you can't do Property.__neg__().__neg__()
# This gets around that by reversing the string form.
if order_str.startswith('-'):
return order_str[1:]
else:
return '-' + order_str
|
337d2288414f4580a7ad2b74bb60ca30a30dda1b
| 139,773
|
def is_strictly_increasing(L):
""" Returns True if the list contains strictly increasing values.
Examples:
is_strictly_increasing([0,1,2,3,4,5]) > True
is_strictly_increasing([0,1,2,2,4,5]) > False
is_strictly_increasing([0,1,2,1,4,5]) > False
"""
for x, y in zip(L, L[1:]):
if x >= y: return False
return True
|
8af27b7af9f7942d622f2c73054e9e12a1f734ec
| 592,471
|