content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _core_dist(point, neighbors, dist, n_features):
"""
Computes the core distance of a point.
Core distance is the inverse density of an object.
Args:
point (int): number of the point in the dataset
neighbors (np.ndarray): array of dimensions (n_neighbors, 1):
array of all other points indexes in object class
dist (np.ndarray): array of dimensions (n, n):
precalculated distances between all points
Returns: core_dist (float)
inverse density of point
"""
n_neighbors = len(neighbors)
distance_vector = dist[point][neighbors]
distance_vector = distance_vector[distance_vector != 0]
numerator = ((1/distance_vector)**n_features).sum()
core_dist = (numerator / (n_neighbors - 1)) ** (-1/n_features)
return core_dist | ec3801709b5fbce4107f94cd19f3ccd59c971887 | 100,198 |
def subtree_induced_by_subset(tree, s):
    """Return the subtree of ``tree`` induced by the nodes containing set ``s``.

    Args:
        tree (NetworkX graph): A junction tree.
        s (set): Subset of the nodes in the underlying graph of the tree.

    Returns:
        A copy of the induced subgraph (the whole tree if ``s`` is empty).

    Example:
        >>> t = jtlib.sample(5)
        >>> subt = jtlib.subtree_induced_by_subset(t, frozenset([1]))
        >>> subt.nodes
        NodeView((frozenset([1, 2, 4]),))
    """
    if not s:
        # An empty set is contained in every clique: the induced
        # subtree is the whole tree.
        return tree.copy()
    inducing_nodes = {clique for clique in tree.nodes() if s <= clique}
    return tree.subgraph(inducing_nodes).copy()
def _get_filter_settings(relay_config, flt):
"""
Gets the filter options from the relay config or the default option if not specified in the relay config
:param relay_config: the relay config for the request
:param flt: the filter
:return: the options for the filter
"""
filter_settings = relay_config.config.get('filter_settings', {})
filter_key = flt.spec.id
return filter_settings.get(filter_key, None) | 9a8ce92d2b2c8af35b3817ab410ff8c960e9de49 | 100,201 |
def nframes(dur, hop_size=3072, win_len=4096) -> float:
    """
    Compute the number of frames given a total duration, the hop size and
    window length. The output unit of measure is the same as the inputs'
    unit of measure (e.g. samples or seconds).
    N.B. This returns a float!
    """
    full_windows = (dur - win_len) / hop_size
    return full_windows + 1
def kwargs_nonempty(**kwargs):
    """Returns any keyword arguments with truthy (non-empty) values as a dict."""
    filtered = {}
    for name, value in kwargs.items():
        if value:
            filtered[name] = value
    return filtered
def read_int(prompt: str, min_value: int = 1, max_value: int = 5) -> int:
    """Read an integer between a min and max value, re-prompting until valid."""
    while True:
        raw = input(prompt)
        try:
            number = int(raw)
        except ValueError:
            print("That's not a number! Try again.")
            continue
        if number < min_value:
            print(f"The minimum value is {min_value}. Try again.")
        elif number > max_value:
            print(f"The maximum value is {max_value}. Try again.")
        else:
            return number
def create_rules(lines):
    """
    Given the list of line rules, create the rules dictionary.
    Each line contributes one entry: first token -> last token.
    """
    return {line.split()[0]: line.split()[-1] for line in lines}
from typing import List
def _remove_blank_lines_from_list(*, lines: List[str]) -> List[str]:
"""
Remove blank lines from a list of lines.
Parameters
----------
lines : list of str
Target list of lines.
Returns
-------
result_lines : list of str
A lines list which removed blank lines.
"""
result_lines: List[str] = []
for line in lines:
if line.strip() == '':
continue
result_lines.append(line)
return result_lines | e1fe11c74594f77c70521b75f5534debc6817efd | 100,218 |
import re
def cigar_to_max_operation(cigar):
    """Get the longest cigar operation length from this cigar string."""
    # Split on the operation letters (=, I, D, X); the remaining tokens
    # are the operation lengths.
    return max(int(token) for token in re.split(r"[=IDX]+", cigar) if token)
def validateMCMCKwargs(ap, samplerKwargs, mcmcKwargs, verbose=False):
    """
    Validates emcee.EnsembleSampler parameters/kwargs.

    Parameters
    ----------
    ap : approxposterior.ApproxPosterior
        Initialized ApproxPosterior object
    samplerKwargs : dict
        dictionary containing parameters intended for emcee.EnsembleSampler
        object
    mcmcKwargs : dict
        dictionary containing parameters intended for
        emcee.EnsembleSampler.run_mcmc/.sample object
    verbose : bool, optional
        verbosity level. Defaults to False (no output)

    Returns
    -------
    samplerKwargs : dict
        Sanitized dictionary containing parameters intended for
        emcee.EnsembleSampler object
    mcmcKwargs : dict
        Sanitized dictionary containing parameters intended for
        emcee.EnsembleSampler.run_mcmc/.sample object
    """
    # First validate kwargs for emcee.EnsembleSampler object
    if samplerKwargs is None:
        samplerKwargs = dict()
        samplerKwargs["ndim"] = ap.theta.shape[-1]
        # BUGFIX: this previously read samplerKwargs["dim"], which was never
        # set and raised a KeyError whenever samplerKwargs was None.
        samplerKwargs["nwalkers"] = 20 * samplerKwargs["ndim"]
        samplerKwargs["log_prob_fn"] = ap._gpll
    else:
        # If user set ndim, ignore it and align it with theta's dimensionality
        samplerKwargs.pop("ndim", None)
        samplerKwargs["ndim"] = ap.theta.shape[-1]
        # Initialize other parameters if they're not provided
        try:
            nwalkers = samplerKwargs["nwalkers"]
        except KeyError:
            print("WARNING: samplerKwargs provided but nwalkers not in samplerKwargs")
            print("Defaulting to nwalkers = 20 per dimension.")
            samplerKwargs["nwalkers"] = 20 * samplerKwargs["ndim"]
        if "backend" in samplerKwargs.keys():
            print("WARNING: backend in samplerKwargs. approxposterior creates its own!")
            print("with filename = apRun.h5. Disregarding user-supplied backend.")
        # Handle case when user supplies own loglikelihood function
        if "log_prob_fn" in samplerKwargs.keys():
            # Remove any other log_prob_fn
            samplerKwargs.pop("log_prob_fn", None)
        # Prevent users from providing their own backend
        samplerKwargs.pop("backend", None)
        # Properly initialize log_prob_fn to be GP loglikelihood estimate
        samplerKwargs["log_prob_fn"] = ap._gpll
    # Validate mcmcKwargs dict used in sampling posterior distribution
    # e.g. emcee.EnsembleSampler.run_mcmc method
    if mcmcKwargs is None:
        mcmcKwargs = dict()
        mcmcKwargs["iterations"] = 10000
        mcmcKwargs["initial_state"] = ap.priorSample(samplerKwargs["nwalkers"])
    else:
        try:
            nsteps = mcmcKwargs["iterations"]
        except KeyError:
            mcmcKwargs["iterations"] = 10000
            if verbose:
                print("WARNING: mcmcKwargs provided, but iterations not in mcmcKwargs.")
                print("Defaulting to iterations = 10000.")
        try:
            p0 = mcmcKwargs["initial_state"]
        except KeyError:
            mcmcKwargs["initial_state"] = ap.priorSample(samplerKwargs["nwalkers"])
            if verbose:
                print("WARNING: mcmcKwargs provided, but initial_state not in mcmcKwargs.")
                print("Defaulting to nwalkers samples from priorSample.")
    return samplerKwargs, mcmcKwargs
def load_protobuf(file, pb_cls):
    """
    Instantiates a protobuf object and loads data from file into the object.

    :param file: path or file-like object containing serialized protobuf
        contents (file-like objects are detected by a ``read`` attribute)
    :param pb_cls: message class from the generated client
    :return: instance of pb_cls
    """
    if hasattr(file, "read"):
        raw = file.read()
    else:
        with open(file, "rb") as handle:
            raw = handle.read()
    message = pb_cls()
    message.ParseFromString(raw)
    return message
def check_tiramisu_layers(tiramisu_model=None):
    """Checks and returns the tiramisu layers on the tiramisu model string.

    Parameters
    ----------
    tiramisu_model : str or None, optional (default : None)
        The tiramisu model to be used (e.g. ``'tiramisu67'``).

    Returns
    -------
    tiramisu_layers : str
        Everything after the 8-character ``'tiramisu'`` prefix.
        Empty string if tiramisu_model is None.
    """
    if tiramisu_model is None:
        return ''
    return tiramisu_model[8:]
def stemWords(words_list, stemmer):
    """
    Stem repetitive/similar words.

    Params:
        words_list: list of words (text)
        stemmer: stemmer algorithm exposing a ``stem(word)`` method
    """
    stemmed = []
    for token in words_list:
        stemmed.append(stemmer.stem(token))
    return stemmed
import torch
def _torch_normalize_vectors(rr):
"""Normalize surface vertices."""
new_rr = rr.clone()
size = torch.linalg.norm(rr, axis=1)
mask = (size > 0)
new_rr[mask] = rr[mask] / size[mask].unsqueeze(-1)
return new_rr | fc3746ac16eca52aba00b749240eae419885ab57 | 100,236 |
def cleanup_favorite(favorite):
    """Given a dictionary of a favorite item, return its "post" value as a str."""
    post = favorite["post"]
    return str(post)
import torch
def bellman(qf, targ_qf, targ_pol, batch, gamma, continuous=True, deterministic=True, sampling=1, reduction='elementwise_mean'):
    """
    Bellman loss.
    Mean Squared Error of left hand side and right hand side of Bellman Equation.
    Parameters
    ----------
    qf : SAVfunction
        Q-function being trained.
    targ_qf : SAVfunction
        Target Q-function used to build the (detached) regression target.
    targ_pol : Pol
        Target policy used to sample next actions.
    batch : dict of torch.Tensor
        Expects keys 'obs', 'acs', 'rews', 'next_obs', 'dones'.
    gamma : float
        Discount factor.
    continuous : bool
        action space is continuous or not
    deterministic : bool
        NOTE(review): accepted but never used in this implementation — confirm
        whether it should influence action sampling.
    sampling : int
        Number of sampling in calculating expectation.
    reduction : str
        This argument takes only elementwise, sum, and none.
        Loss shape is pytorch's manner.
    Returns
    -------
    bellman_loss : torch.Tensor
    """
    if continuous:
        obs = batch['obs']
        acs = batch['acs']
        rews = batch['rews']
        next_obs = batch['next_obs']
        dones = batch['dones']
        # Sample next actions from the *target* policy's distribution.
        targ_pol.reset()
        _, _, pd_params = targ_pol(next_obs)
        pd = targ_pol.pd
        next_acs = pd.sample(pd_params, torch.Size([sampling]))
        # Broadcast observations across the `sampling` axis so the target
        # Q-function evaluates every sampled action.
        next_obs = next_obs.expand([sampling] + list(next_obs.size()))
        targ_q, _ = targ_qf(next_obs, next_acs)
        # Monte-Carlo estimate of E[Q'(s', a')] over the sampled actions.
        next_q = torch.mean(targ_q, dim=0)
        # Bellman target; (1 - dones) zeroes the bootstrap term on terminal steps.
        targ = rews + gamma * next_q * (1 - dones)
        targ = targ.detach()
        q, _ = qf(obs, acs)
        # 0.5 * squared TD error, per element.
        ret = 0.5 * (q - targ)**2
        if reduction != 'none':
            ret = torch.mean(
                ret) if reduction == 'elementwise_mean' else torch.sum(ret)
        return ret
    else:
        raise NotImplementedError(
            "Only Q function with continuous action space is supported now.")
def archive_coupling_dict(coupling_dict):
    """
    Turns an enzyme coupling dict of the form:

    .. code:: python

        {'AB6PGH': <Enzyme AB6PGH at 0x7f7d1371add8>,
         'ABTA': <Enzyme ABTA at 0x7f7d1371ae48>}

    into a plain mapping of each key to the enzyme's ``id``:

    .. code:: python

        {'AB6PGH': 'AB6PGH',
         'ABTA': 'ABTA'}
    """
    archived = {}
    for name, enzyme in coupling_dict.items():
        archived[name] = enzyme.id
    return archived
def get_m3(m1, c, gamma):
    """
    Helper: get the third raw moment M3 from M1 (mean), Cv (coefficient
    of variation) and skewness gamma.
    """
    std_dev = c * m1
    variance = std_dev**2
    return gamma * variance * std_dev + 3 * m1 * variance + m1**3
def avoids(word, forbidden):
    """
    Check a word for a string of forbidden characters.

    word = word to check
    forbidden = characters to avoid

    Return False if any forbidden character is present, else True.
    """
    return not any(char in word for char in forbidden)
from datetime import datetime
def convert_datetime(value):
    """Convert a date and time to a :class:`~datetime.datetime` object.

    Parameters
    ----------
    value : :class:`bytes`
        The datetime value from an SQLite database.

    Returns
    -------
    :class:`datetime.datetime`
        The `value` as a datetime object.
    """
    try:
        # datetime.fromisoformat is available in Python 3.7+
        return datetime.fromisoformat(value.decode())
    except AttributeError:
        # mimics the sqlite3.dbapi2.convert_timestamp function
        date_bytes, time_bytes = value[:10], value[11:]
        year, month, day = map(int, date_bytes.split(b'-'))
        time_pieces = time_bytes.split(b'.')
        hours, minutes, seconds = map(int, time_pieces[0].split(b':'))
        microseconds = 0
        if len(time_pieces) == 2:
            # Right-pad the fractional part to exactly six digits.
            microseconds = int('{:0<6.6}'.format(time_pieces[1].decode()))
        return datetime(year, month, day, hours, minutes, seconds, microseconds)
def is_native(obj, module):
    """
    Determine whether *obj* was defined in *module*.

    Returns True if obj was defined in this module.
    Returns False if obj was not defined in this module.
    Returns None if we can't figure it out, e.g. if this is a primitive type
    without a ``__module__`` attribute.
    """
    try:
        return module.__name__ in obj.__module__
    except (AttributeError, TypeError):
        # AttributeError: obj has no __module__ (primitive types);
        # TypeError: __module__ is not a string (e.g. None).
        return None
def populate_nested_dictionary(dictionary, key_list, types=()):
    """
    For each key k_i in key_list, make sure that
    dictionary[k_0][k_1][k_2]...[k_i] exists, creating objects of the
    appropriate type in each sub-dictionary as needed.

    If types is specified, then dictionary[key_list[i]] should point to an
    object of type types[i] for all i.

    Returns the element corresponding to dictionary[k_0][k_1]...[k_{n-1}]
    where n is the size of key_list.
    """
    # NOTE: default changed from a mutable list ([]) to an immutable tuple
    # to avoid the shared-mutable-default pitfall; slicing/len work the same.
    if len(key_list) > 0:
        if key_list[0] not in dictionary:
            if len(types) > 0:
                dictionary[key_list[0]] = types[0]()
            else:
                dictionary[key_list[0]] = {}
        # Recurse one level down, consuming one key (and one type, if any).
        return populate_nested_dictionary(dictionary[key_list[0]], key_list[1:],
                                          types[1:])
    else:
        return dictionary
def cov_pow(h, w=1.0, r=1.0, s=1.0):
    """
    1D-power covariance model (with sill=1 and range=1):

    :param h: (1-dimensional array or float): lag(s)
    :param w: (float >0): weight (sill)
    :param r: (float >0): range
    :param s: (float btw 0 and 2): power
    :return: (1-dimensional array or float) evaluation of the model at h
    """
    normalized_lag = h / r
    return w * (1. - normalized_lag**s)
import re
def build_regexp_if_needed(maybe_regexp):
    """
    Compile *maybe_regexp* into a regexp if it is a str; otherwise return
    it unchanged (assumed to already be a compiled pattern).
    """
    if isinstance(maybe_regexp, str):
        return re.compile(maybe_regexp)
    return maybe_regexp
def maybe_create_block_in_items_map(im, block):
    """Return the existing entry for *block* in the items_map *im*,
    creating (and storing) a fresh ``[None] * len(block.items)`` slot list
    if the block has no entry yet.
    """
    try:
        return im[block]
    except KeyError:
        # BUGFIX: was a bare ``except:`` which also swallowed unrelated
        # errors (e.g. KeyboardInterrupt, unhashable keys).
        entry = im[block] = [None] * len(block.items)
        return entry
def calc_probability(col_dict):
    """
    Calculate the probability of each item in the dictionary, in place.

    For example, {'item1': 10, 'item2': 30} gives
    {'item1': 0.25, 'item2': 0.75}.

    Note: mutates *col_dict* and also returns it.
    """
    total = sum(col_dict.values())
    for key in col_dict:
        col_dict[key] = col_dict[key] / total
    return col_dict
def _get_exist_branches(branch_names, all_branch_names):
"""Filter out not existing branches from branch names or return all branches."""
if not branch_names:
return all_branch_names
return set(branch_names).intersection(all_branch_names) | 254b3520f3c3cd9dd86791d0cebdaef3d02024af | 100,273 |
def get_time_diff(old_time, new_time):
    """Return the difference (in seconds) between two rospy times."""
    return new_time.to_sec() - old_time.to_sec()
import statistics
def get_stats(any_list):
    """
    Return basic stats about a list.

    :param any_list: a list of numbers (needs at least two elements for
        stdev/variance)
    :return: tuple of (mean, median, stdev, variance)
    """
    stats = (
        statistics.mean(any_list),
        statistics.median(any_list),
        statistics.stdev(any_list),
        statistics.variance(any_list),
    )
    return stats
def toTokenList(x):
    """String to list of characters (tokens)."""
    return [char for char in x]
def _get_xyz(df, label):
"""Get xyz coordinates from a pandas data frame.
Parameters
----------
df : pandas.DataFrame
Data frame with (at least) columns x, y, z, label.
label : str
Electrode label for which to get x, y, z from `df`.
Returns
-------
x, y, z : float
Positions of electrodes on a unit sphere.
"""
# Check that all labels are present
for var in ["label", "x", "y", "z"]:
if var not in df.columns:
raise ValueError(f"df must contain a column '{var}'")
# Check we get exactly one row of data
subdf = df[df["label"] == label]
nrows = subdf.shape[0]
if nrows == 0 or nrows > 1:
raise ValueError(f"Expected one row of data but got: {nrows}")
# Get the data
x = float(df[df["label"] == label].x)
y = float(df[df["label"] == label].y)
z = float(df[df["label"] == label].z)
return x, y, z | d83c45fa38b14691d5dd39b8c74bcbd2ade29988 | 100,284 |
def url(base_url: str, region: str) -> str:
    """Return a regionalized URL based on the default and the given region.

    The default "us" region keeps the base URL unchanged; any other region
    is inserted as a prefix on the host (``https://<region>-...``).
    """
    if region == "us":
        return base_url
    return base_url.replace("https://", f"https://{region}-")
def find_seperation(arg):
    """
    Helper function for decompose.

    @param: arg is a string corresponding to two function statements
        separated by a comma.
        Example: "and(and(a,c),b),or(ab,b)"
    @return:
        the index of the top-level comma separating the functions, or -1
        if no such comma exists
    """
    depth = 0
    for index, char in enumerate(arg):
        if char == '(':
            depth += 1
        elif char == ')':
            depth -= 1
        elif char == ',' and depth == 0:
            # Comma outside any parentheses: this is the separator.
            return index
    return -1
def _parse_options(options):
"""Parse search options string into optiond_dict
"""
if options is None:
return {}
search_options = {
'insensitive': 'i' in options,
'word_boundaries': 'b' in options,
'recursive': 'r' in options,
}
return search_options | c0e1083f3b58ac9bad04809c5db4738d1d305f41 | 100,293 |
import random
import string
def generate_rand_name() -> str:
    """Generate a random name of the form "exporters_XXXXXX" where XXXXXX
    are 6 random uppercase letters or digits.

    Returns:
        str: the generated name
    """
    alphabet = string.ascii_uppercase + string.digits
    suffix = "".join(random.choices(alphabet, k=6))
    return f"exporters_{suffix}"
def transform_state(input_state: str, transformation: str):
    """
    A function to track the next position on the Bloch sphere based on the current position and the applied clifford
    :param input_state: Position on the bloch sphere (one of the six poles: 'x', '-x', 'y', '-y', 'z', '-z')
    :param transformation: A clifford operation (one of 'I', 'X/2', 'X', '-X/2', 'Y/2', 'Y', '-Y/2')
    :return: The next state on the bloch sphere
    """
    # Lookup table: current pole -> (applied Clifford -> resulting pole).
    # An unknown state or operation raises KeyError.
    transformations = {
        "x": {
            "I": "x",
            "X/2": "x",
            "X": "x",
            "-X/2": "x",
            "Y/2": "z",
            "Y": "-x",
            "-Y/2": "-z",
        },
        "-x": {
            "I": "-x",
            "X/2": "-x",
            "X": "-x",
            "-X/2": "-x",
            "Y/2": "-z",
            "Y": "x",
            "-Y/2": "z",
        },
        "y": {
            "I": "y",
            "X/2": "z",
            "X": "-y",
            "-X/2": "-z",
            "Y/2": "y",
            "Y": "y",
            "-Y/2": "y",
        },
        "-y": {
            "I": "-y",
            "X/2": "-z",
            "X": "y",
            "-X/2": "z",
            "Y/2": "-y",
            "Y": "-y",
            "-Y/2": "-y",
        },
        "z": {
            "I": "z",
            "X/2": "-y",
            "X": "-z",
            "-X/2": "y",
            "Y/2": "-x",
            "Y": "-z",
            "-Y/2": "x",
        },
        "-z": {
            "I": "-z",
            "X/2": "y",
            "X": "z",
            "-X/2": "-y",
            "Y/2": "x",
            "Y": "z",
            "-Y/2": "-x",
        },
    }
    return transformations[input_state][transformation]
def _check_center_ok(centerpos, centersize, pos):
"""Checks whether pos is far away enough from centerpos"""
x, y = False, False
if pos[0]-centersize > centerpos[0] or pos[0]+centersize < centerpos[0]: x = True
if pos[1]-centersize > centerpos[1] or pos[0]+centersize < centerpos[1]: y = True
return (x and y) | f3a966ad4824c921e1a721c328acdb9833d49524 | 100,300 |
def get_symmetric_pos_th_nb(neg_th):
    """Compute the positive return that is symmetric to a negative one.

    For example, a 50% drop requires a 100% rise to return to the initial
    level.
    """
    remaining_fraction = 1 - neg_th
    return neg_th / remaining_fraction
import json
def create_opaque_data(nonce: str, token: str) -> str:
    """
    :param nonce: Nonce
    :param token: Token
    :return: Opaque data for the user
    """
    # The "1" prefix denotes the version of the data exchanged; right now
    # only version 1 is supported.
    payload = json.dumps({'nonce': nonce, 'token': token})
    return '1' + payload
def norm_aplha(alpha):
    """ normalise alpha in range (0, 1)

    Values above 1 are assumed to be on the 0-255 scale and divided by 255;
    the result is then clamped to [0, 1].

    :param float alpha:
    :return float:

    >>> norm_aplha(0.5)
    0.5
    >>> norm_aplha(255)
    1.0
    >>> norm_aplha(-1)
    0
    """
    if alpha > 1.:
        alpha = alpha / 255.
    if alpha < 0.:
        return 0
    if alpha > 1.:
        return 1.
    return alpha
def NoOpDecorator(func):
    """Identity decorator: returns *func* unchanged (a pass-through stand-in
    for a real decorator in tests)."""
    return func
def reconstruct_path(came_from, start, end):
    """
    Reconstruct the came_from dictionary into an ordered list of tiles
    from start to end (inclusive), for later traversal/drawing.

    :param came_from: dict mapping each tile to its predecessor
    :param start: Tile
    :param end: Tile
    :return: list path from start to end
    """
    node = end
    path = [node]
    while node != start:
        node = came_from[node]
        path.append(node)
    # BUGFIX: the loop above already appends `start` on its final iteration;
    # the original code then appended `start` a second time, duplicating the
    # first element of the returned path.
    path.reverse()
    return path
def to_joules(ujoules):
    """Convert from microjoules to joules."""
    micro = 10**(-6)
    return ujoules * micro
def clamp_image(image, bounds=(0, 1)):
    """
    Clamp the values of an image tensor to lie within *bounds*.
    """
    lower, upper = bounds
    return image.clamp(lower, upper)
def line_intersection(line1, line2):
    """
    Find the intersection point of two straight lines.

    from: https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines-in-python
    by: @paul-draper

    :param line1: 2 points defining a line
    :type line1: tuple
    :param line2: 2 points defining a line
    :type line2: tuple
    :return: x & y coordinates of the intersection point of the two lines
    :rtype: tuple
    :raises Exception: when the lines are parallel (no intersection)
    """
    def _det(a, b):
        return a[0] * b[1] - a[1] * b[0]
    xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
    ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
    denominator = _det(xdiff, ydiff)
    if denominator == 0:
        raise Exception('lines do not intersect')
    d = (_det(*line1), _det(*line2))
    return _det(d, xdiff) / denominator, _det(d, ydiff) / denominator
import math
def minutes_to_hours(minutes):
    """
    Convert minutes into a human-readable "H Hours and M Mins" string.
    """
    whole_hours = math.trunc(minutes / 60)
    leftover_mins = minutes % 60
    return f"{whole_hours} Hours and {leftover_mins} Mins"
def transliterate(trans, item):
    """Transliterate a string, a list of strings, or a list of lists of
    strings through the *trans* mapping; anything else is returned as-is.
    Unknown tokens map to themselves."""
    if isinstance(item, str):
        return trans.get(item, item)
    if isinstance(item, list) and item:
        head = item[0]
        if isinstance(head, str):
            return [trans.get(token, token) for token in item]
        if isinstance(head, list):
            return [[trans.get(token, token) for token in inner]
                    for inner in item]
    return item
import json
def read_input_json(jsonfile):
    """Read the JSON supplied from the data manager tool.

    Returns a tuple (param_dict, extra_files_path):
    'param_dict' is an arbitrary dictionary of parameters
    input into the tool; 'extra_files_path' is the path
    to a directory where output files must be put for the
    receiving data manager to pick them up.

    NB the directory pointed to by 'extra_files_path'
    doesn't exist initially, it is the job of the script
    to create it if necessary.
    """
    # BUGFIX: the file handle was previously opened without ever being
    # closed; use a context manager instead.
    with open(jsonfile) as fh:
        params = json.load(fh)
    return (params['param_dict'],
            params['output_data'][0]['extra_files_path'])
import torch
def seq_mask(seq_len, max_len):
    """Create a boolean mask for sequences of the given lengths.

    :param seq_len: list or torch.LongTensor of per-sequence lengths
    :param max_len: int, number of mask columns
    :return mask: torch tensor of shape (batch, max_len); entry [i, j] is
        True when j < seq_len[i]
    """
    if isinstance(seq_len, list):
        seq_len = torch.LongTensor(seq_len)
    columns = [torch.ge(seq_len, step + 1) for step in range(max_len)]
    return torch.stack(columns, 1)
def _is_vcs_file_status_unstaged(file_status):
"""Returns True if file status is unstage.
Staged status is determined by the second position of
`file_status.stats` code. See `guild.vcs_util.FileStatus` for
details.
"""
return file_status.status[1] != "_" | 95d9cec2d2587fc8c37046598114cdac363df15f | 100,333 |
def toWidthHeight(anchor):
    """Transform an anchor in [x0, y0, x1, y1] format to [w, h, x, y],
    where [w, h, x, y] stands for width, height, center x coord, center
    y coord.

    Since an anchor [x0, y0, x1, y1] represents the coordinates of the
    pixels composing the corners (not the corner points themselves), the
    actual extents span across the pixels, adding one to the widths and
    heights of the covered region.
    """
    x0, y0, x1, y1 = anchor[0], anchor[1], anchor[2], anchor[3]
    width = x1 - x0 + 1
    height = y1 - y0 + 1
    center_x = .5 * (x0 + x1 + 1)
    center_y = .5 * (y0 + y1 + 1)
    return [width, height, center_x, center_y]
import json
def parse_ironic(ironic_file):
    """Extract relevant data from an ironic input file.

    :param ironic_file: path to the JSON ironic data file
    :return: tuple (lowercased machine uuid, root disk, disks list)
    :raises RuntimeError: if the file is invalid JSON or any required
        field is missing
    """
    def _require(getter, message):
        # Run a lookup, converting any failure into a RuntimeError that
        # names the offending data file.
        try:
            return getter()
        except Exception:
            raise RuntimeError(message.format(ironic_data_file=ironic_file))
    with open(ironic_file, 'r') as f:
        ironic = _require(lambda: json.load(f),
                          'Invalid JSON file: {ironic_data_file}')
    uuid = _require(lambda: ironic['extra']['system']['product']['uuid'],
                    'The Machine Unique UUID is not defined in '
                    'data file: {ironic_data_file}')
    disks = _require(lambda: ironic['inventory']['disks'],
                     'No disks were found in '
                     'data file: {ironic_data_file}')
    root_disk = _require(lambda: ironic['root_disk'],
                         'No root disk was found in '
                         'data file: {ironic_data_file}')
    return uuid.lower(), root_disk, disks
def product(xs):
    """Return the product of the elements in an iterable (1 for empty)."""
    result = 1
    for value in xs:
        result = result * value
    return result
def temp_ftoc(temp_f):
    """Convert fahrenheit degrees to celsius.

    Prometheus expects SI units, but some sensors return F.
    """
    fahrenheit_offset = temp_f - 32.0
    return fahrenheit_offset * (5.0 / 9.0)
def text(element):
    """Get the text of an element as a string ("" when the text is None)."""
    content = element.text
    return "" if content is None else content
def net_income(financials_df):
    """Check whether the latest reported Net Income is positive.

    Explanation of Net Income: https://www.investopedia.com/terms/n/netincome.asp

    :param financials_df: Financial Statement of the specified company,
        row-indexed by line item; column 0 holds the latest period.
    :return: True if the latest Net Income is strictly positive.
    """
    latest_net_income = financials_df.iloc[financials_df.index.get_loc("Net Income"), 0]
    # bool() collapses numpy.bool_ to a plain Python bool; this replaces
    # the redundant `if x > 0: return True else: return False` pattern.
    return bool(latest_net_income > 0)
def min_max(input):
    """
    Return a tuple (min, max) of the input list. Assumes input is a
    non-empty numeric list.
    """
    smallest = min(input)
    largest = max(input)
    return smallest, largest
import requests
def get_rss(url):
    """
    Get an RSS feed from a given url.

    :param url: url of the resource containing the RSS feed
    :return: text of the response, or None on request failure
    """
    try:
        return requests.get(url, timeout=1.0).text
    except (requests.RequestException, ValueError):
        return None
import re
def parse_write_collections(script):
    """Extract collection names marked for writing (``// write <name>``
    comments) from a migration script."""
    write_marker = r'//\s*write\s*([\w-]+)'
    return re.findall(write_marker, script)
import hashlib
def sha1_hash(string):
    """Return the 20-byte sha1 hash of a bytes string."""
    hasher = hashlib.sha1(string)
    return hasher.digest()
from typing import Iterable
def aslist(item):
    """
    Wrap a single value in a list, or return the value itself when it is
    already a list. Strings are treated as scalars; other iterables are
    materialized into a list.
    """
    if isinstance(item, list):
        return item
    if isinstance(item, str):
        return [item]
    if isinstance(item, Iterable):
        return list(item)
    return [item]
from typing import Optional
from typing import Tuple
def check_interval_included(element1: dict, element2: dict) -> Optional[Tuple[dict, dict]]:
    """
    Comparison of two entities on start and end positions to find if they are nested
    Parameters
    ----------
    element1 : dict
    element2 : dict
        both of them in the following format
        {
            'entity': str,
            'word': str,
            'startCharIndex': int,
            'endCharIndex': int
        }
    Returns
    -------
    If there is an entity to remove among the two returns a tuple (element to remove, element to keep)
    If not, returns None
    """
    # Case 1: element1 lies entirely within element2 -> drop element1.
    if ((element1 != element2) and (element1['startCharIndex'] >= element2['startCharIndex']) and
            (element1['endCharIndex'] <= element2['endCharIndex'])):
        return element1, element2
    # Case 2 (mirror of case 1): element2 lies entirely within element1 -> drop element2.
    if ((element1 != element2) and (element2['startCharIndex'] >= element1['startCharIndex']) and
            (element2['endCharIndex'] <= element1['endCharIndex'])):
        return element2, element1
    # Case 3: element1 starts inside element2 but extends past its end
    # (right-side overlap) -> drop element1.
    if ((element1 != element2) and (element1['startCharIndex'] >= element2['startCharIndex']) and
            (element1['endCharIndex'] >= element2['endCharIndex']) and
            (element1['startCharIndex'] <= element2['endCharIndex']-1)):
        return element1, element2
    # Case 4 (mirror of case 3): element2 overlaps the right side of
    # element1 -> drop element2.
    # NOTE(review): this branch uses a strict '<' where case 3 uses '<=';
    # confirm the asymmetry is intentional.
    if ((element1 != element2) and (element2['startCharIndex'] >= element1['startCharIndex']) and
            (element2['endCharIndex'] >= element1['endCharIndex']) and
            (element2['startCharIndex'] < element1['endCharIndex']-1)):
        return element2, element1
    # No nesting or overlap detected.
    return None
def create_compose(ctx):
    """
    Create a docker-compose file for this project via the docker helper
    stored on the click context.
    """
    docker_helper = ctx.obj['docker']
    project = ctx.obj['project_name']
    return docker_helper.create_compose(project=project)
def flat_scale_parameters(band):
    """
    Return the flat scaling parameters for a given band.

    :param band: The band to use, either 'FUV' or 'NUV'.
    :type band: str
    :returns: tuple -- A three-element tuple containing the flat scaling
        parameters. Any other band prints an error and exits the process.
    """
    # The historical flat_correct / flat_t0 values kept by the original
    # implementation were never used and are omitted here.
    parameters_by_band = {
        'NUV': (1.9946352, -1.9679445e-09, 9.3025231e-19),
        'FUV': (1.2420282, -2.8843099e-10, 0.000),
    }
    if band not in parameters_by_band:
        print("Band not specified.")
        exit(1)
    return parameters_by_band[band]
import ast
def valid_check(code):
    """Check that the code provided by the user is syntactically correct.

    :param code: Source code string block.
    :type code: str
    :returns: True if it parses successfully, False otherwise.
    """
    try:
        # The parsed AST itself isn't needed; parsing alone validates syntax.
        ast.parse(code)
    except SyntaxError:
        return False
    return True
import hashlib
import hmac
def check_signature(token: str, hash: str, **kwargs) -> bool:
    """
    Verify a Telegram-style auth signature: the hexadecimal HMAC-SHA-256
    of the sorted "key=value" data-check-string, keyed with the SHA256
    hash of the bot's token, must equal *hash*.

    :param token: bot token used to derive the secret key
    :param hash: hex digest to verify against
    :param kwargs: all params received on auth
    :return: True when the computed signature matches
    """
    secret_key = hashlib.sha256(token.encode("utf-8")).digest()
    data_check_string = "\n".join(f"{key}={kwargs[key]}" for key in sorted(kwargs))
    expected = hmac.new(
        secret_key, data_check_string.encode("utf-8"), digestmod=hashlib.sha256
    ).hexdigest()
    return expected == hash
def get_response_names(response_count):
    """Return a list of response variable names up to the count.

    Example:
        >>> get_response_names(3)
        ['R1', 'R2', 'R3']
    """
    return [f'R{index}' for index in range(1, response_count + 1)]
def validate_bandskpoints(value, _):
    """
    Validate the bandskpoints input port: checks the kpoints list is set.
    Returns an error message string on failure, None otherwise.
    """
    if not value:
        return None
    try:
        value.get_kpoints()
    except AttributeError:
        return "bandskpoints requires a list of kpoints, use `set_kpoints`."
    return None
def checkml(ctx):
    """A check for the user circles.png.

    Args:
        ctx (commands.Context): The context of the command.

    Returns:
        bool: True if the user is circles.png.
    """
    circles_png_id = 262120465525506049
    return ctx.author.id == circles_png_id
import torch
def normalize_last_dim(tensor, eps=1e-6):
    """
    Normalize a tensor along its last dimension to zero mean and
    (approximately) unit L2 norm.

    :param tensor: a ND tensor
    :param eps: epsilon added to the norm to avoid division by zero
    :return: the tensor normalized along the last dimension axis
    """
    centered = tensor - tensor.mean(-1, keepdim=True)
    norms = torch.norm(centered, p=2, dim=-1, keepdim=True)
    return centered / (norms + eps)
def parse_dns_record(record: dict) -> dict:
    """
    Parse the DNS record.

    Replaces the "ttl" and "prio" string values with int values when they
    are present and not None. The record dict is mutated in place.

    :param record: the unparsed DNS record dict
    :return: the parsed DNS record dict
    """
    for field in ("ttl", "prio"):
        raw = record.get(field)
        if raw is not None:
            record[field] = int(raw)
    return record
def calc_cigar_bit(cigar_op):
    """Return the 2-bit cigar type for a cigar operation integer.

    Mirrors htslib's bam_cigar_type: the two bits for each op code are
    packed into the constant 0x3c1a7 and selected by shifting.
    """
    packed_table = 0x3c1a7
    shift = 2 * cigar_op
    return (packed_table >> shift) & 3
def determine_non_fit_area(shape, basic_shape, max_error=None):
    """
    Determines the area of the part of the basic shape that does not fit the
    given shape (i.e. what is left after differencing the two shapes,
    and optionally negative buffering with the max error).
    Parameters
    ----------
    shape : polygon
        The shape of the points.
    basic_shape : polygon
        The shape of the rectangle or triangle
    max_error : float, optional
        The maximum error (distance) a point may have to the shape.
    Returns
    -------
    area : float
        The area of the part of the basic shape that did not fit the
        given shape.
    """
    # Part of the basic shape not covered by the given shape.
    diff = basic_shape - shape
    if max_error is not None:
        # Shrink the leftover by the tolerance so slivers within
        # max_error of the shape are not counted.
        diff = diff.buffer(-max_error)
    # NOTE: removed a leftover debug print of the area here.
    return diff.area
import calendar
import time
def parse_timestamp(timestamp):
    """Parse a UTC timestamp of the form "yyyy-mm-ddThh:mm:ssZ".

    Args:
      timestamp: the timestamp, as a string

    Returns:
      the number of seconds since the UNIX Epoch, as described by the
      timestamp
    """
    parsed = time.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
    return calendar.timegm(parsed)
def calc_qv_delta_p_ref(n_delta_p_ref, vol_building):
    """
    Calculate the airflow at reference pressure according to 6.3.2 in [2].

    :param n_delta_p_ref: air changes at reference pressure [1/h]
    :param vol_building: building volume [m3]
    :returns: air volume flow rate at reference pressure (m3/h)
    """
    # Eq. (9) in [2]: volume flow = air-change rate times building volume.
    qv_delta_p_ref = n_delta_p_ref * vol_building
    return qv_delta_p_ref
import copy
def weak_execute(t, pn, m):
    """
    Fire a transition even if it is not fully enabled.

    Parameters
    ----------
    :param t: transition to execute
    :param pn: Petri net (not consulted here; kept for interface parity)
    :param m: marking to use

    Returns
    -------
    :return: the marking reached by firing :param t:
    """
    new_marking = copy.copy(m)
    # Consume tokens from all input places; drop places that hit zero
    # (or go negative, since the transition may not be enabled).
    for in_arc in t.in_arcs:
        new_marking[in_arc.source] -= in_arc.weight
        if new_marking[in_arc.source] <= 0:
            del new_marking[in_arc.source]
    # Produce tokens on all output places.
    for out_arc in t.out_arcs:
        new_marking[out_arc.target] += out_arc.weight
    return new_marking
def drop_empty_props(item):
    """Recursively remove dict entries whose value is the empty string.

    Lists and dicts are rebuilt; any other value is returned unchanged.
    """
    if isinstance(item, dict):
        return {
            key: drop_empty_props(value)
            for key, value in item.items()
            if value != ''
        }
    if isinstance(item, list):
        return [drop_empty_props(element) for element in item]
    return item
def rank(tensor):
    """Return the ``rank`` attribute of *tensor*'s class, or None if absent."""
    # getattr with a default replaces the previous try/except (which also
    # contained a dead `pass` statement after the assignment).
    return getattr(tensor.__class__, 'rank', None)
def get_result(response, ctxlen):
    """Process results from OpenAI API response.

    :param response: dict
        OpenAI API response containing a "logprobs" payload
    :param ctxlen: int
        Length of context (sliced away so only the predictions remain)
    :return:
        continuation_logprobs: float
            Sum of log probabilities of the continuation tokens
        is_greedy: bool
            whether argmax matches the given continuation exactly
    """
    payload = response["logprobs"]
    continuation_logprobs = sum(payload["token_logprobs"][ctxlen:])

    tokens = payload["tokens"]
    top_logprobs = payload["top_logprobs"]
    is_greedy = True
    for idx in range(ctxlen, len(tokens)):
        candidates = top_logprobs[idx]
        best_token = max(candidates, key=candidates.get)
        if best_token != tokens[idx]:
            is_greedy = False
            break

    return continuation_logprobs, is_greedy
def get_list(data):
    """
    Generate a list of strings from the "Date Time" values in ``data``.

    Parameters
    -------------------
    data = a pandas dataframe
    This contains a Date Time column and tidal data

    OUTPUT
    -------------------
    This function returns a list of strings
    """
    # Stringify each entry directly instead of indexing by position.
    return [str(entry) for entry in data["Date Time"]]
def get_dag_args(dags, args):
    """Given a list of Airflow dags and a list of argument names, return a
    dictionary of dag_ids and their corresponding args dictionaries.

    An argument is looked up on the dag object first and, only when it is
    missing or None there, in the dag's default_args. If a particular dag
    argument is not defined in either place, None is returned for it.
    """
    dags_args = {}
    for dag in dags:
        dag_args = {}
        for arg in args:
            value = getattr(dag, arg, None)
            if value is None:
                # Explicit None check instead of ``or``: keeps falsy values
                # such as 0, False or "" that are set directly on the dag.
                value = dag.default_args.get(arg)
            dag_args[arg] = value
        dags_args[dag.dag_id] = dag_args
    return dags_args
def in_custom_unit(td, seconds_per_unit, unit_name):
    """Return a string describing the number of whole 'unit_name' in 'td'.

    'seconds_per_unit' is used as the divisor to determine how many whole
    units are present in 'td'. If the number of units is other than 1, an
    's' is appended to the unit name to make it plural.

    Usage examples:

        24 hours expressed in days
        >>> import datetime
        >>> in_custom_unit(datetime.timedelta(seconds=60*60*24), 60*60*24, 'day')
        '1 day'

        50 hours expressed in days
        >>> in_custom_unit(datetime.timedelta(seconds=60*60*50), 60*60*24, 'day')
        '2 days'

    :td: a datetime.timedelta
    :returns: a string describing the supplied 'td'
    """
    whole_units = int(td.total_seconds() // seconds_per_unit)
    label = unit_name if whole_units == 1 else unit_name + 's'
    return '{} {}'.format(whole_units, label)
def MMD2u_estimator(K, m, n):
    """
    Compute the MMD^2_u unbiased statistic.

    This implements the unbiased MMD^2_u estimator of Equation (3) in
    Gretton et al., Journal of Machine Learning Research 13 (2012) 723-773.

    :param K: numpy-array, the pair-wise kernel matrix
    :param m: int, dimension of the first data set
    :param n: int, dimension of the second data set
    :return: float, an unbiased estimate of MMD^2_u
    """
    K_xx = K[:m, :m]
    K_yy = K[m:, m:]
    K_xy = K[:m, m:]
    # Within-sample terms use off-diagonal sums only (unbiasedness).
    term_x = (K_xx.sum() - K_xx.diagonal().sum()) / (m * (m - 1.0))
    term_y = (K_yy.sum() - K_yy.diagonal().sum()) / (n * (n - 1.0))
    term_xy = 2.0 * K_xy.sum() / (m * n)
    return term_x + term_y - term_xy
def _attrgetter(attr):
"""
Return a callable object that fetches attr from its operand.
Unlike operator.attrgetter, the returned callable supports an extra two
arg form for a default.
"""
def fn(obj, *args):
return getattr(obj, attr, *args)
return fn | 82cb7e663875a9900f73fa8fecd38a41085b163e | 100,436 |
def overwrite(_old, new):
    """Replace the old value with the new value.

    Args:
        _old: old value to be overwritten (intentionally ignored)
        new: new value to overwrite with

    Returns:
        the new value
    """
    return new
from typing import List
from typing import Tuple
def column_labels_level(column_labels: List[Tuple]) -> int:
    """Return the level (number of components) of the column index.

    All labels must share the same length; a None label counts as level 1.
    """
    if not column_labels:
        return 1
    levels = {1 if label is None else len(label) for label in column_labels}
    # Mixed-level labels are a programming error.
    assert len(levels) == 1, levels
    return next(iter(levels))
def calculate_value(mask, value):
    """ Calculates value with mask applied.

    Each '0' or '1' in ``mask`` forces the corresponding bit of the 36-bit
    binary representation of ``value``; 'X' leaves the bit unchanged.

    :param mask: mask string of '0', '1' and 'X' characters
    :param value: int value
    :return: int value with the mask applied
    """
    # Use a mutable list of bit characters instead of re-slicing the
    # string on every masked position (avoids O(n^2) string rebuilds).
    bits = list('{0:036b}'.format(value))
    for i, mask_char in enumerate(mask):
        if mask_char != 'X':
            bits[i] = mask_char
    return int(''.join(bits), 2)
def check_arg(name, value, exp_type, exp_range=()):
    """
    Checks a value against an expected type and range.
    Used for data preprocessing and projections.

    Keyword Arguments:
    - name: name of the argument, for error messages
    - value: value
    - exp_type: the type that the value should have
    - exp_range: in case of a number, a tuple or list with (min, max);
        in case of a string, a tuple or list with all options;
        if empty (default), all values of the correct type will be allowed

    Returns:
    - Value if valid

    Raises:
    - Exception if invalid
    """
    # identity check: ``== None`` can misfire on objects with custom __eq__
    if value is None:
        raise Exception('Missing argument "{}"'.format(name))
    # check type
    if not isinstance(value, exp_type):
        # accept ints for floats
        if not (exp_type is float and type(value) is int):
            raise Exception('Invalid argument "{}" with value "{}", should be {} but is "{}"'.format(
                name, value, exp_type, type(value)))
    # check range for strings and numbers
    if exp_type == str and exp_range and value not in exp_range:
        raise Exception(
            'Invalid argument "{}", value must be in {}'.format(name, exp_range))
    elif exp_type == int or exp_type == float:
        if exp_range and not exp_range[0] <= value <= exp_range[1]:
            raise Exception(
                'Invalid argument "{}", value must be in {}'.format(name, exp_range))
    return value
import pkg_resources
def matches_version_range(version, version_range, name=None):
    """Return whether a version matches a given range.

    The version is compared against a range specification of the form
    ``name[>=]specifier``. When ``name`` is given, it must also match
    the project name in the version range.

    Args:
        version (unicode):
            The version to compare against the range.

        version_range (unicode):
            The version range.

        name (unicode, optional):
            The optional name to compare against the one in the version
            range.

    Returns:
        bool:
            ``True`` if the version and, optionally, the name matches.
    """
    requirement = pkg_resources.Requirement.parse(version_range)
    if name and requirement.project_name != name:
        return False
    matching = list(requirement.specifier.filter([version]))
    return len(matching) > 0
def coord_Array2Im(x_arr, y_arr, origin=1):
    """Convert numpy array coordinates to image coordinates.

    Note the axis swap: array rows map to image Y and columns to image X,
    with ``origin`` shifting from 0-based array indexing.
    """
    X_IMAGE = y_arr + origin
    Y_IMAGE = x_arr + origin
    return X_IMAGE, Y_IMAGE
def enum_choices(cls):
    """Return a list of the member names of the given Enum class."""
    names = []
    for member in cls:
        names.append(member.name)
    return names
def cat_imputer(trfm, col_names):
    """
    Build the preprocessing dictionary for Sklearn's Imputer.

    Parameters
    ----------
    trfm :
        Contains the Sklearn's Imputer preprocessing instance
    col_names : list
        Contains list of feature/column names.
        The column names may represent the names of preprocessed attributes.

    Returns
    -------
    pp_dict : dictionary
        Returns a dictionary that contains attributes related to Imputer
        preprocessing.
    """
    # Imputed columns keep their original names; no fields are derived.
    return {
        'mining_strategy': "asMode",
        'mining_replacement_val': trfm.fill_,
        'mining_attributes': col_names,
        'der_fld': [],
        'der_col_names': col_names,
    }
def _func_parallelfraction(f, p, n):
"""
Model function to calculate the parallel fraction.
:param f: Actual parallel fraction parameters values
:param p: Numbers of cores used on model data
:param n: Problems size used on model data
:return: calculated parallel fraction value
"""
fp = f[0] + f[1]/p + f[2]*pow(f[3], n)
return max(min(fp, 1), 0) | 3c54174c14f919f9671551a77886fa886ef66e6e | 100,461 |
def get_tensor_children(tensor):
    """ Get all calculation and data parent tensors (Not read). """
    # Collect this tensor plus, recursively, every output tensor of its
    # producing op whose name does not contain 'read:0' (variable-read
    # tensors are excluded).
    # NOTE(review): termination depends on tensor.op.outputs not looping
    # back to tensors already visited — confirm against the framework's
    # graph semantics before relying on this for arbitrary tensors.
    children_list = []
    children_list.append(tensor)
    if tensor.op:
        for t in tensor.op.outputs:
            if not 'read:0' in t.name:
                children_list += get_tensor_children(t)
    # De-duplicate via set(); the ordering of the result is therefore
    # not guaranteed.
    return list(set(children_list))
import hashlib
def hash_file(path, hash_type):
    """Create a hash of a file's contents.

    Arguments:
        path (str) : the path of the file
        hash_type (str) : the hash algorithm name (e.g. "sha256")

    Returns:
        hashlib hash object : the updated hash object (call ``.digest()``
        or ``.hexdigest()`` on it to obtain the hash value)
    """
    h = hashlib.new(hash_type)
    with open(path, "rb") as f:
        # Read in 1 MiB chunks so large files are hashed without being
        # loaded fully into memory; iter() stops at EOF (empty bytes).
        for chunk in iter(lambda: f.read(2 ** 20), b""):
            h.update(chunk)
    return h
def set_document_cookie_disabled(disabled: bool) -> dict:
    """Build the command dict that toggles the document.cookie API.

    Parameters
    ----------
    disabled: bool
            Whether document.coookie API should be disabled.
    **Experimental**
    """
    command = {
        "method": "Emulation.setDocumentCookieDisabled",
        "params": {"disabled": disabled},
    }
    return command
def decode_to_unicode(data):
    """Recursively decode byte strings (UTF-8) to unicode strings."""
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, dict):
        return {
            decode_to_unicode(key): decode_to_unicode(value)
            for key, value in data.items()
        }
    if isinstance(data, list):
        return [decode_to_unicode(element) for element in data]
    return data
def get_player_material(board):
    """Counts the number of pieces on the current board for each player.

    Args:
        board (pgn board): Current board

    Returns:
        [int]: material of the white player
        [int]: material of the black player
    """
    # clear board to read out information
    information = str(board).replace("\n", " ")
    # Lowercase letters denote black pieces, uppercase white ones;
    # sum over the six piece letters instead of six chained count() calls.
    black_pieces = 'rnbqkp'
    materialPlayer_B = sum(information.count(piece) for piece in black_pieces)
    materialPlayer_W = sum(information.count(piece.upper()) for piece in black_pieces)
    # return materials for each player
    return materialPlayer_W, materialPlayer_B
def num_leading_line_spaces(istr, start, pound=0):
    """Count the leading whitespace characters of the line beginning at
    ``start`` (newline characters are not counted, as they end a line).

    If ``pound`` is set, a single leading '#' is skipped over (and is
    included in the returned count).

    Returns 0 when ``start`` is past the end, or when nothing but
    whitespace remains in the string.
    """
    length = len(istr)
    if start < 0:
        start = 0
    if length < 1 or length <= start:
        return 0

    pos = start
    if pound and istr[pos] == '#':
        pos += 1
    while pos < length and istr[pos].isspace() and istr[pos] != '\n':
        pos += 1

    if pos == length:
        return 0  # only whitespace found
    return pos - start
def make_node_barrier(node):
    """Turn the given node into a hard barrier and return it."""
    node.make_barrier()
    # Flag it as a hard barrier on top of the regular barrier state.
    node.is_hard_barrier = True
    return node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.