content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def test_verilog(request):
"""Test Verilog translation rather than python."""
return request.config.option.test_verilog | d9723d68cffde9393b5e46c3da044f0ea7a35eda | 116,495 |
def get_scale_factor(cmip6_model):
    """Return the fileset scale factor for the given CMIP6 model.

    UKESM and HadGEM3 family models use 2.0; every other model gets the
    default of 1.0.

    :param cmip6_model: model name string
    :return: scale factor (float)
    """
    doubled = any(family in cmip6_model for family in ("UKESM", "HadGEM3"))
    return 2.0 if doubled else 1.0
def whitespace(line):
    """Return the index of the first character of *line* that is not a space.

    Only the space character counts (tabs do not); a line consisting
    entirely of spaces yields len(line).
    """
    index = 0
    while index < len(line) and line[index] == " ":
        index += 1
    return index
def vect3_cross(u, v):
    """Return the cross product u x v of two 3-d vectors.

    u, v (3-tuple): 3d vectors
    return (3-tuple): 3d vector
    """
    ux, uy, uz = u[0], u[1], u[2]
    vx, vy, vz = v[0], v[1], v[2]
    return (uy * vz - uz * vy,
            uz * vx - ux * vz,
            ux * vy - uy * vx)
def read_meta_file(file_name):
    """Read key-value pairs from a dumpi meta file.

    Each non-blank line must look like ``key=value``.  The line is split
    on the *first* ``=`` only, so values may themselves contain ``=``
    characters (the original split on every ``=`` and crashed on such
    lines).  Blank lines are ignored.

    Parameters
    ----------
    file_name : str
        Path of the meta file.

    Returns
    -------
    dict
        A dictionary of key-value pairs.

    Raises
    ------
    ValueError
        If a non-blank line contains no ``=``.
    """
    pairs = {}
    with open(file_name) as meta_file:
        for line in meta_file:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines instead of crashing
            key, value = line.split("=", 1)
            pairs[key] = value
    return pairs
def range_value(start_val: str, end_value: str) -> str:
    """Build a Sentinel-API compliant range expression (e.g. ``[1 TO 9]``)
    for range-searchable fields such as orbit_number."""
    return "[{} TO {}]".format(start_val, end_value)
def update_ema_variables(ema_model, model, alpha_teacher, iteration):
    """
    Update ``ema_model`` in place as an exponential moving average of ``model``.

    Args:
        ema_model: model to update (its parameters are mutated in place)
        model: model from which to update parameters
        alpha_teacher: value for weighting the ema_model (EMA decay factor)
        iteration: current iteration (used to ramp the decay up early on)
    Returns: ema_model, with parameters updated following the exponential moving average of [model]
    """
    # Use the "true" average until the exponential average is more correct:
    # for small iteration counts the effective decay is below alpha_teacher.
    alpha_teacher = min(1 - 1 / (iteration*10 + 1), alpha_teacher)
    # In-place slice assignment keeps each EMA parameter's storage (and any
    # optimizer references to it) intact.  Assumes both models yield their
    # parameters in the same order -- TODO confirm.
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:]
    return ema_model
def fallback_serializer(_):
    """``json.dumps`` ``default`` hook: represent any object that JSON
    cannot serialize as the placeholder string '<not serializable>'."""
    return '<not serializable>'
import torch
def get_pmf(prelim_assignment):
    """
    Calculates the probability mass function (pmf), here simplified to the
    number of weights assigned to a specific centroid divided by the number
    of all weights. The preliminary assignment considers only the minimal
    distance from centroids to weights as cost.
    With "spars_bound" we ensure that at least 50% of all weights would be
    assigned to the zero-centroid w_0 such that the entropy score
    (information content) for w_0 is always the lowest.
    Parameters:
    -----------
    prelim_assignment:
        Minimal arguments for all 3 centroid distances to all layer weights
        (tensor of values in {0, 1, 2} -- presumably indices for w_n, w_0,
        w_p in that order; TODO confirm)
    Returns:
    --------
    pmf_prelim:
        Percentage frequencies of -only distance dependent- centroid assignments (w_n, w_0, w_p)
        with pmf[w_n] + pmf[w_0] + pmf[w_p] = 1 and pmf[w_0] always > 0.5
    """
    # Default of ones (not zeros): a centroid that received no assignments
    # still gets a count of 1 -- presumably to avoid zero probabilities
    # downstream; note this means the pmf only sums exactly to 1 in the
    # three-centroid case. TODO confirm this is intentional.
    C_counts = torch.ones(3)
    C_val, C_cts = torch.unique(prelim_assignment, return_counts=True)
    # For the usual case that layer weights are assigned to three centroids (ternary)
    if C_cts.shape[0] == 3:
        C_counts = C_cts
    # The following two cases, especially the last one, should not occur as precautions were taken such that the
    # w_0 centroid can't absorb all assignments
    # If layer weights are assigned to two only centroids (binary)
    elif C_cts.shape[0] == 2:
        # Place the two observed counts into the slots of the two centroids
        # that are present; the missing centroid keeps its default count of 1.
        if 0 not in C_val:
            C_counts[1] = C_cts[0]
            C_counts[2] = C_cts[1]
        if 1 not in C_val:
            C_counts[0] = C_cts[0]
            C_counts[2] = C_cts[1]
        if 2 not in C_val:
            C_counts[0] = C_cts[0]
            C_counts[1] = C_cts[1]
    # If layer weights are assigned to only one centroid
    elif C_cts.shape[0] == 1:
        if (0 not in C_val and 1 not in C_val):
            C_counts[2] = C_cts[0]
        if (0 not in C_val and 2 not in C_val):
            C_counts[1] = C_cts[0]
        if (1 not in C_val and 2 not in C_val):
            C_counts[0] = C_cts[0]
    pmf_prelim = torch.div(C_counts.type(torch.float32), torch.numel(prelim_assignment))
    # Ensuring that at least 50% of all weights are assigned to w_0 and probabilities still sum up to 1:
    # the deficit (spars_bound - pmf[w_0]) is taken from w_n and w_p in
    # proportion to their current mass.
    spars_bound = 0.5
    if pmf_prelim[1] < spars_bound:
        pmf_prelim[0] -= (pmf_prelim[0]/(pmf_prelim[0] +
                          pmf_prelim[2]))*(spars_bound - pmf_prelim[1])
        pmf_prelim[2] -= (pmf_prelim[2] / (pmf_prelim[0] +
                          pmf_prelim[2])) * (spars_bound - pmf_prelim[1])
        pmf_prelim[1] = spars_bound
    return pmf_prelim
import math
def logit(x: float) -> float:
    """Return the log-odds of *x*: log(x / (1 - x)) for x in (0, 1)."""
    odds = x / (1 - x)
    return math.log(odds)
def get_experiment_setting(args):
    """
    Build the experiment-settings dict for the methods implemented here
    (1- sarsa_lambda, 2- MuZero).

    Args:
        args.env: str {MountainCar},
        args.method: str {sarsa_lambda, MuZero}
        args.save: {True, False}
    The number of training steps and test steps should be adopted for each
    domain.
    Returns:
        experiment_settings dict
    """
    experiment_settings = {
        'env': args.env,
        'method': args.method,
        'num_train_steps': 5000000,  # 5m for sarsa, 200k for muzero
        'num_transition_steps': 5000,
        'num_test_steps': 30000,
        'num_datapoints': 50,
        'num_runs': 5,
        'save': args.save,
        'filename': None,
    }
    # Cannot record more datapoints than there are test steps.
    experiment_settings['num_datapoints'] = min(
        experiment_settings['num_datapoints'],
        experiment_settings['num_test_steps'])
    return experiment_settings
def before(process):
    """Return the pexpect process' ``before`` output decoded as UTF-8 and
    stripped of surrounding whitespace."""
    text = process.before.decode("utf-8")
    return text.strip()
def get_hours(time_str : str) -> float:
    """Convert an ``HH:MM:SS[.frac]`` time string to hours as a float.

    Fractional seconds are truncated, not rounded.
    """
    hours, minutes, seconds = time_str.split(':')
    whole_seconds = seconds.partition('.')[0]
    return int(hours) + int(minutes) / 60 + int(whole_seconds) / 3600
def counter_path(path, counter):
    """
    Append a zero-padded 10-digit counter to *path*, producing a
    ZooKeeper-style sequential node path.

    :param path: The full path before the counter
    :param counter: An integer used as the counter
    :rtype: a zookeeper path with a counter.
    """
    return f"{path}{counter:010d}"
import pkg_resources
def get_version(package_name, default='Unknown'):
    """Returns the installed version string of *package_name*, or *default*
    when the distribution cannot be found."""
    try:
        return pkg_resources.get_distribution(package_name).version
    except (ImportError, pkg_resources.DistributionNotFound):
        # NOTE(review): pkg_resources is deprecated in favour of
        # importlib.metadata; left unchanged to preserve behaviour.
        return default
def _get_square_matrix(M, x, y, size):
"""Extract square part with a side of size of matrix M from (x,y) point"""
return [[M[i][j] for j in range(x, x+size)] for i in range(y,y+size)] | c4049945be7a255a2744555deea17372401b6b60 | 116,530 |
def parse_configurations(payload):
    """
    Parse the configurations to perform from *payload*.

    Args:
        payload: mapping of sensor ID -> value to set.
    Returns:
        A list of (sensor_id, value) tuples, one per configuration.
    """
    return [(sensor_id, payload[sensor_id]) for sensor_id in payload]
import math
def get_magAB_from_flambda(flambda, wlength):
    """
    Converts a flux density f_lambda at a given wavelength to an AB
    magnitude.  (The previous docstring described the inverse direction and
    had the parameter meanings swapped.)
    :param flambda: flux density f_lambda
    :type flambda: float
    :param wlength: wavelength value [nm]
    :type wlength: float
    :return: the mag_AB value
    :rtype: float
    """
    # transform from flambda to fnu via fnu = lambda^2 / c * flambda;
    # the constant is presumably c in the unit system matching the nm
    # wavelength and flambda units used here -- TODO confirm.
    fnu = (wlength * wlength) / 2.99792458e+16 * flambda
    # compute mag_AB (-48.6 is the standard AB zero point in cgs)
    mag_AB = -2.5 * math.log10(fnu) - 48.6
    # return the mag_AB
    return mag_AB
import torch
def recurrent_state_as_numpy(recurrent_state):
    """Convert a recurrent state from torch.Tensor to numpy.ndarray.

    Handles ``None`` (returned unchanged), single tensors, and (possibly
    nested) tuples of tensors.

    Args:
        recurrent_state (object): Recurrent state in torch.Tensor.
    Returns:
        object: Recurrent state in numpy.ndarray.
    Raises:
        ValueError: for any other input type.
    """
    if recurrent_state is None:
        return None
    if isinstance(recurrent_state, torch.Tensor):
        return recurrent_state.detach().cpu().numpy()
    if isinstance(recurrent_state, tuple):
        converted = [recurrent_state_as_numpy(item) for item in recurrent_state]
        return tuple(converted)
    raise ValueError("Invalid recurrent state: {}".format(recurrent_state))
from pathlib import Path
def get_live_toml_file(toml_file: Path) -> Path:
    """
    Return the "live" variant of *toml_file* when one exists on disk.

    Parameters
    ----------
    toml_file: Path
        User-provided path to the TOML file.

    Returns
    -------
    Path
        ``<toml_file>_live`` if that file exists, else *toml_file* itself.
    """
    candidate = Path(f"{toml_file}_live")
    return candidate if candidate.exists() else toml_file
def encode_message(chat_name, port, message_type, sender_id, message, length):
    """Encode a message for sending, in the underscore-delimited wire format
    ``ChatName_ListenPort_type_id_content_length``.

    The length field only matters when sending images or files.
    """
    parts = (chat_name, port, message_type, sender_id, message, length)
    return '_'.join(str(part) for part in parts).encode()
import torch
def run_mat_interp_back(kdat, coef_mat_real, coef_mat_imag, griddat):
    """Interpolates kdat to on-grid coordinates with input sparse matrices.
    Args:
        kdat (tensor): The off-grid frequency data; dim 1 holds the real
            (index 0) and imaginary (index 1) components.
        coef_mat_real (sparse tensor): The real interpolation coefficients stored
            as a sparse tensor.
        coef_mat_imag (sparse tensor): The imaginary interpolation coefficients stored
            as a sparse tensor.
        griddat (tensor): A tensor to store the outputs in (written in place).
    Returns:
        griddat (tensor): kdat interpolated to on-grid locations.
    """
    # Reshape each component so the sparse mm contracts over the off-grid
    # sample dimension.
    real_kdat = kdat[:, 0, :].t().reshape(-1, kdat.shape[0])
    imag_kdat = kdat[:, 1, :].t().reshape(-1, kdat.shape[0])
    # The adjoint (backward) interpolation uses the transposed coefficients.
    coef_mat_real_t = coef_mat_real.t()
    coef_mat_imag_t = coef_mat_imag.t()
    # apply multiplies with complex conjugate:
    # (A_r - i A_i)^T (x_r + i x_i) => real part = A_r^T x_r + A_i^T x_i
    griddat[:, 0, :] = torch.mm(
        coef_mat_real_t,
        real_kdat
    ).t() + torch.mm(
        coef_mat_imag_t,
        imag_kdat
    ).t()
    # ... and imaginary part = A_r^T x_i - A_i^T x_r
    griddat[:, 1, :] = torch.mm(
        coef_mat_real_t,
        imag_kdat
    ).t() - torch.mm(
        coef_mat_imag_t,
        real_kdat
    ).t()
    return griddat
def enumerate_bases(bases):
    """
    Map a tuple of multiple-sequence-alignment bases (any characters) to a
    tuple of integers: rows sharing a base share an integer, numbered in
    first-seen order.
    >>> enumerate_bases(("A", "A", "C", "C"))
    (0, 0, 1, 1)
    >>> enumerate_bases(("C", "C", "A", "A"))
    (0, 0, 1, 1)
    >>> enumerate_bases(("C", "-", "T", "C"))
    (0, 1, 2, 0)
    >>> enumerate_bases(("A", "C", "T", "G"))
    (0, 1, 2, 3)
    >>> enumerate_bases(("A", "A", "A", "A"))
    (0, 0, 0, 0)
    """
    base_to_index = {}
    integers = []
    for base in bases:
        # setdefault assigns the next unused index the first time a base
        # is seen, and returns the existing index afterwards.
        integers.append(base_to_index.setdefault(base, len(base_to_index)))
    return tuple(integers)
def filter_most_recent(Ldb):
    """
    Return only the rows belonging to the most recent run (latest 'time').
    """
    latest_run = Ldb.sort_values('time')['run_ID'].iloc[-1]
    return Ldb[Ldb['run_ID'] == latest_run]
def solvable(deps, names):
    """
    Return True when *names* form a solvable block, i.e. the set of names
    equals the union of their prerequisites in *deps*.
    """
    if not names:
        return False  # Missing self-reference.
    prerequisites = set()
    for name in names:
        prerequisites |= set(deps[name])
    return prerequisites == set(names)
def rreplace(string, old, new, occurrence = 1):
    """Replace up to *occurrence* instances of *old* with *new* in *string*,
    working from the right-hand end."""
    return new.join(string.rsplit(old, occurrence))
def kml_cb(mapping):
    """
    Create the KML ScreenOverlay text for a colorbar png file overlay.

    ``mapping`` must supply name, cb_file, xfrac and yfrac keys.
    """
    template = """
    <ScreenOverlay>
    <name>{name:s}</name>
    <Icon>
    <href>{cb_file:s}</href>
    </Icon>
    <overlayXY x="{xfrac:.4f}" xunits="fraction" y="{yfrac:.4f}" yunits="fraction"/>
    <screenXY x="{xfrac:.4f}" xunits="fraction" y="{yfrac:.4f}" yunits="fraction"/>
    </ScreenOverlay>
    """
    return template.format(**mapping)
def sense_instances(instances, sense):
    """
    Return the members of **instances** whose first sense equals **sense**.
    Example:
        sense_instances(senseval.instances('hard.pos'), 'HARD1')
    """
    matching = []
    for instance in instances:
        if instance.senses[0] == sense:
            matching.append(instance)
    return matching
import tempfile
def get_temp_filename(mode="w+b", buffering=-1, encoding=None, newline=None, suffix=None, prefix=None, dir=None) -> str:
    """Get temp filename
    e.g
    j.sals.fs.get_temp_filename(dir="/home/rafy/") -> '/home/rafy/tmp6x7w71ml'
    Args:
        mode (str, optional): [description]. Defaults to "w+b".
        buffering (int, optional): buffering. Defaults to -1.
        encoding ([type], optional): encoding . Defaults to None.
        newline ([type], optional): Defaults to None.
        suffix ([type], optional): ending suffix. Defaults to None.
        prefix ([type], optional): prefix . Defaults to None.
        dir ([type], optional): where to create the file. Defaults to None.
    Returns:
        [str]: temp filename
    """
    # NOTE(review): this creates (and leaves open) a NamedTemporaryFile just
    # to read its name; with the default delete=True the file disappears once
    # the object is garbage-collected, so only the generated name is reliable.
    # Confirm callers use the name alone and do not expect the file to exist.
    return tempfile.NamedTemporaryFile(mode, buffering, encoding, newline, suffix, prefix, dir).name
def NormalizePort(port, str_ok=True):
    """Validate *port* and return it as an integer.
    Args:
        port: The port to normalize.
        str_ok: Accept |port| in string. If set False, only accepts
            an integer. Defaults to True.
    Returns:
        A port number (integer) in [1, 65535].
    Raises:
        ValueError: if the value is not a valid port number.
    """
    err_msg = '%s is not a valid port number.' % port
    if not str_ok and not isinstance(port, int):
        raise ValueError(err_msg)
    number = int(port)
    if not 1 <= number <= 65535:
        raise ValueError(err_msg)
    return number
def env_dir(tmpdir):
    """Pytest fixture: create an extra (external) config directory holding an
    empty ``fromenv.cfg`` file and return it."""
    external = tmpdir.mkdir("fromenv")
    external.join('fromenv.cfg').write('')
    return external
import html
def unicodeToHTMLEntities(text):
    """Convert *text* to ASCII-safe HTML: escape &, <, >, and quotes, and
    turn non-ASCII characters into numeric character references
    (e.g. '&' becomes '&amp;')."""
    escaped = html.escape(text, True)
    ascii_bytes = escaped.encode('ascii', 'xmlcharrefreplace')
    return ascii_bytes.decode('ascii')
def get_essential( m ):
    """ Map each "essential" LED to the triangle it exclusively lights.
    An essential LED is the only LED lighting a given triangle surface;
    without it that triangle would never be lit.
    Format: 'ess[led] = tri'
    """
    return {leds[0]: tri for tri, leds in m.items() if len(leds) == 1}
from typing import OrderedDict
def create_object_dictionary(raw_object, fields):
    """
    Return ``(dict_object, fields_to_be_ignored)`` for *raw_object*.

    Every readable field becomes a key of ``dict_object``; each value is a
    dictionary with the field's ``'value'`` and, for ForeignKey fields, the
    related object's ``'id'``.  Fields whose value cannot be read are
    collected in ``fields_to_be_ignored`` instead of aborting.
    """
    dict_object = OrderedDict()
    fields_to_be_ignored = []
    # walk over the fields
    for field in fields:
        try:
            dict_object[field] = {'value': getattr(raw_object, field.name)}
            if field.get_internal_type() == 'ForeignKey':
                dict_object[field]['id'] = getattr(raw_object, field.name + '_id')
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; Exception covers attribute/DB errors only.
            fields_to_be_ignored.append(field)
    return dict_object, fields_to_be_ignored
from pathlib import Path
import tempfile
def get_cachedir() -> Path:
    """Ensure ``$TMPDIR/friendly_data_cache`` exists and return it as a Path."""
    cache_path = Path(tempfile.gettempdir(), "friendly_data_cache")
    cache_path.mkdir(exist_ok=True)
    return cache_path
def knapsack_helper(value, weight, m, i, w):
    """Return the maximum value of the first *i* items within capacity *w*.

    ``m[i][w]`` memoizes the best value attainable with capacity w using
    only the first i items (entries < 0 mean "not yet computed"); this
    function fills in the smaller subproblems it needs on the way.
    ``value[i]`` / ``weight[i]`` describe item i for 1 <= i <= n.
    """
    if m[i][w] >= 0:
        return m[i][w]  # already memoized
    if i == 0:
        best = 0  # no items -> no value
    elif weight[i] <= w:
        # Item i fits: take the better of including or skipping it.
        take = knapsack_helper(value, weight, m, i - 1, w - weight[i]) + value[i]
        leave = knapsack_helper(value, weight, m, i - 1, w)
        best = max(take, leave)
    else:
        # Item i is too heavy for the remaining capacity.
        best = knapsack_helper(value, weight, m, i - 1, w)
    m[i][w] = best
    return best
from typing import IO
from io import StringIO
def input_stream() -> IO:
    """Input stream fixture.

    Returns an in-memory text stream containing the sample bag-containment
    rules (one rule per line) used by the tests.
    """
    return StringIO(
        """light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags."""
    )
def synchronized(f):
    """Decorator that serializes calls to a method under ``self._lock``.

    The wrapper acquires ``self._lock`` before invoking *f* and always
    releases it afterwards (the ``with`` block is exception-safe, matching
    the original try/finally).  ``functools.wraps`` preserves the wrapped
    method's name and docstring, which the original wrapper lost.
    """
    import functools

    @functools.wraps(f)
    def new_f(self, *args, **kwargs):
        with self._lock:
            return f(self, *args, **kwargs)
    return new_f
import re
def _GetIndexFromCapsule(capsule):
"""Returns a help doc index line for a capsule line.
The capsule line is a formal imperative sentence, preceded by optional
(RELEASE-TRACK) or [TAG] tags, optionally with markdown attributes. The index
line has no tags, is not capitalized and has no period, period.
Args:
capsule: The capsule line to convert to an index line.
Returns:
The help doc index line for a capsule line.
"""
# Strip leading tags: <markdown>(TAG)<markdown> or <markdown>[TAG]<markdown>.
capsule = re.sub(r'(\*?[[(][A-Z]+[])]\*? +)*', '', capsule)
# Lower case first word if not an abbreviation.
match = re.match(r'([A-Z])([^A-Z].*)', capsule)
if match:
capsule = match.group(1).lower() + match.group(2)
# Strip trailing period.
return capsule.rstrip('.') | f56aa6cba2909d8448aa2161f0fff18d3caaf666 | 116,583 |
import math
def norm(x):
    """
    Returns the L^2 norm of a vector x
    Parameters
    ----------
    x : np.ndarray : a numpy array of floats
    Returns
    -------
    float : the L^2 norm of x
    Examples
    --------
    >>> A = np.array([1.0, 2.0, 3.0, 3.0, 1.0, 1.0])
    >>> norm(A)
    5.0
    """
    # The original accumulated into a local named ``sum``, shadowing the
    # builtin; a generator expression with the builtin is both clearer and
    # a single pass.
    return math.sqrt(sum(component ** 2 for component in x))
def point_is_none(board, column: int, row: int) -> bool:
    """Return True iff ``board[row][column]`` exists and is None.

    Out-of-range coordinates print an error message and yield False.
    """
    try:
        return board[row][column] is None
    except IndexError:
        print("That is an invalid move, please try again")
        return False
def _escape_query_string(query_string: str):
"""Escape a query string to be used in a LIKE clause."""
return (
query_string
.replace("\\\\", "\\") # escape the escaper
.replace("%", "\\%") # escape %
.replace("_", "\\_") # escape _
.replace("*", "%") # allow '*' as the wildcard for any char (%)
.replace("?", "_") # allow '?' as the wildcard for a single char (_)
) | ed6b3c75a2f41be985c363b4836ab8c27ec07a9e | 116,609 |
def classify_image(classifier, labels, frame):
    """
    Classify an image, print the top prediction's label and score, and
    return every class prediction.

    Args:
        classifier: A ``vision.Classifier`` object.
        labels: The labels mapping for the Classifier model.
        frame: The image to classify.
    Returns:
        A list of all class predictions, ordered by score.
    """
    classes = classifier.get_classes(frame)
    top = classes[0]
    print(labels.get(top.id), top.score)
    return classes
def skip(app, what, name, obj, skip, options):
    """
    Sphinx ``autodoc-skip-member`` hook.

    @return False for ``__init__`` (so it is always documented); otherwise
    defer to the incoming *skip* decision.
    """
    # '!=' instead of 'is not': we are comparing string values, and identity
    # comparison against a literal is implementation-dependent (and raises a
    # SyntaxWarning on modern CPython).
    return skip and name != "__init__"
def subtask_data(subtask_name, parent):
    """Build the payload for a new, not-yet-completed subtask attached to
    *parent* (which must carry a 'gid' key)."""
    payload = dict(completed=False, name=subtask_name, parent=parent["gid"])
    return payload
def cleanData(delimiter, target):
    """ Split each string in *target* on *delimiter* and convert every token
    to an int.
    --param
        delimiter : string
        target : list of delimiter-separated digit strings (e.g. '07,12')
    --return
        list of lists of ints

    ``int()`` already accepts leading zeros ('07' -> 7), which replaces the
    original's manual leading-zero branch and fixes its IndexError crash on
    a bare '0' token.
    """
    return [[int(token) for token in entry.split(delimiter)]
            for entry in target]
def add_regressor(data, regressor, varname=None):
    """
    Add an extra-regressor column to an fbprophet-format DataFrame.

    Parameters
    ----------
    data : pandas.DataFrame
        The pandas.DataFrame in the fbprophet format.
    regressor : pandas.DataFrame
        A pandas.DataFrame containing the extra-regressor.
    varname : string
        Name of the column in `regressor` to copy into `data`.

    Returns
    -------
    pandas.DataFrame
        A copy of `data` with the extra-regressor column `varname` added;
        the input `data` is left untouched.
    """
    augmented = data.copy()
    augmented.loc[:, varname] = regressor.loc[:, varname]
    return augmented
def pytest_funcarg__issue_id(request):
    """
    Return the issue id for the current test, or ``None`` when no issue id
    applies.

    The id is read from the ``id`` attribute of the object produced by the
    ``issue`` funcarg; when that funcarg yields ``None``, so does this one.
    """
    issue = request.getfuncargvalue('issue')
    return issue.id if issue else None
def estimate_nmcmc(accept_ratio, old_act, maxmcmc, safety=5, tau=None):
    """ Estimate the chain's autocorrelation length from the acceptance
    fraction, using ACL = (2/acc) - 1 with a safety margin.  Adapted from
    CPNest:
    - https://github.com/johnveitch/cpnest/blob/master/cpnest/sampler.py
    - http://github.com/farr/Ensemble.jl
    Parameters
    ==========
    accept_ratio: float [0, 1]
        Ratio of accepted points to total points
    old_act: int
        The ACT of the last iteration
    maxmcmc: int
        The maximum length of the MCMC chain to use
    safety: int
        A safety factor applied in the calculation
    tau: int (optional)
        The ACT; estimated as maxmcmc/safety when not given.
    """
    if tau is None:
        tau = maxmcmc / safety
    if accept_ratio == 0.0:
        # Nothing accepted: gently inflate the previous estimate.
        estimate = (1 + 1 / tau) * old_act
    else:
        decay = 1. - 1. / tau
        correction = (safety / tau) * (2. / accept_ratio - 1.)
        estimate = decay * old_act + correction
    estimate = float(min(estimate, maxmcmc))
    return max(safety, int(estimate))
def _prep_sanitized_vars(prefix, data):
"""
Prepares parameterized variables for SQL statements for sanitized entry.
Args:
prefix (str): The prefix to give the variable placeholder names. The
format is <prefix>val<count>. This can be an empty string, but must be
unique within a given SQL statement (e.g. in an update statement, since
there are values for updating as well as possible values in the where
clause, calling this for each of those portions should use a different
prefix to give it a different namespace and avoid dict collisions).
data ({str:str/int/bool/datetime/enum/etc}): The data to be prepared,
where the keys are the column names and the values are the
python-type values to be used as the variable values.
Returns:
val_vars ({str:str/int/bool/datetime/enum/etc}): The mapping of variable
names to be used in the SQL statement to their corresponding values.
These variable names in the key portion of this dict are intended to be
used in the `%(<>)s` format in the SQL statement. These are in the same
order as the column names -- Python 3.7+ REQUIRED.
"""
val_vars = {}
for col in data:
val_vars[f'{prefix}val{len(val_vars)}'] = data[col]
return val_vars | b89304b1478f21925c9be6c77000d0aa2fd7cde8 | 116,630 |
import re
def derive_title_link(title):
    """Derive a link-safe anchor from *title*: spaces become underscores and
    any character outside [a-zA-Z0-9_~-.] is dropped."""
    underscored = re.sub(r'[\ ]', '_', title)
    return re.sub(r'[^a-zA-Z0-9_~\-\.]', '', underscored)
def getName(path):
    """Remove the extension from *path*
    ('/home/robo/Overcast.bag' -> '/home/robo/Overcast');
    a path without any '.' is returned unchanged."""
    root, separator, _extension = path.rpartition('.')
    return root if separator else path
import json
def _parse_tag_field(row):
"""Reading in a tag field and converting to a list of strings."""
if isinstance(row, (list, tuple)):
return row
if not isinstance(row, str):
row = str(row)
if row.startswith('[') and row.endswith(']'):
return json.loads(row)
if row == '-':
return []
if ',' in row:
return row.split(',')
return [row] | 3033adc0840058d22647403a57f70406ca44cf22 | 116,637 |
import re
def is_line_valid(string):
    """ Return True iff *string* contains at least one digit. """
    return re.search(r'\d', string) is not None
from typing import Counter
def sum_all_dates(counters):
    """Sum the per-date counters of every court into a single total.

    :param counters: A dict of name-counter pairs.
    :return: A Counter mapping each court to its total across all dates.
    """
    totals = Counter()
    for court_name, date_counter in counters.items():
        totals[court_name] += sum(date_counter.values())
    return totals
import math
def sphere_friction_coefficient(eta, d):
    """Stokes friction coefficient, 3*pi*eta*d, of a sphere with diameter
    *d* in a liquid of viscosity *eta*.

    Parameters
    ----------
    eta : float
        Dynamic / shear viscosity [Pa*s]
    d : float
        Sphere diameter [m]
    """
    coefficient = 3.0 * math.pi * eta * d
    return coefficient
import json
def get_groupsets(json_file):
    """Load groupset info from *json_file* and return it as a dictionary
    mapping group id -> {'groupsetid', 'groupsetname' (utf-8 bytes)}."""
    with open(json_file, 'r') as handle:
        payload = json.load(handle)
    groupsets = {}
    for groupset in payload['organisationUnitGroupSets']:
        groups = groupset['organisationUnitGroups']
        if not groups:
            continue  # skip groupsets with no member groups
        for group in groups:
            groupsets[group['id']] = {
                'groupsetid': groupset['id'],
                'groupsetname': groupset['name'].encode('utf-8'),
            }
    return groupsets
def _convert_platform_to_chromiumdash_platform(platform):
"""Converts platform to Chromium Dash platform.
Note that Windows in Chromium Dash is win64 and we only want win32."""
platform_lower = platform.lower()
if platform_lower == 'windows':
return 'Win32'
return platform_lower.capitalize() | 0cc9795ac9d5591189e4bf88687ce92b0cb4eadf | 116,645 |
def format_addr(address):
    """Render a TCP ``(host, port)`` tuple or Unix domain socket path as a
    string; a zero port yields just the host."""
    if not isinstance(address, tuple):
        return address
    host, port = address
    return '%s:%d' % (host, port) if port else host
def has_digits(s):
    """
    Check whether a string contains any digit characters.
    Arguments
        s : string
    Returns
        (bool) True / False
    """
    return any(char.isdigit() for char in s)
def isalnum( string, allowed_extra_chars='' ):
    """ Check whether *string* contains only alpha-numeric characters plus
    optionally allowed extra chars.

    The original called ``string.translate(None, chars)``, which is the
    Python 2 API and raises TypeError on Python 3; ``str.maketrans`` with a
    deletion set is the Python 3 equivalent.
    """
    stripped_name = string.translate( str.maketrans('', '', allowed_extra_chars) )
    return stripped_name.isalnum()
def add_chromsize_colors(sizes_dict, color):
    """Attach a matplotlib color to every entry of a chromsizes dict,
    in place: each value becomes a ``(size, color)`` tuple.  The mutated
    dict is also returned."""
    for chrom, size in sizes_dict.items():
        sizes_dict[chrom] = (size, color)
    return sizes_dict
def _get_resource(instance_list, identity):
"""
Return instance UUID by name or ID, if found.
"""
for i in instance_list.items:
if identity in (i.properties.name, i.id):
return i
return None | 97af9437d80db377f6d45c604d9986766d4b59bd | 116,665 |
def merge(df_w_subset, *dfs):
    """Convert DataFrames to numpy arrays, all row-aligned to the first
    DataFrame's index.

    Parameters
    ----------
    df_w_subset : :class:`pandas.DataFrame`
        The frame whose index every other frame is reindexed to.
    *dfs : :class:`pandas.DataFrame`
        The remaining frames to align and convert.

    Returns
    -------
    list of :class:`numpy.ndarray`
        Consistently sorted arrays, in the same order as the inputs.
    """
    arrays = [df_w_subset.values]
    for frame in dfs:
        arrays.append(frame.reindex(df_w_subset.index).values)
    return arrays
def points_with_error(dp):
    """Pull all data points out of *dp*.

    Args:
        dp: list of dicts, each describing a row with 'true_activity' and
            'predicted_activity' keys.
    Returns:
        tuple (list of true values, list of predicted values)
    """
    true_vals = [p['true_activity'] for p in dp]
    pred_vals = [p['predicted_activity'] for p in dp]
    return (true_vals, pred_vals)
import re
def remove_URLs(section_content):
    """
    Strip http/www/mailto URLs from the content of a section, then collapse
    the runs of spaces the removals leave behind.
    :param section_content: content of a section
    :return: content of a section without URLs
    """
    cleaned = section_content
    for pattern in (r'http\S+', r'www\S+', r'mailto\S+'):
        cleaned = re.sub(pattern, '', cleaned)
    # remove multiple consecutive spaces
    return re.sub(' +', ' ', cleaned)
import torch
def _pairwise_distances(embeddings, squared=False):
"""Calculate pairwise distances of the given embeddings
Args:
embeddings (torch.tensor): Embeddings of shape (batch_size, embed_dim)
squared (bool): Squared euclidean distance matrix
Returns:
dists (torch.tensor): Pairwise distances of shape (batch_size, batch_size)
"""
dot_prod = torch.matmul(embeddings, embeddings.T)
sq_norm = dot_prod.diagonal(0)
dists = sq_norm.unsqueeze(0) - 2.0 * dot_prod + sq_norm.unsqueeze(1)
# Due to computation errors some dists may be negative so we make them 0.0
dists = torch.clamp(dists, min=0.0)
if not squared:
# Gradient of sqrt is infinite when dists are 0.0
mask = dists.eq(0.0).float()
dists = dists + mask * 1e-16
dists = (1.0 - mask) * torch.sqrt(dists)
return dists | f4d284c42d871f3e187846ccd7d1312456aeb451 | 116,679 |
def a2idx(a):
    """
    Try to convert *a* to an index; return None on failure.
    A non-None result can safely be used as an index into arrays/matrices.
    """
    if hasattr(a, "__int__"):
        return int(a)
    if hasattr(a, "__index__"):
        return a.__index__()
    return None
def crop(img, w, h):
    """
    Return the centered w x h region of *img* (along its first two axes).
    """
    src_w, src_h = img.shape[0], img.shape[1]
    assert src_w >= w and src_h >= h, "cannot crop from {}x{} to {}x{}".format(src_w, src_h, w, h)
    x0 = (src_w - w) // 2
    y0 = (src_h - h) // 2
    return img[x0:x0 + w, y0:y0 + h]
def can_host_services(servers: list, services: list) -> bool:
    """Check whether a group of servers has enough capacity for a set of services.

    Args:
        servers (list): List of edge servers.
        services (list): List of services that we want to accommodate inside the servers.

    Returns:
        bool: True if every service could be placed on some server.
    """
    # Place the most demanding services first to limit resource wastage.
    ordered_services = sorted(services, key=lambda svc: -svc.demand)

    placed = 0
    for svc in ordered_services:
        # Best-fit heuristic: try the server with the least free capacity first.
        for candidate in sorted(servers, key=lambda sv: sv.capacity - sv.demand):
            if candidate.capacity >= candidate.demand + svc.demand:
                candidate.demand += svc.demand
                placed += 1
                break

    # Undo the tentative allocations by recomputing each server's demand.
    for sv in servers:
        sv.compute_demand()

    return placed == len(services)
import functools
def spawning_callback(nursery, async_callback=None):
    """Return a new function that spawns *async_callback*.
    This can be also used as a decorator::
        @spawning_callback(nursery)
        def on_click():
            print("one")
            await trio.sleep(1)
            print("two")
        button['command'] = on_click
    Note that :class:`~AsyncButton` does this to its commands
    automatically, so usually you don't need to use this with buttons.
    """
    if async_callback is None:
        # Decorator-factory form: remember the nursery, wait for the callback.
        def bind(callback):
            return spawning_callback(nursery, callback)
        return bind
    # Direct form: the returned callable spawns the callback in the nursery.
    return functools.partial(nursery.spawn, async_callback)
def clean_feature_names(df):
    """Return *df* with feature names lower-cased and spaces/newlines replaced by underscores."""
    renamed = {
        col: col.casefold().replace("\n", "_").replace(" ", "_")
        for col in df.columns
    }
    return df.rename(columns=renamed)
import re
def load_file(file) -> list:
    """
    Read a file containing a list of rectangle definitions, and return
    a list with the rectangle specs (all fields as ints). The position
    in the returned list is the rectangle's index.

    Each line looks like ``#1 @ 1,3: 4x4`` (id, left, top, width, height).

    :param file: Text file containing a rectangle definition on each
        line
    :return: List with parsed rectangle definitions, one list of ints
        per line
    """
    rectangles = []
    with open(file) as f:
        for line in f:
            # findall is robust to a missing trailing newline, unlike the
            # previous re.split('\D+', ...)[1:-1], which silently dropped
            # the last number when the final line had no '\n' terminator.
            rectangles.append([int(number) for number in re.findall(r'\d+', line)])
    return rectangles
from typing import List
def preprocess_list_query(queries: List[str]):
    """Join a list of conditions into the comma separated string the REST api expects."""
    # Strip stray whitespace from each condition before joining.
    stripped = (query.strip() for query in queries)
    return ",".join(stripped)
from typing import List
import re
def get_names_and_values_of_lines(lines: List[str]) -> dict:
    """
    All unnecessary code is deleted (annotations, doc).
    Only the name of the variable and the value is extracted.

    :param List[str] lines:
        List of strings with lines from a modelica file.
    :return:
        dict: Containing the names as key and values as value.

    Example:

    >>> lines = ['parameter Boolean my_boolean=true "Some description"',
    >>>          'parameter Real my_real=12.0 "Some description" annotation("Some annotation")']
    >>> output = get_names_and_values_of_lines(lines=lines)
    >>> print(output)
    {'my_boolean': True, 'my_real': 12.0}
    """
    res = {}
    for line in lines:
        line = line.replace(";", "")
        # Check if line is a commented line and if so, skip the line:
        if line.startswith("//"):
            continue
        # Remove part behind possible annotation:
        loc = line.find("annotation")
        if loc >= 0:
            line = line[:loc]
        # Remove possible brackets, like "param(min=0, start=5)"
        line = re.sub(r'[\(\[].*?[\)\]]', '', line)
        # And now any quotes / doc / strings
        line = re.sub(r'".*"', '', line)
        # If a value is present (e.g. for parameters) one "=" sign remains.
        if line.find("=") >= 0:
            # Split only on the FIRST "=": a stray second "=" that survived
            # the bracket/string stripping would otherwise make the tuple
            # unpacking raise a ValueError for the whole call.
            name_str, val_str = line.split("=", 1)
            name_str = name_str.strip()
            name = name_str.split(" ")[-1].replace(" ", "")
            val_str_stripped = val_str.replace(" ", "")
            if val_str_stripped in ["true", "false"]:
                value = val_str_stripped == "true"
            else:
                try:
                    value = float(val_str_stripped)
                except ValueError:
                    # Neither float, integer nor boolean, hence None
                    value = None
        # else no value is stored in the line
        else:
            line = line.strip()
            name = line.split(" ")[-1].replace(" ", "")
            value = None
        res.update({name: value})
    return res
def tfds_split_for_mode(mode):
    """Return the TFDS split to use for a given input dataset."""
    # The labels for the real ImageNet test set were never released, so
    # "test" accuracy numbers are conventionally taken from the ImageNet
    # validation split. The l2l_* modes carve a held-out 50046-example
    # validation slice out of the training set (the same count as in the
    # TuNAS paper, though the exact examples differ, so expect small
    # deviations from the reported results).
    splits = {
        'test': 'validation',
        'train': 'train',
        'l2l_valid': 'train[:50046]',
        'l2l_train': 'train[50046:]',
    }
    if mode not in splits:
        raise ValueError('Invalid mode: {!r}'.format(mode))
    return splits[mode]
import torch
def get_camera_wireframe(scale: float = 0.03):
    """
    Returns a wireframe of a 3D line-plot of a camera symbol.
    """
    # Key vertices: the four half-size body corners, a small "up" marker,
    # the camera center, and the focal point.
    corner_tl = 0.5 * torch.tensor([-2, 1.5, 4])
    up_base = 0.5 * torch.tensor([0, 1.5, 4])
    up_tip = 0.5 * torch.tensor([0, 2, 4])
    corner_tr = 0.5 * torch.tensor([2, 1.5, 4])
    corner_bl = 0.5 * torch.tensor([-2, -1.5, 4])
    corner_br = 0.5 * torch.tensor([2, -1.5, 4])
    center = torch.zeros(3)
    focal = torch.tensor([0, 0, 3])
    # Order matters: this is the polyline that traces out the wireframe.
    path = [
        corner_tl, up_base, up_tip, up_base, corner_tr, corner_br,
        corner_bl, corner_tl, center, corner_tr, corner_br, center,
        corner_bl, center, focal,
    ]
    return torch.stack([point.float() for point in path]) * scale
import math
def make_ids(n, prefix='row_'):
    """
    Return a list of ``n`` (integer) unique strings of the form
    ``prefix``<number>.
    """
    # Zero-pad to the number of digits in n so the ids sort lexically.
    width = int(math.log10(n)) + 1
    return [f"{prefix}{i:0{width}d}" for i in range(n)]
def matched_in_prev_blocks(gid, current_start, block_nodes):
    """
    Check whether gateway ``gid`` was matched in any block other than the
    one starting at ``current_start``.

    :param gid: gateway id to look up
    :param current_start: start key of the current block (excluded)
    :param block_nodes: mapping of block start -> iterable of node ids
    :return: True if ``gid`` appears among another block's nodes
    """
    return any(
        gid in nodes
        for start, nodes in block_nodes.items()
        if start != current_start
    )
def is_whitespace(ch):
    """Given a character, return true if it's an EDN whitespace character."""
    # EDN treats the comma as whitespace in addition to the usual characters.
    if ch == ",":
        return True
    return ch.isspace()
import re
def scrape_console_db(html):
    """ Scrapes DB connection count from Console page HTML """
    # 1.34 database connections example:  <li>DB:35/151</li>
    # 1.36:                               DB: 33/1000
    match = re.search(r'DB:\s?(\d+)/(\d+)', html)
    if match is None:
        # Keep empty strings when no counter is present, so downstream
        # consumers still get both keys.
        return {'db-used': '', 'db-max': ''}
    used, maximum = match.groups()
    return {'db-used': int(used), 'db-max': int(maximum)}
def call_object_method(obj, method_name, method_args):
    """
    Dynamically call a method from a given object
    @param obj: target object
    @param method_name: name of a method to be called
    @param method_args: dictionary of keyword arguments to be passed to the target method
    @return: returned value from calling the target method
    """
    # Resolve the bound method first, then expand the kwargs onto it.
    method = getattr(obj, method_name)
    return method(**method_args)
from typing import Any
def _any2str(x: Any) -> str:
"""Simple string casting for the purpose of our dict2argterms function."""
if x is None:
return 'None'
if isinstance(x, str):
return "'" + str(x) + "'"
return str(x) | 16573e4c53735591f0e3b3d075167fddf1268251 | 116,733 |
def internal_cross(u, v):
    """Returns the cross product of two vectors. Should be identical to the
    output of numpy.cross(u, v).
    """
    # Standard determinant expansion of the 3D cross product.
    x = u[1] * v[2] - u[2] * v[1]
    y = u[2] * v[0] - u[0] * v[2]
    z = u[0] * v[1] - u[1] * v[0]
    return (x, y, z)
def set_value(matrix: list, cell: tuple, value: str) -> list:
    """ Changes the value at the (row, col) coordinates in the matrix """
    # Assign in place; the (mutated) matrix is also returned for chaining.
    matrix[cell[0]][cell[1]] = value
    return matrix
import yaml
def readConfigFile(filename):
    """
    Read in YAML config file, this is also a wrapper

    Inputs:
        - str filename: Filename of YAML file
    Outputs:
        - dict config: Dictionary containing data
    """
    try:
        # Load entire config file into memory
        with open(filename) as config_file:
            config = yaml.load(config_file, Loader=yaml.FullLoader)
    # A bare "except:" would also swallow KeyboardInterrupt/SystemExit;
    # only actual errors (missing file, malformed YAML, ...) belong here.
    # The error is reported and then re-raised for the caller to handle.
    except Exception:
        print("Could not read in config file!")
        raise
    return config
from typing import Dict
def headers_fixture() -> Dict:
    """Returns headers for a JSON Web Token (JWT)"""
    # Fixed key id plus the RS256 signing algorithm.
    headers = dict(
        kid="5438ade6-064f-4851-9da8-df37a69d1b84",
        alg="RS256",
    )
    return headers
def is_contraction(text):
    """
    Return True if given string is part of a contraction
    """
    # "ll" and "ve" are the suffix halves of contractions like "we'll"/"we've".
    return text == "ll" or text == "ve"
def toroman(number: int) -> str:
    """
    Takes an integer number, returns a string with the roman numeral.

    Parameters:
        number (int): A positive integer less than 4000

    Returns:
        numeral (str): The roman numeral of the number.

    Raises:
        ValueError: If number is outside the range 1..3999.
    """
    # Exception handling
    if number <= 0:
        raise ValueError("Number is less than 1")
    if number >= 4000:
        raise ValueError("Number is greater than 3999")
    # Greedy conversion: repeatedly take the largest value that still fits.
    # Produces the same canonical numerals as the original string-replacement
    # scheme, without first building a string of `number` "I" characters.
    values = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    parts = []
    for value, symbol in values:
        count, number = divmod(number, value)
        parts.append(symbol * count)
    return "".join(parts)
from datetime import datetime
def _parse_marathon_event_timestamp(timestamp):
""" Parse Marathon's ISO8601-like timestamps into a datetime. """
return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ') | 88386e604853d67faa4638157ca866732bd6a257 | 116,757 |
def get_ids(xml):
    """
    Split an xml full name of the form ``<series>-<file>_<rest>`` into ids.

    :param xml: xml full name
    :return: tuple (series id, file id)
    """
    # Split on the FIRST separator only: extra '-' or '_' characters later
    # in the name no longer raise "too many values to unpack".
    series_id, xml_name = xml.split('-', 1)
    file_id = xml_name.split('_', 1)[0]
    return series_id, file_id
def add(x, y):
    """Return the sum of ``x`` and ``y``."""
    return x + y
import fnmatch
def filter_match(patterns, parts):
    """Check if any of the given patterns match the split path.

    Each pattern is a sequence of fnmatch-style components; a pattern
    matches when every component of ``parts`` matches the corresponding
    leading component of the pattern (patterns may be longer than
    ``parts``).

    :param patterns: iterable of component-pattern sequences
    :param parts: the path, already split into components
    :return: True if at least one pattern matches, False otherwise
    """
    count = len(parts)
    for pattern in patterns:
        # Fast match - assumes that if the length of parts is larger than
        # the length of the pattern, then it already matched previously.
        if count > len(pattern):
            continue
        # Bug fix: the original returned False on the FIRST mismatching
        # pattern, so later patterns were never tried. Keep scanning and
        # only report True when some pattern matches in full.
        if all(fnmatch.fnmatch(part, pat) for part, pat in zip(parts, pattern)):
            return True
    return False
def average_df(df, ch_inds):
    """Average dataframe across requested channel inds.

    Parameters
    ----------
    df : pandas.DataFrame
        Container which holds ratio, channels, and peak values.
    ch_inds : list of ints
        Channels to run correlations over.

    Returns
    -------
    avg_df : pandas.DataFrame
        Dataframe, with data averaged across channels.
    """
    # Keep only the rows for the requested channels, then average the
    # remaining columns per subject.
    selected = df[df['Chan_ID'].isin(ch_inds)]
    return selected.groupby("Subj_ID").mean()
def get_book_rating(soup):
    """ Return book rating """
    # The rating lives in a <span itemprop="ratingValue"> element.
    rating_span = soup.find('span', attrs={'itemprop': 'ratingValue'})
    return rating_span.get_text()
import re
def parse_column_names(text):
    """
    Extracts column names from a string containing quoted and comma separated
    column names.

    :param text: Line extracted from `COPY` statement containing quoted and
        comma separated column names.
    :type text: str
    :return: Tuple containing just the column names.
    :rtype: tuple[str]
    """
    names = []
    for raw_name in text.split(","):
        # Trim whitespace, then strip one pair of surrounding double quotes.
        names.append(re.sub(r"^\"(.*)\"$", r"\1", raw_name.strip()))
    return tuple(names)
def mysql_select_one(conn, query, query_args=None):
    """
    Fetches the first record matching the given query

    :param conn: Mysql Connection
    :param query: The SELECT query to run
    :param query_args: Query arguments
    :return: Result (first row as a dict, or None if no rows matched)
    """
    # Dict cursor so the row comes back keyed by column name.
    cursor = conn.cursor(dictionary=True)
    try:
        cursor.execute(query, query_args)
        return cursor.fetchone()
    finally:
        # Close the cursor even when execute/fetchone raises — the
        # original leaked the cursor on any query error.
        cursor.close()
def remove_keys_recursively(obj, fields_to_remove):
    """Remove specified keys recursively from a python object (dict or list).

    Args:
        obj (dict/list): from where keys need to be removed.
        fields_to_remove (list): fields to remove

    Returns:
        dict/list: Cleaned object
    """
    if isinstance(obj, dict):
        # Drop the matching keys, then clean the surviving values.
        return {
            key: remove_keys_recursively(value, fields_to_remove)
            for key, value in obj.items()
            if key not in fields_to_remove
        }
    if isinstance(obj, list):
        # List ITEMS equal to a field name are dropped as well,
        # mirroring the key filtering above.
        return [
            remove_keys_recursively(element, fields_to_remove)
            for element in obj
            if element not in fields_to_remove
        ]
    # Scalars pass through untouched.
    return obj
import collections
def plant(ctx, packages, replace, **tend_args):
    """
    Install package(s) to the garden.
    The list of packages may take the form of paths to package
    directories or name:path pairs. If a name is not provided for a
    package, the package directory name is used instead.
    """
    # Preserve the order in which the packages were given.
    to_plant = collections.OrderedDict(packages)
    garden = ctx.obj["garden"]
    return garden.plant(to_plant, replace=replace, **tend_args)
def expectedPacketsPerSecond(ba7):
    """Return the expected number of packets per second we can get from this station.

    Args:
        ba7 (byte): Holds encoded information about the class of station this is. That directly
            translates into the number of packets per second that get sent.

    Returns:
        int: Number of packets per second.
    """
    # The TIS-B id lives in the high nibble of the byte; higher id bands
    # correspond to higher packet rates.
    tisb_id = (ba7 & 0xF0) >> 4
    if tisb_id >= 13:
        return 4
    if tisb_id >= 10:
        return 3
    if tisb_id >= 5:
        return 2
    return 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.