content stringlengths 42 6.51k |
|---|
def single_axes(axes):
    """Classify each axis of *axes* as single (1) or real (2).

    A value of -1 marks a broadcast/padding axis inserted to align
    dimensions for an einsum equation; any other value is the axis'
    position in the original matrix.

    :param axes: list of axis positions (or None)
    :return: list of 1 (single axis) / 2 (real axis), or None if
        *axes* is None
    """
    if axes is None:
        return None
    return [1 if axis == -1 else 2 for axis in axes]
def get_my_guess(num_people,
                 num_days=365):
    """
    Compute my initial guess of the consecutive birthdays probability.

    Prints the guess for *num_people* and returns it as a float.
    """
    left = 1 - (num_people - 1) / float(2 * num_days + num_people - 1)
    product = 1
    for k in range(num_people):
        product *= (num_days - 2 * k) / float(num_days)
    guess = left * (1 - product)
    print("Number of people: {}\tMy guess: {:2.5%}".format(num_people, guess))
    return guess
def variance(xl):
    """Return the sample variance (n-1 denominator) of a list."""
    mean = sum(xl) / len(xl)
    squared_devs = ((x - mean) ** 2 for x in xl)
    return sum(squared_devs) / (len(xl) - 1)
def attenuation(frequency, liquid_water_temperature, liquid_water_density):
    """
    Calculate the specific attenuation due to cloud or fog.

    Uses a double-Debye model for the complex permittivity of liquid
    water in the Rayleigh-scattering regime (droplets much smaller than
    the wavelength), so attenuation is linear in the water density.
    NOTE(review): the constants match an ITU-R P.840-style formulation
    -- confirm against the intended recommendation revision.

    :param frequency: The operating frequency (GHz).
    :param liquid_water_temperature: The liquid water temperature (K).
    :param liquid_water_density: The liquid water density (g/m^3)
    :return: The specific attenuation (dB/km)
    """
    # Calculate the relative water temperature (dimensionless; 1.0 at 300 K)
    theta = 300 / liquid_water_temperature
    # Calculate the principal and secondary relaxation frequencies (GHz)
    fp = 20.20 - 146. * (theta - 1.) + 316. * (theta - 1.)**2
    fs = 39.8 * fp
    # Preliminary calculations for the permittivity
    eps_0 = 77.66 + 103.3 * (theta - 1.)
    eps_1 = 0.0671 * eps_0
    eps_2 = 3.52
    # Calculate the complex permittivity (eps_p: real part, eps_pp: imaginary part)
    eps_p = (eps_0 - eps_1) / (1. + (frequency/fp)**2) + (eps_1 - eps_2) / (1. + (frequency/fs)**2)
    eps_pp = frequency * (eps_0 - eps_1) / (fp * (1. + (frequency/fp)**2)) + \
             frequency * (eps_1 - eps_2) / (fs * (1. + (frequency/fs)**2))
    # Calculate the impedance factor used by the cloud attenuation model
    eta = (2. + eps_p) / eps_pp
    # Specific attenuation coefficient per unit water density ((dB/km)/(g/m^3))
    k_l = 0.819 * frequency / (eps_pp * (1 + eta**2))
    # Attenuation scales linearly with liquid water density (Rayleigh regime)
    return k_l * liquid_water_density
def epoch_to_steps(epoch: float, steps_per_epoch: int, min_epoch: float = 0.0) -> int:
    """
    Convert a (fractional) epoch into the equivalent number of steps.

    :param epoch: the (fractional) epoch to convert
    :param steps_per_epoch: number of steps (batches) taken per epoch while training
    :param min_epoch: floor applied to *epoch* before conversion. Default 0
    :return: the number of steps representing the epoch
    """
    clamped = max(epoch, min_epoch)
    return round(steps_per_epoch * clamped)
def get_matching_shapes(source_shapes, target_shapes):
    """ Returns the matching shapes

    Maps each source shape to the target shape that shares the same
    prefix-less name.

    :param source_shapes: sources dictionary containing prefix-less shapes
    :type source_shapes: dict
    :param target_shapes: targets dictionary containing prefix-less shapes
    :type target_shapes: dict
    :return: source shape -> matching target shape
    :rtype: dict

    .. note:: This function is the core idea of how Flex finds matching
              shapes from a source group to the target. The matching is
              **shapes name based** (reference or import cases are both
              covered by using prefix-less names). A dict lookup keeps
              this fast even for thousands of shapes.
    """
    return {source_shapes[name]: target_shapes[name]
            for name in source_shapes
            if name in target_shapes}
def generate_name(route, deployment):
    """
    Generate the unique name for a route in a given deployment.

    :param route: the id of the route
    :param deployment: the id of the deployment
    :return: the unique name for the route ("<route>_<deployment>")
    """
    return "{}_{}".format(route, deployment)
def figure_format(figure):
    """Takes a dollar figure and formats it with commas.

    :param figure: The figure to be formatted, e.g. "1234567.89"
    :return: The figure formatted with commas in appropriate places
    :rtype: str
    :raise ValueError: figure must contain a decimal to split str
    :raise ValueError: figure must be at least 4 chars in 'D.CC' format
    :raise AttributeError: figure must be a string
    """
    if len(figure) < 4:
        raise ValueError("figure must be at least 4 characters in "
                         "D.CC format")
    # split raises AttributeError for non-strings; unpacking raises
    # ValueError when there is not exactly one decimal point.
    dollars, cents = figure.split(".")
    # Group the dollar digits in threes, working from the right.
    groups = []
    while len(dollars) > 3:
        groups.insert(0, dollars[-3:])
        dollars = dollars[:-3]
    groups.insert(0, dollars)
    return ",".join(groups) + "." + cents
def replace0(x):
    """
    Strip the trailing '.0' that pandas appends to integer-valued floats
    when they precede a tab or newline.
    """
    for terminator in ("\t", "\n"):
        x = x.replace(".0" + terminator, terminator)
    return x
def _combine_ind_ranges(ind_ranges_to_merge):
"""
Utility function for subdivide
Function that combines overlapping integer ranges.
Example
[[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]]
"""
ind_ranges_to_merge = sorted(ind_ranges_to_merge)
stack = []
result = []
for curr in ind_ranges_to_merge:
if len(stack) == 0:
stack.append(curr)
elif stack[-1][-1] >= curr[0]:
prev = stack.pop()
merged = sorted(list(set(prev + curr)))
stack.append(merged)
else:
prev = stack.pop()
result.append(prev)
stack.append(curr)
result += stack
return result |
def control_input(options: int, input_ctl: int) -> bool:
    """Function to control input of user.

    Args:
        options: limit of input (NOTE(review): despite the int annotation,
            the non-int branch does `input_ctl in options`, which requires
            a container -- confirm the intended type with callers)
        input_ctl: input of user
    Return: Bool -- True when the input is acceptable, else False
        (after printing a hint).
    """
    if input_ctl is not None:
        if isinstance(input_ctl, int):
            # NOTE(review): the accepted range is hard-coded to 1..3
            # rather than derived from `options` -- confirm this is intended.
            if 0 < input_ctl <= 3:
                return True
            elif options + 1 < 3:
                print("Please give a number between 0 and {}.".format(options))
            else:
                print("Please give a number between 0 and {}.".format(3))
        else:
            # Non-int input: membership test against `options`
            if input_ctl in options:
                return True
            else:
                print("Please give a valid gate between {}".format(options))
    return False
def merge_datasets(num_evts_per_dataset):
    """ Return dict `<merged_dataset> : list of <dataset>'

    Associates all datasets in `num_evts_per_dataset' that belong by their
    name to the same PD but to a different run era. For example:
    isolated_mu_runa_v1, isolated_mu_runb_v1, isolated_mu_runc_v2 --> isolated_mu
    The returned dict has as value a list of the merged datasets.
    """
    merged = {}
    for dataset in num_evts_per_dataset:
        # Everything before "run", without the trailing underscore(s)
        bare_name = dataset[:dataset.find("run")].rstrip("_")
        merged.setdefault(bare_name, []).append(dataset)
    return merged
def action(function=None, *, permissions=None, description=None):
    """
    Conveniently add attributes to an action function::

        @admin.action(
            permissions=['publish'],
            description='Mark selected stories as published',
        )
        def make_published(self, request, queryset):
            queryset.update(status='p')

    This is equivalent to setting ``allowed_permissions`` and
    ``short_description`` on the function directly. Usable both as a bare
    decorator and as a decorator factory.
    """
    def decorator(func):
        if permissions is not None:
            func.allowed_permissions = permissions
        if description is not None:
            func.short_description = description
        return func
    # Bare use (@action) passes the function directly; parametrized use
    # (@action(...)) returns the decorator for later application.
    return decorator if function is None else decorator(function)
def linear(xs, slope, y0):
    """A simple linear function, applied element-wise.

    Args:
        xs (np.ndarray or float): Input(s) to the function.
        slope (float): Slope of the line.
        y0 (float): y-intercept of the line.
    """
    return slope * xs + y0
def valid_sample(sample):
    """Check whether a sample is valid.

    A valid sample is a non-empty dict whose "__bad__" flag is not set.

    sample: sample to be checked
    """
    if not isinstance(sample, dict):
        return False
    if not sample:
        return False
    return not sample.get("__bad__", False)
def clamp(value, minimum, maximum):
    """ Reset value between minimum and maximum """
    capped = min(value, maximum)
    return max(capped, minimum)
def solution(A):
    """Return an equilibrium index of list A, or -1 if none exists.

    An equilibrium index i satisfies sum(A[:i]) == sum(A[i+1:]).
    Runs in O(n) with a running prefix sum; the per-element debug
    print of the original (O(n) console spam) has been removed.

    :param A: list of integers
    :return: int equilibrium index, or -1
    """
    total = sum(A)
    left_sum = 0
    for i, value in enumerate(A):
        # Sum of the elements strictly to the right of index i
        right_sum = total - left_sum - value
        if left_sum == right_sum:
            return i
        left_sum += value
    return -1
def indentblock(text, spaces=0):
    """Indent multiple lines of text to the same level.

    Non-string input (anything without splitlines) yields ''.
    """
    lines = text.splitlines() if hasattr(text, 'splitlines') else []
    prefix = ' ' * spaces
    return '\n'.join(prefix + line for line in lines)
def fillIdAppE(errMsg, errCode):
    """
    Build the full error message, prefixed with the application id and
    the zero-padded 4-digit error code.

    :param errMsg: human-readable error text
    :param errCode: numeric error code
    :return: error message string "EACCAPPnnnn <errMsg>"
    """
    return "EACCAPP{:0>4d} {}".format(errCode, errMsg)
def isPostCSP(t, switch=961986575.):
    """
    Given a GALEX time stamp, return TRUE if it corresponds to a "post-CSP"
    eclipse. The actual CSP was on eclipse 37423, but the clock change
    (which matters more for calibration purposes) occured on 38268
    (t~=961986575.)

    :param t: The time stamp to test.
    :type t: float
    :param switch: The GALEX time stamp that defines pre- and post-CSP.
    :type switch: float
    :returns: bool -- Does this time correspond to a post-CSP eclipse?
    """
    # Sanity-check that the tscale has been applied properly: reject
    # values more than two orders of magnitude away from the switch.
    lower, upper = switch / 100., switch * 100.
    if not lower < t < upper:
        raise ValueError('Did you apply tscale wrong?')
    return t >= switch
def sessions_with_product_views(total_sessions, sessions_with_product_views):
    """Return the percentage of sessions with product views during the period.

    Args:
        total_sessions (int): Total number of sessions within the period.
        sessions_with_product_views (int): Total number of sessions with product views within the period.
    Returns:
        Percentage (float) of sessions with product views.
    """
    fraction = sessions_with_product_views / total_sessions
    return fraction * 100
def int2float_ensure_precision(value, scale):
    """Cast an int to a float with the given scale but ensure that the values (up to the scale) are correct.

    eg. 42112588 with scale 4 should certainly render: 4211.2588 and not 4211.258799999999

    :param value: integer mantissa
    :param scale: number of decimal digits encoded in *value*
    :return: *value* unchanged when scale or value is 0, otherwise a float
        nudged so the first *scale* decimal digits round correctly
    """
    if scale == 0 or value == 0:
        return value
    # Add pow(10.0, -scale - 3) to ensure our smallest digit according to the
    # scale is correct: the tiny positive nudge (3 digits below the scale)
    # pushes binary-rounding error in the safe direction when formatting.
    return (value * pow(10.0, -scale)) + pow(10.0, -scale - 3)
def compute_score_shift(nEM, nLM, nR, c, nEMX, nLMX):
    """
    Compute constant shift for RF score as described in FastMulRFS paper.

    Parameters
    ----------
    nEM : int
        Number of edges in MUL-tree
    nLM : int
        Number of leaves in MUL-tree
    nR : int
        Number of edges in MUL-tree that induce invalid bipartitions
        (i.e., edges split the label set into two non-disjoint sets)
    c : int
        Number of species with multiple copies in the MUL-tree
    nEMX : int
        Number of edges in preprocessed MUL-tree
    nLMX : int
        Number of leaves in preprocessed MUL-tree,
        which is the same as the number of species

    Returns
    -------
    Constant shift for RF score as described in FastMulRFS paper
    """
    gain = nLMX + c + nEM - nEMX
    penalty = (2 * nR) + nLM
    return gain - penalty
def site_confusion(y_true, y_pred, site_lists):
    """What proportion of misidentified species come from the same site?

    Args:
        y_true: string values of true labels
        y_pred: string values of predicted labels (same length as y_true)
        site_lists: dict mapping taxonID -> list of site labels
    Returns:
        Within-site confusion score in [0, 1]; 0 when there are no errors.
    """
    within_site = 0
    cross_site = 0
    for true_label, pred_label in zip(y_true, y_pred):
        if pred_label == true_label:
            continue  # only misclassifications contribute
        true_sites = site_lists[true_label]
        pred_sites = site_lists[pred_label]
        # Does the predicted taxon co-occur at any of the true taxon's sites?
        if any(site in pred_sites for site in true_sites):
            within_site += 1
        else:
            cross_site += 1
    total_errors = within_site + cross_site
    if total_errors == 0:
        # don't divide by zero
        return 0
    return within_site / total_errors
def div(a, b):
    """Divide two values, ignoring None.

    None/None -> None; None/b -> 1/b; a/None -> a; otherwise a/b.
    """
    if a is None and b is None:
        return None
    if a is None:
        return 1 / b
    if b is None:
        return a
    return a / b
def get_traceback(exc=None):
    """
    Return a string with the traceback for the given ``sys.exc_info()``
    style tuple *exc*, or for the current exception when *exc* is None.

    Returns None when there is no traceback available (the original
    ``if not exc`` test could never trigger: ``sys.exc_info()`` always
    returns a truthy 3-tuple, even outside an except block).
    """
    import io, traceback, sys  # pylint: disable=multiple-imports
    if exc is None:
        exc = sys.exc_info()
    # Check the traceback slot itself -- (None, None, None) means
    # "no exception being handled".
    if not exc or exc[2] is None:
        return None
    tb = exc[2]
    sio = io.StringIO()
    traceback.print_tb(tb, file=sio)
    del tb  # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
    return sio.getvalue()
def getval(constraints, key, db):
    """
    Get the value of a constraint.

    :param constraints: mapping of constraint name -> value or predicate
    :param key: constraint name to resolve
    :param db: object exposing a ``parameters`` iterable of dicts with a
        'Name' key and optionally a 'Categories' list
        (NOTE(review): schema inferred from usage -- confirm)
    :return: the stored value; when the value is callable it is used as a
        predicate to select the first matching category of the parameter
        named *key* ('' when the parameter has no categories)
    :raise IndexError: if no parameter named *key* exists, or the
        predicate matches no category
    """
    value = constraints[key]
    if callable(value):
        # find param -- first parameter whose Name equals the key
        param = [e for e in db.parameters if e['Name'] == key][0]
        if 'Categories' not in param:
            return ''
        # first category satisfying the predicate
        return [e for e in param['Categories'] if value(e)][0]
    else:
        return value
def get_values(line):
    """
    Return the portion of an INSERT statement after the VALUES keyword
    ('' when the marker is absent).
    """
    marker = '` VALUES '
    return line.partition(marker)[2]
def construct_bericht_sent_query(graph_uri, bericht_uri, verzonden):
    """
    Construct a SPARQL query for marking a bericht as received by the other party (and thus 'sent' by us)

    :param graph_uri: string, URI of the graph to update
    :param bericht_uri: URI of the bericht we would like to mark as sent.
    :param verzonden: ISO-string representation of the datetime when the message was sent
    :returns: string containing SPARQL query

    NOTE(review): the query uses xsd:dateTime without a PREFIX xsd
    declaration; many endpoints predefine it, but strict SPARQL parsers
    will reject this -- confirm against the target triple store.
    NOTE(review): the URIs/timestamp are interpolated without escaping,
    so callers must supply trusted, well-formed values.
    """
    q = """
        PREFIX schema: <http://schema.org/>

        INSERT {{
            GRAPH <{0}> {{
                <{1}> schema:dateReceived "{2}"^^xsd:dateTime.
            }}
        }}
        WHERE {{
            GRAPH <{0}> {{
                <{1}> a schema:Message.
            }}
        }}
    """.format(graph_uri, bericht_uri, verzonden)
    return q
def sort_lists(time_list,scale_factor_list,initial_rho_list):
    """
    Takes the lists and sorts them based on the initial_rho_list.

    Returns (time_list, scale_factor_list, initial_rho_list) reordered by
    ascending initial rho.

    NOTE(review): duplicate rho values collapse onto one dict key, so
    their time/scale entries are lost/duplicated -- confirm rhos are unique.
    NOTE(review): initial_rho_list is sorted in place, mutating the
    caller's list.
    """
    # Map each rho to its (time, scale factor) pair so the companion
    # lists can be re-read in sorted-rho order.
    RHO_MAP = {initial_rho_list[i] : (time_list[i],scale_factor_list[i])
                    for i in range(len(initial_rho_list))}
    initial_rho_list.sort()
    time_list = [RHO_MAP[rho][0] for rho in initial_rho_list]
    scale_factor_list = [RHO_MAP[rho][1] for rho in initial_rho_list]
    return time_list,scale_factor_list,initial_rho_list
def format_bytes(num, suffix="B"):
    """
    Format bytes as a human readable string.
    Thanks to https://stackoverflow.com/a/1094933.
    """
    value = num
    for prefix in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Anything this large is reported in yobibytes
    return "%.1f%s%s" % (value, "Yi", suffix)
def match_label(_lbls: list, label: str, address: int) -> list:
    """
    Record *label* at *address*: update the entry if the label is already
    known, otherwise append a new entry (the original docstring promised
    the append but the code never performed it, and it claimed an int
    return while always returning the list).

    Parameters
    ----------
    _lbls: list, mandatory
        A list of dicts with 'label' and 'address' keys (mutated in place)
    label: str, mandatory
        The potential new label
    address: int, mandatory
        The address of the potential new label

    Returns
    -------
    list
        The updated list of labels.
    """
    for entry in _lbls:
        if entry['label'] == label:
            entry['address'] = address
            return _lbls
    # Label not seen before: add it.
    _lbls.append({'label': label, 'address': address})
    return _lbls
def preprocess_zenodo_metadata(raw_metadata: dict):
    """Pre-process metadata into the Zenodo format for upload.

    Converts UI-shaped fields in place: creators, communities and grants
    become JSON arrays; keywords become a list of stripped strings.

    Bug fixed: the original created a single ``creator`` dict *outside*
    the author loop, so every author overwrote the same dict and the
    creators list ended up holding N references to the last author.
    """
    # Creators must be a JSON array (list of dicts).
    creators = []
    if "author_details" in raw_metadata:
        for author in raw_metadata["author_details"]:
            # Fresh dict per author -- author is (name, affiliation, orcid)
            creators.append({
                "name": author[0],
                "affiliation": author[1],
                "orcid": author[2],
            })
        raw_metadata["creators"] = creators
    # Communities must also be a JSON array
    if "communities" in raw_metadata:
        communities = [{"identifier": (raw_metadata["communities"]).lower()}]
        raw_metadata["communities"] = communities
    # Keywords must be a list
    if "keywords" in raw_metadata:
        keywords = raw_metadata["keywords"].split(",")
        keywords = [word.strip() for word in keywords]
        raw_metadata["keywords"] = keywords
    # Grants must be a JSON array
    if "grants" in raw_metadata:
        grants = [{'id': raw_metadata["grants"]}]
        raw_metadata["grants"] = grants
    return raw_metadata
def encode_labels(label: str, label2id: dict) -> "int | str":
    """Encode a label with its corresponding label id.

    Returns the mapped id from *label2id*, or the sentinel string "unk"
    when the relation/label is unknown (hence the str in the return type;
    the original ``-> int`` annotation was wrong for that case).
    """
    return label2id.get(label, "unk")
def update_orb_spec(spec, param):
    """
    Update an orbital specification.

    Enter: spec: the text specification.  Lines of the form
               "<key> <value>" (optionally followed by a '#' comment)
               are rewritten when <key> appears in param.
           param: a dictionary of parameters to update.
    Exit:  the updated specification, blank lines removed and always
           newline-terminated.
    """
    # Normalize CR line endings and drop blank lines
    lines = [line for line in spec.replace('\r', '\n').split('\n') if line.strip()]
    out = []
    for line in lines:
        # Strip any trailing comment, then tokenize the remainder
        parts = line.split('#')[0].strip().split()
        if len(parts) == 2 and parts[0] in param:
            # Keep the text up to one character past the key, then append
            # the new value.  NOTE(review): this drops any trailing comment
            # on the rewritten line and assumes a single separator char
            # after the key -- confirm the spec format tolerates this.
            line = line[:line.index(parts[0]) + len(parts[0]) + 1] + str(param[parts[0]])
        out.append(line)
    out.append('')
    return '\n'.join(out)
def range_size(range_list):
    """
    Calculate the total size of *range_list*, a sequence of
    (start, end, ...) pairs; each contributes end - start.
    """
    return sum(entry[1] - entry[0] for entry in range_list)
def lookup_object_type(rest_client, type_id):
    """
    Convert a resilient object_type_id into a label for use in an api
    call for rule invocation.

    :param rest_client: client used to resolve datatable types via /types
    :param type_id: internal number of object
    :return: object name, or ValueError if not supported

    Bug fixed: the original bounds check used ``<=`` against len(lookup),
    so type_id == len(lookup) raised IndexError instead of falling
    through to the datatable check.
    """
    lookup = ['', 'tasks', 'notes', 'milestones', 'artifacts', 'attachments', None, 'organizations']
    if 0 <= type_id < len(lookup):
        if lookup[type_id] is not None:
            return lookup[type_id]
    else:
        # not in the static table: check to see if a datatable
        url = "/types/{}".format(type_id)
        resp = rest_client.get(url)
        if resp['type_id'] == 8:
            return "table_data"
    raise ValueError("Rule type not supported")
def encDec0(i):
    """Round down to the start of the decade (a '0'-ending year)."""
    return i - (i % 10)
def amol(lst, **kwargs):
    """All Math On List; a=Add, s=Subtract, m=Multiply, d=Divide, p=To the power of.

    Applies the requested scalar operation(s) to every element, always in
    the fixed order a, s, m, d, p, and returns a tuple.
    """
    values = list(lst)
    if 'a' in kwargs:  # add
        values = [v + kwargs['a'] for v in values]
    if 's' in kwargs:  # subtract
        values = [v - kwargs['s'] for v in values]
    if 'm' in kwargs:  # multiply
        values = [v * kwargs['m'] for v in values]
    if 'd' in kwargs:  # divide
        values = [v / kwargs['d'] for v in values]
    if 'p' in kwargs:  # power
        values = [v ** kwargs['p'] for v in values]
    return tuple(values)
def lyap_lrcf_solver_options(lradi_tol=1e-10,
                             lradi_maxiter=500,
                             lradi_shifts='projection_shifts',
                             projection_shifts_init_maxiter=20,
                             projection_shifts_init_seed=None,
                             projection_shifts_subspace_columns=6,
                             wachspress_large_ritz_num=50,
                             wachspress_small_ritz_num=25,
                             wachspress_tol=1e-10):
    """Return available Lyapunov solvers with default options.

    Parameters
    ----------
    lradi_tol, lradi_maxiter, lradi_shifts
        See :func:`solve_lyap_lrcf`.
    projection_shifts_init_maxiter, projection_shifts_init_seed
        See :func:`projection_shifts_init`.
    projection_shifts_subspace_columns
        See :func:`projection_shifts`.
    wachspress_large_ritz_num, wachspress_small_ritz_num, wachspress_tol
        See :func:`wachspress_shifts_init`.

    Returns
    -------
    A dict of available solvers with default solver options.
    """
    # Build the two shift-strategy option blocks separately for clarity.
    projection_opts = {'type': 'projection_shifts',
                       'init_maxiter': projection_shifts_init_maxiter,
                       'init_seed': projection_shifts_init_seed,
                       'subspace_columns': projection_shifts_subspace_columns}
    wachspress_opts = {'type': 'wachspress_shifts',
                       'large_ritz_num': wachspress_large_ritz_num,
                       'small_ritz_num': wachspress_small_ritz_num,
                       'tol': wachspress_tol}
    return {'lradi': {'type': 'lradi',
                      'tol': lradi_tol,
                      'maxiter': lradi_maxiter,
                      'shifts': lradi_shifts,
                      'shift_options': {'projection_shifts': projection_opts,
                                        'wachspress_shifts': wachspress_opts}}}
def has_administrative_perm(user_level, obj, ctnr, action):
    """
    Permissions for ctnrs or users.
    Not related to DNS or DHCP objects.

    obj and ctnr are accepted for signature compatibility but unused.
    Unknown user levels get False.
    """
    if user_level in ('cyder_admin', 'admin'):
        return action == 'view' or action == 'update'
    if user_level in ('user', 'guest'):
        return action == 'view'
    return False
def second_smallest(numbers):
    """
    Find the second smallest number in a list (counting duplicates of the
    minimum; returns inf for lists with fewer than two elements).
    """
    smallest = float('inf')
    runner_up = float('inf')
    for value in numbers:
        if value <= smallest:
            runner_up = smallest
            smallest = value
        elif value < runner_up:
            runner_up = value
    return runner_up
def pastis_matrix_measurements(nseg):
    """
    Calculate the total number of measurements needed for a PASTIS matrix
    with nseg segments: the triangular number nseg*(nseg+1)/2.

    Uses integer arithmetic (//) instead of the original float division,
    which silently lost precision for nseg above ~2**26.

    :param nseg: int, total number of segments
    :return: int, total number of measurements
    """
    return nseg * (nseg + 1) // 2
def get_float_values(line):
    """
    Parse csv string with float values to list of floats.

    Empty fields are parsed as 0.

    :param line: csv string, e.g., ".4,2,-3.4"
    :return: List of floats. Empty list if parsing failed.
    """
    result_list = []
    for token in line.split(","):
        if token.strip() == "":
            result_list.append(0)
        else:
            try:
                result_list.append(float(token))
            except ValueError:
                # Original used a bare except and never actually
                # formatted `line` into the message.
                print("Could not parse line: {}".format(line))
                return []
    return result_list
def slugify(error) -> str:
    """Remove newlines from the string form of *error*.

    NOTE(review): the original docstring claimed newlines were replaced
    by a space, but the code replaces them with the empty string.
    """
    return str(error).replace("\n", "")
def get_otool_path(otool_line):
    """Parse path from a line from ``otool -L`` output.

    This **assumes** the format, but does not check it.

    Args:
        otool_line (str): A dependency (or ``install_name``) from ``otool -L``
            output. Expected to be of the form '\t{PATH} (compatibility ...)'.

    Returns:
        str: The ``PATH`` in the ``otool_line``.
    """
    # The path is the first whitespace-delimited token.
    return otool_line.split()[0]
def PolyCoefficients(xt, coeffs):
    """ Evaluate the polynomial with ``coeffs`` at ``x`` values ``xt``.

    The coefficients must be in ascending order (``x**0`` to ``x**o``).
    """
    yt = 0
    for power, coeff in enumerate(coeffs):
        yt += coeff * xt ** power
    return yt
def sn(ok):
    """Convert a truthy/falsy value to +1 or -1."""
    return 1 if ok else -1
def cm2inch(*tupl):
    """Convert input cm values to inches.

    Accepts either separate positional values or a single tuple.
    """
    INCH_IN_CM = 2.54
    values = tupl[0] if isinstance(tupl[0], tuple) else tupl
    return tuple(v / INCH_IN_CM for v in values)
def factorial(n):
    """
    Return factorial of n

    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    """
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
def is_cmake(file_path):
    """
    :param file_path: the path of a file
    :return: True if the final dot-separated component is "cmake"
        (note: a bare "cmake" filename therefore also matches)
    """
    return file_path.rsplit(".", 1)[-1] == "cmake"
def is_macho(filename):
    """ Check that a file is in the Mach-O format.

    Bug fixed: the file is read in binary mode, so the magic must be
    compared against bytes -- the original compared against a str,
    which is always False on Python 3.  The redundant close() inside
    the with-block is gone too.
    NOTE(review): this only matches the first two bytes of the
    little-endian 64-bit magic (0xfeedfacf on disk as cf fa ..);
    other Mach-O variants are not detected -- confirm intent.
    """
    with open(filename, "rb") as fp:
        magic = fp.read(2)
    return magic == b'\xcf\xfa'
def get_key(dict, key):
    """Trivial helper: fetch one value from a mapping, None when absent.

    (The parameter name shadows the builtin ``dict``; kept for
    signature compatibility with existing callers.)
    """
    return dict.get(key)
def get_data_struct_name(sub):
    """Turn a snake_case subroutine name into its CamelCase data struct name.

    >>> get_data_struct_name("my_sub_name")
    'MySubNameData'
    >>> get_data_struct_name("sub")
    'SubData'
    """
    camel = "".join(part.capitalize() for part in sub.split("_"))
    return camel + "Data"
def screen_collection(src, cols, possibilities):
    """Return entries in a list of dicts where a set of fields match one
    of a set of possible values for those fields."""
    def signature(row):
        # Tuple of the row's values for the screened columns
        return tuple(row[col] for col in cols)
    return [row for row in src if signature(row) in possibilities]
def save_secret_file(secret, retrieved_file):
    """
    Save the secret data (binary) to a file, encoding str input to bytes.
    """
    try:
        with open(retrieved_file, 'wb') as out:
            out.write(secret)
    except TypeError:
        # secret was a str: encode and retry
        with open(retrieved_file, 'wb') as out:
            out.write(secret.encode())
    return '{} saved with success'.format(retrieved_file)
def collatzSeq(n, outputSeq=None):
    """
    collatzSeq(int, list) -> list

    Return the Collatz sequence starting at n (inclusive) down to 1,
    appended to outputSeq if one is given.

    Fixes over the original: iterative instead of recursive (no
    RecursionError on long sequences) and integer // division instead
    of float /, which silently lost exactness for n above 2**53.

    - n (required): the integer against which the conjecture is tested
    - outputSeq (optional): list to append the sequence to
    """
    if outputSeq is None:
        outputSeq = []
    n = int(n)
    outputSeq.append(n)
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        outputSeq.append(n)
    return outputSeq
def parse_params_int(params, p):
    """ Get and parse an int value from request params.

    Returns None when the key is missing or the value is not a valid
    base-10 integer string.  (The original only caught ValueError, so a
    missing key made int(None, 10) raise an uncaught TypeError.)
    """
    val = params.pop(p, None)
    try:
        return int(val, 10)
    except (TypeError, ValueError):
        # TypeError: val is None (missing key) or not a str;
        # ValueError: str with invalid digits.
        return None
def A2(x: int, y: int) -> dict:
    """Graph function using primitive output type."""
    total = x + y
    return {'a': total}
def find_factorial(number: int) -> int:
    """Return factorial for specified number (ValueError for negatives)."""
    if number < 0:
        raise ValueError("Negative number")
    result = 1
    for k in range(2, number + 1):
        result *= k
    return result
def factorial(num):
    """Recursively compute the factorial of the given number.

    Fixes over the original: a base case of ``num <= 1`` so factorial(0)
    returns 1 instead of recursing forever, and an explicit ValueError
    for negative input instead of a RecursionError.
    """
    if num < 0:
        raise ValueError("factorial() not defined for negative values")
    if num <= 1:
        return 1
    return num * factorial(num - 1)
def get_vigra_feature_names(feature_names):
    """
    For the given list of feature names, return the list of feature names
    to compute in vigra -- i.e. with prefixes and suffixes removed.

    For example: ['edge_vigra_mean', 'sp_vigra_quantiles_25'] -> ['mean', 'quantiles']
    """
    lowered = (name.lower() for name in feature_names)
    # Third underscore-separated token is the vigra feature name; the set
    # removes duplicates from multiple quantile selections.
    unique = {name.split('_')[2] for name in lowered}
    return list(unique)
def compare(x, y):
    """Comparison helper function for multithresholding.

    Returns 1.0 if x >= y, otherwise 0.0."""
    return 1.0 if x >= y else 0.0
def _class_required(type_, class_, params):
"""Return true if method requires a `cls` instance."""
if not params or class_ is None:
return False
return type_ == 'classmethod' |
def int8_from_byte(byte):
    """Convert one unsigned byte (0..255) to a signed 8-bit integer."""
    return byte - 256 if byte > 127 else byte
def parse_cmd(cmd):
    """
    Split the cmd string into tokens.

    Delimiters are: space, single and double quotes.  Quoted runs are
    kept together (quotes removed, mixing quote styles is allowed);
    consecutive delimiters never produce empty tokens.  An unterminated
    quote is tolerated: its pending text is flushed at the end.
    """
    SINGLE_QUOTE = "'"
    DOUBLE_QUOTE = "\""
    ESPACE = " "
    result = []
    cache = ""             # characters of the token currently being built
    quote_context = None   # the quote char we are currently inside, or None
    collect_cache = False  # flush `cache` into `result` this iteration
    for char in cmd:
        if quote_context is None:  # outside a quote context
            if char in (SINGLE_QUOTE, DOUBLE_QUOTE):
                quote_context = char
                collect_cache = True
            elif char == ESPACE:
                collect_cache = True
            else:
                cache += char
        else:  # inside a quote context
            if char == quote_context:
                # matching close quote ends the context
                quote_context = None
                collect_cache = True
            else:
                cache += char
        # cache collection: only non-empty tokens are kept
        if collect_cache:
            collect_cache = False
            if cache:
                result.append(cache)
                cache = ""
    # flush the final token (or unterminated-quote content)
    if cache:
        result.append(cache)
    return result
def Like(field, value):
    """
    A criterion used to search for objects having a text field's value
    like the specified `value` (wildcards via asterisks), e.g.::

        # Search for tasks where title contains 'Communication'
        query = Like('title', '*Communication*')

    produces ``{"_like": {"_field": "title", "_value": "*Communication*"}}``.

    Arguments:
        field (value): field name
        value (Any): searched value

    Returns:
        dict: JSON representation of the criterion

    !!! Note
        Without `*` the exact text is matched: `Like('title', 'MISP')`
        searches for titles equal to `'MISP'`.
    """
    criterion = {'_field': field, '_value': value}
    return {'_like': criterion}
def parse_package_status(release, package, status_text, filepath):
    """
    Parse an ubuntu package status string of the format:
        <status code> (<version/notes>)

    :return: dict where
       'status' : '<not-applicable | unknown | vulnerable | fixed>',
       'fix-version' : '<version with issue fixed, if applicable>'
    """
    # break out status code and detail
    sections = status_text.strip().split(' ', 1)
    code = sections[0].strip().lower()
    detail = sections[1].strip('()') if len(sections) > 1 else None

    fix_version = None
    if code == 'dne':
        status = 'not-applicable'
    elif code in ('ignored', 'pending', 'deferred', 'needed', 'needs-triage'):
        status = 'vulnerable'
    elif code == 'not-affected':
        status = 'not-vulnerable'
    elif code in ('released', 'released-esm'):
        if detail:
            status = 'fixed'
            fix_version = detail
        else:
            # no release version: just mark as vulnerable so the
            # package-existence test still runs
            status = 'vulnerable'
    else:
        status = 'unknown'
        print('Unsupported status "{0}" in {1}_{2} in "{3}". Setting to "unknown".'
              .format(code, release, package, filepath))

    result = {'status': status}
    if fix_version is not None:
        result['fix-version'] = fix_version
    return result
def check_input_structure_at_id(input_file_dict):
    """Check all input file strings and make sure they are @id format
    (i.e. contain exactly three '/' characters).

    Returns [] when everything looks fine, otherwise a one-element list
    with the offending inputs listed in the error message.
    """
    collected = []
    for arg_name in input_file_dict:
        # skip the parameter key
        if arg_name == 'additional_file_parameters':
            continue
        arg_value = input_file_dict[arg_name]
        # inputs may be a single string or an array of strings
        if isinstance(arg_value, (list, tuple)):
            collected.extend(arg_value)
        else:
            collected.append(arg_value)
    bad = ''.join(item + ' ' for item in collected if item.count('/') != 3)
    if bad:
        return [bad + 'files are not @ids, foursight needs update']
    return []
def addText(text):
    """
    Updates the textbox in the corner of the svg. Ugly but works for current situation

    :param text: text to place verbatim inside the <tspan> element
    :return: SVG <text> element markup (str) with `text` embedded --
        NOTE(review): `text` is not XML-escaped, so '<', '>' or '&'
        would corrupt the SVG; confirm callers only pass plain text.
    """
    # The style/position attributes are hard-coded to match the existing
    # drawing; only the tspan content varies.
    return '<text xml:space="preserve" style="font-style:normal;font-weight:normal;font-size:9.75123596px;line-height:1.25;' \
           'font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-' \
           'width:0.02437809" x="314.22992" y="323.82281" id="text4456"><tspan sodipodi:role="line" id="tspan4454" x="314.22992" ' \
           'y="323.82281" style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:' \
           '5.85074186px;font-family:Arial;-inkscape-font-specification:Arial;stroke-width:0.02437809">' \
           + text + '</tspan></text>'
def check_type_value(val, name, expected_type, allow_none=False, print_value=True, none_msg=''):
    """
    Check that the given value is of the expected type and not None
    (unless allowed).

    Bug fixed: with allow_none=True the original still fell through to
    isinstance(None, expected_type) and raised TypeError; an allowed
    None is now returned immediately.

    :param val: the given value to check
    :param name: name of val (used in error messages)
    :param expected_type: the expected type (or tuple of types)
    :param allow_none: whether val is allowed to be None
    :param print_value: whether to include the value in error messages
    :param none_msg: extra text appended to the None error message
    :raise TypeError: if val is not of expected type
    :raise ValueError: if val is None while not allow_none
    :return: val unchanged
    """
    message = name
    if print_value:
        message += ' of value ' + str(val)
    if val is None:
        if allow_none:
            return val
        raise ValueError(message + ' should not be None.' + none_msg)
    if not isinstance(val, expected_type):
        raise TypeError(message + ' should be of type ' + str(expected_type) + '.' + ' but is of type ' + type(val).__name__)
    return val
def sorted_string(string: str) -> str:
    """Returns string as sorted block.

    Examples:
        >>> assert sorted_string("21AxBz") == "xzAB12"
        >>> assert sorted_string("abacad") == "abcd-a-a"
        >>> assert sorted_string("") == ""
    """
    chars = list(string)
    # one "-<ch>" marker per occurrence of any character that repeats
    duplicates = ["-{}".format(ch) for ch in string if chars.count(ch) > 1]
    # letters before digits; within letters, lowercase before uppercase
    ordered = sorted(sorted(set(string), key=str.swapcase), key=str.isdigit)
    if duplicates:
        # first duplicate occurrence is already represented in `ordered`
        ordered.extend(duplicates[1:])
    return "".join(ordered)
def bool_to_returncode(success: bool) -> int:
    """Return 0 if |success|, otherwise 1, printing the outcome."""
    message, code = ('Success.', 0) if success else ('Failed.', 1)
    print(message)
    return code
def _az_string(az):
"""Return an azimuth angle as compass direction.
>>> _az_string(0)
'N'
>>> _az_string(11)
'N'
>>> _az_string(12)
'NNE'
>>> _az_string(360 - 12)
'NNW'
>>> _az_string(360 - 11)
'N'
"""
assert 0.0 <= az <= 360
compass = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
assert len(compass) == 16
step = 360 / len(compass)
idx = int(round(az / step)) % len(compass)
assert 0 <= idx < len(compass)
return compass[idx] |
def sm_filter_opcodes(sm_opcodes, code='equal'):
    """Filter SequenceMatcher opcodes.
    Parameters
    ----------
    sm_opcodes : sequence
        The result of difflib.SequenceMatcher.get_opcodes()
    code : string
        The opcode tag to remove (e.g. 'equal', 'replace').
    Returns
    -------
    result : list
        The opcodes with the specified operation removed.
    """
    # A comprehension replaces the lambda-assigned filter (PEP 8 E731).
    return [opcode for opcode in sm_opcodes if opcode[0] != code]
def decode_boolean_setting(setting):
    """
    Decode a boolean setting string: "True" maps to True,
    any other value (including "False") maps to False.
    """
    if setting == "True":
        return True
    return False
def subclasses(cls, abstract=False, private=False):
    """Return the direct subclasses of class `cls` as a {name: class} dict.
    If abstract, include classes that still have abstract methods.
    If private, include classes whose name starts with an underscore.
    """
    return {
        sc.__name__: sc
        for sc in cls.__subclasses__()
        # getattr with a default: plain (non-ABC) classes have no
        # __abstractmethods__ attribute, which previously raised AttributeError.
        if (abstract or not getattr(sc, '__abstractmethods__', frozenset()))
        and (private or not sc.__name__.startswith('_'))
    }
def _request_check(input_json):
"""Check if the request json is valid"""
if input_json is None or not isinstance(input_json, dict):
return 'Can not parse the input json data - {}'.format(input_json)
try:
c = input_json['context']
qa = input_json['qas'][0]
qid = qa['qid']
q = qa['question']
except KeyError as e:
return 'Invalid request, key "{}" not found'.format(e)
return 'OK' |
def mind_your_PDQs(P=range(0,3), D=range(1,3), Q=range(0,3), s=None):
    """Build ARIMA order combinations.

    Returns {'pdq': [...]} with every (p, d, q) combination when *s* is
    None, otherwise {'PDQs': [...]} with every seasonal (p, d, q, s)
    combination.
    """
    from itertools import product
    if s is None:
        return {'pdq': list(product(P, D, Q))}
    return {'PDQs': list(product(P, D, Q, s))}
def relparse(myver):
    """Parses the last elements of a version number into a triplet, that can
    later be compared:
    >>> relparse('1.2_pre3')
    [1.2, -2, 3.0]
    >>> relparse('1.2b')
    [1.2, 98, 0]
    >>> relparse('1.2')
    [1.2, 0, 0]
    """
    def _number_and_letter(ver):
        # "1.2b" -> (1.2, ord('b')); "1.2" -> (1.2, 0).
        divider = len(ver) - 1
        if ver[divider:] not in "1234567890":
            # letter at end
            return float(ver[0:divider]), ord(ver[divider:])
        return float(ver), 0

    number = 0
    p1 = 0
    p2 = 0
    mynewver = myver.split('_')
    if len(mynewver) == 2:
        # suffix form, e.g. "1.2_pre3": weight the suffix via _package_weights_
        number = float(mynewver[0])
        match = 0
        for x in _package_ends_:
            elen = len(x)
            if mynewver[1][:elen] == x:
                match = 1
                p1 = _package_weights_[x]
                try:
                    p2 = float(mynewver[1][elen:])
                except ValueError:
                    # no (or malformed) trailing number after the suffix word
                    p2 = 0
                break
        if not match:
            # unknown suffix: fall back to plain number / trailing-letter parse
            number, p1 = _number_and_letter(myver)
    else:
        # normal number or number with letter at end
        number, p1 = _number_and_letter(myver)
    return [number, p1, p2]
def correct_sentence(text: str) -> str:
    """
    Return the sentence with its first letter capitalized and a trailing
    dot appended when one is missing.
    """
    result = text.capitalize()
    return result if result.endswith('.') else result + "."
def mkdir_cmd(path):
    """Return the shell command that creates *path* (with parents)."""
    return "/bin/mkdir -p " + path
def is_tool(name):
    """Check whether `name` is an executable found on PATH.

    Uses shutil.which: distutils.spawn.find_executable was deprecated by
    PEP 632 and distutils was removed entirely in Python 3.12.
    """
    from shutil import which
    return which(name) is not None
def parameter_dict(nested_parameter_dict, choices_dict):
    """Non-nested parameter fixture, built as a plain dictionary."""
    param = dict(
        key='key',
        type='Any',
        multi=False,
        display_name='display',
        optional=True,
        default='default',
        description='desc',
        choices=choices_dict,
        parameters=[nested_parameter_dict],
        nullable=False,
        maximum=10,
        minimum=1,
        regex='.*',
        form_input_type=None,
    )
    return param
def maximum(a, b, *others):
    """Return the largest value among all given arguments."""
    return max((a, b) + others)
def list_diff(list1, list2, identical=False):
    """
    Compare two lists.
    :param list1: source list whose order and duplicates are preserved
    :param list2: list to compare against
    :param identical: if True return the common values, otherwise the
        values of list1 that are missing from list2
    :return: list of values from list1
    """
    if identical:
        return [value for value in list1 if value in list2]
    return [value for value in list1 if value not in list2]
def stripIfPrefixFromIfName(ifName):
    """Strip the prefix from a BDS interface name.
    Args:
        ifName (str): BDS interface name, e.g. "ifp-0/0/1"
    Returns:
        str: interface name suffix, or None when the slash count of the
        suffix is not recognized
    """
    _, _, suffix = ifName.partition('-')
    slashes = suffix.count('/')
    if slashes in (2, 4):
        return suffix
    if slashes == 3:
        # drop the leading component before the first slash
        return suffix[suffix.index('/') + 1:]
def is_vararg(param_name):
    # type: (str) -> bool
    """ Tell whether a parameter is named as a (internal) vararg.
    :param param_name: String with a parameter name
    :returns: True if the name begins with '*', the internal vararg marker
    """
    return param_name[:1] == '*'
def dedup(L):
    """
    Sort *L* in place, remove duplicate entries, and return the same list.
    """
    if L:
        L.sort()
        unique = []
        for item in L:
            if not unique or unique[-1] != item:
                unique.append(item)
        # slice-assign so the caller's list object is the one deduplicated
        L[:] = unique
    return L
def in_box(coords, box):
    """
    Find if a coordinate tuple is strictly inside a bounding box.
    :param coords: Tuple containing latitude and longitude.
    :param box: Two tuples, where first is the bottom left, and the second is the top right of the box.
    :return: Boolean indicating if the coordinates are in the box.
    """
    lat_inside = box[0][0] < coords[0] < box[1][0]
    lon_inside = box[1][1] < coords[1] < box[0][1]
    return lat_inside and lon_inside
def greet(name):
    """
    Print a greeting for *name* and return 42 (dec4 sample function).
    """
    message = f"Hello {name}"
    print(message)
    return 42
def isIsomorphic(tree1, tree2):
    """Checks if two rooted binary trees (of type Node) are isomorphic.

    Trees are isomorphic when one can be obtained from the other by
    swapping the left/right children of some of its nodes; labels must
    match at every corresponding node.
    """
    # Both roots are empty: trees isomorphic by def
    if tree1 is None and tree2 is None:
        return True
    # Exactly one empty: trees can not be isomorphic
    if tree1 is None or tree2 is None:
        return False
    if tree1.label != tree2.label:
        return False
    isNonFlippedIsomorphic = (isIsomorphic(tree1.left, tree2.left) and
                              isIsomorphic(tree1.right, tree2.right))
    # Bug fix: the second operand previously repeated (tree1.left, tree2.right),
    # so the right/left pair was never checked in the flipped case.
    isFlippedIsomorphic = (isIsomorphic(tree1.left, tree2.right) and
                           isIsomorphic(tree1.right, tree2.left))
    return isNonFlippedIsomorphic or isFlippedIsomorphic
def is_safe(board, row, col, size):
    """Return True if a queen placed at board[row][col] is not attacked
    from the left (row and both left-pointing diagonals)."""
    # same row, every column to the left
    if any(board[row][c] == 1 for c in range(col)):
        return False
    # upper-left diagonal (the starting square is scanned too)
    r, c = row, col
    while r >= 0 and c >= 0:
        if board[r][c] == 1:
            return False
        r -= 1
        c -= 1
    # lower-left diagonal (the starting square is scanned too)
    r, c = row, col
    while r < size and c >= 0:
        if board[r][c] == 1:
            return False
        r += 1
        c -= 1
    return True
def cardlist_minus(cardlist1, cardlist2):
    """Subtract cardlist2's counts from cardlist1 in place.

    Entries that drop to zero or below are removed; cardlist1 is returned.
    """
    for name, amount in cardlist2.items():
        if name not in cardlist1:
            continue
        remaining = cardlist1[name] - amount
        if remaining > 0:
            cardlist1[name] = remaining
        else:
            del cardlist1[name]
    return cardlist1
def get_link_label_position(dictionary_key_string):
    """Get the position of a reference-style link label from a dictionary-key string, returning the position in list format.
    The link label's position is stored with 3 comma-separated numbers that indicate a link label's *line number*, *left bracket index*, and *right bracket index*. The numbers are stored in the following way: ["1,2,3"]
    The key is stored as a string instead of a list or a tuple because:
    1. Python does not support lists as dictionary keys.
    2. JSON does not support tuples as dictionary keys.
    """
    return list(map(int, dictionary_key_string.split(',')))
def get_many(d, required=None, optional=None, one_of=None):
    """Extract values from a dict for unpacking into simple variables.
    ``d`` is a dict.
    ``required`` is a list of keys that must be in the dict. The corresponding
    values will be the first elements in the return list. Raise KeyError if any
    of the keys are missing.
    ``optional`` is a list of optional keys. The corresponding values will be
    appended to the return list, substituting None for missing keys.
    ``one_of`` is a list of alternative keys. Take the first key that exists
    and append its value to the list. Raise KeyError if none of the keys exist.
    This argument will append exactly one value if specified, or will do
    nothing if not specified.
    Example::
        uid, action, limit, offset = get_many(request.params,
            required=['uid', 'action'], optional=['limit', 'offset'])
    Contributed by Shazow.
    """
    values = []
    for key in (required or []):
        values.append(d[key])
    for key in (optional or []):
        values.append(d.get(key))
    if one_of:
        for key in one_of:
            if key in d:
                values.append(d[key])
                break
        else:
            raise KeyError("none of these keys found: %s" % one_of)
    return values
def fibonacci(n: int) -> int:
    """
    :param n: the place in the Fibonacci sequence (0-indexed)
    :return: the nth number of the Fibonacci sequence

    Iterative implementation: the previous double recursion was O(2^n);
    this runs in O(n) and returns identical values (including n < 2 -> n).
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def fsm_transition_hints(context):
    """
    Expose the model admin's transition hints for the object being edited,
    or an empty dict when the template context has no 'original' object
    (e.g. on an add form).
    """
    obj = context.get('original', None)
    if not obj:
        return {}
    admin = context.get('adminform').model_admin
    return {'transition_hints': admin.get_transition_hints(obj)}
def relmatrix(f, val1, val2):
    """
    Build a table (list of lists) by applying function `f` to every
    combination of values from `val1` (rows) and `val2` (columns); the
    first row and first column hold the header values.
    :param f: applied function
    :param val1: row values
    :param val2: col values
    :return: nested list -- the table, headed by '' in the corner cell
    """
    table = [[''] + list(val2)]
    for row_val in val1:
        table.append([row_val] + [f(row_val, col_val) for col_val in val2])
    return table
def clear_double_slashes(_str):
    """ Iteratively collapse runs of consecutive slashes in *_str* to a single slash.

    (The docstring previously said "Recursive"; the loop is iterative.)
    """
    # 'in' stops at the first hit, unlike count() which scans the whole string.
    while "//" in _str:
        _str = _str.replace("//", "/")
    return _str
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.