def add_pads_if_necessary(s):
    """Append zero bits so the bit list forms a whole number of 64-bit blocks."""
    number_of_vacancy = len(s) % 64
    need_pads = number_of_vacancy > 0
    if need_pads:
        for i in range(64 - number_of_vacancy):
            s.append(0)
    return s
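# A quick usage sketch, assuming `s` is a plain Python list of bits:
padded = add_pads_if_necessary([1] * 60)
assert len(padded) == 64 and padded[-4:] == [0, 0, 0, 0]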
|
def stringDictToIntDict(dictionary):
    """
    Converts dictionary keys into integers; keys that cannot be parsed as
    integers are dropped from the result.
    :param dictionary: dict with string (or mixed) keys
    :return: dict keyed by int
    """
    result = {}
    for k in dictionary:
        try:
            result[int(k)] = dictionary[k]
        except ValueError:
            pass
    return result
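# Usage sketch: non-numeric keys are silently dropped.
assert stringDictToIntDict({'1': 'a', 'x': 'b'}) == {1: 'a'}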
|
def rivers_with_station(stations):
    """Returns a set with the names of the rivers with monitoring stations"""
    # Build empty set
    rivers = set()
    # Add the river of every station, and duplicates are removed automatically
    for station in stations:
        rivers.add(station.river)
    return rivers
|
def ensure_tuple_or_list(obj):
    """Given an object, wrap it into a tuple if it is not already a list or tuple."""
    if isinstance(obj, (list, tuple)):
        return obj
    return (obj,)
|
def github(path):
    """
    :param path: relative (to the root) path of a file in the glottolog data repository
    :return: URL to a file in Glottolog's data repository on GitHub
    """
    return 'https://github.com/glottolog/glottolog/blob/master/{0}'.format(path)
|
def skip_nulls_rec(dict_obj):
    """
    Recursively removes dict key/value pairs where the value is None,
    since these are not wanted in the JSON output.
    :param dict_obj: needs to be a dict
    :return: the trimmed dict, or the value unchanged if it wasn't a dict
    """
    if not isinstance(dict_obj, dict):
        return dict_obj
    result = {}
    for k, v in list(dict_obj.items()):
        if v is None:
            continue
        if isinstance(v, dict):
            result[k] = skip_nulls_rec(v)
        elif isinstance(v, list):
            result[k] = [skip_nulls_rec(o) for o in v]
        else:
            result[k] = v
    return result
|
def isEmbeddedInOtherArc(arc, arcs, startIndex=0, stopIndex=-1):
    """
    Check whether an arc is embedded within another arc between two indices.
    """
    isEmbedded = False
    testArcs = []
    for testArc in arcs:
        if (testArc[0] >= startIndex
                and testArc[-1] <= stopIndex
                and testArc != arc):
            testArcs.append(testArc)
    for testArc in testArcs:
        if arc[0] >= testArc[0] and arc[-1] <= testArc[-1]:
            isEmbedded = True
    return isEmbedded
|
def dmc_task2str(domain_name, task_name):
    """Convert domain_name and task_name to a string suitable for environment_kwargs"""
    return '%s-%s-v0' % (domain_name, task_name)
|
def findprev(layer, cols):
    """For a particular column in a particular layer, find the next earlier
    column in the layer that contains a node
    """
    found = -1
    pos = cols - 1
    while (pos >= 0) and (found < 0):
        if layer[pos] == 1:
            found = pos
        pos = pos - 1
    return found
|
def format(value):
    """
    Format a value as Python source: strings and bytes are quoted,
    everything else is passed through str(). Keep parameters simple,
    trust python...
    """
    if isinstance(value, (str, bytes)):
        value = "\'%s\'" % value
    return str(value)
|
def get_custom_db(firmware_version, _db):
    """Return the device database entry for the given firmware version, or None if absent."""
    if _db:
        if firmware_version in _db:
            return _db[firmware_version]
    return None
|
def getGamePgnUrl(game_id):
    """Returns lichess url for game with `game_id` id"""
    URL_TEMPLATE = "https://en.lichess.org/game/export/{}.pgn"
    return URL_TEMPLATE.format(game_id)
|
def revnum_to_revref(rev, old_marks):
    """Convert an hg revnum to a git-fast-import rev reference (an SHA1
    or a mark)"""
    return old_marks.get(rev) or b":%d" % (rev + 1)
|
def is_iterable(obj):
    """ Return True if `obj` is iterable
    """
    try:
        iter(obj)
    except TypeError:
        return False
    return True
|
def trsp(m):
    """Transpose of matrix"""
    M = len(m[0])
    n = len(m)
    T = [n * [0] for i in range(M)]
    for i in range(n):
        for j in range(M):
            T[j][i] = m[i][j]
    return T
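# Usage sketch: a 2x3 matrix transposes into a 3x2 one.
assert trsp([[1, 2, 3], [4, 5, 6]]) == [[1, 4], [2, 5], [3, 6]]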
|
def parse_cmu_seg_line(line, prepend_reco_to_spk=False):
    """Parses a 'line' from the CMU automatic segmentation for a recording.
    The CMU segmentation has the following format:
    <file> <channel> <speaker> <start-time> <end-time> <condition>
    We force the channel to be 1 and take the file-id to be the recording-id.
    """
    line = line.strip()
    if len(line) == 0 or line[0:2] == ";;":
        return None
    parts = line.split()
    # Actually a file, but we assume a 1-1 mapping to recording and force
    # channel to be 1.
    reco = parts[0]
    # The channel ID is expected to be 1.
    if parts[1] != "1":
        raise TypeError("Invalid line {0}".format(line))
    spk = parts[2]
    start_time = float(parts[3])
    end_time = float(parts[4])
    if prepend_reco_to_spk:
        spk = reco + '-' + spk
        utt = "{spk}-{0:06d}-{1:06d}".format(int(start_time * 100),
                                             int(end_time * 100), spk=spk)
    else:
        utt = "{spk}-{reco}-{0:06d}-{1:06d}".format(int(start_time * 100),
                                                    int(end_time * 100),
                                                    reco=reco, spk=spk)
    segment_line = "{0} {1} {st:.3f} {end:.3f}".format(
        utt, reco, st=start_time, end=end_time)
    utt2spk_line = "{0} {1}".format(utt, spk)
    return (segment_line, utt2spk_line)
|
def deps(value=None):
    """Returns empty for deps table requests since this app doesn't use them."""
    del value  # Unused.
    return ''
|
def edges_to_rings(edges):
    """
    Rings creation from pairs of edges.
    :param edges: set of (i,j) pairs representing edges of the alpha-shape.
        (i,j) are the indices in the points array
    :return: closed rings created from the edges
    """
    edges_list = list(edges)
    rings = []
    while len(edges_list):
        edge = edges_list.pop(0)
        ring = list(edge)
        next_edge = [i for i, e in enumerate(edges_list) if e[0] == edge[1]]
        while len(next_edge):
            edge = edges_list.pop(next_edge[0])
            ring.append(edge[1])
            next_edge = [i for i, e in enumerate(edges_list) if e[0] == edge[1]]
        rings.append(ring)
    return rings
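# Usage sketch: three edges chained head-to-tail close into one ring of point indices.
assert edges_to_rings([(0, 1), (1, 2), (2, 0)]) == [[0, 1, 2, 0]]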
|
def get_slide_id(full_filename: str) -> str:
    """get slide id
    Get slide id from the slideviewer full file name. The full_filename in
    the slideviewer csv is of the format: year;HOBS_ID;slide_id.svs
    for example: 2013;HobS13-283072057510;1435197.svs
    Args:
        full_filename (str): full filename of slide
    Returns:
        str: numeric slide id
    """
    return full_filename.split(";")[-1].replace(".svs", "")
|
def count(s, *args):
    """count(s, sub[, start[, end]]) -> int
    Return the number of occurrences of substring sub in string
    s[start:end]. Optional arguments start and end are
    interpreted as in slice notation.
    """
    return s.count(*args)
|
def checksum(byte_array):
    r"""
    Checks whether a byte array has a valid checksum or not.
    :param byte_array: of length 4 e.g. b'\x19\x10\x00\x29'
    :return: True for a valid checksum, False otherwise (including failed communication)
    """
    if len(byte_array) == 4:
        if (sum(byte_array[0:3]) % 256) == byte_array[3]:
            return True
    return False
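# Quick usage sketch: 0x19 + 0x10 + 0x00 == 0x29, so the final byte checks out.
assert checksum(b'\x19\x10\x00\x29')
assert not checksum(b'\x19\x10\x00\x30')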
|
def merge_next(letters: list, positions: list) -> list:
    """Given a list of letter positions, merge each letter with its next neighbor.
    >>> merge_next(['a', 'b', 'o', 'v', 'o'], [0, 2])
    ['ab', '', 'ov', '', 'o']
    >>> # Note: because it operates on the original list passed in, the effect is not cumulative:
    >>> merge_next(['a', 'b', 'o', 'v', 'o'], [0, 2, 3])
    ['ab', '', 'ov', 'o', '']
    """
    for pos in positions:
        letters[pos] = letters[pos] + letters[pos + 1]
        letters[pos + 1] = ""
    return letters
|
def to_kebab(value: str) -> str:
    """ snake_case to kebab-case """
    # The original try/except only re-raised a generic Exception, adding nothing.
    return value.replace('_', '-')
|
def word_count(sentence):
    """Return a dict mapping each word in `sentence` to its number of occurrences."""
    result = {}
    word = ""
    for char in sentence.lower():
        if char.isalnum():
            word += char
        else:
            if len(word):
                if result.get(word):
                    result[word] += 1
                else:
                    result.update({word: 1})
                word = ""
    # to process the last word
    if result.get(word) and len(word):
        result[word] += 1
    elif len(word):
        result.update({word: 1})
    return result
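# Usage sketch: punctuation splits words, and counting is case-insensitive.
assert word_count("Hello, hello world!") == {'hello': 2, 'world': 1}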
|
def validate_fail_under(num_str):
    """Validate the fail-under value from args.
    Must be between 1 and 100; anything outside that range raises ValueError.
    Args:
        num_str (str): string representation of a number.
    Returns:
        Any[float, int]: num_str converted to int or float.
    """
    try:
        value = int(num_str)
    except ValueError:
        value = float(num_str)
    if 1 <= value <= 100:
        return value
    raise ValueError(
        "Not allowing docstring coverage below 1% to be tested. Should be between 1 and 100, not: {0}".format(
            value
        )
    )
|
def remove_blank_lines(string):
    """ Removes all blank lines in @string
        -> #str without blank lines
    """
    return "\n".join(line
                     for line in string.split("\n")
                     if len(line.strip()))
|
def pf_potential(phi):
    """ Phase field potential. """
    return 0.25*(1.-phi**2)**2
|
def get_possible_sgrp_suf(sgrp_nr):
    """
    determine possible space group suffix. Multiple suffixes might be possible
    for one space group due to different origin choice, unique axis, or choice
    of the unit cell shape.
    Parameters
    ----------
    sgrp_nr : int
        space group number
    Returns
    -------
    str or list
        either an empty string or a list of possible valid suffix strings
    """
    sgrp_suf = ''
    if sgrp_nr in [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]:
        sgrp_suf = [':b', ':c']
    elif sgrp_nr in [48, 50, 59, 68, 70, 85, 86, 88, 125, 126,
                     129, 130, 133, 134, 137, 138, 141, 142,
                     201, 203, 222, 224, 227, 228]:
        sgrp_suf = [':1', ':2']
    elif sgrp_nr in [146, 148, 155, 160, 161, 166, 167]:
        sgrp_suf = [':H', ':R']
    return sgrp_suf
|
def where_in(name, value, big_range):
    """Determines which bucket of big_range 'value' lies in.
    big_range is a (bottom, top, step) triple; returns a tag string like
    'name bot-top', or None if value falls outside the range.
    """
    bottom = big_range[0]
    top = big_range[1]
    step = big_range[2]
    i = 0
    bot_range = bottom + i * step
    while bot_range < top:
        bot_range = bottom + i * step
        top_range = bottom + (i + 1) * step
        i += 1
        if value >= bot_range and value < top_range:
            tag = name + " {0}-{1}".format(bot_range, top_range)
            return tag
|
def inverse_points(points):
    """
    Generate the points for the inverse CDF of a distribution.
    Takes the points for the CDF and transforms them into the points for
    the inverse. Due to the discrete nature of the function, the x and
    y coordinates must be re-paired such that the inverse function is
    defined as above.
    """
    inverse_points = []
    next_y = 0
    for x, y in points:
        inverse_points.append((next_y, x))
        next_y = y
    return inverse_points
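# Usage sketch: CDF points (x, F(x)) become inverse points (previous F, x).
assert inverse_points([(1, 0.5), (2, 1.0)]) == [(0, 1), (0.5, 2)]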
|
def introspection_email(request):
    """Returns the email to be returned by the introspection endpoint."""
    return request.param if hasattr(request, 'param') else None
|
def merge_adjacent(numbers, indicator='..', base=0):
    """ Merge adjacent numbers in an iterable of numbers.
    Parameters:
        numbers (list): List of integers or numeric strings.
        indicator (str): Delimiter to indicate generated ranges.
        base (int): Passed to the `int()` conversion when comparing numbers.
    Return:
        list of str: Condensed sequence with either ranges or isolated numbers.
    """
    integers = list(sorted([(int("%s" % i, base), i) for i in numbers]))
    idx = 0
    result = []
    while idx < len(numbers):
        end = idx + 1
        while end < len(numbers) and integers[end-1][0] == integers[end][0] - 1:
            end += 1
        result.append("%s%s%s" % (integers[idx][1], indicator, integers[end-1][1])
                      if end > idx + 1
                      else "%s" % integers[idx][1])
        idx = end
    return result
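# Usage sketch: consecutive values collapse into "first..last" ranges.
assert merge_adjacent([1, 2, 3, 7]) == ['1..3', '7']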
|
def _altitude_factor(alt_units_in: str, alt_units_out: str) -> float:
    """helper method for convert_altitude"""
    factor = 1.0
    # input units to feet
    if alt_units_in == 'm':
        factor /= 0.3048
    elif alt_units_in == 'ft':
        pass
    elif alt_units_in == 'kft':
        factor *= 1000.
    else:
        raise RuntimeError('alt_units_in=%r is not valid; use [ft, m, kft]' % alt_units_in)
    # feet to output units
    if alt_units_out == 'm':
        factor *= 0.3048
    elif alt_units_out == 'ft':
        pass
    elif alt_units_out == 'kft':
        factor /= 1000.
    else:
        raise RuntimeError('alt_units_out=%r is not valid; use [ft, m, kft]' % alt_units_out)
    return factor
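# Usage sketch: 1 m expressed in kft is (1 / 0.3048) ft / 1000.
assert abs(_altitude_factor('m', 'kft') - 1 / 0.3048 / 1000) < 1e-12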
|
def separate_bags(bags):
    """
    Separates the positive and negative bags.
    Takes a list of bags as input and returns lists of indices for positive and negative bags:
    pos, neg = separate_bags(bags)
    """
    # random.shuffle(bags)
    pos_bags = []
    neg_bags = []
    for ind in range(len(bags)):
        if bags[ind].label == 1.0:
            pos_bags += [ind]
        else:
            neg_bags += [ind]
    return pos_bags, neg_bags
|
def parse_line(line):
    """Takes a string of two comma-separated integers. Returns the integers to
    the caller.
    """
    x, n = line.strip().split(',')
    return int(x), int(n)
|
def in_array(key: str, array: list):
    """Return True if given key exists in given array"""
    return key in array
|
def CleanFloat(number, locale='en'):
    """\
    Return number without decimal points if .0, otherwise with .x
    """
    try:
        if number % 1 == 0:
            return str(int(number))
        else:
            return str(float(number))
    except Exception:
        return number
|
def capitalize_first_letter(string):
    """Capitalize first letter of words"""
    return_string = ""
    split_array = string.split(" ")
    for s in split_array:
        return_string = return_string + " " + s.capitalize()
    return return_string.strip()
|
def objective_limit(energy, limit):
    """
    The algorithm stops as soon as the current objective function value
    is less than or equal to the limit.
    """
    return energy <= limit
|
def reduce(f, seq):
    """ Takes a function f and runs through the given values list seq
    with it, folding from the right.
    """
    if not seq:
        return 0
    elif isinstance(seq[0], list):
        return reduce(f, seq[0])
    elif len(seq) == 1:
        return seq[0]
    else:
        return f(seq[0], reduce(f, seq[1:]))
|
def update_dtypes_dict(dtypes_dict: dict) -> dict:
    """
    Task to update the dtypes dictionary that will be stored in the schema. This is
    required to work around a Pandas to_parquet bug connected with mixed dtypes in object columns.
    Args:
        dtypes_dict (dict): Data types dictionary inferred by Visions
    Returns:
        dtypes_dict_updated (dict): Data types dictionary updated to follow Pandas requirements
        for the to_parquet functionality.
    """
    dtypes_dict_updated = {
        k: ("String" if v == "Object" else str(v)) for k, v in dtypes_dict.items()
    }
    return dtypes_dict_updated
|
def _sbe(exp, synapses):
    """
    Subcellular binary expression model for chemical synapses
    Parameters:
    ----------
    exp : Expression object
    synapses: dict
        Synapse data
    """
    lus = {}
    for cell in synapses:
        lus[cell] = [0., 0.]
        for cont in synapses[cell]:
            adj = set(synapses[cell][cont]['neighbors'])
            post = set(synapses[cell][cont]['partners'])
            nonsyn = adj - post
            for s in post:
                lus[cell][0] += 1
                for ns in nonsyn:
                    if ns not in exp.cells.keys():
                        continue
                    diff = exp.compute_difference(s, ns)
                    if diff < 1:
                        lus[cell][1] += 1
                        break
    LUS = dict([(n, 1 - lus[n][1] / lus[n][0]) for n in lus if lus[n][0] > 0])
    return LUS
|
def pressure(v, t, n):
    """Ideal gas pressure p = n*k*T/V."""
    k = 1.38e-23  # boltzmann constant
    return n * k * t / v
|
def check_accounts(msg: dict) -> int:
    """
    Returns the number of accounts to process.
    The incoming msg contains the Account Id, Groups and Account name.
    """
    accounts = 0
    print(f"DEBUG --- check_account msg parameter {msg}")
    if msg != "":
        accounts = len(msg[0])
    return accounts
|
def sortable_date(date_fr):
    """ '23/12/1977' -> ('1977', '12', '23') """
    return tuple(reversed(date_fr.split("/")))
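# Usage sketch: reversed date parts sort chronologically as tuples.
assert sortable_date('23/12/1977') == ('1977', '12', '23')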
|
def bestAlgorithm_(sequencingChemistries):
    """
    Identify the (de novo) consensus algorithm we expect to deliver
    the best results, given the sequencing chemistries represented in
    an alignment file.
    We key off the sequencing chemistries as follows:
    - Just RS chemistry data? Then use quiver (at least for now, until
      we get arrow > quiver on P6-C4)
    - Else (either all Sequel data, or a mix of Sequel and RS data),
      use arrow.
    - Unknown chemistry found? Return None; we should abort if this is found
    Note that the handling/rejection of chemistry mixtures (including
    mixtures of Sequel and RS data) is left to the algorithm itself.
    """
    if len(sequencingChemistries) == 0:
        raise ValueError("sequencingChemistries must be nonempty list or set")
    chems = set(sequencingChemistries)
    anyUnknown = "unknown" in chems
    allRS = all(not chem.startswith("S/") for chem in chems) and not anyUnknown
    if anyUnknown:
        return None
    elif allRS:
        return "quiver"
    else:
        return "arrow"
|
def _get_formatted_timestamp(app_type):
    """Different services require different date formats - return the proper format here"""
    if app_type.startswith('duo'):
        return 1505316432
    if app_type.startswith('onelogin'):
        return '2017-10-10T22:03:57Z'
    if app_type.startswith('gsuite') or app_type == 'salesforce':
        return '2017-06-17T15:39:18.460Z'
    if app_type.startswith('box'):
        return '2017-10-27T12:31:22-07:00'
    if app_type == 'slack':
        return 1422922593
    if app_type == 'aliyun':
        return '2018-07-23T15:42:11Z'
    return 1234567890
|
def round_(value, digits=0):
    """Rounds a number to the given number of digits (default: nearest whole number)."""
    return round(value, digits)
|
def bubble_sort(a):
    """
    Sorts the list 'a' using Bubble sort algorithm
    >>> from pydsa import bubble_sort
    >>> a = [3, 4, 2, 1, 12, 9]
    >>> bubble_sort(a)
    [1, 2, 3, 4, 9, 12]
    """
    for k in range(len(a)):
        flag = 0
        for i in range(0, len(a)-k-1):
            if a[i] > a[i+1]:
                a[i], a[i+1] = a[i+1], a[i]
                flag = 1
        if flag == 0:
            break
    return a
|
def set_direction_of_pp(pp):
    """ Order the direction of the path so that we always go the same way and can define locations:
    order going clockwise from posterior, so that left is ~25% and right is ~75% (top view)
    """
    pplength = len(pp)
    p25 = pp[int(0.25*pplength)]
    p75 = pp[int(0.75*pplength)]
    # check which is left and right = dim 0
    if not p25[0] > p75[0]:
        # reorder back to front
        pp = [pp[-i] for i in range(1, len(pp)+1)]
    return pp
|
def minThresholdClassify(sim_vec_dict, sim_thres):
    """Method to classify the given similarity vector dictionary with regard to
    a given similarity threshold (in the range 0.0 to 1.0), where record pairs
    that have all their similarities (of all attributes compared) with at
    least this threshold are classified as matches and all others as
    non-matches.
    Parameter Description:
        sim_vec_dict : Dictionary of record pairs with their identifiers as
                       keys and their corresponding similarity vectors as
                       values.
        sim_thres    : The classification minimum similarity threshold.
    """
    assert sim_thres >= 0.0 and sim_thres <= 1.0, sim_thres
    print('Minimum similarity threshold based classification of ' +
          '%d record pairs' % (len(sim_vec_dict)))
    print('  Classification similarity threshold: %.3f' % (sim_thres))
    class_match_set = set()
    class_nonmatch_set = set()
    # Iterate over all record pairs
    #
    for (rec_id_tuple, sim_vec) in sim_vec_dict.items():
        # Flag to check if all attribute similarities are high enough or not
        #
        record_pair_match = True
        # Check all the compared attributes
        #
        for sim in sim_vec:
            if sim < sim_thres:  # Similarity is not enough
                record_pair_match = False
                break  # No need to compare more similarities, speed-up the process
        if record_pair_match:  # All similarities are high enough
            class_match_set.add(rec_id_tuple)
        else:
            class_nonmatch_set.add(rec_id_tuple)
    print('  Classified %d record pairs as matches and %d as non-matches' %
          (len(class_match_set), len(class_nonmatch_set)))
    print('')
    return class_match_set, class_nonmatch_set
|
def compute_FLOP(nl, nL, sl, ml):
    """
    Compute the FLOP count of the model (operation complexity).
    :param nl: channel (filter) num of the input
    :param nL: filter num
    :param sl: kernel size
    :param ml: output spatial size (length)
    :return: the FLOP estimation
    """
    return nl * nL * sl * sl * ml * ml
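# Worked example: a 3-channel input, 64 filters of size 3x3, 32x32 output
# gives 3 * 64 * 3 * 3 * 32 * 32 = 1,769,472 multiply-accumulates.
assert compute_FLOP(3, 64, 3, 32) == 1769472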
|
def flatten_list(alist, howdeep=1):
    """Flattens nested sequences."""
    if howdeep > 0:
        newlist = []
        for nested in alist:
            try:
                newlist.extend(nested)
            except TypeError:
                newlist.append(nested)
        howdeep -= 1
        alist = flatten_list(newlist, howdeep)
    return alist
|
def _left_parser(tab, i):
    """Helper function to build google observations
    """
    coord = []
    tab_ = tab[i]["left_team_positions"]
    for list_ in tab_:
        for value in list_:
            coord.append(value)
    return coord
|
def required_props(props):
    """Pull names of required props from the props object.
    Parameters
    ----------
    props: dict
    Returns
    -------
    list
        List of prop names (str) that are required for the Component
    """
    return [prop_name for prop_name, prop in list(props.items()) if prop["required"]]
|
def bytesToStr(filename):
    """Return str for a bytes filename.
    """
    return filename.decode("utf8", errors="backslashreplace")
|
def smart_split(item, split_key=':'):
    """
    Split a string at the first occurrence of split_key.
    :param item: string which contains field_name:value or field_name:[00:00:00 TO 01:00:00]
    :param split_key: key used to split the string
    :return: [field_name, value] pair
    """
    split_index = item.find(split_key)
    return [item[0:split_index], item[split_index + 1:]]
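# Usage sketch: only the first ':' splits, so colons inside the value survive.
assert smart_split('time:[00:00:00 TO 01:00:00]') == ['time', '[00:00:00 TO 01:00:00]']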
|
def to_latex(var):
    """Returns a latex representation for a given variable string name.
    Parameters
    ----------
    var : string
        One of the variable names used in the bicycleparameters package.
    Returns
    -------
    latex : string
        A string formatted for pretty LaTeX math print.
    """
    latexMap = {'f': 'f',
                'w': 'w',
                'gamma': r'\gamma',
                'g': 'g',
                'lcs': 'l_{cs}',
                'hbb': 'h_{bb}',
                'lsp': 'l_{sp}',
                'lst': 'l_{st}',
                'lamst': r'\lambda_{st}',
                'whb': 'w_{hb}',
                'LhbF': 'l_{hbF}',
                'LhbR': 'l_{hbR}',
                'd': 'd',
                'l': 'l',
                'c': 'c',
                'lam': r'\lambda',
                'xcl': 'x_{cl}',
                'zcl': 'z_{cl}',
                'ds1': 'd_{s1}',
                'ds3': 'd_{s3}'}
    try:
        latex = latexMap[var]
    except KeyError:
        if var.startswith('alpha'):
            latex = r'\alpha_{' + var[-2:] + '}'
        elif var.startswith('a') and len(var) == 3:
            latex = 'a_{' + var[-2:] + '}'
        elif var.startswith('T'):
            latex = 'T^' + var[1] + '_{' + var[-2:] + '}'
        elif len(var) == 2:
            latex = var[0] + '_' + var[1]
        elif var.startswith('I'):
            latex = var[0] + '_{' + var[1:] + '}'
        else:
            raise
    return latex
|
def filter_queryset_real_organization(queryset, auto_generated: bool):
    """ Filters a given REST framework queryset for real (not auto generated) organizations.
    Only keeps organizations whose auto generated flag matches the given value.
    Args:
        queryset: A queryset containing elements
        auto_generated (bool): Whether the real or the auto generated organizations shall be returned
    Returns:
        queryset: The given queryset which only contains matching elements
    """
    if auto_generated is not None and isinstance(auto_generated, bool):
        queryset = queryset.filter(
            is_auto_generated=auto_generated
        )
    return queryset
|
def get_custom_endpoints(origin_endpoints, offset=0):
    """
    origin_endpoint: ip:port
    user_define_endpoint: ip:(port+offset)
    """
    assert origin_endpoints is not None
    paddle_user_define_endpoints_list = []
    for ip_port in origin_endpoints.split(","):
        ip = ip_port.split(":")[0]
        port = ip_port.split(":")[1]
        new_port = int(port) + offset
        paddle_user_define_endpoints_list.append(":".join((ip, str(new_port))))
    paddle_user_define_endpoints = ",".join(paddle_user_define_endpoints_list)
    return paddle_user_define_endpoints
|
def indent(text: str, spaces: int):
    """
    Prepend every line of the specified text with a set number of spaces. Line endings are
    preserved.
    """
    prefix = " " * spaces
    return "".join(prefix + t for t in text.splitlines(keepends=True))
|
def textToFloat(text, defaultFloat):
    """Converts text to a float with float() and returns the float.
    If something goes wrong, returns defaultFloat.
    """
    try:
        returnFloat = float(text)
    except Exception:
        returnFloat = defaultFloat
    return returnFloat
|
def split_python_text(text):
    """
    Splits the first '#ifdef PYTHON_SETUP ... #endif' python section from text.
    Returns:
        (pythontext, nonpythontext)
    """
    s = text.split('#ifdef PYTHON_SETUP', 1)
    if len(s) < 2:
        return (None, text)
    (beforep, afterp) = s
    (pythontext, aftere) = afterp.split('\n#endif', 1)
    nonpythontext = beforep + aftere
    return (pythontext, nonpythontext)
|
def MakeArgs(l):
    """['-a', '', 'abc', ''] -> '-a abc'"""
    return " ".join(filter(None, l))
|
def normalize_url(url):
    """Adds trailing slash if necessary."""
    if not url.endswith('/'):
        return '%(url)s/' % {'url': url}
    else:
        return url
|
def sum_of_n_even_nums(i):
    """Calculate sum of first i even numbers"""
    count = i
    num = 0
    ans = 0
    iterator = 1
    while iterator <= count:
        if num % 2 == 0:
            ans = ans + num
            iterator = iterator + 1
            num = num + 1
        else:
            num = num + 1
    return ans
|
def _is_valid_transformer(transformer_name):
    """Determine if transformer should be tested or not."""
    return transformer_name != 'IdentityTransformer' and 'Dummy' not in transformer_name
|
def format_time(seconds):
    """Returns a short human-readable duration string (5 chars)
    Args:
        seconds (float)
    """
    if seconds < 1:
        return f"{seconds:.3f}s"[1:]  # e.g. .652s
    if seconds < 10:
        return f"{seconds:.2f}s"  # e.g. 5.21s
    if seconds < 100:
        return f"{seconds:.1f}s"  # e.g. 85.2s
    if seconds < 180:
        return f"{seconds:4.0f}s"  # e.g. 152s
    minutes = seconds // 60
    seconds = seconds % 60
    if minutes < 100:
        return f"{minutes:2.0f}m{seconds:02.0f}"
    return f"{minutes:4.0f}m"
|
def get_always_bytes(path: str) -> bytes:
    """
    Returns a constant two-byte value (path is unused).
    """
    if path:
        pass
    return b"\x04\x00"
|
def most_frequent(data):
    """
    Determines the most frequently occurring string in the sequence.
    """
    print([(data.count(x), x) for x in data])
    return max([(data.count(x), x) for x in data])[1]
|
def _coerce_field_name(field_name, field_index):
    """
    Coerce a field_name (which may be a callable) to a string.
    """
    if callable(field_name):
        if field_name.__name__ == '<lambda>':
            return 'lambda' + str(field_index)
        return field_name.__name__
    return field_name
|
def marital_status_from_string(str):
    """Convert marital status to one of ['single', 'partner', 'married',
    'separated', 'widowed'], falling back to 'other' for unknown values."""
    marital_status_dict = {
        'Single': 'single',
        'Significant other': 'partner',
        'Life Partner': 'partner',
        'Married': 'married',
        'Divorced': 'separated',
        'Legally Separated': 'separated',
        'Separated': 'separated',
        'Widowed': 'widowed'
    }
    return marital_status_dict.get(str, 'other')
|
def rfam_problems(status):
    """
    Create a list of the names of all Rfam problems.
    """
    ignore = {"has_issues", "messages", "has_issue", "id"}
    problems = sorted(n for n, v in status.items() if v and n not in ignore)
    return problems or ["none"]
|
def get_spc_info(spc_dct_i):
    """ convert species dictionary to species_info array
    """
    err_msg = ''
    props = ['ich', 'chg', 'mul']
    for i, prop in enumerate(props):
        if prop in spc_dct_i:
            props[i] = spc_dct_i[prop]
        else:
            err_msg = prop
    if err_msg:
        print('ERROR: No {} found'.format(err_msg))
    return props
|
def _dict_clean(d):
    """ Replace None with empty string in dict
    Args:
        d (dict): dictionary
    Returns:
        dict
    """
    result = {}
    for key, value in d.items():
        if value is None:
            value = ''
        result[key] = value
    return result
|
def _find_replacement(key, kwargs):
    """Finds a replacement for key that doesn't collide with anything in kwargs."""
    key += '_'
    while key in kwargs:
        key += '_'
    return key
|
def update_upper_bound(update_model, Ediffs, thresholds, upper_bound,
                       scaling_factor, upper_bound_limit):
    """Update upper bound
    Args:
        update_model (bool)      : True : update model.
                                   False: do not update model.
        Ediffs (dict)            : Differences of error
                                   for increasing (or decreasing) upper bound of thresholds
        thresholds (dict)        : Pruning error thresholds
        upper_bound (float)      : Upper bound of 'thresholds'
        scaling_factor (float)   : Scaling factor for 'upper_bound'
        upper_bound_limit (float): Upper limit for 'upper_bound'
    Returns:
        upper_bound (float)      : Updated upper bound of thresholds
    """
    if update_model:
        Qscale = {}
        for k in Ediffs.keys():
            Qscale[k] = 1 + Ediffs[k] / thresholds[k]
            if Qscale[k] == 0:
                Qscale[k] = 1
        upper_bound *= max(scaling_factor, min(Qscale.values()))
        upper_bound = min(upper_bound, upper_bound_limit)
    else:
        Qscale = {}
        for k in Ediffs.keys():
            Qscale[k] = 1 - Ediffs[k] / thresholds[k]
        upper_bound *= min(1 / scaling_factor, max(Qscale.values()))
        upper_bound = min(upper_bound, upper_bound_limit)
    return upper_bound
|
def cluster_points(cluster_name: str) -> list:
    """
    Return points composing the cluster, removing brackets and hyphen from cluster name,
    e.g. ((a)-(b))-(c) becomes [a, b, c].
    :param cluster_name: name of the cluster.
    :return: points forming the cluster.
    """
    return cluster_name.replace("(", "").replace(")", "").split("-")
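# Usage sketch matching the docstring example:
assert cluster_points("((a)-(b))-(c)") == ['a', 'b', 'c']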
|
def initialize_object_list(inp, cls):
    """ Utility function to return a list of objects from a valid input (a single object or
    a list of objects where each object is of the class ``cls``).
    If the input is invalid, ``None`` is returned.
    :param inp: Input.
    :paramtype inp: (list, cls) or cls
    :param cls: Class of each element of the list.
    :paramtype cls: cls
    :return: List of objects if valid input else None.
    :rtype: list, cls or None
    """
    obj_list = None
    if inp is not None and isinstance(inp, list):
        if all(isinstance(x, cls) for x in inp):
            obj_list = inp
    if inp is not None and isinstance(inp, cls):  # make into list if not list
        obj_list = [inp]
    return obj_list
|
def queryBeforeTraverse(container, app_handle):
    """Find __before_traverse__ hook objects, given an 'app_handle'.
    Returns a list of (priority, object) pairs."""
    btr = getattr(container, '__before_traverse__', {})
    objects = []
    for k in btr.keys():
        if k[1] == app_handle:
            objects.append((k[0], btr[k]))
    return objects
|
def get_attributes_threshold(alist, decreasing_factor, min_activity_count=1, max_activity_count=25):
    """
    Get attributes cutting threshold
    Parameters
    ----------
    alist
        Sorted attributes list
    decreasing_factor
        Decreasing factor of the algorithm
    min_activity_count
        Minimum number of activities to include
    max_activity_count
        Maximum number of activities to include
    Returns
    ---------
    threshold
        Activities cutting threshold
    """
    index = max(0, min(min_activity_count - 1, len(alist) - 1))
    threshold = alist[index][1]
    index = index + 1
    for i in range(index, len(alist)):
        value = alist[i][1]
        if value > threshold * decreasing_factor:
            threshold = value
        if i >= max_activity_count:
            break
    return threshold
|
def square(x):
    """Return the square of x."""
    return x ** 2
|
def numberOfSteps(num):
    """
    Count the steps to reduce num to zero: halve when even, subtract 1 when odd.
    :type num: int
    :rtype: int
    """
    res = num
    count = 0
    while num != 0:
        if num % 2 == 0:
            res = num // 2  # integer division keeps num an int
            count += 1
            num = res
        else:
            res = num - 1
            count += 1
            num = res
    return count
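# Usage sketch: 14 -> 7 -> 6 -> 3 -> 2 -> 1 -> 0 takes six steps.
assert numberOfSteps(14) == 6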
|
def _translate_ip_xml_json(ip):
    """
    Convert the address version to int.
    """
    ip = dict(ip)
    version = ip.get('version')
    if version:
        ip['version'] = int(version)
    # 'type' and 'mac_addr' pass through unchanged.
    return ip
|
def suma_total(monto=0):
    """ Calculates the total sum """
    calculo_suma = 20
    calculo_suma += monto
    return calculo_suma
|
def split_dataset_sizes(stream_list, split_sizes):
    """Splits a stream list into chunks proportional to the given sizes
    Args:
        stream_list (list): list of stream paths
        split_sizes (list): batch size per worker
    """
    out = []
    start = 0
    total = sum(split_sizes)
    for split_size in split_sizes[:-1]:
        num = int(split_size / total * len(stream_list))
        end = start + num
        out.append(stream_list[start:end])
        start = end
    out.append(stream_list[start:])
    return out
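# Usage sketch: 10 streams split proportionally to worker batch sizes 1 and 3.
chunks = split_dataset_sizes(list(range(10)), [1, 3])
assert [len(c) for c in chunks] == [2, 8]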
|
def stripDrive(path):
    """Strips off leading "drive:foo" specification from path, if present
    """
    # strip any drives off the front of the filename
    first, *rest = path.strip("/").split("/")
    return "/".join([first.split(":").pop(), *rest])
|
def merge_dict(destination, source, path=None):
    """merges source into destination"""
    if path is None:
        path = []
    for key in source:
        if key in destination:
            if isinstance(destination[key], dict) and isinstance(source[key], dict):
                merge_dict(destination[key], source[key], path + [str(key)])
            elif destination[key] == source[key]:
                pass  # same leaf value
            else:
                raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
        else:
            destination[key] = source[key]
    return destination
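# Usage sketch: nested dicts merge recursively; conflicting leaf values raise.
d = merge_dict({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
assert d == {'a': {'x': 1, 'y': 2}, 'b': 3}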
|
def get_dict_value(key, data):
    """Return data[key] with improved KeyError."""
    try:
        return data[key]
    except (KeyError, TypeError):
        raise KeyError("No key [%s] in [%s]" % (key, data))
|
def left_top_submatrices(left_top_tuple: tuple, n_half: int) -> tuple:
    """
    Calculates the coordinates of the top left corner of the four submatrices
    and returns these as four tuples of two coordinates
    """
    left, top = left_top_tuple
    M00 = (left, top)
    M01 = (left + n_half, top)
    M10 = (left, top + n_half)
    M11 = (left + n_half, top + n_half)
    return (M00, M01, M10, M11)
|
def match_fields(exp_fields, fields):
    """
    Check field names and values match the expected ones.
    - exp_fields:
        A list of dictionaries with field name/value pairs.
    - fields:
        SPARKL event fields as returned by the listener.
        [
            {'attr': {'name': 'n', 'value': 3}},
            {'attr': {'name': 'div', 'value': 2}}]
    """
    # Reformat event fields to contain only name and value. If value is not
    # given (in case of FLAG fields), value is None.
    fields = [{field['attr']['name']: field['attr']['value']}
              if 'value' in field['attr']
              else {field['attr']['name']: None}
              for field in fields]
    return exp_fields == fields
|
def parse_state_line(line):
    """
    Parse a line from a Tor state file and return the data that we should plot in the histogram.
    For example if it's (CircuitBuildTimeBin 342 4) return [342, 342, 342, 342]
    """
    items = line.split()
    # We only use CircuitBuildTimeBin lines
    if len(items) < 1:
        return None
    if items[0] == "CircuitBuildTimeBin":
        value = int(items[1])
        occurrences = int(items[2])
    elif items[0] == "CircuitBuildAbandonedCount":
        value = float("NaN")
        occurrences = int(items[1])
    else:
        return None
    return [value] * occurrences
|
def mu_air(BP, RH, TC):
    """Returns the inverse 1/e penetration depth [mm-1] as a function of
    barometric pressure (BP) in torr, relative humidity (RH) in %, and
    temperature in Celsius. The expressions below were derived from equations
    and tabulated data found in:
    https://en.wikipedia.org/wiki/Antoine_equation
    The gas transmission was calculated at 12 keV and 295 K using:
    http://henke.lbl.gov/optical_constants/gastrn2.html
    The temperature dependence of the returned absorption coefficient assumes
    ideal gas behavior. The absorbance coefficient returned corresponds to 12 keV.
    """
    A, B, C = 8.07131, 1730.63, 233.426  # Antoine constants for water
    VP = (RH/100)*10**(A - B/(C+TC))     # water vapor partial pressure [torr]
    mu_a = 4.3803e-07   # (mm-1 torr-1 @ 12 keV)
    mu_wv = 2.8543e-07  # (mm-1 torr-1 @ 12 keV)
    return (BP-VP)*mu_a*295/(273+TC) + VP*mu_wv
|
def _diff(state_data, resource_object):
    """Helper method to compare salt state info with the PagerDuty API json structure,
    and determine if we need to update.
    Returns the dict to pass to the PD API to perform the update, or an empty dict if no update.
    """
    objects_differ = None
    for k, v in state_data["service"].items():
        if k == "escalation_policy_id":
            resource_value = resource_object["escalation_policy"]["id"]
        elif k == "service_key":
            # service_key on create must be 'foo' but the GET will return 'foo@bar.pagerduty.com'
            resource_value = resource_object["service_key"]
            if "@" in resource_value:
                resource_value = resource_value[0:resource_value.find("@")]
        else:
            resource_value = resource_object[k]
        if v != resource_value:
            objects_differ = "{0} {1} {2}".format(k, v, resource_value)
            break
    if objects_differ:
        return state_data
    else:
        return {}
|
def create_text(text, hashtags):
    """Returns a solid string containing the entire text of the posting
    Parameters:
        text (string): text of your posting
        hashtags (list): list of hashtags e.g. from get_random_hashtags()
    Returns:
        string that contains the posting
    """
    output = text + '\n.\n.\n.\n.\n' + ' '.join(map(str, hashtags))
    return output
|
def determine_sentiment(delta):
    """Returns 1 for positive sentiment, 0 otherwise"""
    if delta > 0:
        return 1
    else:
        return 0
|
def get_es_substitutions(num):
    """Return Spanish substitutions for plurals"""
    subs = [{'n': '', 's': ''},
            {'n': 'n', 's': 's'}][num > 1]
    subs['num'] = num
    return subs
|
def create_filepath_template(intemp: str, output_1partition: bool):
    """Process the user input's template into a python string that allows us to pass variables'
    values to get the correct file name.
    There are three variables:
    1. `{auto}`: an auto-incremental ID of the new partition
    2. `{stem}`: the stem of the current processing partition.
    3. `{}` or `*`: will be `{auto}` if we are generating multiple partitions, otherwise `{stem}`
    Args:
        intemp (str): user input's template
        output_1partition (bool): true if we are generating one partition
    Returns:
        str: the template on which we can use the python string's format function to pass the
        variables' values
    """
    if output_1partition:
        default = "{stem}"
    else:
        default = "{auto:05d}"
    intemp = intemp.replace("*", default)
    intemp = intemp.replace("{}", default)
    intemp = intemp.replace("{auto}", "{auto:05d}")
    return intemp
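# Usage sketch: with multiple partitions, '*' expands to the zero-padded auto ID.
tmpl = create_filepath_template("part-*.json", output_1partition=False)
assert tmpl == "part-{auto:05d}.json"
assert tmpl.format(auto=7) == "part-00007.json"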
|
def inverse_ip(ip_v4):
    """ Inverse ip order a.b.c.d -> d.c.b.a """
    tmp = ip_v4.split(".")
    tmp.reverse()
    return ".".join(tmp)
|
def to_bytes(value):
    """
    Convert an int to a single byte
    Args:
        value: The int to convert
    Return:
        The byte value
    Raises:
        TypeError: if value is not an int
    """
    if not isinstance(value, int):
        raise TypeError('Value is type %s, but needs to be an int'
                        % type(value))
    return bytes([value])
|