content stringlengths 42 6.51k |
|---|
def to_lists(scraped_data):
    """
    Translate elements of scraped data (a list) from dicts to lists and return it.

    The 'research_fields' and 'research_objects' lists of each lab dict are
    converted to single ' :: '-delimited strings.

    Args:
        scraped_data: list of dicts, one per laboratory.

    Returns:
        list of lists with the lab fields in a fixed column order.
    """
    delimiter = " :: "
    # str.join accepts the lists directly; the [x for x in xs] comprehensions
    # were redundant copies.
    return [[lab["number"], lab["certdate"], lab["org_name"], lab["org_address"],
             lab["lab_name"], lab["lab_address"], lab["phone"], lab["cellphone"],
             lab["email"], lab["www"],
             delimiter.join(lab["research_fields"]),
             delimiter.join(lab["research_objects"])] for lab in scraped_data]
def w_def(typ: str) -> dict:
    """Build a definition-reference dict pointing at ``#/definitions/<typ>``."""
    return {'#ref': '#/definitions/' + typ}
def _get_team_together(postgame, de_data):
"""Get team together flag."""
if de_data is not None:
return not de_data.random_positions
if postgame is not None:
return not postgame.random_positions
return None |
def compliment_name(name, flag):
    """Append ``_comp`` to the name when the alignment flag marks a reverse.

    Args:
        name: alignment identifier.
        flag: bitwise alignment flag; 16 and 272 indicate reverse alignments.

    Returns:
        ``<name>_comp`` for reverse alignments, otherwise *name* unchanged.
    """
    # Membership test replaces the duplicated equality checks.
    if flag in (16, 272):
        return f'{name}_comp'
    return name
def _normalise_options(options):
"""
Return a sequence of (value, label) pairs for all options where each option
can be a scalar value or a (value, label) tuple.
"""
out = []
if hasattr(options, '__call__'):
options = options()
for option in options:
if isinstance(option, tuple):
out.append( option )
else:
out.append( (option, str(option)) )
return out |
def ci2se(ci):
    """
    Convert a (lower, upper) confidence-interval tuple to a standard error.

    Bound order does not matter; a 95% CI half-width of 1.96 SE is assumed.
    """
    lower, upper = sorted(ci)
    return (upper - lower) / (2 * 1.96)
def _extract_xy_from_gdcm_str_seg(lines, dicom_tags):
"""Get rows, columns from gdcmdump output."""
rows = 0
cols = 0
for line in lines:
line = line.lstrip()
tag = line.split(" ")[0]
if tag == dicom_tags["rows"]:
rows = line.split(" ")[2]
elif tag == dicom_tags["columns"]:
cols = line.split(" ")[2]
return int(rows), int(cols) |
def minimallyEqualXML(one, two, removeElements=()):
    """ Strip all the whitespace out of two pieces of XML code, having first converted
    them to a DOM as a minimal test of equivalence.

    :param one: first XML document as a string
    :param two: second XML document as a string
    :param removeElements: tag names whose elements are removed from both
        documents before comparing
    :returns: True when the documents serialize identically once
        whitespace-only characters and the removed elements are ignored
    """
    from xml.dom import minidom
    # Keep only non-whitespace characters of the serialized document.
    sf = lambda x: ''.join(filter(lambda y: y.strip(), x))
    onedom = minidom.parseString(one)
    twodom = minidom.parseString(two)
    # a hugely hackish way to remove the elements, not full proof, but being
    # a test situation, we should be providing very controlled input
    # NOTE(review): getElementsByTagName returns a live NodeList that is
    # mutated while iterating; works for flat, non-nested inputs only.
    for element in removeElements:
        oneremovenodes = onedom.getElementsByTagName(element)
        tworemovenodes = twodom.getElementsByTagName(element)
        for removenode in oneremovenodes:
            removenode.parentNode.removeChild(removenode)
        for removenode in tworemovenodes:
            removenode.parentNode.removeChild(removenode)
    return sf(onedom.toxml()) == sf(twodom.toxml())
def do_intervals_intersect(a, b):
    """Return True when the given half-open 2-tuple intervals overlap.

    Args:
        a: 2-tuple of integer coordinates, a half-open interval.
        b: 2-tuple of integer coordinates, the other half-open interval.

    Returns:
        True if a and b overlap.
    """
    start_a, end_a = a
    start_b, end_b = b
    # Each interval must start before the other one ends.
    return start_a < end_b and start_b < end_a
def split_path_to_parts(path: str):
    """
    Helper to separate paths into information pieces
    :param path: the path of a java file including the file itself
    :return: a tuple (path, file, package, class) where the package is derived
        from the directory part and the class name from the file name
    """
    *directories, file_name = path.split('/')
    directory_path = "/".join(directories)
    package = ".".join(directories)
    class_name = file_name.split(".")[0]
    return (directory_path, file_name, package, class_name)
def connectChunk(key, chunk):
    """
    Parse a CONNECT card chunk.

    Args:
        key: card keyword (unused; kept for a uniform parser interface).
        chunk: list whose first element is the whitespace-separated card line.

    Returns:
        dict with 'link', 'downLink', 'numUpLinks' and the 'upLinks' list.
    """
    fields = chunk[0].strip().split()
    # Everything from the 5th field onward is an uplink id; a slice replaces
    # the manual index loop.
    return {'link': fields[1],
            'downLink': fields[2],
            'numUpLinks': fields[3],
            'upLinks': fields[4:]}
def array_madness(arr1: list, arr2: list) -> bool:
    """Return True if the sum of squares of arr1 strictly exceeds the sum of
    cubes of arr2; return False when either list is empty."""
    # The original guard `len(arr1) and len(arr2) >= 1` parsed as
    # `len(arr1) and (len(arr2) >= 1)`, which reduces to "both non-empty";
    # make that explicit and drop the redundant `True if ... else False`.
    if not arr1 or not arr2:
        return False
    return sum(i ** 2 for i in arr1) > sum(i ** 3 for i in arr2)
def merge_dict(*dicts):
    """Merge dicts into a dictionary mapping each key to a list of its values.

    The order of each value list follows the order of the input dicts.
    """
    merged = {}
    for mapping in dicts:
        for key, value in mapping.items():
            if key not in merged:
                merged[key] = []
            merged[key].append(value)
    return merged
def get_value(values, register):
    """Return the value of *register*: a lookup in *values* when it names a
    lower-case register, otherwise the integer literal it encodes."""
    # Chained comparison is the idiomatic range test.
    if 'a' <= register <= 'z':
        return values[register]
    return int(register)
def havrilliak_negami_conductivity(x, logtau0, de, a, b, einf):
    """2-d HNC: HNC(x, logtau0, de, a, b, einf)"""
    tau = 10 ** logtau0
    eps0 = 8.854187817 * 10 ** (-12)  # vacuum permittivity [F/m]
    relaxation = de / (1 + (1j * x * tau) ** a) ** b
    return relaxation + einf / (x * eps0)
def simplified(text, delete=""):
    """Returns text with multiple whitespace reduced to single spaces
    Any characters in delete are excluded from the resultant string.
    >>> simplified(" this and\\n that\\t too")
    'this and that too'
    >>> simplified(" Washington D.C.\\n")
    'Washington D.C.'
    >>> simplified(" Washington D.C.\\n", delete=",;:.")
    'Washington DC'
    >>> simplified(" disemvoweled ", delete="aeiou")
    'dsmvwld'
    """
    # Drop unwanted characters first, then let str.split() collapse every
    # run of whitespace; rejoining with single spaces yields the result.
    kept = "".join(ch for ch in text if ch not in delete)
    return " ".join(kept.split())
def Snow(temp, rf, sf, wc_old, sp_old, tt, cfmax, cfr, cwh):
    """
    ========================================================
    Snow(cfmax, temp, tt, cfr, cwh, rf, sf, wc_old, sp_old)
    ========================================================
    Snow routine.
    The snow pack consists of two states: Water Content (wc) and Snow Pack
    (sp). The first corresponds to the liquid part of the water in the snow,
    while the latter corresponds to the solid part. If the temperature is
    higher than the melting point, the snow pack will melt and the solid snow
    will become liquid. In the opposite case, the liquid part of the snow will
    refreeze, and turn into solid. The water that cannot be stored by the solid
    part of the snow pack will drain into the soil as part of infiltration.
    All precipitation simulated to be snow, i.e. falling when the temperature
    is bellow TT, is multiplied by a snowfall correction factor, SFCF [-].
    Snowmelt is calculated with the degree-day method (cfmax). Meltwater and
    rainfall is retained within the snowpack until it exceeds a certain
    fraction, CWH [%] of the water equivalent of the snow
    Liquid water within the snowpack refreezes using cfr
    Parameters
    ----------
    cfmax : float
        Day degree factor
    temp : float
        Temperature [C]
    tt : float
        Temperature treshold for Melting [C]
    cfr : float
        Refreezing factor
    cwh : float
        Capacity for water holding in snow pack [%]
    rf : float
        Rainfall [mm]
    sf : float
        Snowfall [mm]
    wc_old : float
        Water content in previous state [mm]
    sp_old : float
        snow pack in previous state [mm]
    Returns
    -------
    inf : float
        Infiltration [mm]
    wc_new : float
        Liquid Water content in the snow[mm]
    sp_new : float
        Snowpack in posterior state [mm]
    """
    if temp > tt:  # temp above melting threshold: some or all snow melts
        # Potential degree-day melt is capped by the snow actually present.
        if cfmax*(temp-tt) < sp_old+sf:  # potential melt < existing snow (previous amount + new snowfall)
            melt = cfmax*(temp-tt)
        else:  # potential melt exceeds the existing snow
            melt = sp_old+sf  # the entire snow (old pack + current snowfall) melts
        sp_new = sp_old + sf - melt
        wc_int = wc_old + melt + rf
    else:  # temp below melting threshold: some or all liquid water refreezes
        # cfmax*(tt-temp) is the melt rate; cfr scales it to the refreeze rate.
        if cfr*cfmax*(tt-temp) < wc_old+rf:  # potential refreeze < available liquid water
            refr = cfr*cfmax*(tt - temp)
        else:  # potential refreeze exceeds the available liquid water
            refr = wc_old + rf
        sp_new = sp_old + sf + refr
        wc_int = wc_old - refr + rf
    # Liquid water beyond the holding capacity (cwh fraction of the pack)
    # drains into the soil as infiltration.
    if wc_int > cwh*sp_new:  # water content exceeds the pack's holding capacity
        inf = wc_int-cwh*sp_new  # surplus water infiltrates
        wc_new = cwh*sp_new  # the pack retains exactly its holding capacity
    else:  # water content fits within the holding capacity
        inf = 0.0  # no infiltration
        wc_new = wc_int
    return inf, wc_new, sp_new
def get_inv_otu_map(lines):
    """Map a cluster member to its GG ID.

    Each line is a sequence whose first element is the GG ID and whose
    remaining elements are the cluster members.
    """
    # Build GG ID -> members first (duplicate IDs keep the last line), then
    # invert it in one comprehension.
    otu_map = dict((line[0], line[1:]) for line in lines)
    return {seq: gg_id for gg_id, members in otu_map.items() for seq in members}
def tvm_callback_verilog_postproc(code):
    """Hook that echoes the generated Verilog code before it is run."""
    # Print for inspection only; the code is passed through unchanged.
    print(code)
    return code
def del_chars_from_string(s, chars_to_del):
    """
    Delete characters (or substrings) from a string.

    :param s: string to clean
    :param chars_to_del: a string of characters, or an iterable of substrings,
        to delete from ``s``
    :return: the cleaned string
    """
    # Bug fix: the original guard `type(chars_to_del) != "str"` compared a
    # type object to the string "str", so it was always true and the else
    # branch was dead. Iterating works for both a plain string (character by
    # character) and a list of substrings, which matches the behavior the
    # code actually had.
    for chunk in chars_to_del:
        s = s.replace(chunk, "")
    return s
def config_prop_name(cls, name, value):
    """Configure name on property.

    If ``value`` exposes a ``__config__`` attribute, it is called with the
    owning class and the attribute name.

    Args:
        cls: Class the property will belong to.
        name: Name of the attribute.
        value: Value of the attribute.

    Returns:
        True if the attribute was determined to be a property and was
        configured, else False.

    Raises:
        TypeError: when ``cls`` is not a type or ``name`` is not a string.
        ValueError: when ``name`` is empty.
    """
    if not isinstance(cls, type):
        raise TypeError('Class must be a type')
    if not isinstance(name, str):
        raise TypeError('Name must be a string')
    if not name:
        raise ValueError('Name must be non-empty')
    # Sentinel distinguishes "attribute missing" from "attribute is None".
    _missing = object()
    configure = getattr(value, '__config__', _missing)
    if configure is _missing:
        return False
    configure(cls, name)
    return True
def convertir(string):
    """
    Convert a comma-separated string into a list of its elements.

    Parameters
    ----------
    string: str
        The comma-separated word to convert into a list.

    Returns
    -------
    list
        The pieces of the word split on commas (the original docstring
        wrongly described the return as a string).

    Example
    -------
    >>> df.PLUGTYPE = list(map(convertir, df.PLUGTYPE.values.tolist()))
    """
    # str.split already returns a list; the wrapping list() was redundant.
    return string.split(",")
def get_projects(slug):
    """Get project information from TimeSync.

    Returns a list of hard-coded project dicts (fixture-style data). The
    first entry always uses *slug* (falling back to "gwm"); when *slug* is
    falsy a "TimeSync" project is also included, and a "pymesync" project is
    always appended last.
    """
    p_list = [
        {
            "uri": "https://code.osuosl.org/projects/ganeti-webmgr",
            "name": "Ganeti Web Manager",
            "slugs": [slug if slug else "gwm"],
            "uuid": "a034806c-00db-4fe1-8de8-514575f31bfb",
            "revision": 4,
            "created_at": "2014-07-17",
            "deleted_at": None,
            "updated_at": "2014-07-20",
            "users": {
                "patcht": {
                    "member": True,
                    "spectator": False,
                    "manager": False
                },
                "tschuy": {
                    "member": True,
                    "spectator": True,
                    "manager": True
                }
            }
        }
    ]
    # Only include the TimeSync project itself when no specific slug was
    # requested.
    if not slug:
        p_list.append(
            {
                "uri": "https://code.osuosl.org/projects/timesync",
                "name": "TimeSync",
                "slugs": ["timesync", "ts"],
                "uuid": "a034806c-rrrr-bbbb-8de8-514575f31bfb",
                "revision": 2,
                "created_at": "2014-07-17",
                "deleted_at": None,
                "updated_at": "2014-07-20",
                "users": {
                    "patcht": {
                        "member": True,
                        "spectator": False,
                        "manager": False
                    },
                    "mrsj": {
                        "member": True,
                        "spectator": True,
                        "manager": False
                    },
                    "tschuy": {
                        "member": True,
                        "spectator": True,
                        "manager": True
                    }
                }
            }
        )
    # The pymesync project is returned unconditionally.
    p_list.append(
        {
            "uri": "https://code.osuosl.org/projects/pymesync",
            "name": "pymesync",
            "slugs": ["pymesync", "ps"],
            "uuid": "a034806c-ssss-cccc-8de8-514575f31bfb",
            "revision": 1,
            "created_at": "2014-07-17",
            "deleted_at": None,
            "updated_at": "2014-07-20",
            "users": {
                "patcht": {
                    "member": True,
                    "spectator": False,
                    "manager": False
                },
                "tschuy": {
                    "member": True,
                    "spectator": True,
                    "manager": False
                },
                "mrsj": {
                    "member": True,
                    "spectator": True,
                    "manager": True
                },
                "MaraJade": {
                    "member": True,
                    "spectator": False,
                    "manager": False
                },
                "thai": {
                    "member": True,
                    "spectator": False,
                    "manager": False
                }
            }
        }
    )
    return p_list
def back_to_tag(tag, attrs):
    """
    Rebuild an opening HTML/XML tag from its name and (attribute, value) pairs.

    Note: attribute values are not escaped; callers must provide safe values.
    """
    pieces = [tag] + ['%s="%s"' % (prop, val) for prop, val in attrs]
    return '<' + ' '.join(pieces) + '>'
def _group_by_expression(potential_types):
"""Group a dictionary of potentials by their expression."""
expr_group = {}
for potential in potential_types:
potential_type = potential_types[potential]
atom_types_list = expr_group.get(str(potential_type.expression), [])
atom_types_list.append(potential_type)
expr_group[str(potential_type.expression)] = atom_types_list
return expr_group |
def reverse_class_def(class_def_dict):
    """Reverse a ClassDef dictionary, mapping each value to the set of keys
    that carried it."""
    inverted = {}
    for key, value in class_def_dict.items():
        inverted.setdefault(value, set()).add(key)
    return inverted
def hamming_distance(str1, str2):
    """
    Calculate the hamming distance of the two strings
    Args:
        str1(string),str2(string): Strings to be used for finding the hamming distance
    Returns:
        int: Hamming Distance, or None when the strings differ in length
    """
    if len(str1) != len(str2):
        return None
    # zip pairs characters positionally; sum counts the mismatches.
    return sum(c1 != c2 for c1, c2 in zip(str1, str2))
def _find_top_ten(current,candidate,size=10):
"""
Private: find top ten highest numbers
* current list
* new candidate
* (optional) size of list. default=10
Returns array with top
"""
if len(current) == 0:
current.append(candidate)
else:
if candidate > current[0]:
current.append(candidate)
current = sorted(current)
if len(current) > size:
del current[0]
return current |
def _assign_modality_from_estimate(mean_alpha, mean_beta):
"""
Given estimated alpha and beta parameters from an Markov Chain Monte Carlo
run, assign a modality.
"""
# check if one parameter is much larger than another, and that they're
# both larger than 1
if mean_alpha / mean_beta > 2 or mean_beta / mean_alpha > 2:
if mean_alpha > mean_beta:
return 'included'
else:
return 'excluded'
else:
if mean_alpha < .9 and mean_beta < .9:
return 'bimodal'
elif mean_alpha > 2 and mean_beta > 2:
return 'middle'
elif abs((mean_alpha + mean_beta) / 2 - 1) < 0.5:
return 'uniform'
else:
return None |
def nationality_normalizer(nationality: str) -> str:
    """ Take a nationality string and return a normalized nationality.
    E.g. Taiwan -> Taiwanese, R.O.C. -> Taiwanese """
    nationality = str(nationality).lower()
    # Ordered rules: the first matching substring wins. The Taiwan rule must
    # come before 'china' so 'Republic of China' is not misclassified. The
    # redundant `.lower()` calls on already-lowercase literals are gone.
    rules = [
        (('bangl',), 'Bangladeshi'),
        (('fili',), 'Filipino'),
        (('india',), 'Indian'),
        (('indo',), 'Indonesian'),
        (('mala',), 'Malaysian'),
        (('pak',), 'Pakistani'),
        (('roc', 'r.o.c.', 'taiw', 'republic of ch'), 'Taiwanese'),
        (('china', 'chinese'), 'Chinese'),
        (('sing',), 'Singaporean'),
        (('viet',), 'Vietnamese'),
        (('usa', 'america'), 'USA'),
    ]
    for needles, normalized in rules:
        if any(needle in nationality for needle in needles):
            return normalized
    return f'OTHER: {nationality}'
def days_in_year_month(year: int, month: int) -> int:
    """Return the number of days in the given (year, month). The
    month is usually 1-12, but can be 0 to indicate December of the previous
    year, and 13 to indicate Jan of the following year.
    """
    month_lengths = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    days = month_lengths[(month - 1) % 12]
    if month == 2:
        # Gregorian rule: leap when divisible by 4, except centuries not
        # divisible by 400.
        leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
        days += leap
    return days
def dBoxId(boxId):
    """Return box id if valid, raise an exception in other case"""
    # Guard clause: negative ids are invalid.
    if boxId < 0:
        raise ValueError(
            '{} is not a valid Box Id, Box Id must be >= 0'.format(boxId))
    return boxId
def pathdata_first_point(path):
    """
    Return the first (X,Y) point from an SVG path data string
    Input: A path data string; the text of the 'd' attribute of an SVG path
    Output: Two floats in a list representing the x and y coordinates of the first point
    NOTE(review): when the path contains no parsable number after the moveTo
    command, x_val/y_val keep their initial '' value — callers should guard.
    """
    # Path origin's default values are used to see if we have
    # Written anything to the path_origin variable yet
    MaxLength = len(path)
    ix = 0
    tempString = ''
    x_val = ''
    y_val = ''
    # Check one char at a time
    # until we have the moveTo Command ('m' or 'M')
    while ix < MaxLength:
        if path[ix].upper() == 'M':
            break
        # Increment until we have M
        ix = ix + 1
    # Parse path until we reach a digit, decimal point or negative sign
    while ix < MaxLength:
        if(path[ix].isdigit()) or path[ix] == '.' or path[ix] == '-':
            break
        ix = ix + 1
    # Add digits and decimal points to x_val
    # Stop parsing when next character is neither a digit nor a decimal point
    while ix < MaxLength:
        if (path[ix].isdigit()):
            tempString = tempString + path[ix]
            x_val = float(tempString )
            ix = ix + 1
        # If next character is a decimal place or minus sign, save it and
        # continue parsing.
        # This allows for paths without leading zeros to be parsed correctly
        elif (path[ix] == '.' or path[ix] == '-'):
            tempString = tempString + path[ix]
            ix = ix + 1
        else:
            # Any other character ends the x coordinate; skip past it.
            ix = ix + 1
            break
    # Reset tempString for y coordinate
    tempString = ''
    # Parse path until we reach a digit or decimal point
    while ix < MaxLength:
        if(path[ix].isdigit()) or path[ix] == '.' or path[ix] == '-':
            break
        ix = ix + 1
    # Add digits and decimal points to y_val
    # Stop parsing when next character is neither a digit nor a decimal point
    while ix < MaxLength:
        if (path[ix].isdigit() ):
            tempString = tempString + path[ix]
            y_val = float(tempString)
            ix = ix + 1
        # If next character is a decimal place or minus sign, save it and
        # continue parsing.
        # This allows for paths without leading zeros to be parsed correctly
        elif (path[ix] == '.' or path[ix] == '-'):
            tempString = tempString + path[ix]
            ix = ix + 1
        else:
            # Any other character ends the y coordinate; skip past it.
            ix = ix + 1
            break
    return [x_val,y_val]
def verify_filetype(filename: str, valid_filetypes=["jpeg", "png", "jpg"]) -> bool:
    """
    Check whether a file's extension (the text after the last '.') is one of
    the accepted filetypes.

    Parameters:
        filename (str): name of the file to check
        valid_filetypes: accepted extensions; defaults to ["jpeg", "png", "jpg"]

    Returns:
        True if the filetype is valid, False otherwise
    """
    # The membership test already yields a bool; the if/else was redundant.
    # The mutable default list is only ever read here, so it is safe.
    return filename.split(".")[-1] in valid_filetypes
def ordinal(number):
    """Returns the string ordinal for the input number.
    Algorithm from Gareth's solution at:
    http://codegolf.stackexchange.com/questions/4707/outputting-ordinal-numbers-1st-2nd-3rd
    """
    k = number % 10
    # Bug fix: in Python 3, `number / 10` is float division, so the tens-digit
    # test that makes 11-13 use "th" never matched (e.g. ordinal(11) gave
    # "11st"). Floor division restores the intended integer arithmetic.
    return "{}{}".format(number,
                         "tsnrhtdd"[(number // 10 % 10 != 1) * (k < 4) * k::4])
def get_head(content):
    """Wrap *content* in an HTML <head> element and return the result."""
    return f'<head>{content}</head>'
def get_file_path(local_path):
    """Split a full switch path into its file name and directory path.

    (The previous Args/Returns text described a different, directory-listing
    helper and has been corrected.)

    Args:
        local_path (str): full path on the switch, e.g.
            ``bootflash:/dir/sub/file.txt`` or ``bootflash:file.txt``
    Returns:
        tuple: (file_name, path) where ``path`` keeps the ``location:``
        prefix and ends with '/' when a directory component is present
    Note:
        Specific for Ansible module(s). Not to be called otherwise.
    """
    location = local_path.split(':')[0]
    rightmost = local_path.split(':')[-1]
    if '/' in rightmost:
        file_name = rightmost.split('/')[-1]
        path_list = rightmost.split('/')[:-1]
        path = location + ':' + '/'.join(path_list) + '/'
    else:
        file_name = rightmost
        path = location + ':'
    return file_name, path
def group_nearby_indices(indices, max_gap=None, max_group_spread=None):
    """Return a list of groups of the different indices.
    Indices are processed in ascending order; each index joins the current
    group while both constraints hold, otherwise it starts a new group.
    Parameters
    ----------
    max_gap
        Maximal allowed difference between two consecutive numbers of a group
    max_group_spread
        Maximal allowed difference between the smallest and largest elements
        of a group.
    """
    ordered = sorted(indices)
    if not ordered:
        return []
    groups = [[ordered[0]]]
    for value in ordered[1:]:
        group = groups[-1]
        fits_gap = max_gap is None or value - group[-1] < max_gap
        fits_spread = max_group_spread is None or value - group[0] < max_group_spread
        if fits_gap and fits_spread:
            group.append(value)
        else:
            groups.append([value])
    return groups
def palindrome(step):
    """
    Map sequential integers onto a palindromic triangle-wave sequence with
    period 10 and values in [0.0, 0.5] (so a look-ahead mapping requires
    state rather than a pure function).
    """
    distance_from_peak = abs(float(step % 10) - 5.0)
    return (5.0 - distance_from_peak) / 10.0
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure.

    Each fieldset is a (name, opts) pair where opts['fields'] may contain
    plain field names or tuples of names.
    """
    field_names = []
    for name, opts in fieldsets:
        for field in opts['fields']:
            # isinstance is the idiomatic type check; `type(x) == tuple`
            # also fails for tuple subclasses.
            if isinstance(field, tuple):
                field_names.extend(field)
            else:
                field_names.append(field)
    return field_names
def get_tot_pop(dwellings):
    """Get total population of all dwellings
    Return
    ------
    tot_pop : float or None
        `None` when any dwelling lacks a population value, otherwise the
        summed population of all dwellings
    """
    total = 0
    for dwelling in dwellings:
        population = dwelling.population
        if population is None:
            # A single missing value makes the total meaningless.
            return None
        total += population
    return total
def bubble(arr):
    """
    Sort *arr* in place using bubble sort.

    :param arr: an array to be sorted
    :returns: the same array, sorted ascending
    """
    # After each pass the largest remaining element has bubbled up to
    # position `end`, so the inner scan shrinks by one each time.
    for end in range(len(arr) - 1, 0, -1):
        for j in range(end):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr
def make_italic(text: str) -> str:
    """
    Return *text* wrapped in single asterisks (Markdown italics).
    """
    return f"*{text}*"
def get_module_short_name(klass):
    """
    Return the short module name.
    For example, full module name is `django.forms.fields` and
    the short module name will be `fields`.
    """
    module_path = klass.__module__
    # The last dotted component is the short name.
    return module_path.rsplit('.', 1)[-1]
def append(text, to_append):
    """Appends text to a title string in a way that respects tags.

    The appended text is inserted before the first '[' tag, if any.
    """
    bracket = text.find("[")
    if bracket == -1:
        return text + to_append
    # Insert before the first tag, keeping a single space before the tag.
    return "%s%s %s" % (text[:bracket].strip(), to_append, text[bracket:])
def embeddedness(target_list, compared_list):
    """
    Measure the embeddedness of one list within another;
    embeddedness of A in B = #(A int B)/#A
    :param target_list: The target list
    :param compared_list: The list to be compared with
    :return: Embeddedness score
    """
    # Count (with multiplicity) the target elements also present in the
    # compared list.
    shared = sum(1 for element in target_list if element in compared_list)
    return shared / len(target_list)
def list_split(lst, sections):
    """
    Splits a list into N sections. From https://stackoverflow.com/a/2135920.
    Examples:
    >>> list_split(list(range(10)), 3)
    [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
    >>> list_split(list(range(20)), 4)
    [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]]
    """
    base, extra = divmod(len(lst), sections)
    chunks = []
    start = 0
    for index in range(sections):
        # The first `extra` chunks receive one additional element.
        stop = start + base + (1 if index < extra else 0)
        chunks.append(lst[start:stop])
        start = stop
    return chunks
def list_to_string(lst):
    """ Convert [1.0, 2, 0] ==> '1.0 2 0' """
    return " ".join(map(str, lst))
def substitute_all_by_empty(qry_string, qry_subs):
    """Delete all occurrences of the given string sequences.
    Args:
        qry_string: A string containing sequences to be replaced.
        qry_subs: A list of strings containing sequences to be replaced.
    Returns:
        The qry_string where all sequences defined in qry_subs are deleted.
    """
    result = qry_string
    for sequence in qry_subs:
        result = result.replace(sequence, '')
    return result
def remote_branch(stdin_first_line):
    """
    Reads the name of the remote git branch from runtime parameters.
    In the pre-push.py hook the name of the remote branch is passed as the $1 parameter.
    :param stdin_first_line the first line of the standard input
    >>> remote_branch("refs/heads/master a9d45baccd631601087a75a6605909c16bbfdbca refs/heads/master 67b6dc7a5e256ae590d305a766e627258b164899")
    'master'
    >>> remote_branch("refs/heads/master a9d45baccd631601087a75a6605909c16bbfdbca refs/heads/hot-fix 67b6dc7a5e256ae590d305a766e627258b164899")
    'hot-fix'
    """
    # Field 3 of the pre-push stdin line is the remote ref; its last path
    # component is the branch name.
    remote_ref = stdin_first_line.split(" ")[2]
    return remote_ref.split("/")[-1]
def mass_funct(pb, x):
    """
    mass_funct(pb, x):
    Return the mass function of an orbit given the following:
        'pb' is the binary period in days.
        'x' is the projected semi-major axis in lt-sec.
    """
    period_seconds = pb * 86400.0  # convert days to seconds
    return 8015123.37129 * x**3.0 / (period_seconds * period_seconds)
def initial_investment(pv_size, battery_size, n_batteries = 1, capex_pv = 900, capex_batt = 509):
    """Compute the initial investment: PV capex plus battery capex."""
    pv_cost = pv_size * capex_pv
    battery_cost = battery_size * capex_batt * n_batteries
    return pv_cost + battery_cost
def roll_right(sequence, alleles, ref_pos, bound):
    """Determines common distance all alleles can be rolled (circularly permuted) right
    within the reference sequence without altering it.
    Args:
        sequence (str): The reference sequence.
        alleles (list of str): The sequences to be normalized.
        ref_pos (int): The start index for rolling.
        bound (int): The upper bound index in the reference sequence for normalization, hence also for rolling.
    Returns:
        int: The distance that the alleles can be rolled
    """
    # circularly permute sequence d steps, using modulo arithmetic
    lens = [len(a) for a in alleles]
    d = 0
    max_d = bound - ref_pos
    # Keep rolling while every non-empty allele's character at the wrapped
    # position (d mod allele length) matches the reference at ref_pos + d.
    # Empty alleles (falsy `a`) never block the roll.
    while (d <= max_d and not any(a and a[d % lens[i]] != sequence[ref_pos + d] for i, a in enumerate(alleles))):
        d += 1
    return d
def hasPTC(sequence):
    """
    Determines whether a PTC (premature termination codon) exists in a sequence.
    Arguments:
      sequence (str): IMGT gapped sequence in frame 1.
    Returns:
      int: -1 when no PTC is present, otherwise the index of the first PTC.
    """
    stop_codons = {"TAA", "TGA", "TAG", "TRA", "TRG", "TAR", "TGR", "TRR"}
    # Walk the sequence codon by codon in reading frame 1.
    return next((i for i in range(0, len(sequence), 3)
                 if sequence[i:i + 3] in stop_codons), -1)
def yte_syntax(file):
    """
    Return the full name of a YouTube Editor syntax based on the short name.
    """
    return f"Packages/YouTubeEditor/resources/syntax/{file}.sublime-syntax"
def string_in_file(file_name, string_to_search):
    """ Check if any line in the file contains given string """
    with open(file_name, 'r') as handle:
        # any() short-circuits on the first matching line.
        return any(string_to_search in line for line in handle)
def delete_repeated_lines(log):
    """Remove proof-log lines whose formula repeats an earlier line.

    Lines have format [step, [formula], value, clause, on_steps]. When a later
    line repeats an earlier formula, on_steps references to the later step
    (1-based) are redirected to the earlier one, subsequent step numbers and
    larger on_steps references are decremented, and the line is deleted.
    The list is modified in place and also returned.
    """
    # NOTE(review): log2 is never used; kept to stay byte-identical.
    log2 = list()
    # Start backwards
    for index1 in range(len(log)-1, 0, -1):
        # Then check forward until that index for the same formula
        for index2 in range(index1):
            if log[index1][1] == log[index2][1]:
                # If this happens then change the occurrences of index1 + 1 in on_steps for index2 + 1
                # Also subtract 1 from every on_steps greater than index1 + 1
                # And finally delete the line
                for index3 in range(index1 + 1, len(log)):
                    # Change every on_step that uses the repeated step for the previous one
                    for index4 in range(len(log[index3][4])):
                        if log[index3][4][index4] == index1 + 1:
                            log[index3][4].remove(index1 + 1)
                            log[index3][4].append(index2 + 1)
                            log[index3][4].sort()
                        # If it is greater than the repeated line, subtract one
                        elif log[index3][4][index4] > index1 + 1:
                            log[index3][4][index4] -= 1
                    # Subtract 1 to every step for the following ones
                    log[index3][0] -= 1
                del log[index1]
                break
    return log
def parse_mimetype(mimetype):
    """Parses a MIME type into its components.
    :param str mimetype: MIME type
    :returns: 4 element tuple for MIME type, subtype, suffix and parameters
    :rtype: tuple
    Example:
    >>> parse_mimetype('text/html; charset=utf-8')
    ('text', 'html', '', {'charset': 'utf-8'})
    """
    if not mimetype:
        return '', '', '', {}
    fulltype, *raw_params = mimetype.split(';')
    # Later duplicates of a parameter key override earlier ones.
    params = {}
    for item in raw_params:
        if not item:
            continue
        if '=' in item:
            key, value = item.split('=', 1)
        else:
            key, value = item, ''
        params[key.lower().strip()] = value.strip(' "')
    fulltype = fulltype.strip().lower()
    if fulltype == '*':
        fulltype = '*/*'
    if '/' in fulltype:
        mtype, stype = fulltype.split('/', 1)
    else:
        mtype, stype = fulltype, ''
    if '+' in stype:
        stype, suffix = stype.split('+', 1)
    else:
        suffix = ''
    return mtype, stype, suffix, params
def vprint(verbose: bool):
    """
    Utility function for optional printing.
    Parameters
    ----------
    verbose: bool
        If True the real ``print`` is returned, otherwise a no-op callable
        with the same signature.
    Returns
    -------
    callable
    """
    if verbose:
        return print

    def _noop(*args, **kwargs):
        return None

    return _noop
def yields_from_leung_nomoto_2018_table10(feh):
    """
    Supernova data source: Leung & Nomoto, 2018, ApJ, Volume 861, Issue 2, Id 143, Table 10/11
    The seven datasets are provided for Z/Zsun values of 0, 0.1, 0.5, 1, 2, 3 and 5.
    Using Zsun = 0.0169 the corresponding FeH values are -1, -0.301, 0.0, 0.301, 0.4771 and 0.69897.
    We use seven intervals delimited by midpoints of those values.
    NOTE(review): an input that matches no branch (e.g. NaN) returns None.
    """
    if feh <= -1.65:
        return [0.0, 5.48e-4, 1.3e-11, 2.15e-9, 3.46e-2, 1.63e-4, 2.50e-3, 1.72e-1, 1.14e-1, 2.55e-2, 7.57e-1]
    elif -1.65 < feh <= -0.65:
        return [0.0, 5.44e-4, 1.54e-12, 4.34e-10, 3.81e-2, 1.63e-4, 1.84e-3, 1.79e-1, 1.12e-1, 2.24e-2, 7.60e-1]
    elif -0.65 < feh <= -0.15:
        return [0.0, 5.88e-4, 3.24e-12, 2.94e-10, 4.85e-2, 6.58e-4, 1.69e-3, 2.30e-1, 1.14e-1, 1.84e-2, 7.20e-1]
    elif -0.15 < feh <= 0.15:
        return [0.0, 5.82e-4, 6.45e-12, 3.69e-10, 4.90e-2, 6.56e-4, 1.22e-3, 2.8e-1, 1.9e-1, 1.59e-2, 6.81e-1]
    elif 0.15 < feh <= 0.39:
        return [0.0, 5.71e-4, 1.62e-11, 5.52e-10, 4.94e-2, 6.46e-4, 8.41e-4, 2.13e-1, 9.81e-2, 1.26e-2, 6.44e-1]
    elif 0.39 < feh <= 0.59:
        return [0.0, 5.47e-4, 5.54e-11, 9.20e-10, 6.23e-2, 6.82e-4, 7.57e-4, 2.21e-1, 9.27e-2, 1.11e-2, 5.87e-1]
    elif 0.59 <= feh:
        return [0.0, 5.36e-4, 8.29e-11, 7.60e-10, 7.54e-2, 2.81e-4, 8.39e-4, 2.25e-1, 8.00e-2, 8.93e-3, 4.99e-1]
def validate_file(rules: dict, fname: str):
    """
    Validates files by BIDS-compatible identifiers.

    Parameters
    ----------
    rules : dict
        Dictionary with keys of BIDS-recognized key and their accepted values.
    fname : str
        File name to validate (the original ``dict`` annotation was wrong).

    Returns
    -------
    bool
        True when every ``key-value`` pair from *rules* appears in *fname*
        (vacuously True for empty rules).
    """
    # all() over a generator replaces building a list of booleans.
    return all(f"{key}-{value}" in fname for key, value in rules.items())
def propsample(freqs, num):
    """Proportionally samples from the given frequencies.
    Returns a list of same length with the number of times each index should be
    sampled such that the total number of elements sampled is `num`.
    """
    counts = [int(freq * num) + 1 for freq in freqs]
    allotted = 0
    for index, count in enumerate(counts):
        # Clip once the running total would exceed the requested number.
        if allotted + count > num:
            counts[index] = num - allotted
        allotted += counts[index]
    return counts
def map_clone(function, xs):
    """
    Apply *function* to every element of *xs* and return the results as a
    new list.
    """
    # A list comprehension replaces the manual `lst += [item]` accumulation.
    return [function(elem) for elem in xs]
def prep_querystring(get_params):
    """ Makes querystring from params.
    Called by handle_bad_params() """
    # get_params is a django QueryDict, which conveniently provides
    # a urlencode() method.
    return '?%s' % get_params.urlencode() if get_params else ''
def get_mtl_tile_file_name(secondary=False):
    """Convenience function to grab the name of the MTL tile file.
    Parameters
    ----------
    secondary : :class:`bool`, optional, defaults to ``False``
        If ``True`` return the name of the MTL tile file for secondary
        targets instead of the standard, primary MTL tile file.
    Returns
    -------
    :class:`str`
        The name of the MTL tile file.
    """
    return "scnd-mtl-done-tiles.ecsv" if secondary else "mtl-done-tiles.ecsv"
def coalesce_repeated_switches(cmd):
    """Combines known repeated command line switches.
    Repetition of a switch notably happens when both per-test switches and the
    additional driver flags specify different --enable-features. For instance:
        --enable-features=X --enable-features=Y
    Conceptually, this indicates to enable features X and Y. However
    Chrome's command line parsing only applies the last seen switch, resulting
    in only feature Y being enabled.
    To solve this, transform it to:
        --enable-features=X,Y
    """
    ENABLE_PREFIX = '--enable-features='
    DISABLE_PREFIX = '--disable-features='

    def _collect(prefix, switch, bucket):
        """When |switch| starts with |prefix|, add its comma-separated values
        to |bucket| and report the match."""
        if not switch.startswith(prefix):
            return False
        bucket.update(switch[len(prefix):].split(','))
        return True

    enabled, disabled = set(), set()
    result = []
    for switch in cmd:
        if _collect(ENABLE_PREFIX, switch, enabled):
            continue
        if _collect(DISABLE_PREFIX, switch, disabled):
            continue
        result.append(switch)
    # Append any coalesced (comma separated) flags to the end, sorted for
    # deterministic output.
    for prefix, bucket in ((ENABLE_PREFIX, enabled), (DISABLE_PREFIX, disabled)):
        if bucket:
            result.append(prefix + ','.join(sorted(bucket)))
    return result
def get_name(parameters):
    #{{{
    """
    Generate a model name from its parameters.

    Each (key, value) pair becomes ``key=value`` with commas removed from
    the value; for string values containing '/', only the basename (the
    text after the last '/') is used.  Characters unsafe in file names
    (backslash, /, :, *, ?, <, >, |) are stripped from the final name.

    Args:
        parameters: Mapping of parameter names to values.

    Returns:
        A comma-separated, filename-safe string.
    """
    parts = []
    for key, value in parameters.items():
        if isinstance(value, str) and "/" in value:
            # Keep only the basename (replaces the original triple-reverse trick).
            value = value.rsplit("/", 1)[1]
        parts.append("%s=%s" % (key, str(value).replace(',', '')))
    name = ",".join(parts)
    # Raw string fixes the invalid '\/' escape of the original literal
    # (same character set: \ / : * ? < > |).
    return "".join(ch for ch in name if ch not in r"\/:*?<>|")
def uses_all(word, required):
    """Checks if the word uses all the required letters."""
    return all(letter in word for letter in required)
def extract_risk(risk_assessment):
    """Extracts risk from a RiskAssessment resource."""
    first_prediction = risk_assessment['prediction'][0]
    coding = first_prediction['qualitativeRisk']['coding']
    return coding[0]['code']
def parse_section(fin):
    """Parse a section from the tssv report.

    NOTE: this moves the file pointer forwards.  The first line read is the
    tab-separated header; subsequent lines become one dict per allele until
    a blank line (end of section) is reached.
    """
    # Header line supplies the column names.
    columns = next(fin).strip('\n').split('\t')
    entries = []
    for raw in fin:
        stripped = raw.strip('\n')
        # A blank line terminates the section.
        if not stripped:
            break
        record = dict(zip(columns, stripped.split('\t')))
        # The allele field can be "A" or "A(1.0)"; drop any "(...)" suffix.
        name, paren, _rest = record['allele'].partition('(')
        if paren:
            record['allele'] = name
        # Convert every value that parses as an int.
        for field, value in record.items():
            try:
                record[field] = int(value)
            except ValueError:  # not an int
                pass
        entries.append(record)
    return entries
def human_readable(bytes):
    """Return a human-readable representation of the input bytes.

    Scales through bytes, KiB, MiB, GiB and TiB.  Values of 1024 TiB or
    more are still reported in TiB — the original version fell off the end
    of the loop and silently returned None for such inputs.
    """
    units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB']
    for n, label in enumerate(units):
        value = bytes / (1024 ** n)
        # Stop at the first unit that keeps the value below 1024, or clamp
        # to the largest unit available.
        if value < 1024 or label == units[-1]:
            return f'{round(value, 2)} {label}'
def nearest(items, pivot):
    """Find nearest value in array, including datetimes.

    Args
    ----
    items: iterable
        Values from which to find the one nearest to `pivot`
    pivot: int or float
        Value to find the nearest of in `items`

    Returns
    -------
    nearest: int or float
        The element of `items` with the smallest absolute distance to `pivot`
    """
    def distance(candidate):
        return abs(candidate - pivot)
    return min(items, key=distance)
def make_string_pep440_compatible(raw_str):
    """
    pep440 only allows a subset of characters in the version name:
    - ASCII alphanumerics
    - dots
    Strip every other character from *raw_str* and return the result.
    """
    return "".join(
        ch for ch in raw_str
        if ch == "." or "0" <= ch <= "9" or "a" <= ch <= "z" or "A" <= ch <= "Z"
    )
def parse_url_name_args(string):
    """
    Parse the node's url_name parameter and return (url_name, kwargs).

    The parameter can be just the url_name, or additionally define kwargs:
    Example: node['url_name'] = 'url_name|kwarg1:value,kwarg2:value'
    """
    parts = string.split('|')
    extra = {}
    if len(parts) > 1:
        for pair in parts[1].split(','):
            key, value = (piece.strip() for piece in pair.strip().split(':'))
            if key and value:
                extra[key] = value
    return (parts[0], extra)
def pascal(n):
    """Print the first n rows of Pascal's triangle.

    Returns True for success (n >= 1) and False for failure."""
    current = [1]
    padding = [0]
    for _ in range(max(n, 0)):
        print(current)
        # Each new row is the pairwise sum of the shifted previous row.
        current = [left + right for left, right in zip(current + padding, padding + current)]
    return n >= 1
def prefix_path(prefix, path):
    """Return True if *prefix* equals *path* or is a parent directory of it.

    Assumes that both prefix and path are strings."""
    if prefix == path:
        return True
    return path[:len(prefix) + 1] == prefix + '/'
def _list_separators_in_xmlformat(separators, indent=''):
"""Generates XML encoding of a list of list separators.
Args:
separators: A list of list separators. Usually, this should be a
string whose characters are the valid list separators, e.g., ','
means that both comma (',') and space (' ') are valid list
separators.
indent: A string that is added at the beginning of each generated
XML element.
Returns:
A string.
"""
result = ''
separators = list(separators)
separators.sort()
for sep_char in separators:
result += ('%s<list_separator>%s</list_separator>\n' %
(indent, repr(sep_char)))
return result |
def how_many_5(numbers, threshold=5):
    """Return the count of numbers greater than *threshold* (default 5).

    Args:
        numbers: Iterable of comparable values.
        threshold: Values strictly greater than this are counted.  The
            default of 5 preserves the original behavior; the parameter
            realizes the TODO in the original ("take argument that
            specifies threshold").

    Returns:
        The number of elements greater than *threshold*.
    """
    return sum(1 for number in numbers if number > threshold)
def _get_gid(name):
"""Returns a gid, given a group name."""
if name is None:
return None
try:
from grp import getgrnam
except ImportError:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None |
def dont_give_me_five(start, end):
    """
    Count the numbers in a region that contain no digit 5.  The start and
    the end number are both inclusive!
    :param start: starting integer for range.
    :param end: ending integer for range.
    :return: the amount of numbers within the range without a 5 in it.
    """
    total = 0
    for number in range(start, end + 1):
        if "5" not in str(number):
            total += 1
    return total
def collapse_list(items):
    """Given an ordered list of numbers, returns the ranges of
    present items. For example, given a list [1,2,3,5,6,7,10], it
    would return the string '1-3, 5-7, 10'.

    Args:
        items: Ordered list of integers.

    Returns:
        Comma-separated string of single values and 'a-b' ranges.
        An empty input yields '' (the original raised IndexError).
    """
    if not items:
        return ""
    # Collect (first, last) pairs for each run of consecutive values.
    runs = []
    first = last = items[0]
    for value in items[1:]:
        if value == last + 1:
            last = value
        else:
            runs.append((first, last))
            first = last = value
    runs.append((first, last))
    return ", ".join(
        str(a) if a == b else str(a) + "-" + str(b) for a, b in runs
    )
def sort_by_name(seq):
    """Returns a copy of the input sequence sorted by name."""
    def name_of(entry):
        return entry['name']
    return sorted(seq, key=name_of)
def parse_boolean(value):
    """
    Parses strings into booleans using the following mapping (case-sensitive):
        'true'  => True
        'false' => False
        '1'     => True
        '0'     => False
    Raises ValueError for anything else.
    """
    if value in ("true", "1"):
        return True
    if value in ("false", "0"):
        return False
    raise ValueError("expected 'true' or 'false', got '%s'" % value)
def calc_years(seconds: int) -> int:
    """
    Convert a duration in seconds to whole (365-day) years.
    :param seconds: duration in seconds
    :return: number of complete years (floor division)
    """
    SECONDS_PER_YEAR = 60 * 60 * 24 * 365
    return seconds // SECONDS_PER_YEAR
def changeCourses(previousMessage, currentMessage):
    """
    Determine if one of observed currency has changed.
    :param previousMessage (str) : previous message for twitter
    :param currentMessage (str) : current message for twitter
    :return change (bool) : whether the two messages differ
    """
    messages_differ = previousMessage != currentMessage
    return bool(messages_differ)
def urljoin(url, suffix=""):
    """
    Will join url and its suffix
    Example:
        "https://google.com/", "/"   => "https://google.com/"
        "https://google.com", "/"    => "https://google.com/"
        "https://google.com", "api"  => "https://google.com/api"
        "https://google.com", "/api" => "https://google.com/api"
        "https://google.com/", "api" => "https://google.com/api"
        "https://google.com/", "/api" => "https://google.com/api"
    :type url: ``string``
    :param url: URL string (required)
    :type suffix: ``string``
    :param suffix: the second part of the url
    :rtype: ``string``
    :return: Full joined url
    """
    # Ensure exactly one '/' between the parts (the original duplicated the
    # final return statement inside the suffix branch — dead code removed).
    if not url.endswith("/"):
        url += "/"
    if suffix.startswith("/"):
        suffix = suffix[1:]
    return url + suffix
def key_der_to_list(key_data):
    """
    Read AES key from der format.
    :param key_data: Input key data — an iterable of byte values
        (e.g. ``bytes`` or a list of ints)
    :return: key as a list of ints

    Note: the original looped with ``ord(chr(i))``, which is the identity
    on integers, so this is simply a list copy of the byte values.
    """
    return list(key_data)
def str_xor(a, b):
    """
    (string, string) -> string

    XOR two strings character by character; the result has the length of
    the shorter input.  ``zip`` already stops at the shorter operand, which
    made the original's explicit slicing of the longer string (and the
    whole length comparison) redundant — both branches were equivalent.

    :param a: string a
    :param b: string b
    :return: the XORed string
    """
    return "".join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))
def get_as_dict(x):
    """Return an object as a dictionary of its attributes.

    Dicts are returned as-is; namedtuple-likes via _asdict(); anything
    else via its __dict__."""
    if isinstance(x, dict):
        return x
    try:
        return x._asdict()
    except AttributeError:
        return x.__dict__
def demandNameItem(listDb,phrase2,mot):
    """
    Append the database names of all items to a string for a database insert.

    listDb: list with database name of all items
    phrase2: string the names are appended to
    mot: unused — the original overwrote this parameter on every loop
        iteration; kept only for backward compatibility of the signature
    return: *phrase2* followed by the names in *listDb* separated with ','

    Note: the original's index loop re-implemented a comma join.
    """
    return phrase2 + ",".join(str(item) for item in listDb)
def indent(st, indent=4):
    """
    Indent string.  *indent* may be a prefix string or a count of spaces.
    """
    prefix = " " * indent if isinstance(indent, int) else indent
    return "".join(prefix + line for line in st.splitlines(keepends=True))
def calculate_divisors(meta):
    """For every key of *meta* (a numeric string), record which other keys
    divide it evenly.

    Each entry's "divisors" list is extended in place with every meta key
    that divides the entry's key; each entry's "indices" list is replaced
    by a descending-sorted copy.  The mutated *meta* is also returned.
    """
    for value_key, info in meta.items():
        number = int(value_key)
        divisors = info["divisors"]  # mutated in place, as in the original
        for candidate_key in meta:
            candidate = int(candidate_key)
            if number % candidate == 0 and candidate not in divisors:
                divisors.append(candidate)
        meta[value_key] = {
            "indices": sorted(info["indices"], reverse=True),
            "divisors": divisors,
        }
    return meta
def curly_bracket_to_img_link(cb):
    """
    Takes the curly-bracket notation for some mana type
    and creates the appropriate image html tag.
    """
    # Strip the braces and make the symbol file-name safe.
    symbol = cb[1:-1].replace('/', '_').replace(' ', '_')
    # Phyrexian symbols plus C and E are stored as .png; everything else .gif.
    if 'Phyrexian' in symbol or symbol in ('C', 'E'):
        extension = 'png'
    else:
        extension = 'gif'
    return f'<img src="/images/mana/{symbol}.{extension}">'
def quote_string(s: str, force:bool=False) -> str:
    """Sometimes wraps strings in double quotes, depending on the content and force parameter.
    Description:
        If the input string contains a space, OR force is True, the string is
        wrapped in double quotes; otherwise it is returned unmodified.
    Args:
        s: The string that needs to be wrapped in quotes (maybe)
        force (optional): Whether to force quotes around the string even if not needed. Defaults to False.
    Examples:
        >>> quote_string("nevermore", False)
        'nevermore'
        >>> quote_string("nevermore")
        'nevermore'
        >>> quote_string("nevermore", True)
        '"nevermore"'
        >>> quote_string("never more", False)
        '"never more"'
    Returns:
        The string, maybe wrapped in double quotes
    """
    needs_quotes = force or ' ' in s
    if needs_quotes:
        return '"' + s + '"'
    return s
def get_query_counter(request):
    """ hhs_oauth_server.request_logging.RequestTimeLoggingMiddleware
    adds request._logging_pass
    we grab it, falling back to 1 when the attribute is absent.
    """
    return getattr(request, '_logging_pass', 1)
def rk4(fun, tn, h, yn):
    """Runge-Kutta 4, single step.
    Args:
        fun (func): function in t (float) and y (ND numpy array)
        tn (float): time
        h (float): time step
        yn (n-d array): values as of tn
    Returns:
        n-d array: values as of tn+1
    """
    # Four stage slopes; the arithmetic is kept in the exact same form as
    # the classical scheme so floating-point results are unchanged.
    s1 = fun(tn, yn)
    s2 = fun(tn + h/2.0, yn + h*s1/2.0)
    s3 = fun(tn + h/2.0, yn + h*s2/2.0)
    s4 = fun(tn + h, yn + h*s3)
    return yn + (1.0/6.0)*h*(s1 + 2*s2 + 2*s3 + s4)
def flag(argument):
    """
    Check for a valid flag option (no argument) and return ``None``.
    (Directive option conversion function.)
    Raise ``ValueError`` if an argument is found.
    """
    if argument and argument.strip():
        raise ValueError('no argument is allowed; "%s" supplied' % argument)
    return None
def isNumber(n):
    """
    checks if n can be converted to an integer
    :param n: value to be checked
    :return: True if n parses as a number, False otherwise
    """
    try:
        int(n)
    except ValueError:
        return False
    return True
def sizeof(bsObj):
    """ Size of object in bytes. Size is contextual by object type. """
    byte_count = bsObj.__sizeof__()
    return byte_count
def is_valid_minimizer(object):
    """
    Checks if the minimizer object has the following attributes/methods:
    * minimize
    """
    return hasattr(object, "minimize")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.