content
stringlengths 42
6.51k
|
|---|
def u2q(u1, u2, warnings=True):
    """
    Convert the linear and quadratic terms of the quadratic limb-darkening
    parameterization -- called `u_1` and `u_2` in Kipping 2013 or `a` and `b` in
    Claret et al. 2013 -- and convert them to `q_1` and `q_2` as described in
    Kipping 2013:
    http://adsabs.harvard.edu/abs/2013MNRAS.435.2152K

    Parameters
    ----------
    u1 : float
        Linear component of quadratic limb-darkening
    u2 : float
        Quadratic component of quadratic limb-darkening
    warnings : bool
        If True, print a warning when (u1, u2) violate Kipping's conditions.

    Returns
    -------
    (q1, q2) : tuple of floats
        Kipping (2013) style quadratic limb-darkening parameters
    """
    q1 = (u1 + u2)**2
    q2 = 0.5*u1/(u1+u2)
    if warnings and (u1 < 0 or u2 < 0):
        # BUG FIX: the second placeholder was {0}, so the warning printed
        # u1's value for u2 as well.
        print("WARNING: The quadratic limb-darkening parameters " +
              "u1={0:.3f} or u2={1:.3f} violate Kipping's ".format(u1, u2) +
              "conditions for a monotonically increasing or everywhere-" +
              "positive intensity profile. Returning them as is.")
    return q1, q2
|
def euclidean(p,q):
    """
    Similarity score based on euclidean distance: 1 / (1 + dist).
    Only the first `same` index positions contribute, where `same` is the
    count of values of p that also appear in q.
    """
    shared = sum(1 for value in p if value in q)
    squared_dist = sum((p[idx] - q[idx]) ** 2 for idx in range(shared))
    return 1 / (1 + squared_dist ** .5)
|
def url_mod(url: str, new_params: dict) -> str:
    """
    Modifies existing URL by setting/overriding specified query string parameters.
    Note: Does not support multiple querystring parameters with identical name.
    :param url: Base URL/path to modify
    :param new_params: Querystring parameters to set/override (dict)
    :return: New URL/path
    """
    from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode
    parsed = urlparse(url)
    merged = dict(parse_qsl(parsed.query))
    # None values become empty-string parameters; everything else is stringified.
    merged.update({str(k): ('' if v is None else str(v)) for k, v in new_params.items()})
    components = list(parsed)
    components[4] = urlencode(merged)
    return urlunparse(components)
|
def IsStringFloat(string_to_check):
    """Checks whether or not the given string can be converted to a float.

    Returns False (instead of raising) for non-string, non-numeric inputs
    such as None, which previously escaped as a TypeError.
    """
    try:
        float(string_to_check)
        return True
    except (ValueError, TypeError):
        return False
|
def pooling_output_shape(dimension_size, pool_size, padding, stride,
                         ignore_border=True):
    """
    Computes output shape for pooling operation.
    Parameters
    ----------
    dimension_size : int
        Size of the dimension. Typically it's image's
        weight or height.
    pool_size : int
        Size of the pooling filter.
    padding : int
        Size of the zero-padding.
    stride : int
        Stride size.
    ignore_border : bool
        If ``True``, partial pooling windows at the border are dropped.
        Defaults to ``True``.
    Returns
    -------
    int
        Output dimension size, or ``None`` when the input size is unknown.
    """
    # Unknown (symbolic) dimension propagates through unchanged.
    if dimension_size is None:
        return None
    if ignore_border:
        # Number of full windows over the padded input, rounded up by stride.
        output_size = dimension_size + 2 * padding - pool_size + 1
        output_size = (output_size + stride - 1) // stride
    elif stride >= pool_size:
        output_size = (dimension_size + stride - 1) // stride
    else:
        # Partial border windows count; at least one output element.
        output_size = (dimension_size - pool_size + stride - 1) // stride
        output_size = max(1, output_size + 1)
    return output_size
|
def header_time_series(N, ibm=False):
    """ Return string for header of statistical parameters of N time series."""
    line = ""
    # CSV-style column-name template for one time series; every %d is later
    # filled with the same series index.
    # Statistical parameters for one time series.
    subline = 'number_%d,' \
        'variation_mean_%d,variation_std_%d,variation_min_%d,variation_max_%d,' \
        'variationnorm_mean_%d,variationnorm_std_%d,variationnorm_min_%d,variationnorm_max_%d,' \
        'JS_mean_%d,JS_std_%d,JS_min_%d,JS_max_%d,JS_stab_%d,' \
        'log_width_%d,log_loc_%d,log_scale_%d,log_stat_%d,log_pval_%d,' \
        'pareto_a_%d,pareto_loc_%d,pareto_scale_%d,pareto_stat_%d,pareto_pval_%d,' \
        'pow_a_%d,pow_loc_%d,pow_scale_%d,pow_stat_%d,pow_pval_%d,' \
        'tpow_a_%d,tpow_scale_%d,tpow_R_%d,tpow_p_%d,tpow_stat_%d,tpow_pval_%d,' \
        'exp_loc_%d,exp_scale_%d,exp_stat_%d,exp_pval_%d,' \
        'norm_loc_%d,norm_scale_%d,norm_stat_%d,norm_pval_%d'
    # Npars must equal the number of %d placeholders in subline, or the
    # interpolation below raises a TypeError.
    Npars = 43  # number of paramters in line
    # Add stability paramter if not for ibm.
    if not ibm:
        # NOTE(review): the leading comma also acts as the separator between
        # consecutive per-series sublines; without it (ibm=True) sublines are
        # concatenated with no separator — confirm that is intended.
        subline = ',stability_%d,' + subline
        Npars += 1
    # Add statistical parameters for number of time series N.
    for i in range(1, N + 1):
        line += subline % ((i,) * Npars)
    return line
|
def declare_constant(name, value):
    """Render a ``NAME = value`` declaration line.

    Values that are neither already quoted nor parseable as an integer are
    wrapped with repr() so the emitted line is valid Python.
    """
    try:
        # Already-quoted values and empty strings are emitted verbatim.
        if value and not value.startswith(('"', "'")):
            int(value)
    except ValueError:
        value = repr(value)
    return "\n{0} = {1}".format(name.upper(), value)
|
def get_type(geojson):
    """Return the geometry type of a GeoJSON object.

    Looks first at the first feature of a FeatureCollection, then at a
    Feature's geometry, and finally at the object's own 'type'.
    """
    features = geojson.get('features')
    if features is not None:
        return features[0].get('geometry').get('type')
    geometry = geojson.get('geometry')
    if geometry is not None:
        return geometry.get('type')
    return geojson.get('type')
|
def remove_instrumentation_breakpoint(eventName: str) -> dict:
    """Build the DevTools command that removes a breakpoint on a native event.

    Parameters
    ----------
    eventName: str
        Instrumentation name to stop on.
    **Experimental**
    """
    command = {
        "method": "DOMDebugger.removeInstrumentationBreakpoint",
        "params": {"eventName": eventName},
    }
    return command
|
def check(file):
    """
    Return True if `file` can be opened for reading, else False.

    BUG FIX: the file handle was previously opened and never closed,
    leaking a descriptor on every successful check.
    """
    try:
        with open(file):
            return True
    except (OSError, IOError):
        return False
|
def parse_so_ft_map(so_ft_map_file):
    """Parse out mappings between feature keys and SO.

    Reads a whitespace-delimited file (first line is a header) and maps
    column 0 to column 1, skipping rows whose SO term is 'undefined'.
    """
    mapping = {}
    with open(so_ft_map_file) as handle:
        handle.readline()  # skip header row
        for row in handle:
            fields = row.split()
            if fields[1] != 'undefined':
                mapping[fields[0]] = fields[1]
    return mapping
|
def mod_inverse(a, b):
    """Return the modular inverse of a modulo b, i.e. x with (a*x) % b == 1,
    computed with the extended Euclidean algorithm.

    NOTE(review): requires gcd(a, b) == 1 for the result to be a true
    inverse; the original docstring ("Solves a%c=b, returning c") did not
    match the code.
    """
    # assert find_gcd(a, b) != 1, "a and b do not have a mod inverse and are " \
    #                             "not relatively prime"
    # (u1, u2, u3) and (v1, v2, v3) track Bezout coefficients for a and b.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, b
    while v3 != 0:
        q = u3 // v3
        # Simultaneous update: subtract q times the v-row, then swap rows.
        v1, v2, v3, u1, u2, u3 = u1-q*v1, u2-q*v2, u3-q*v3, v1, v2, v3
    # u1 is the Bezout coefficient of a; reduce it into [0, b).
    return u1 % b
|
def see(m: list, x, y) -> int:
    """Return count of how many occupied seats can be seen when looking out from given seat position.

    `m` is a grid of rows (strings/lists) using '#' for occupied, 'L' for
    empty seats; only the first seat along each of the eight directions
    counts (Advent-of-Code style visibility rule).
    """
    seen = 0
    # (dx, dy) is the current direction of gaze.
    for dx, dy in [(-1, -1), (0, -1), (1, -1),
                   (-1, 0), (1, 0),
                   (-1, 1), (0, 1), (1, 1)]:
        check_x, check_y = x, y
        # `found` is a string-valued state: 'None' = still scanning,
        # 'Out' = left the grid, '#'/'L' = first seat encountered.
        found = 'None'
        while found == 'None':
            # Continue looking in current gaze direction.
            check_x += dx
            check_y += dy
            # print(check_x, check_y)
            # Out of bounds checks.
            if check_x == -1 or check_y == -1 or check_y == len(m):
                found = 'Out'
            elif check_x == len(m[check_y]):
                found = 'Out'
            # Check if found either an occupied or empty seat.
            elif m[check_y][check_x] in ['#', 'L']:
                found = m[check_y][check_x]
        # Only an occupied seat contributes to the count.
        if found == '#':
            seen += 1
    return seen
|
def wrap_response(status_code, data, role):
    """Wrap a payload into the API response envelope.

    2xx responses carry the data under `role` (always as a list);
    everything else is reported under "error".
    """
    is_success = 200 <= status_code < 300
    if not is_success:
        return {
            "status":status_code,
            "error":data
        }
    payload = data if isinstance(data, list) else [data]
    return {
        "status":status_code,
        role:payload
    }
|
def kthSmall(A, B, k):
    """
    Return the k-th smallest element (0-indexed) of the union of A and B.
    o(log(m)+log(n)) solution n=sizeof a ,m=sizeof b

    NOTE(review): assumes A and B are each sorted ascending — confirm with
    callers; behavior is undefined otherwise.
    """
    # Base cases: one array exhausted — answer comes from the other.
    if not A:
        return B[k]
    elif not B:
        return A[k]
    # finding median of both arrays
    ia = len(A)//2
    ib = len(B)//2
    ma = A[ia]
    mb = B[ib]
    # idea is not to elimiate only portion of one array at a time
    if ia + ib < k:
        # k lies past the combined medians: drop the lower half of the
        # array with the smaller median (those elements cannot be the answer).
        if ma > mb:
            return kthSmall(A,B[ib+1:],k-(ib+1))
        else:
            return kthSmall(A[ia+1:],B,k-(ia+1))
    else:
        # k lies at or before the combined medians: drop the upper half of
        # the array with the larger median; k is unchanged.
        if ma > mb:
            return kthSmall(A[:ia],B,k)
        else:
            return kthSmall(A,B[:ib],k)
|
def apply(object, args=None, kwargs=None):
    """Call a callable object with positional arguments taken from the
    tuple args, and keyword arguments taken from the optional dictionary
    kwargs; return its results.
    """
    positional = () if args is None else args
    keywords = {} if kwargs is None else kwargs
    return object(*positional, **keywords)
|
def get_indent_of_specifier(file, current_line, encapsulators):
    """
    Get indentation of the indent specifer itself.
    :param file: A tuple of strings.
    :param current_line: Line number of indent specifier (initial 1)
    :param encapsulators: A tuple with all the ranges of encapsulators
    :return: Indentation of the specifier.
    """
    # If the specifier line closes an encapsulator, the relevant indentation
    # is that of the line where the encapsulator was opened.
    start = current_line
    _range = 0
    # encapsulators are assumed ordered by end line; scan those ending at or
    # before the specifier line.
    while (_range < len(encapsulators) and
           encapsulators[_range].end.line <= current_line):
        if current_line == encapsulators[_range].end.line:
            # Track the earliest opening line among encapsulators that close
            # exactly on the specifier line.
            if encapsulators[_range].start.line < start:
                start = encapsulators[_range].start.line
        _range += 1
    # Leading-whitespace width of the chosen line (file is 1-indexed).
    return len(file[start - 1]) - len(file[start - 1].lstrip())
|
def is_shared(resource):
    """Return True when the wrapped resource object is flagged as shared."""
    wrapped = resource['object']
    return wrapped.get('shared', False)
|
def divide_cases(ncases, nprocs):
    """
    Divide up load cases among available procs.
    Parameters
    ----------
    ncases : int
        Number of load cases.
    nprocs : int
        Number of processors.
    Returns
    -------
    list of list of int
        Integer case numbers for each proc.
    """
    # One bucket per processor.
    data = []
    for j in range(nprocs):
        data.append([])

    # Round-robin assignment: `wrap` records the case index at which the
    # last wrap-around happened, so `j - wrap` cycles through 0..nprocs-1.
    wrap = 0
    for j in range(ncases):
        idx = j - wrap
        if idx >= nprocs:
            # Start a new round at bucket 0.
            idx = 0
            wrap = j
        data[idx].append(j)

    return data
|
def filter_list(list,minLengthSize):
    """Return the words from `list` that are longer than `minLengthSize` characters."""
    return [word for word in list if len(word) > minLengthSize]
|
def get_ip(record, direction):
    """
    Return required IPv4 or IPv6 address (source or destination) from given record.
    :param record: JSON record searched for IP
    :param direction: string from which IP will be searched (e.g. "source" => ipfix.sourceIPv4Address or "destination" => ipfix.destinationIPv4Address)
    :return: value corresponding to the key in the record
    """
    v4_key = "ipfix.{}IPv4Address".format(direction)
    if v4_key in record:
        return record[v4_key]
    # Fall back to IPv6; a missing key raises KeyError, as before.
    return record["ipfix.{}IPv6Address".format(direction)]
|
def frequency_distributions(text, words_num=75):
    """
    Build a frequency distribution of `text` (a sequence of words) and
    return the `words_num` most common as (count, word) pairs, sorted by
    count descending (ties broken by word, descending).
    """
    pairs = [(text.count(word), word) for word in set(text)]
    pairs.sort(reverse=True)
    return pairs[:words_num]
|
def count_nodes(d:dict) -> int:
    """Count the number of nodes (leaves included) in a nested dictionary."""
    total = len(d)
    for value in d.values():
        # `type(...) is dict` (not isinstance) intentionally excludes
        # dict subclasses, matching the original contract.
        if type(value) is dict:
            total += count_nodes(value)
    return total
|
def mostSimilarToLatex(origin,most_similar):
    """
    Returns the result of the most_similar words to a given origin word
    into a latex table.

    :param origin: header word shown in the spanning column.
    :param most_similar: iterable of [term, score] pairs (score formatted
        with two decimals).
    :return: LaTeX table-body rows as a single string.
    """
    # NOTE(review): '\h' and '\m' are not Python escapes, so these literals
    # survive as intended LaTeX; '\\\ ' yields the LaTeX row break '\\'.
    finaltext = "\hline \multicolumn{2}{|c|}{%s} \\\ \n\hline\n"%(origin)
    for [t,s] in most_similar:
        finaltext += "%s & %.2f \\\ \n\hline\n"%(t,s)
    return finaltext
|
def process_plus_minus(i):
    """
    Round a mean value and its variation range to a consistent precision
    and render them as plain-text, HTML and LaTeX "mean +- range" strings.

    Input: {
              var_mean     - mean value
              var_range    - variation
              (force_round) - if set, use it in %.(force_round)f
           }
    Output: {
              return       - return code = 0, if successful
              var_mean     - rounded mean
              var_range    - rounded range
              string       - plain-text rendering
              html         - HTML rendering
              tex          - LaTeX rendering
            }
    """
    import math

    vm = i['var_mean']
    vr = i['var_range']

    decimals = 0  # renamed from `round`, which shadowed the builtin
    if vr < 1:
        # Range below 1: keep enough decimals to show its first
        # significant digit, and truncate both values to that precision.
        x = int(math.log10(1 / vr)) + 1
        decimals = x
        y = pow(10, int(x))
        vr = int(vr * y) / y
        vm = int(vm * y) / y
    else:
        # Range >= 1: truncate both values to the range's leading digit.
        x = int(math.log10(vr))
        y = pow(10, int(x))
        vr = int(vr / y) * y
        vm = int(vm / y) * y

    fr = i.get('force_round', None)
    if fr is not None and fr != '':
        decimals = fr

    ff = '%.' + str(decimals) + 'f'
    x1 = ff % vm
    x2 = ff % vr

    s = x1 + ' +- ' + x2
    h = x1 + ' ± ' + x2
    t = x1 + ' $\pm$ ' + x2

    # BUG FIX: 'var_mean' previously returned vr (the range) instead of vm.
    return {'return':0, 'var_mean':vm, 'var_range':vr, 'string':s, 'html':h, 'tex':t}
|
def uniq(listinput):
    """ Return the unique elements of listinput, preserving first-seen order. """
    seen = []
    for item in listinput:
        if item in seen:
            continue
        seen.append(item)
    return seen
|
def format_time(t):
    """
    Format a tick count `t` (tenths of seconds) as the string A:BC.D
    (minutes:seconds.tenths).
    """
    minutes, whole_seconds = divmod(t // 10, 60)
    tenths = t - 10 * (minutes * 60 + whole_seconds)
    # %02d format ensures a leading 0 in second count
    return "%d:%02d.%d" % (minutes, whole_seconds, tenths)
|
def get_namelist_value(key, lines):
    """Return the value corresponding to key in lines, and the index
    at which key was found.
    lines is expected to be a FVCOM namelist in the form of a list of strings.
    :param str key: The namelist key to find the value and line number of.
    :param list lines: The namelist lines.
    :returns: The value corresponding to key,
              and the index in lines at which key was found.
    :rtype: 2-tuple
    """
    # The last matching line wins, as namelists may repeat keys.
    matches = [
        idx for idx, text in enumerate(lines)
        if text.strip() and text.split()[0] == key
    ]
    line_index = matches[-1]
    # Token layout is "key = value", so the value is the third token.
    value = lines[line_index].split()[2]
    return value, line_index
|
def permutation_size(total_size: int, selection_size: int) -> int:
    """ Not for duplicate items: P(N,r) = N!/(N-r)! """
    if selection_size > total_size:
        raise ValueError(f"selection_size:{selection_size} cannot be greater than total_size:{total_size}")
    if selection_size < 0:
        raise ValueError(f"selection_size:{selection_size} cannot be negative")
    # Multiply the top `selection_size` factors of N!.
    product = 1
    factor = total_size - selection_size + 1
    while factor <= total_size:
        product *= factor
        factor += 1
    return product
|
def gray2bin(bits):
    """Converts a gray-encoded binary string into a binary string.
    Parameters
    ----------
    bits : list or tuple of bool
        The gray-encoded binary string as a list of True/False values.
    """
    # Each binary bit is the XOR of all gray bits up to that position.
    acc = bits[0]
    decoded = [acc]
    for bit in bits[1:]:
        acc ^= bit
        decoded.append(acc)
    return decoded
|
def cross_checks(human_nums, output_nums):
    """Compare two parallel lists of numbers and count their mismatches,
    printing the position and both values for each difference found.
    Args:
        human_nums ([list]): Human read optical rotation values
        output_nums ([list]): Code computed optical rotation values
    Returns:
        [Int]: How many values these two arrays differ by
    """
    count_diffs = 0
    for idx, expected in enumerate(human_nums):
        if expected != output_nums[idx]:
            count_diffs += 1
            print("This diff happens at ", idx)
            print(expected)
            print(output_nums[idx])
    return count_diffs
|
def keyword_originating_from_og_key(keyword, og_key_tree):
    """
    Input:  keyword: A keyword proposed by the expansion API
            og_key_tree: A tree resulting from a keyword entered by the user
    Output: True if keyword was proposed because of its similarity with og_key_tree["original_keyword], False if not
    """
    # First check the flat tag list attached to the referential, if any.
    if og_key_tree["referentiel"]["tags"] is not None:
        for keyw in og_key_tree["referentiel"]["tags"]:
            if keyw == keyword:
                return True
    # Then check every similar sense of every sense in the tree.
    # NOTE(review): assumes each entry of "similar_senses" is a non-empty
    # sequence whose first element is a dict with a "sense" key — confirm
    # against the expansion API's response schema.
    for sense in og_key_tree["tree"]:
        for similar_sense in sense["similar_senses"]:
            if similar_sense[0]["sense"] == keyword:
                return True
    return False
|
def core_capex(region, asset_type, costs, core_lut, strategy, country_parameters):
    """
    Return core asset costs for only the 'new' assets that have been planned.

    The total cost of new core edges/nodes in the region is shared across
    the region's sites (per network); regions with no sites cost nothing.

    NOTE(review): for 'core_node' the '<GID>_new' key is indexed without a
    presence check (unlike 'core_edge'), so a missing key raises KeyError —
    confirm whether that asymmetry is intentional.
    """
    # Core variant (e.g. the '<x>_<core>_<...>' strategy component).
    core = strategy.split('_')[1]
    geotype = region['geotype'].split(' ')[0]
    networks = country_parameters['networks']['baseline' + '_' + geotype]
    if asset_type == 'core_edge':
        if asset_type in core_lut.keys():
            total_cost = []
            #only grab the new edges that need to be built
            combined_key = '{}_{}'.format(region['GID_id'], 'new')
            if combined_key in core_lut[asset_type].keys():
                distance_m = core_lut[asset_type][combined_key]
                cost = int(distance_m * costs['core_edge_capex'])
                total_cost.append(cost)
            # Spread the cost over the per-network site count.
            sites = ((region['upgraded_mno_sites'] + region['new_mno_sites']) / networks)
            if sites == 0:
                return 0
            elif sites < 1:
                return sum(total_cost)
            else:
                return sum(total_cost) / sites
        else:
            return 0
    elif asset_type == 'core_node':
        if asset_type in core_lut.keys():
            total_cost = []
            #only grab the new nodes that need to be built
            combined_key = '{}_{}'.format(region['GID_id'], 'new')
            nodes = core_lut[asset_type][combined_key]
            cost = int(nodes * costs['core_node_{}_capex'.format(core)])
            total_cost.append(cost)
            # Spread the cost over the per-network site count.
            sites = ((region['upgraded_mno_sites'] + region['new_mno_sites']) / networks)
            if sites == 0:
                return 0
            elif sites < 1:
                return sum(total_cost)
            else:
                return sum(total_cost) / sites
        else:
            return 0
    else:
        print('Did not recognise core asset type {}'.format(asset_type))
        return 0
|
def from_db(db, power=False):
    """Convert decibel back to ratio.
    Parameters
    ----------
    db : array_like
        Input data.
    power : bool, optional
        If ``power=False`` (the default), was used for conversion to dB.
    """
    divisor = 10 if power else 20
    return 10 ** (db / divisor)
|
def realm_from_principal(principal):
    """
    Attempt to retrieve a realm name from a principal, if the principal is fully qualified.
    :param principal: A principal name: user@DOMAIN.COM
    :type: principal: str
    :return: realm if present, else None
    :rtype: str
    """
    if '@' not in principal:
        return None
    # Everything after the last '@' is the realm.
    return principal.rsplit('@', 1)[-1]
|
def areaofcircle(r):
    """
    Calculate the area of a circle.

    input: radius of circle
    output: area of circle (pi * r^2)
    """
    import math
    # math.pi is more precise than the hard-coded 3.14159265358 used before.
    return math.pi * (r ** 2)
|
def grader(marks):
    """
    marks: list of marks of students in integer
    Return average marks of students; prints a message and returns 0.0
    for an empty list.
    """
    # Explicit emptiness check instead of `assert`, which is silently
    # stripped under `python -O`; message text kept identical.
    if len(marks) == 0:
        print('AssertionError: list cannot be empty')
        return 0.0
    # Builtin sum() replaces the manual loop (which shadowed the builtin).
    return sum(marks) / len(marks)
|
def hyps2word(hyps):
    """
    Concatenate a list of hyphen fragments into a single word.
    :param hyps: a list of strings (hyphens)
    :return: string of concatenated hyphens
    """
    return str.join('', hyps)
|
def _TransformPreservedState(instance):
"""Transform for the PRESERVED_STATE field in the table output.
PRESERVED_STATE is generated from the fields preservedStateFromPolicy and
preservedStateFromConfig fields in the managedInstance message.
Args:
instance: instance dictionary for transform
Returns:
Preserved state status as one of ('POLICY', 'CONFIG', 'POLICY,CONFIG')
"""
preserved_state_value = ''
if ('preservedStateFromPolicy' in instance and
instance['preservedStateFromPolicy']):
preserved_state_value += 'POLICY,'
if ('preservedStateFromConfig' in instance and
instance['preservedStateFromConfig']):
preserved_state_value += 'CONFIG'
if preserved_state_value.endswith(','):
preserved_state_value = preserved_state_value[:-1]
return preserved_state_value
|
def init_r0_pulse(r, l, k):
    r"""Calculate initial total RNA via the ODE formula of RNA kinetics for a
    one-shot/kinetics experiment: r0 = (r - l) / (1 - k).

    Parameters
    ----------
    r:
        total RNA at current time point.
    l:
        labeled RNA at current time point.
    k:
        $k = 1 - e^{-\gamma t}$
    Returns
    -------
    r0:
        The initial total RNA at the beginning of the one-shot or kinetics experiment.
    """
    unlabeled = r - l
    return unlabeled / (1 - k)
|
def filter_id(mention: str):
    """
    Strip mention decorations to recover the numeric ID, e.g.
    "<@!6969>" -> 6969.
    Note that this function can error with ValueError on the int call, so the
    caller of this function must take care of that.
    """
    # One C-level pass removes every decoration character.
    cleaned = mention.translate(str.maketrans('', '', '<>@&#! '))
    return int(cleaned)
|
def shoelace_area(points):
    """
    Calculate the area of a *simple* polygon using the shoelace formula.
    """
    count = len(points)  # number of corners
    total = 0.0
    for idx in range(count):
        nxt = (idx + 1) % count  # wrap back to the first vertex
        total += points[idx][0] * points[nxt][1]
        total -= points[nxt][0] * points[idx][1]
    return abs(total) / 2.0
|
def merge_with(f, *dicts):  # pragma: no cover
    """ Merges dicts using f(old, new) when encountering collision.

    Falsy entries in `dicts` (e.g. None, {}) are skipped.
    """
    r = {}
    sentinel = object()
    for d in filter(None, dicts):
        # BUG FIX: `.iteritems()` is Python 2 only and raised AttributeError
        # on Python 3; `.items()` is the correct call here.
        for k, v in d.items():
            tmp = r.get(k, sentinel)
            if tmp is sentinel:
                r[k] = v
            else:
                r[k] = f(tmp, v)
    return r
|
def parse_ranges(s):
    """Return a list of (start, end) indexes, according to what is
    specified in s. It is up to the client to interpret the semantics
    of the ranges.
    Samples:
    16          [(16, 16)]
    1-6         [(1, 6)]
    1,3,5-8,99  [(1, 1), (3, 3), (5, 8), (99, 99)]

    Raises AssertionError on malformed input (non-string, linebreaks,
    invalid characters, or ranges with start > stop).
    """
    import re
    assert type(s) is type(""), "s must be string"
    assert not re.search(r"[\r\n]", s), "linebreaks not allowed"
    s = re.sub(r"\s+", "", s)  # no spaces
    assert not re.search(r"[^0-9,-]", s), "invalid characters"
    ranges = []
    for r in s.split(","):
        # Splitting on "-" produces empty strings for leading minus signs;
        # those are folded back into the following number below.
        # Case 1: 1     ['1']
        # Case 2: 1-2   ['1', '2']
        # Case 3: -1    ['', '1']
        # Case 4: -1-5  ['', '1', '5']
        # Case 5: -5-1  ['', '5', '1']
        # Case 6: -5--1 ['', '5', '', '1']
        # Case 7: --1   ['', '', '5']    ERROR
        # Case 8: 1--5  ['1', '', '5']   ERROR
        # Case 9: 5-    ['5', '']        ERROR
        parts = r.split("-")
        # Blanks should be interpreted as negatives for the next
        # digit.
        i = 0
        while i < len(parts):
            if parts[i] == '':
                # An empty part must be followed by a digit to negate.
                assert i < len(parts)-1, "Invalid range: %s" % r
                assert parts[i+1] != "", "Invalid range: %s" % r
                parts[i+1] = "-" + parts[i+1]
                del parts[i]
            else:
                i += 1
        assert len(parts) <= 2, "I do not understand ranges %s" % r
        if len(parts) == 1:
            # A lone number is a degenerate range (n, n).
            start = int(parts[0])
            stop = start
        else:
            start, stop = map(int, parts)
            stop = stop
        assert start <= stop, "Invalid range %s" % r
        ranges.append((start, stop))
    return ranges
|
def get_total_number_of_pending_tasks(
    total_number_of_tasks: int,
    total_number_of_done_tasks: int,
    total_number_of_tasks_in_progress: int,
) -> int:
    """
    Get total number of the pending tasks.

    Pending = all tasks minus those already done and those in progress.

    Args:
    ----
    total_number_of_tasks (int): The total number of all tasks.
    total_number_of_done_tasks (int): The total number of the done tasks.
    total_number_of_tasks_in_progress (int): The total number of tasks in
        progress.

    Returns
    -------
    An integer which represents the total number of the pending tasks.
    """
    accounted_for = total_number_of_done_tasks + total_number_of_tasks_in_progress
    return total_number_of_tasks - accounted_for
|
def seconds_to_str(num):
    """
    Convert seconds to [1-9]['S','M','H'] format.
    :param num: seconds as float
    :return: string of [1-9]['S','M','H'] (values of 60+ hours stay in 'H')
    """
    num = float(num)
    for unit in ['S','M','H']:
        if abs(num) < 60.0:
            return "%3.1f%s" % (num, unit)
        num /= 60.0
    # BUG FIX: the fallback previously used the value after one division
    # too many and labelled it 'S'; report large durations in hours.
    return "%.1f%s" % (num * 60.0, 'H')
|
def parser_video_window_Descriptor(data,i,length,end):
    """\
    parser_video_window_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).
    This descriptor is not parsed at the moment. The dict returned is:
       { "type": "video_window", "contents" : unparsed_descriptor_contents }
    (Defined in ISO 13818-1 specification)
    """
    # Skip the 2-byte descriptor tag/length header; keep the body raw.
    raw_body = data[i+2:end]
    return { "type" : "video_window", "contents" : raw_body }
|
def path_replace(path, path_replacements):
    """Apply every old->new substitution from path_replacements to path."""
    for old, new in path_replacements.items():
        path = path.replace(old, new)
    return path
|
def busca_sequencial_index(lista, elemento):
    """
    Sequential search for `elemento` in `lista`.

    Returns the index of the first occurrence; when the element is absent,
    prints a message and returns None.

    lista -- unsorted list of integers
    elemento -- element to search for
    """
    contador = 0
    try:
        # BUG FIX: the loop previously compared the *index* to the element
        # (`contador == elemento`) instead of the list item itself.
        while contador <= len(lista):
            if lista[contador] == elemento:
                break
            contador += 1
        print('SEQUENCIAL INDEX DEU CERTO')
        return contador
    except IndexError:
        # Reached one past the end without a match.
        print('Elemento nao achado')
|
def selective_find(str, char, index, pos):
    """Return a pair (index, pos), indicating the next occurrence of
    char in str. index is the position of the character considering
    only ordinals up to and including char, and pos is the position in
    the full string. index/pos is the starting position in the full
    string."""
    l = len(str)
    # Scan forward from pos+1 until char is found or the string ends.
    while 1:
        pos += 1
        if pos == l:
            # No further occurrence: signal with (-1, -1).
            return (-1, -1)
        c = str[pos]
        if c == char:
            return index+1, pos
        elif c < char:
            # Characters ordinally below char still advance the
            # filtered index.
            index += 1
|
def corr_lists(varlist, builtins):
    """
    this corrects list in case the split with .Control didn't split correctly
    this could be much easier if we hadn't split for sections before, but this has to be revisited at some other point,
    right now I'm just happy this works
    :param varlist: list of dict with variables
    :param builtins: list of dict with builtins
    :return: clean list of dict with variables and builtins (of which there are 4)
    """
    # is used to ensure that there is no infinite loop, two iterations are enough
    # it is for example possible that (BI = builtin):
    # builtins = [var1] and varlist = [var2, var3, BI1, BI2, BI3, BI4]
    # in that case the function will first move the builtins to the builtin list with the result that
    # builtins = [var1, BI1, BI2, BI3, BI4] and varlist = [var2, var3]
    # now in the second iteration the elif condition applies and the var1 is moved to the varlist, resulting
    # builtins = [BI1, BI2, BI3, BI4] and varlist = [var1, var2, var3]
    i = 0
    # The four recognized builtin names are Vensim's control variables.
    while len(builtins) != 4 and i <= 2:
        if len(builtins) < 4:
            # Move misplaced control variables out of varlist into builtins.
            translist = [x for x in varlist if x['name'] in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
            varlist = [x for x in varlist if x['name'] not in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
            for item in translist:
                builtins.append(item)
        elif len(builtins) > 4:
            # Move regular variables that ended up in builtins back to varlist.
            translist = [x for x in builtins if x['name'] not in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
            builtins = [x for x in builtins if x['name'] in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
            for item in translist:
                varlist.append(item)
        i = i + 1
    return varlist, builtins
|
def kodi_to_ascii(string):
    """Convert Kodi format tags to ascii.

    Strips [B]/[I] markers and any [COLOR <name>] tag (previously only
    'gray' and 'yellow' were handled). Returns None for None input.
    """
    if string is None:
        return None
    import re
    # Generalized: remove every opening color tag regardless of color name.
    string = re.sub(r'\[COLOR [^\]]*\]', '', string)
    for tag in ('[B]', '[/B]', '[I]', '[/I]', '[/COLOR]'):
        string = string.replace(tag, '')
    return string
|
def two_sum(arr: list, target: int) -> list:
    """
    Return [i, j] such that arr[i] + arr[j] == target, or [None, None]
    when no such pair exists.

    time complexity: O(n)
    space complexity: O(n)
    """
    seen = dict()
    for idx, value in enumerate(arr):
        # Has this value's complement appeared earlier?  O(1) lookup.
        partner = seen.get(target - value)
        if partner is not None:
            return [partner, idx]
        seen[value] = idx
    # scenario where target is not found
    return [None, None]
|
def list_to_sql_in_list(l):
    """Convert a python list into a string that can be used in an SQL query with operator "in".

    Single quotes inside values are doubled (standard SQL escaping) so a
    value like "o'x" no longer breaks — or injects into — the query.
    Prefer parameterized queries where the driver supports them.
    """
    quoted = ("'{}'".format(str(e).replace("'", "''")) for e in l)
    return '(' + ','.join(quoted) + ')'
|
def valueFromMapping(procurement, subcontract, grant, subgrant, mapping):
    """We configure mappings between FSRS field names and our needs above.
    This function uses that config to derive a value from the provided
    grant/subgrant.

    mapping may be: None (empty value), a str (attribute name on the
    subaward), or a 2-tuple of mappings where element 0 applies to
    subcontracts and element 1 to subgrants.
    """
    # Whichever of subcontract/subgrant is set is the record to read from.
    subaward = subcontract or subgrant
    if mapping is None:
        return ''
    elif isinstance(mapping, str):
        return getattr(subaward, mapping)
    elif isinstance(mapping, tuple) and subcontract:
        # Tuple mappings branch on the subaward kind and recurse with the
        # appropriate half.
        return valueFromMapping(procurement, subcontract, grant, subgrant,
                                mapping[0])
    elif isinstance(mapping, tuple) and subgrant:
        return valueFromMapping(procurement, subcontract, grant, subgrant,
                                mapping[1])
    else:
        raise ValueError("Unknown mapping type: {}".format(mapping))
|
def highlight(text):
    """
    Wrap the given text in asterisks so chat clients render it in bold.
    :type text: string
    :param text: The string to be highlighted
    """
    return f'*{text}*'
|
def is_rm_textfile(filename):
    """Returns True if the given filename is a known remarkable-specific textfile."""
    # str.endswith accepts a tuple: one C-level call replaces four
    # chained checks, with identical behavior.
    return filename.endswith(('.json', '.content', '.pagedata', '.bookm'))
|
def _alternating_sequence(token1, token2, length):
"""Make alternating sequence of token1 and token2 with specified length."""
return [(token2 if i % 2 else token1) for i in range(length)]
|
def long_errors(errors_summary, min_length=10):
    """
    Use the error_summary to isolate tokens that are longer than min_length.
    Used to identify strings of words that have been run together due to the
    failure of the OCR engine to recognize whitespace.
    Arguments:
    - errors_summary -- mapping whose keys are the error tokens
    Returns a tuple of (overlong tokens, min_length).
    """
    overlong = [token for token in errors_summary if len(token) > min_length]
    return (overlong, min_length)
|
def beach_validation(beach):
    """ Decide if the beach input is valid.
    Parameters:
    (str): A user's input to the beach factor.
    Return:
    (str): A single valid string, such as "1", "0" or "-5" and so on.
    """
    # Set membership replaces the previous ten-clause boolean chain.
    valid_choices = {str(i) for i in range(-5, 6)}
    while beach not in valid_choices:
        print("\nI'm sorry, but " + beach + " is not a valid choice. Please try again.")
        beach = input("\nHow much do you like the beach? (-5 to 5)"
                      + "\n> ")
    return beach
|
def urlify_algo(string, length):
    """replace spaces with %20 and removes trailing spaces

    `length` is the length of the meaningful prefix of `string`; the list
    is filled from the back so each character is moved at most once.
    """
    # convert to list because Python strings are immutable
    char_list = list(string)
    # new_index is the write cursor, moving right-to-left.
    new_index = len(char_list)
    for i in reversed(range(length)):
        if char_list[i] == " ":
            # Replace spaces: slice-assigning a 3-char string spreads it
            # over three list slots.
            char_list[new_index - 3 : new_index] = "%20"
            new_index -= 3
        else:
            # Move characters
            char_list[new_index - 1] = char_list[i]
            new_index -= 1
    # convert back to string; everything before new_index is leftover input.
    return "".join(char_list[new_index:])
|
def pargsort(seq):
    """ Like numpy's argsort, but works on python lists. """
    # http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
    indices = list(range(len(seq)))
    indices.sort(key=seq.__getitem__)
    return indices
|
def length(node):
    """
    Count how many values there are in the linked list.

    Iterative rather than recursive, so very long lists no longer risk
    hitting Python's recursion limit (RecursionError).

    :param node: value of head node, start of list
    :return: int: number of list elements
    """
    count = 0
    while node is not None:
        count += 1
        node = node.next_node
    return count
|
def remove_prefix(word, prefixes=()):
    """
    Remove the first matching prefix from `word`.

    `prefixes` may be a single string or any iterable of strings; the
    first prefix that matches is stripped and the result returned.
    """
    # Tuple default replaces the mutable-default-argument antipattern;
    # a lone string is still treated as one prefix, not as characters.
    if isinstance(prefixes, str):
        prefixes = [prefixes]
    for prefix in prefixes:
        if word.startswith(prefix):
            return word[len(prefix):]
    return word
|
def clean_number(number):
    """Leave only Russian full length numbers.

    Returns None for inputs that are too short or have an invalid leading
    digit; otherwise normalizes to the 11-digit form starting with 7.
    """
    if len(number) < 10:
        return None
    digits = number.replace('+', '')
    if len(digits) == 11:
        # Only numbers starting with 7 or 8 are Russian; normalize to 7.
        if digits[0] not in ('7', '8'):
            return None
        digits = '7' + digits[1:]
    elif len(digits) == 10:
        digits = '7' + digits
    return digits
|
def func1(n, a, b, c):
    """
    Evaluate n/a + n^2/b + n^3/c.
    """
    result = n / a
    result += n**2 / b
    result += n**3 / c
    return result
|
def is_float(input):
    """Checks whether input can be converted to float.

    Also returns False (instead of raising TypeError) for inputs such as
    None or a list that float() cannot accept at all.
    """
    try:
        float(input)
    except (ValueError, TypeError):
        return False
    return True
|
def _calc_rtc_fatalities(cas_rate, first_appliance_time, second_appliance_time):
    """
    Calc the rtc risk
    Parameters:
        cas rate (float): The rate to be applied
        First app (float): The time of first in attendance in decimalised mins
        Second app (float): The time of second in attendance in decimalised mins
    Returns:
        Calculated lives lost
    """
    # NOTE(review): the constants below (0.0024, 0.0202, 0.93, 0.026) look
    # like coefficients from a fitted casualty model — confirm their source
    # before changing. Also assumes first_appliance_time != 0 (used as a
    # divisor below).
    a = first_appliance_time * 0.0024
    b = a + 0.0202
    c = b * 0.93
    # Ratio of second to first attendance time scales the risk.
    d = second_appliance_time / first_appliance_time
    e = (0.026 * d) + 0.93
    f = c * e
    return cas_rate * f
|
def remove_non_ascii(input_string):
    """Remove non-ascii characters (code points >= 128).
    Source: http://stackoverflow.com/a/1342373
    """
    return "".join(ch for ch in input_string if ord(ch) < 128)
|
def deref_vtk(obj):
    """Dereferences the VTK object from the object if possible. This
    is duplicated from `tvtk_base.py` because I'd like to keep this
    module independent of `tvtk_base.py`.
    """
    # Fall back to the object itself when it has no `_vtk_obj` attribute.
    return getattr(obj, '_vtk_obj', obj)
|
def polyFunc(x, a, b, c, d):
    """ Evaluate the 3rd order polynomial a + b*x + c*x^2 + d*x^3. """
    linear = x*b
    quadratic = c*x**2
    cubic = d*x**3
    return a + linear + quadratic + cubic
|
def clear(strlist):
    """Remove empty strings and spaces from sequence.
    >>> clear(['123', '12', '', '2', '1', ''])
    ['123', '12', '2', '1']
    """
    return [stripped for stripped in (s.strip() for s in strlist) if stripped]
|
def display_percent(chunk_size, chunk_percent, last_percent, progress):
    """
    Used to monitor progress of a process. Returns the updated
    last_percent to feed back into the next call. Example useage:
    Progress = 0
    chunk_percent = 10.0
    chunk_size = int(math.ceil(all_files*(chunk_percent/100)))
    for x in all_files:
        Progress += 1
        last_percent = display_percent(chunk_size, chunk_percent, last_percent, Progress)
    """
    percent = int(progress / chunk_size)
    if percent > last_percent:
        print("{0}%".format(percent * chunk_percent))
        return percent
    # BUG FIX: previously fell through and returned None here, which
    # corrupted last_percent for the caller's next iteration.
    return last_percent
|
def is_api_token(token: str) -> bool:
    """
    Checks if the provided token COULD be an api-token (a ':' at index 5).

    Returns False for tokens shorter than 6 characters instead of raising
    IndexError as the previous implementation did.
    :param token:
    :return:
    """
    return len(token) > 5 and token[5] == ":"
|
def build_url(main_url, url_params):
    """Build a URL by joining main_url and the path segments with '/'."""
    tail = "/".join(url_params)
    return "{}/{}".format(main_url, tail)
|
def stereographic_equal_angle_projection_conv_XY_plane_for_MTs(x,y,z):
    """Function to take 3D grid coords for a cartesian coord system and convert
    to 2D stereographic equal angle projection (undefined at z == 1)."""
    scale = 1 - z
    return x / scale, y / scale
|
def split_obs(obs):
    """Split an observation dict into its (state, image) components."""
    state = obs['state']
    image = obs['img']
    return state, image
|
def is_distributed_model(state_dict):
    """
    Determine if the state dict is from a model trained on distributed GPUs
    (every key carries the DataParallel "module." prefix).

    BUG FIX: an empty state_dict previously returned True because all()
    over an empty iterable is vacuously True.
    """
    return bool(state_dict) and all(k.startswith("module.") for k in state_dict)
|
def compute_struc_matching_score(matching_struc, idf_values):
    """
    Computes the structural matching score: the sum of IDF scores of the
    structural features shared by query and related formula.
    Args:
        matching_struc: Set of structural features that exists in both query
        and related formula.
        idf_values: Map of formula term and its IDF score.
    Returns:
        The structural matching score.
    """
    return sum(idf_values[term] for term in matching_struc if term in idf_values)
|
def hello(name):
    """
    Function that returns a greeting for whatever name you enter.
    Usage:
    >>> hello('Emiel')
    'Hello, Emiel!'
    """
    return "Hello, " + name + "!"
|
def hardware_props(hardware):
    """Return the props which should be added to the device running the
    hardware.

    :param hardware: mapping with a "props" entry
    :return: the value stored under "props"
    """
    props = hardware["props"]
    return props
|
def isbool(value):
    """
    Check whether the value is boolean-like.

    Parameters
    ----------
    value : any
        Any value to be checked.

    Returns
    -------
    bool
        True if the value is 1, 0, True or False (judged by its
        lowercased string form); False for anything else.
    """
    return str(value).lower() in ('true', 'false', '1', '0')
|
def find_ss_regions(dssp_residues):
    """Separates parsed DSSP data into groups of secondary structure.
    Notes
    -----
    Example: all residues in a single helix/loop/strand will be gathered
    into a list, then the next secondary structure element will be
    gathered into a separate list, and so on.
    Parameters
    ----------
    dssp_residues : [tuple]
        Each internal list contains:
        [0] int Residue number
        [1] str Secondary structure type
        [2] str Chain identifier
        [3] str Residue type
        [4] float Phi torsion angle
        [5] float Psi torsion angle
        [6] int dssp solvent accessibility
    Returns
    -------
    fragments : [[list]]
        Lists grouped in continuous regions of secondary structure.
        Innermost list has the same format as above.
    """
    # DSSP codes treated as a single "loop" category.
    loops = [" ", "B", "S", "T"]
    current_ele = None
    fragment = []
    fragments = []
    first = True
    for ele in dssp_residues:
        if first:
            first = False
            fragment.append(ele)
        elif current_ele in loops:
            # Consecutive loop-type residues stay in the same fragment.
            if ele[1] in loops:
                fragment.append(ele)
            else:
                fragments.append(fragment)
                fragment = [ele]
        else:
            # Helix/strand regions break on any change of SS code.
            if ele[1] == current_ele:
                fragment.append(ele)
            else:
                fragments.append(fragment)
                fragment = [ele]
        current_ele = ele[1]
    # Bug fix: the trailing fragment was never flushed, so the final
    # secondary-structure region was always dropped (a single-region
    # input returned an empty list).
    if fragment:
        fragments.append(fragment)
    return fragments
|
def get_str_indent(indent):
    """Return a string of *indent* spaces, for building indentation."""
    return indent * ' '
|
def severity_mapping(severity: int) -> int:
    """
    Maps AWS finding severity to demisto severity.
    Args:
        severity: AWS finding severity
    Returns:
        Demisto severity: 1 (1-30), 2 (31-70), 3 (71-100), or 0 for any
        value outside those ranges.
    """
    # Ordered upper bounds; the first band that contains `severity` wins.
    for upper_bound, mapped in ((30, 1), (70, 2), (100, 3)):
        if 1 <= severity <= upper_bound:
            return mapped
    return 0
|
def is_prime(number):
    """
    Check whether *number* is prime.

    :param number: integer to test
    :return: True if prime, False otherwise

    Bug fix: the original returned True for every number below 2
    (0, 1 and negatives are not prime). Trial division now also stops
    at sqrt(number) instead of number - 1, and the commented-out
    experimental versions have been removed.
    """
    if number < 2:
        return False
    divisor = 2
    # Any composite n has a factor <= sqrt(n), so i*i <= n suffices.
    while divisor * divisor <= number:
        if number % divisor == 0:
            return False
        divisor += 1
    return True
|
def convertDBZtoPrec(dbz):
    """
    Convert a radar reflectivity value (dBZ) to rainfall.

    Inverts the Z-R relation Z = 200 * R^1.6 (hence the 1/1.6 = 5/8
    exponent and the factor 200).

    :param dbz: reflectivity value in dBZ
    :return: rainfall value, or 0 when the computed rain is <= 1
    """
    rain = ((10 ** (dbz / 10)) / 200) ** (5 / 8)
    # Values at or below 1 are treated as no precipitation.
    return rain if rain > 1 else 0
|
def parse_model_value(value, context):
    """
    Interpolate *value* with entries from *context*.

    "x is {size}" with size = 5 is interpolated to "x is 5".

    :param value: format string containing {placeholders}
    :param context: dict whose entries fill the placeholders
    :return: the interpolated string
    """
    interpolated = value.format(**context)
    return interpolated
|
def distance(point1, point2):
    """
    Calculate the Euclidean distance between two points.
    Parameters
    ----------
    point1 : array_like
        The first point.
    point2 : array_like
        The second point.
    Returns
    -------
    distance : float
        The distance between point1 and point2.
    """
    squared = sum((a - b) ** 2 for a, b in zip(point1, point2))
    return squared ** 0.5
|
def union(list1, list2):
    """Union of two lists: the elements that appear in one list OR the
    other.
    Args:
        list1 (list): A list of elements.
        list2 (list): A list of elements.
    Returns:
        result_list (list): A list with the union elements.
    Examples:
        >>> union([1,2,3], [2,3,4])
        [1, 2, 3, 4]
    """
    return list(set(list1).union(list2))
|
def trimData(yieldStress, plateauRegionDefiningFactor, xList, yList):
    """Trim data so that ~only the plateau region is considered, to improve
    processing time.

    Cuts off data before the yield point (the first occurrence of
    *yieldStress* in *yList*) and after the end of the plateau region
    (the first value at or above yieldStress * plateauRegionDefiningFactor).
    Raises ValueError when yieldStress is not present in yList.
    """
    plateau_end = yieldStress * plateauRegionDefiningFactor
    start = yList.index(yieldStress)
    x_rest = xList[start:]
    y_rest = yList[start:]
    # Count how many leading values stay below the plateau-ending stress.
    keep = 0
    for stress in y_rest:
        if stress >= plateau_end:
            break
        keep += 1
    return x_rest[:keep], y_rest[:keep]
|
def get_insertions_y(parsed_mutations):
    """Get y coordinates of insertion markers to overlay in heatmap.
    These are the linear y coordinates used in the Plotly graph object,
    i.e., the indices of data["heatmap_y_strains"]. A strain's index is
    repeated once per visible insertion mutation.
    :param parsed_mutations: A dictionary containing multiple merged
        ``get_parsed_gvf_dir`` return "mutations" values.
    :type parsed_mutations: dict
    :return: List of y coordinate values to display insertion markers
    :rtype: list[int]
    """
    return [
        y
        for y, strain in enumerate(parsed_mutations)
        for pos in parsed_mutations[strain]
        for mutation in parsed_mutations[strain][pos]
        if mutation["mutation_type"] == "insertion"
        and not mutation["hidden_cell"]
    ]
|
def extract_validation_results(event):
    """
    Extract the validation report location from a step function event.

    :param event: step function event dict with an "Outputs" list
    :return: the "Location" dict of the first output whose key contains
        "validation-report.json", or None when no such output exists
    """
    # The original had a redundant `else: continue` branch and an implicit
    # None fall-through; behavior is unchanged, just made explicit.
    for output in event["Outputs"]:
        if "validation-report.json" in output["Location"]["Key"]:
            return output["Location"]
    return None
|
def get_user_name(email):
    """Return the username part of *email*, or the input unchanged when it
    contains no "@"."""
    # str.partition returns (email, '', '') when "@" is absent, so [0]
    # reproduces the original pass-through behavior.
    return email.partition("@")[0]
|
def get_branch_name(ref):
    """
    Take a full git ref name and return a more simple branch name.
    e.g. `refs/heads/demo/dude` -> `demo/dude`
    :param ref: the git head ref sent by GitHub
    :return: str the simple branch name
    """
    prefix = 'refs/heads/'
    # Refs not under refs/heads/ (tags, etc.) pass through unchanged.
    return ref[len(prefix):] if ref.startswith(prefix) else ref
|
def tsv2table(text):
    """Parse a "tsv" (tab separated value) string into a list of lists
    of strings (a "table").
    The input text is tabulated using tabulation characters to separate
    the fields of a row and newlines to separate columns.
    The output lines are padded with '' values to ensure that all lines
    have the same length.
    Note that only '\n' is acceptable as the newline character. Other
    special whitespace characters will be left untouched.
    """
    # str.split (not splitlines) is used so trailing empty lines survive.
    parsed_rows = [row.split('\t') for row in text.split('\n')]
    width = max(len(row) for row in parsed_rows)
    # Right-pad every row to the widest row's length.
    return [row + [''] * (width - len(row)) for row in parsed_rows]
|
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
    """Override the automap naming scheme for scalar relationships.

    Appends "_ref" to the lowercased referred class name so the
    relationship attribute does not clash with same-named columns.
    """
    return "{}_ref".format(referred_cls.__name__.lower())
|
def ordinal_to_alpha(sequence):
    """
    Convert from ordinal to alpha-numeric representations.
    Just for funsies :)

    Indices 0-25 map to 'a'-'z', 26-35 to the digits 0-9, and 36 to '?'.
    """
    corpus = ['a','b','c','d','e','f','g','h','i','j','k','l',
              'm','n','o','p','q','r','s','t','u','v','w','x','y','z',
              0, 1, 2, 3, 4, 5, 6, 7, 8, 9, '?']
    return "".join(str(corpus[int(item)]) for item in sequence)
|
def lr_update(
    num_updates: int,
    warmup_updates: int,
    warmup_init_lr: float,
    lr_step: float,
    decay_factor: float,
) -> float:
    """InverseSquareRootSchedule.
    https://github.com/pytorch/fairseq/blob/master/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py#L32
    Args:
        num_updates: number of batches already used.
        warmup_updates: number of batch steps for warm up.
        warmup_init_lr: initial learning rate.
        lr_step: step for increasing learning rate during warm up.
        decay_factor: factor for decreasing learning rate after warm up.
    Returns:
        learning rate multiplicate factor
    """
    in_warmup = num_updates < warmup_updates
    # Linear ramp during warmup, inverse-sqrt decay afterwards.
    if in_warmup:
        lr = warmup_init_lr + num_updates * lr_step
    else:
        lr = decay_factor * num_updates ** -0.5
    # Normalize by the initial lr; a non-positive initial lr yields 0.
    return lr / warmup_init_lr if warmup_init_lr > 0 else 0
|
def _pre_process_txt_records(text):
    """ This looks only for the cases of multiple text records not surrounded by quotes.
    This must be done after flattening but before any tokenization occurs, as this strips out
    the quotes. """
    # NOTE(review): this function is currently a no-op — the loop body is
    # `pass` and the input is returned unchanged, while the docstring
    # describes quote-stripping that is not implemented. Confirm whether
    # this is an unfinished stub or a deliberate placeholder.
    lines = text.split('\n')
    for line in lines:
        pass
    return text
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.