def _expand_ALL_constant(model, fieldnames):
"""Replaces the constant ``__all__`` with all concrete fields of the model"""
if "__all__" in fieldnames:
concrete_fields = [f.name for f in model._meta.get_fields() if f.concrete]
i = fieldnames.index("__all__")
return fieldnames[:i] + concrete_fields + fieldnames[i + 1 :]
return fieldnames
|
def scope_validator(x):
"""
Property: DockerVolumeConfiguration.Scope
"""
valid_values = ["shared", "task"]
if x not in valid_values:
raise ValueError("Scope type must be one of: %s" % ", ".join(valid_values))
return x
|
def euler29(lima=100, limb=100):
"""Solution for problem 29."""
n = set()
for a in range(2, lima + 1):
p = a
for _ in range(2, limb + 1):
p *= a
n.add(p)
return len(n)
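# Quick sanity check with small limits (illustrative values, not part of the original
# solution): Project Euler 29 states that for 2 <= a <= 5 and 2 <= b <= 5 there are
# 15 distinct terms.
assert euler29(5, 5) == 15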
|
def resolve_pattern(pattern, args):
"""
returns a string in which slots have been resolved with args, if the string has slots anyway,
else returns the strng itself (no copy, should we??)
:param pattern:
:param args:
:return:
"""
if args is None or len(args) == 0:
return pattern
elif pattern.find('%') >= 0:
return pattern % args
elif pattern.find("{") >= 0:
# star magic does not work for single args
return pattern.format(*args)
else:
# fixed pattern, no placeholders
return pattern
|
def extract_lsb(origin: int, count: int):
"""Receives an integer number, converts it to it's binary representation (on string)
and returns a string representation of the N least significant bits, with N equals to [count]
Args:
origin (int): integer number from which the LSB's will be extracted
count (int): numbers of bits that shall be returned
Returns:
str: String binary representation of the N LSB's
Example: extract_lsb(10, 2) returns '10'.
First the function converts 10 to '1010' then returns the N last characters from the representation
"""
if origin == 0:
return "00"
if origin == 1:
return "01"
binary_origin = bin(origin)
binary_origin = binary_origin[-count:]
return binary_origin
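# Usage sketch with illustrative values (assumes the corrected body above).
assert extract_lsb(10, 2) == "10"   # bin(10) == '1010'; last 2 bits
assert extract_lsb(2, 3) == "010"   # zero-padded when fewer than `count` bits exist
assert extract_lsb(0, 2) == "00"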
|
def to_bin(data, width):
"""
Convert an unsigned integer to a numpy binary array with the first
element the MSB and the last element the LSB.
"""
data_str = bin(data & (2**width-1))[2:].zfill(width)
return [int(x) for x in tuple(data_str)]
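# Usage sketch with illustrative values: MSB first, masked to `width` bits.
assert to_bin(5, 4) == [0, 1, 0, 1]
assert to_bin(-1, 4) == [1, 1, 1, 1]   # negative values wrap via the 2**width - 1 mask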
|
def create_header(header_format, num_species, gene_length, filename=None):
""" (dict, str) -> str
Return the file header.
"""
if filename:
header = header_format.format(filename, num_species, gene_length)
else:
header = header_format.format(num_species, gene_length)
return header
|
def factI(n):
"""assumes that n is an int > 0
returns n!"""
res = 1
while n > 1:
res = res * n
n -= 1
return res
|
def dbamp(db):
"""Convert db to amplitude"""
return 10 ** (db / 20.0)
|
def generate_neighbours(coordinates):
"""
Returns the coordinates of potential neighbours of a given cell
:param coordinates: (tuple) the coordinates of the cell
:return: (list(tuples(int, int))) the list of the coordinates of the potential neighbours of a cell
Examples:
>>> generate_neighbours((0, 0))
[(0, -1), (-1, -1), (-1, 0), (0, 1), (1, 0), (1, -1)]
>>> generate_neighbours((4, 2))
[(4, 1), (3, 1), (3, 2), (4, 3), (5, 2), (5, 1)]
"""
x = coordinates[1]
y = coordinates[0]
if y % 2 == 0: # If the number of the line is even
return [(y, x-1), (y-1, x-1), (y-1, x), (y, x+1), (y+1, x), (y+1, x-1)]
else:
return [(y, x-1), (y-1, x), (y-1, x+1), (y, x+1), (y+1, x+1), (y+1, x)]
|
def stack_back(flattened, raw):
"""
Organize a new iterable from a flattened list according to raw iterable.
Parameters
----------
flattened : list
flattened list
raw: list
raw iterable
Returns
-------
ret : list
Examples
--------
>>> raw = [[0, 1], [2, [3, 4]]]
>>> flattened = flatten(raw)
>>> flattened
[0, 1, 2, 3, 4]
>>> a = [f + 1 for f in flattened]
>>> a
[1, 2, 3, 4, 5]
>>> stack_back(a, raw)
[[1, 2], [3, [4, 5]]]
"""
flattened_iter = iter(flattened)
result = list()
def _stack(container, items):
for item in items:
if not isinstance(item, (list, tuple)):
container.append(next(flattened_iter))
else:
new_container = list()
container.append(new_container)
_stack(new_container, item)
return container
return _stack(result, raw)
|
def ceil(attrs, inputs, proto_obj):
""" Calculate ceil value for input """
return 'ceil', attrs, inputs
|
def text_level(level, sen_lev,tense_text):
"""Function to calculate the level of the text"""
calcu = {'A1': 1, 'A2': 2, 'B1': 3, 'B2': 4, 'C1': 5, 'C2':6}
    W = int(calcu[level])*0.8       # 80% word level
    S = int(calcu[sen_lev])*0.1     # 10% sentence level
    T = int(calcu[tense_text])*0.1  # 10% temporal value; the definition doesn't exist yet
nr = (W + S + T)
if nr > 5:
return 'C2'
elif nr > 4:
return 'C1'
elif nr > 3:
return 'B2'
elif nr > 2:
return 'B1'
elif nr > 1:
return 'A2'
else:
return 'A1'
|
def select_files(flist, pattern):
""" Remove fnames from flist that do not contain 'pattern'. """
return [fname for fname in flist if pattern in fname]
|
def is_json(_path) -> bool:
"""Return True if file ends with .json, otherwise False."""
if _path.endswith(".json"):
return True
else:
return False
|
def isDBReleaseFile(dbh, lfn):
""" Is the LFN a DBRelease file? """
if dbh:
return dbh.extractVersion(lfn)
else:
return False
|
def _get_bit(epaddr, v):
"""
>>> _get_bit(0, 0b11)
True
>>> _get_bit(0, 0b10)
False
>>> _get_bit(0, 0b101)
True
>>> _get_bit(1, 0b101)
False
"""
return bool(1 << epaddr & v)
|
def newline(text, number=1):
"""returns text with esactly number newlines at the end"""
return text.strip() + ("\n" * number)
|
def sumar_lista(lista):
"""suma un conjunto de valores en una lista
"""
suma = 0
for numero in lista:
suma += numero
return suma
|
def remove_duplicates(list1):
"""
Eliminate duplicates in a sorted list.
Returns a new sorted list with the same elements in list1, but
with no duplicates.
This function can be iterative.
"""
list_unique = []
append = list_unique.append
for element in list1:
if element not in list_unique:
append(element)
return list_unique
|
def get_filenames_in_release(manifestdict):
"""
<Purpose>
Get the list of files in a manifest
<Arguments>
manifestdict: the manifest for the release
<Exceptions>
TypeError, IndexError, or KeyError if the manifestdict is corrupt
<Side Effects>
None
<Returns>
A list of file names
"""
filenamelist = []
for fileinfo in manifestdict['fileinfolist']:
filenamelist.append(fileinfo['filename'])
return filenamelist
|
def multiples_of_3_or_5(limit: int) -> int:
"""Computes the sum of all the multiples of 3 or 5 below the given limit,
using tail recursion.
:param limit: Limit of the values to sum (exclusive).
:return: Sum of all the multiples of 3 or 5 below the given limit.
"""
def loop(acc, num):
if num < 1:
return acc
if num % 3 == 0 or num % 5 == 0:
return loop(acc + num, num - 1)
return loop(acc, num - 1)
return loop(0, limit - 1)
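# Sanity check with the classic example: below 10 the multiples are 3, 5, 6 and 9.
# Note that CPython does not eliminate tail calls, so very large limits can exceed
# the recursion limit despite the tail-recursive formulation.
assert multiples_of_3_or_5(10) == 23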
|
def get_pdistindex(i, j, M):
"""
Return compressed pdist matrix given [i, j] and size of observations M
See http://scipy.github.io/devdocs/reference/generated/scipy.spatial.distance.pdist.html
:param i: column index
:param j: row index
:param M:
"""
if i == j:
raise ValueError
if i < j:
i, j = j, i
return M * i + j - ((i + 2) * (i + 1)) // 2
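# Worked example with illustrative values (assumes the corrected swap above, which
# matches the scipy.spatial.distance.pdist layout). For M = 4 the condensed vector
# stores pairs in the order (0,1), (0,2), (0,3), (1,2), (1,3), (2,3).
assert [get_pdistindex(i, j, 4) for i, j in [(0, 1), (0, 2), (1, 2), (2, 3)]] == [0, 1, 3, 5]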
|
def _roll_negative_time_fields(year, month, day, hour, minute, second):
"""
Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long.
"""
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
        day -= 1  # Subtract 1 for negative hours
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
        month -= 1  # Subtract 1 for negative days
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
        year -= 1  # Subtract 1 for negative months
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second)
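# Worked example taken from the docstring (other field values are illustrative only):
# 5 minutes less 63 seconds rolls back to 3 minutes 57 seconds.
assert _roll_negative_time_fields(2020, 6, 15, 12, 5, -63) == (2020, 6, 15, 12, 3, 57)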
|
def __normalise_size_name(name):
"""Makes sure the name has the correct capitalisation and punctuation."""
return name.lower().replace('_', ' ').replace('-', ' ')
|
def factR(n):
""" Assumes that n is an int > 0
Returns n! """
if n == 1:
return n
else:
return n*factR(n-1)
|
def _get_task_file_name(task):
"""Returns the file name of the compile task. Eg: ${issue}-${patchset}.json"""
return '%s-%s.json' % (task['issue'], task['patchset'])
|
def get_stanford_tag(siera_tag):
"""Returns the corresponding Stanford NER tag on given Siera entity tag"""
    mapping = {
        'Individual': 'PERS',
        'Location': 'LOC',
        'Organization': 'ORG',
        'Brand': 'O',
        'Publication': 'O',
        'Hashtag': 'O',
    }
    return mapping.get(siera_tag, 'O')
|
def sieve(n):
"""[Sieve of Eratosthenes](https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes) - Finds prime numbers
Args:
n (Integer): Maximum value to look for primes under
Returns:
Integer Array: Array of all primes less than n
"""
integers = []
for x in range(n):
integers.append(True)
prime_selected = True
p = 2
while p * p < n and prime_selected:
prime_selected = False
for x in range(p + p, n, p):
integers[x] = False
for x in range(p + 1, n):
if integers[x]:
                p = x
                prime_selected = True
                break
primes = []
for x in range(2, n):
if integers[x]:
primes.append(x)
return primes
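# Quick sanity check with an illustrative bound.
assert sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]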
|
def index(subseq, seq):
"""Return an index of `subseq`uence in the `seq`uence.
Or `-1` if `subseq` is not a subsequence of the `seq`.
The time complexity of the algorithm is O(n*m), where
n, m = len(seq), len(subseq)
>>> index([1,2], range(5))
1
>>> index(range(1, 6), range(5))
-1
>>> index(range(5), range(5))
0
>>> index([1,2], [0, 1, 0, 1, 2])
3
"""
    seq, subseq = list(seq), list(subseq)  # materialise so the slice comparison works for range inputs
    i, n, m = -1, len(seq), len(subseq)
try:
while True:
i = seq.index(subseq[0], i + 1, n - m + 1)
if subseq == seq[i:i + m]:
return i
except ValueError:
return -1
|
def DifferenceLists(entries):
"""
Find difference of one list with another.
Useful for existing lists or complex searches.
Inputs:
entries (list) : list of two lists to
difference [[...],[...]]
Outputs:
diff (list) : difference of all entry lists
"""
if len(entries) > 2:
raise ValueError('Symmetric difference only works on two lists')
entryset = set(entries[0])
diff = list(entryset.symmetric_difference(entries[1]))
# Re-sort if necessary
diff = sorted(diff)
return diff
|
def format_interval(value, year_tmpl='{: 05d}'):
"""
>>> format_interval({'start_year': -9999,
... 'start_month': 1,
... 'start_day': 1,
... 'end_year': 9999,
... 'end_month': 12,
... 'end_day': 31})
'-9999-01-01/9999-12-31'
>>> assert format_interval(None) is None
"""
if value is None:
return None
year_values = [value.pop(key) for key in ('start_year', 'end_year')]
# https://en.wikipedia.org/wiki/ISO_8601#Years
for year in year_values:
assert -9999 <= year <= 9999
start_year, end_year = (year_tmpl.format(y).strip() for y in year_values)
context = dict(value, start_year=start_year, end_year=end_year)
return ('{start_year}-{start_month:02d}-{start_day:02d}'
'/'
'{end_year}-{end_month:02d}-{end_day:02d}').format_map(context)
|
def _parse_semicolon_separated_data(input_data):
"""Reads semicolon-separated Unicode data from an input string.
Reads a Unicode data file already imported into a string. The format is
the Unicode data file format with a list of values separated by
    semicolons. The number of values may differ from one line to
    another.
Example source data file:
http://www.unicode.org/Public/UNIDATA/PropertyValueAliases.txt
Example data:
sc; Cher ; Cherokee
sc; Copt ; Coptic ; Qaac
Args:
input_data: An input string, containing the data.
Returns:
A list of lists corresponding to the input data, with each individual
list containing the values as strings. For example:
[['sc', 'Cher', 'Cherokee'], ['sc', 'Copt', 'Coptic', 'Qaac']]
"""
all_data = []
for line in input_data.split("\n"):
line = line.split("#", 1)[0].strip() # remove the comment
if not line:
continue
fields = line.split(";")
fields = [field.strip() for field in fields]
all_data.append(fields)
return all_data
|
def drop_paragraphs(paragraphs, list_to_drop):
"""function to allow the user to remove paragraphs they feel are unimportant"""
for i in sorted(list_to_drop, reverse=True):
del paragraphs[i - 1]
return paragraphs
|
def squeeze(obj):
"""
Return the item of an array of length 1,
otherwise return original object.
"""
try:
return obj.item()
except (ValueError, AttributeError):
return obj
|
def _url_info(name):
"""
Compose the URL for a json-file with information about the datasets.
:param name: Name of the information e.g. 'datasets' or 'columns'.
:return: String with the full URL.
"""
url = 'https://simfin.com/api/bulk_info/{0}.php'
url = url.format(name)
return url
|
def find_missing_integer(arr):
"""
Find the first missing integer in unsorted array of integers
"""
# segregate integers, with negative (including zero) on right and positives
# on left
left = 0
right = len(arr) - 1
while left < right:
if arr[left] > 0:
left += 1
elif arr[right] <= 0:
right -= 1
else:
arr[left], arr[right] = arr[right], arr[left]
left += 1
right -= 1
# mark indexes of positive integers as negative
for idx in arr[:left+1]:
pos_idx = abs(idx)
if pos_idx < len(arr):
arr[pos_idx-1] = -abs(arr[pos_idx-1])
# find first positive integer
for idx, elt in enumerate(arr[:left+1]):
if elt > 0:
return idx + 1
return left + 1
|
def compara_assinatura(main_sign: list, text_sign: list) -> float:
"""Essa funcao recebe duas assinaturas de texto e deve devolver o grau
de similaridade entre as assinaturas.
Args:
main_sign: param text_sign:
text_sign:
Returns:
float: Grau de similaridade entre as assinaturas
"""
sign_calc = 0
for i, _ in enumerate(main_sign):
sign_calc += abs(main_sign[i] - text_sign[i])
return sign_calc / 6
|
def conjugado (num):
"""Funcion que retorna el conjugado de un numero imaginario
(list 1D) -> list 1D"""
num1 = num[1] * -1
return (num[0], num1)
|
def _last_input_block(output_list):
""" return the index of the last input block in the given list of blocks.
"""
lastindex = 0
for index, block in enumerate(output_list):
if block[0] == "inputBlock":
lastindex = index
return lastindex + 1
|
def coherenstimestring(s):
"""
Returns time string s in COHERENS model compatible format: YYYY/MM/DD;hh:mm:ss:fff
"""
ns = s[0:4] + "/" + s[5:7] +"/" + s[8:10] + ";" \
+ s[11:13] + ":" + s[14:16] +":" + s[17:19] + ":" + s[20:23]
return ns
|
def build_datastore_path(datastore_name, path):
"""Build the datastore compliant path."""
return "[%s] %s" % (datastore_name, path)
|
def to_ucsc_string(triplet):
"""
Convert a triplet to a UCSC string.
Parameters
----------
triplet : (chrom, start, end)
Returns
-------
ucsc_string : str
UCSC-style string, 'chrom:start-end'
"""
return "{0}:{1}-{2}".format(*triplet)
|
def h_eval(data):
"""
Function takes dictionary
Evaluate values and convert string to correct type (boolean/int/float/long/string)
"""
if isinstance(data, dict):
for _k in list(data.keys()):
data[_k] = h_eval(data[_k])
if data[_k] is None or (isinstance(data[_k], dict) and not data[_k]):
data.pop(_k)
return data
if isinstance(data, list) or isinstance(data, tuple) or isinstance(data, set):
res = []
for _k in data:
res.append(h_eval(_k))
if isinstance(data, tuple):
return tuple(res)
if isinstance(data, set):
return set(res)
return res
try:
if isinstance(data, str):
if data.endswith("%"):
data = data[:-1]
if data.lower() == "false":
return False
if data.lower() == "true":
return True
if data.lower() == "n/e":
return None
try:
return int(data)
except Exception:
pass
try:
return float(data)
except Exception:
pass
return data
except Exception:
return data
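# Usage sketch with an illustrative payload: strings are converted to their types,
# "n/e" entries are dropped, and a trailing '%' is stripped before parsing.
assert h_eval({"a": "1", "b": "2.5", "c": "true", "d": "n/e", "e": "15%"}) == \
    {"a": 1, "b": 2.5, "c": True, "e": 15}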
|
def get_cl_string(clpos, pepseq, lbl):
"""
Inserts the lbl sequence into the peptide sequence
"""
return pepseq[:clpos + 1] + "[" + lbl + "]" + pepseq[clpos + 1:]
|
def convert_bc_stage_text(bc_stage: str) -> str:
"""
function that converts bc stage.
:param bc_stage: the string name for business cases that it kept in the master
:return: standard/shorter string name
"""
if bc_stage == "Strategic Outline Case":
return "SOBC"
elif bc_stage == "Outline Business Case":
return "OBC"
elif bc_stage == "Full Business Case":
return "FBC"
elif bc_stage == "pre-Strategic Outline Case":
return "pre-SOBC"
else:
return bc_stage
|
def check_win(d):
"""Input is a dictionary with keys 1-9 according to a numerical
keypad: 789
456
123
function checks win condition for tic tac toe and returns the dicts value"""
    w = ((7, 8, 9), (4, 5, 6), (1, 2, 3),
         (1, 4, 7), (2, 5, 8), (3, 6, 9),
         (7, 5, 3), (9, 5, 1))
for fields in w:
# if not all fields present, can not be a win
if all(f in d for f in fields):
# if all fields the same, return 1st field as result
if len(set(d[num] for num in fields)) == 1:
return d[fields[0]]
return None
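# Usage sketch with an illustrative board: 'X' holds the top row 7-8-9.
assert check_win({7: 'X', 8: 'X', 9: 'X', 1: 'O', 5: 'O'}) == 'X'
assert check_win({7: 'X', 5: 'O'}) is None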
|
def _record_calls(cls):
"""Replace methods on cls with methods that record that they have been called.
Iterate all attributes of cls, and for public methods, replace them with a wrapped method
that records the method called along with the arguments and keyword arguments.
"""
for meth_name, orig_method in cls.__dict__.items():
if meth_name.startswith('_'):
continue
def decorator(orig_method):
def wrapped(self, *args, **kwargs):
full_args = (orig_method.__name__,) + args
if kwargs:
full_args = full_args + (kwargs,)
self._calls.append(full_args)
return orig_method(self, *args, **kwargs)
return wrapped
setattr(cls, meth_name, decorator(orig_method))
return cls
|
def get_classifiers(setup_text):
"""
Return a list of classifiers
"""
# FIXME: we are making grossly incorrect assumptions.
classifiers = [line for line in setup_text.splitlines(False) if '::' in line]
# strip spaces/tabs/quotes/commas
classifiers = [line.strip('\t \'",;') for line in classifiers]
return classifiers
|
def kTdiff(i, j, zs, kTs):
"""Compute the difference vector (kTi/zi - kTj/zj)."""
return kTs[i-1]/zs[i-1] - kTs[j-1]/zs[j-1]
|
def until_ruler(doc):
"""
Utilities to clean jinja template;
Remove all ``|`` and `` `` until the last leading ``|``
"""
lines = doc.split("\n")
new = []
for l in lines:
while len(l.lstrip()) >= 1 and l.lstrip()[0] == "|":
l = l.lstrip()[1:]
new.append(l)
return "\n".join(new)
|
def validate_gain(gain):
"""
Validate the gain in the image
Parameters
----------
gain: float or list of floats
gain value(s)
Returns
-------
missing: boolean
True if gain is missing or invalid
"""
missing = True
if not gain:
return missing
try:
missing = not all(gain)
except TypeError:
missing = False
return missing
|
def get_exchange_trading_pair(table_name):
"""Function to get exchange and trading_pair bc coinbase_pro has an extra '_' """
# for coinbase_pro
if len(table_name.split('_')) == 4:
exchange = table_name.split('_')[0] + '_' + table_name.split('_')[1]
trading_pair = table_name.split('_')[2] + '_' + table_name.split('_')[3]
# for all other exchanges
else:
exchange = table_name.split('_')[0]
trading_pair = table_name.split('_')[1] + '_' + table_name.split('_')[2]
return exchange, trading_pair
|
def bool_or_fail(v):
""" A special variant of :code:`bool` that raises :code:`ValueError`s
if the provided value was not :code:`True` or :code:`False`.
This prevents overeager casting like :code:`bool("bla") -> True`
Parameters
----------
v : mixed
Value to be cast
Returns
-------
b : boolean
The result of the cast
"""
try:
if v.lower() == 'true':
return True
        elif v.lower() == 'false':
            return False
except Exception:
pass
raise ValueError()
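# Usage sketch with illustrative values (assumes the corrected 'false' branch above).
assert bool_or_fail("True") is True
assert bool_or_fail("false") is False
try:
    bool_or_fail("bla")
except ValueError:
    pass  # overeager casts such as bool("bla") -> True are rejected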
|
def dom_level(dns_name):
"""
Get domain level
"""
return dns_name.count(".")
|
def chi2fn_2outcome_wfreqs(N, p, f):
"""
Computes chi^2 for a 2-outcome measurement using frequency-weighting.
The chi-squared function for a 2-outcome measurement using
the observed frequency in the statistical weight.
Parameters
----------
N : float or numpy array
Number of samples.
p : float or numpy array
Probability of 1st outcome (typically computed).
f : float or numpy array
Frequency of 1st outcome (typically observed).
Returns
-------
float or numpy array
N(p-f)^2 / (f*(1-f*)),
        where f* = (f*N + 1)/(N + 2) is the frequency value used in the
statistical weighting (prevents divide by zero errors)
"""
f1 = (f * N + 1) / (N + 2)
return N * (p - f)**2 / (f1 * (1 - f1))
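# Numeric check with illustrative values: for N=100, p=0.5, f=0.4 the weighting uses
# f1 = 41/102, giving 100 * 0.01 / (f1 * (1 - f1)) = 10404 / 2501.
assert abs(chi2fn_2outcome_wfreqs(100, 0.5, 0.4) - 10404 / 2501) < 1e-9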
|
def sudoku(t):
    """Solves the given sudoku. In the 9 x 9 table t some numbers are already filled in.
    Empty cells are marked with 0."""
    def mozne_poteze(u, v):
        # legal digits for cell (u, v): not yet used in its row, column or 3x3 box
        sez_vrstica = [t[u][i] for i in range(0, 9) if t[u][i] != 0]
        sez_stolpec = [t[i][v] for i in range(0, 9) if t[i][v] != 0]
        x = (u // 3) * 3
        y = (v // 3) * 3
        sez_kvadratek = [t[i][j] for i in range(x, x + 3) for j in range(y, y + 3) if t[i][j] != 0]
        sez = sez_kvadratek + sez_stolpec + sez_vrstica
        return [i for i in range(1, 10) if i not in sez]
    def naslednje_polje(u, v):
        if v < 8:
            return (u, v + 1)
        else:
            return (u + 1, 0)
    def sestopaj(u, v):
        # (u, v) is the coordinate at which we must try all possibilities.
        # Move forward to the next empty cell.
        while u < 9 and t[u][v] != 0:
            (u, v) = naslednje_polje(u, v)
        if (u, v) == (9, 0):
            # all cells have been processed and a solution found
            return t
        else:
            # compute all allowed moves for cell (u, v)
            for k in mozne_poteze(u, v):
                t[u][v] = k
                r = sestopaj(u, v)
                if r is None:
                    # undo the move
                    t[u][v] = 0
                else:
                    # a solution has been found
                    return r
            # all moves have been tried, there is no solution
            return None
    # initial call
    return sestopaj(0, 0)
|
def factorial_iter(num: int) -> int:
"""
Return the factorial of an integer non-negative number.
Parameters
----------
num : int
Raises
------
TypeError
if num is not integer.
ValueError
if num is less than zero.
Returns
-------
int
"""
if not isinstance(num, int):
raise TypeError("an integer number is required")
if num < 0:
raise ValueError("a non-negative integer number is required")
product = 1
for factor in range(2, num + 1):
product *= factor
return product
|
def find_missing_number(C, size: int) -> int:
"""Find the missing number.
Find the missing number using the
difference between sum of known and
arithmetic sums.
"""
known_numbers_sum = sum(C)
expected_numbers_sum = (size/2) * (C[0] + C[-1])
return int(expected_numbers_sum - known_numbers_sum)
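# Worked example with illustrative values: for the arithmetic sequence 1..5 with 4
# missing, `size` is the length of the complete sequence.
assert find_missing_number([1, 2, 3, 5], 5) == 4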
|
def factorial(num):
"""
Returns the factorial value of the given number.
    :arg num: Integer value whose factorial we will calculate.
    :return: The value of the factorial, or -1 in case a negative value is passed.
"""
if num >= 0:
if num == 0:
return 1
return num * factorial(num -1)
else:
return -1
|
def _get_inclinations(spectrum, inclinations):
"""Get the inclination angles which will be used.
If all is passed, then the inclinations from the spectrum object will
be used. Otherwise, this will simply create a list of strings of the
inclinations.
Parameters
----------
spectrum: pypython.Spectrum
The spectrum object.
inclinations: list
A list of inclination angles wanted to be plotted.
"""
if type(inclinations) != list:
inclinations = str(inclinations)
if type(inclinations) == str:
inclinations = [inclinations]
if len(inclinations) > 1:
if inclinations[0] == "all": # ignore "all" if other inclinations are passed
inclinations = inclinations[1:]
else:
if inclinations[0] == "all":
inclinations = spectrum.inclinations
return [str(inclination) for inclination in inclinations]
|
def translate_generics_to_class(a_class_generics, a_object_generics,
a_class_name):
"""
Retreive the class name associate to the generic `a_class_name'.
The Generics of the class is `a_class_generics' and the classes
associate to the generics are in `a_object_generics'.
"""
l_object_generics = a_object_generics.replace(" ", "").\
replace("\t", "").split(",")
l_result = None
i = 0
while i < len(a_class_generics) and a_class_generics[i] != a_class_name:
i = i + 1
l_result = None
if i < len(l_object_generics):
l_result = l_object_generics[i]
return l_result
|
def get_line(x0, y0, x1, y1):
""" Returns m and b of y = mx + b equation for the provided points. """
m = (y1 - y0) / (x1 - x0)
b = y0 - m * x0 # y = mx + b -> y - mx = b
return m, b
|
def stat_by_group(stat: str, group: str) -> str:
"""Provides consistant naming to statistic descriptors"""
return f'{stat} by {group}'
|
def rivers_with_station(stations):
"""Given a list of stations, returns a list of corresponding rivers"""
return {i.river for i in stations}
|
def convert_dateranges(kwargs):
"""Creates the DateRange object from the parameters dictionary.
Args:
kwargs: a dict containing the parameters passed in the request.
Returns:
A list containing a DateRange object.
"""
date_range = {
'startDate': kwargs.get('start_date', None),
'endDate': kwargs.get('end_date', None)
}
return [date_range]
|
def get_selected_file_name(sel_user, sel_game):
"""
Returns the name of the file which will be used
"""
if sel_game in ("A", "B", "C", "D", "E", "F"):
print("Working with file: USER{0}_game_{1}".format(sel_user, sel_game))
_csv_file = "resurse/" "USER{0}_game_{1}.csv".format(sel_user, sel_game)
else:
print("Working with file: USER{0}_{1}".format(sel_user, sel_game))
_csv_file = "resurse/" "USER{0}_{1}.csv".format(sel_user, sel_game)
return _csv_file
|
def sort_string(string: str) -> str:
"""Sort a string into alphabetical order."""
return "".join(sorted(string))
|
def binary_encoding(string, encoding = 'utf-8'):
"""
This helper function will allow compatibility with Python 2 and 3
"""
try:
return bytes(string, encoding)
except TypeError: # We are in Python 2
return str(string)
|
def merge_dicts(*dicts):
"""
Given a collection of dictionaries, merge them.
e.g.
merge_dicts({'a': 1, 'b': 2}, {'c': 3, 'd': 4})
returns {'a': 1, 'b': 2, 'c': 3, 'd': 4}
Later dicts overwrite earlier ones.
:param dicts: dictionaries.
:return: A merged dictionary.
"""
return dict((k, v) for d in dicts for k, v in d.items())
|
def get_dim_order(x, y, z):
"""
Returns a tuple with the order of dimensions. Tuple can be used with
DIM_ROT.
"""
if x <= y and x <= z:
if y <= z:
return ('x', 'y', 'z')
else:
return ('x', 'z', 'y')
elif y <= x and y <= z:
if x <= z:
return ('y', 'x', 'z')
else:
return ('y', 'z', 'x')
else:
if x <= y:
return ('z', 'x', 'y')
else:
return ('z', 'y', 'x')
|
def isFloat(string):
""" is the given string a float? """
    try:
        float(string)
    except ValueError:
        return 0
    else:
        return 1
|
def normalise_time(time_str):
"""
Some of the raw Dublin Bus data has invalid times for hours after midnight
(e.g., 25:00 for 1am). This function corrects any time string with this
problem so that we can work with it using Pandas datetimes
Args
---
time_str: str
A time as a string
Returns
---
A time string with only the hour corrected if necessary
"""
hour = time_str.split(":")[0]
if int(hour) >= 24:
normalised_hour = int(hour) % 24
        return time_str.replace(hour, f"{normalised_hour:02}", 1)  # replace only the leading hour field
return time_str
|
def list_assignement(o):
"""
--> moved to tools
Helper function to make initialization with lists and single elements possible
:param o: iterable object or single element
:return: Gives back a list if a single element was given
"""
if o is None:
return []
elif hasattr(o, "__get_item__"):
return list(o)
elif hasattr(o, "__iter__"):
return list(o)
else:
return [o]
|
def get_wait_for_acknowledgement_of(response_modifier):
"""
Gets the `wait_for_acknowledgement` value of the given response modifier.
    Parameters
    ----------
    response_modifier : `None`, ``ResponseModifier``
        The respective response modifier, if any.
Returns
-------
wait_for_acknowledgement : `bool`
"""
if response_modifier is None:
wait_for_acknowledgement = False
else:
wait_for_acknowledgement = response_modifier.wait_for_acknowledgement
return wait_for_acknowledgement
|
def vapour_pressure(temperature_C):
"""Tetens' formula for calculating saturation pressure of water vapor,
return value is in pascals.
https://wahiduddin.net/calc/density_altitude.htm
    NOTE: 1 mb == 100 Pascal
>>> vapour_pressure(22) / 100 # in mb
26.43707387256724
"""
return 100 * 6.1078 * 10**(
(7.5 * temperature_C) / (237.3 + temperature_C))
|
def bold(text: str) -> str:
"""Get the given text in bold.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "**{}**".format(text)
|
def fix_json_fname(fname):
"""Add JSON suffix to file name if it's missing"""
if fname.lower().endswith('.json'):
return fname
else:
return fname + ".json"
|
def get_percentage(totalhp, earnedhp):
"""
    rtype: str, the percentage of `totalhp` that `earnedhp` represents
    eg: (1000, 100) will return '10'
"""
matched_less = totalhp - earnedhp
per_of_totalhp = 100 - matched_less * 100.0 / totalhp
per_of_totalhp = str(int(per_of_totalhp))
return per_of_totalhp
|
def _whichever(x, y):
"""Returns whichever of `x` and `y` is not `None`.
If both `x` and `y` are not `None`, returns `x`.
If both `x` and `y` are `None`, returns `None`.
"""
return y if x is None else x
|
def strip_prefix(prefix, key):
"""
Strip the prefix of baggage items.
:param prefix: Prefix to be stripped.
:type prefix: str
    :param key: Baggage item to be stripped
    :type key: str
    :return: Stripped baggage item
:rtype: str
"""
return key[len(prefix):]
|
def validate_type(arg, types):
"""
>>> types = {'string': 'String'}
>>> validate_type('string', types)
'String'
>>> try:
... validate_type('bbb', types)
... except:
... pass
Error: Field type error, bbb
"""
if arg not in types:
print(f'Error: Field type error, {arg}')
exit(1)
return types.get(arg)
|
def format_duration(dur_ms):
"""Return a time string representing the duration in human readable format."""
if not dur_ms:
return "0ms"
ms = dur_ms % 1000
dur_ms = (dur_ms - ms) / 1000
secs = dur_ms % 60
dur_ms = (dur_ms - secs) / 60
mins = dur_ms % 60
hrs = (dur_ms - mins) / 60
out = ""
if hrs > 0:
out += "%dh" % hrs
if mins > 0:
out += "%dm" % mins
if secs > 0:
out += "%ds" % secs
if ms > 0 or not out:
out += "%dms" % ms
return out
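# Usage sketch with illustrative values; the duration is given in milliseconds.
assert format_duration(3723045) == "1h2m3s45ms"
assert format_duration(0) == "0ms"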
|
def truncate_line(line: str, length: int = 80) -> str:
"""Truncates a line to a given length, replacing the remainder with ..."""
return (line if len(line) <= length else line[: length - 3] + "...").replace("\n", "")
|
def model_tempmax(Sdepth_cm = 0.0,
prof = 0.0,
tmax = 0.0,
tminseuil = 0.0,
tmaxseuil = 0.0):
"""
- Name: TempMax -Version: 1.0, -Time step: 1
- Description:
* Title: Maximum temperature recalculation
* Author: STICS
* Reference: doi:http://dx.doi.org/10.1016/j.agrformet.2014.05.002
* Institution: INRA
* Abstract: recalculation of maximum temperature
- inputs:
* name: Sdepth_cm
** description : snow depth
** inputtype : variable
** variablecategory : state
** datatype : DOUBLE
** default : 0.0
** min : 0.0
** max : 500.0
** unit : cm
** uri :
* name: prof
** description : snow cover threshold for snow insulation
** inputtype : parameter
** parametercategory : constant
** datatype : DOUBLE
** default : 0.0
** min : 0.0
** max : 1000
** unit : cm
** uri :
* name: tmax
** description : current maximum air temperature
** inputtype : variable
** variablecategory : auxiliary
** datatype : DOUBLE
** default : 0.0
** min : 0.0
** max : 100.0
** unit : degC
** uri :
* name: tminseuil
** description : minimum temperature when snow cover is higher than prof
** inputtype : parameter
** parametercategory : constant
** datatype : DOUBLE
** default : 0.0
** min : 0.0
** max : 5000.0
** unit : degC
** uri :
* name: tmaxseuil
** description : maximum temperature when snow cover is higher than prof
** inputtype : parameter
** parametercategory : constant
** datatype : DOUBLE
** default : 0.0
** min :
** max :
** unit : degC
** uri :
- outputs:
* name: tmaxrec
** description : recalculated maximum temperature
** variablecategory : state
** datatype : DOUBLE
** min : 0.0
** max : 500.0
** unit : degC
** uri :
"""
tmaxrec = tmax
if Sdepth_cm > prof:
if tmax < tminseuil:
tmaxrec = tminseuil
else:
if tmax > tmaxseuil:
tmaxrec = tmaxseuil
else:
if Sdepth_cm > 0.0:
if tmax <= 0.0:
tmaxrec = tmaxseuil - ((1 - (Sdepth_cm / prof)) * -tmax)
else:
tmaxrec = 0.0
return tmaxrec
|
def create_split_bounds(N, train_pct):
"""
Computes split bounds for train, dev, and test.
Args:
N (int): Number of data points in the time series
train_pct (float): Percent of data to be used for the training set
Returns:
(int): Length of the training set
(int): Length of the dev set
(int): Length of the test set
"""
train_len = int(round(train_pct * N))
if ((N - train_len) % 2) != 0:
train_len += 1
    # NOTE: We assume the dev and test sets are equal in length.
    test_len = dev_len = int((N - train_len) / 2)
    assert (train_len + test_len + dev_len) == N, \
        "Not all data points are being used. Check create_split_bounds()"
return train_len, dev_len, test_len
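# Usage sketch with illustrative values: 100 points with an 80% train share.
assert create_split_bounds(100, 0.8) == (80, 10, 10)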
|
def correct(frag):
"""
leaves only unambiguous DNA code (ACTG-)
Input:
frag - string of nucleotides
Output:
pr_frag - corrected string of nucleotides
"""
pr_frag = frag.upper()
pr_frag_s = set(pr_frag)
if pr_frag_s != {"A", "C", "G", "T", "-"}:
for letter in pr_frag_s - {"A", "C", "G", "T", "-"}:
pr_frag = pr_frag.replace(letter, "-")
return pr_frag
|
def ballGenerator(d, filled = False, filename=None):
"""Generate text code for a filtration of the d-ball, or the d-1-sphere if plain is set to false.
The filtration used is a simplicial filtration of a d dimension tetraedra, where value is set equal to dim
in order to ensure that the creation is consistent.
A d dimension tetraedra contains all the possible simplex in it so the code simply has to generate all the
strictly increasing tuples of all the dimensions less than d"""
l = []
nbpoints = d + 1
for i in range(d):
dim = i
val = i
vertices = list(range(dim + 1))
while vertices[0] >= 0:
nextline = str(val) + " " + str(dim)
for indice in vertices:
nextline += " " + str(indice)
l.append(nextline)
for k in range(dim, -1, -1):
vertices[k] += 1
if vertices[k] >= nbpoints - dim + k:
vertices[k] = -1
else:
break
for k in range(1, dim + 1):
if vertices[k] == -1:
vertices[k] = vertices[k-1] + 1
if filled:
nextline = str(d) + " " + str(d)
for i in range(nbpoints):
nextline += " " + str(i)
l.append(nextline)
if filename:
with open(filename, 'w', encoding='ascii') as f:
for line in l:
f.write(line)
f.write('\n')
return l
|
def get_free_energy_from_stems(stems, stem_energies):
""" determines the folding free energy from the set of assembled stems """
free_energy = 0.0
for stem in stems:
free_energy += stem_energies[stem]
return(free_energy)
|
def parse_out_transcript_ID(gtf_metadata):
""" Parse GTF metadata in order to extract the transcript ID """
transcript_ID = (gtf_metadata.split('transcript_id "')[1]).split('";')[0]
return(transcript_ID)
|
def binary_search(arr: list, item) -> int:
"""
performs a simple binary search on a sorted list
:param arr: list
:param item:
:return: int
"""
from bisect import bisect_left
i = bisect_left(arr, item)
if i != len(arr) and arr[i] == item:
return i
else:
return -1
|
def is_real_data(file_path):
"""
Tries to determine from the file path if the file is real data or
simulation.
"""
real_data_examples = [
'SingleElectron', 'SingleMuon', 'ElectronHad', 'SingleMu']
return any([e in file_path for e in real_data_examples])
|
import re

def format_summary(translation):
    """ Transforms the output of the `from_batch` function
    into nicely formatted summaries.
    """
    raw_summary, _, _ = translation
    summary = (
        raw_summary.replace("[unused0]", "")
        .replace("[unused3]", "")
        .replace("[PAD]", "")
        .replace("[unused1]", "")
        .replace(" [unused2] ", ". ")
        .replace("[unused2]", "")
        .strip()
    )
    # str.replace does not take a regex, so collapse repeated spaces with re.sub
    summary = re.sub(r" +", " ", summary)
    return summary
|
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
try:
code = func.__code__
except AttributeError:
# Some built-in functions don't have __code__, __defaults__, etc.
return {}
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
|
def trapezoid_area(base_minor, base_major, height):
"""Returns the area of a trapezoid"""
return ((base_major + base_minor) / 2) * height
|
def cluster(data, threshold):
"""A simple numbering clustering algorithm.
'data' should be a list of numbers, which should get clustered.
The 'threshold' is the biggest gap which can exist between elements
    before they will be separated into two clusters.
Returns a list of lists with the original numbers, clustered.
"""
# data.sort()
groups = [[data[0]]]
for x in data[1:]:
if abs(x - groups[-1][-1]) <= threshold:
groups[-1].append(x)
else:
groups.append([x])
return groups
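# Usage sketch with illustrative data (the input is assumed to be sorted).
assert cluster([1, 2, 3, 10, 11, 20], threshold=2) == [[1, 2, 3], [10, 11], [20]]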
|
def argv2string(argv, delimiter=' '):
"""Convert arg list to a string."""
assert len(argv) > 0
arg_str = argv[0]
for arg in argv[1:]:
arg_str += delimiter + arg
return arg_str
|
def extractTwos(m):
"""m is a positive integer. A tuple (s, d) of integers is returned
such that m = (2 ** s) * d."""
# the problem can be break down to count how many '0's are there in
# the end of bin(m). This can be done this way: m & a stretch of '1's
# which can be represent as (2 ** n) - 1.
assert m >= 0
i = 0
while m & (2 ** i) == 0:
i += 1
return i, m >> i
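# Worked example with an illustrative value: 12 == (2 ** 2) * 3.
assert extractTwos(12) == (2, 3)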
|
def quoteBooleans(a_string):
"""Quote booleans
given a string that contains ": true", replace with ": 'true'" (or false)
"""
tmp = a_string.replace(": true", ": 'true'")
tmp = tmp.replace(": false", ": 'false'")
return tmp
|
def check(row):
"""
:param row: list, list of inputs of a row
:return: bool, the result of whether inputs are input correctly
"""
for ch in row:
if len(ch) != 1:
print('Illegal input')
return False
return True
|