content stringlengths 42 6.51k |
|---|
def format_config_str(config: dict):
    """Render a dbt model config dict as a comma-separated key=value string.

    String values are wrapped in single quotes; everything else is emitted
    via its repr-free str form (e.g. ints, bools).
    """
    parts = []
    for key, value in config.items():
        if isinstance(value, str):
            parts.append(f"{key}='{value}'")
        else:
            parts.append(f"{key}={value}")
    return ", ".join(parts)
def hasnumbers(input_string):
    """Return True when the string contains at least one digit character."""
    for character in input_string:
        if character.isdigit():
            return True
    return False
def get_tree_mean(cart_tree):
    """Collapse a CART subtree to a single value (mean of its pruned leaves).

    Child subtrees are recursively replaced in place by their means, then the
    average of the two (now scalar) children is returned.
    """
    for side in ("left", "right"):
        if isinstance(cart_tree[side], dict):
            cart_tree[side] = get_tree_mean(cart_tree[side])
    return (cart_tree["left"] + cart_tree["right"]) / 2
def nside2npix(nside):
    """Give the number of pixels for the given nside.
    Parameters
    ----------
    nside : int
        healpix nside parameter
    Returns
    -------
    npix : int
        corresponding number of pixels (12 * nside**2)
    Notes
    -----
    Unlike healpy's implementation, this simplified version performs NO
    validation: nside is not checked to be a power of 2, and no ValueError
    is ever raised — e.g. nside=7 happily returns 588 (see example below).
    Examples
    --------
    >>> import healpy as hp
    >>> import numpy as np
    >>> hp.nside2npix(8)
    768
    >>> np.all([hp.nside2npix(nside) == 12 * nside**2 for nside in [2**n for n in range(12)]])
    True
    >>> hp.nside2npix(7)
    588
    """
    return 12 * nside * nside |
def get_N_intransit(tdur, cadence):
    """Estimates number of in-transit points for transits in a light curve.
    Parameters
    ----------
    tdur: float
        Full transit duration
    cadence: float
        Cadence/integration time for light curve
    Returns
    -------
    n_intransit: int
        Number of flux points in each transit
    """
    # fix: floor division of two floats yields a float (e.g. 5.0); cast to
    # int so the return type matches the documented contract
    n_intransit = int(tdur // cadence)
    return n_intransit
def allow_state(state):
    """Return True if the state code is for a queued or running job."""
    allowed = {"CG", "PD", "R", "RD", "RS", "SO"}
    return state in allowed
def noun_to_verb(sentence, index):
    """Extract the word at *index* from *sentence* and turn it into a verb.

    :param sentence: str using the vocabulary word in context
    :param index: position of the word once the sentence is split on spaces
    :return: the extracted word, stripped of periods, with 'en' appended
             (e.g. 'bright.' -> 'brighten')
    """
    words = sentence.split(' ')
    extracted = words[index].replace('.', '')
    return extracted + 'en'
def fn_winkler(weight_jaro, pre_matches, pre_scale):
    """Apply the Winkler prefix boost to a standard Jaro similarity.

    The Jaro weight is increased by `pre_scale` units per matching prefix
    character (`pre_matches`), scaled by the remaining distance to 1.0.
    See the warning in jaro_winkler()'s docstring regarding the scale.
    """
    boost = pre_matches * pre_scale * (1.0 - weight_jaro)
    adjusted = weight_jaro + boost
    assert adjusted <= 1.0
    return adjusted
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args |
def split_list(li):
    """Split a list at its midpoint, returning (left_half, right_half).

    For odd lengths the extra element goes to the right half.
    """
    midpoint = len(li) // 2
    left, right = li[:midpoint], li[midpoint:]
    return left, right
def RGB_to_CMYK(r, g, b, gcr=1.0):
    """
    take r,g,b float values (0.0 to 1.0),
    invert to get c,m,y,
    apply GCR (0.0 to 1.0, 0 means no GCR = CMY separation),
    return c,m,y,k as integers (percent values)
    GCR see http://en.wikipedia.org/wiki/Grey_component_replacement
    """
    # each channel is inverted against the average of the other two
    c = 1.0 - (float(g + b) / 2)
    m = 1.0 - (float(r + b) / 2)
    y = 1.0 - (float(r + g) / 2)
    # grey component replacement: pull the shared grey into the K channel
    k = min(c, m, y) * gcr
    c, m, y = c - k, m - k, y - k
    return [int(round(channel * 100)) for channel in (c, m, y, k)]
def counting_sort(array, key_length, key_func):
    """Stable counting sort of *array* by integer keys in [0, key_length).

    key_func maps each element to its sort key; equal-keyed elements keep
    their relative order.
    """
    counts = [0] * key_length
    for item in array:
        counts[key_func(item)] += 1
    # prefix sums: counts[k] becomes the number of items with key <= k
    for k in range(1, key_length):
        counts[k] += counts[k - 1]
    result = [0] * len(array)
    # place items back-to-front so the sort stays stable
    for item in reversed(array):
        key = key_func(item)
        counts[key] -= 1
        result[counts[key]] = item
    return result
def cgap_core_variant_sample(cgap_core_project, institution, variant):
    """Build (but do not post) a variant-sample item dict.

    The item is deliberately not pre-posted to the database so gene list
    association with variant samples can be tested (posting the gene list
    after the variant sample exercises the longer association path).
    """
    return {
        "project": cgap_core_project["@id"],
        "institution": institution["@id"],
        "variant": variant["@id"],
        "CALL_INFO": "some_cgap_core_sample",
        "file": "some_cgap_core_vcf_file",
    }
def _validate_relation_value(self, relation_value):
"""
Validates the given (entity) relation value, checking
if it is a valid relation value.
:type relation_value: Object
:param relation_value: The relation value to be checked.
:rtype: bool
:return: The result of the validation test.
"""
# in case the relation value is valid and
# the relation value is not lazy loaded
if relation_value and not relation_value == "%lazy-loaded%":
# returns true (valid)
return True
# otherwise it must be invalid
else:
# returns false (invalid)
return False |
def _get_changed_nodes(node):
"""
Tester used inside map calls to see if offspring data is empty
"""
return node['offspring'] == [] |
def names_match_process_or_parents(proc, names):
    """Returns whether any of the given names are the name of the given
    psutil.Process or any of its parents."""
    # NOTE(review): proc.name() is lowercased before comparison, but the
    # entries of `names` are not — callers are presumably expected to pass
    # lowercase names already; confirm at call sites.
    if proc is None:
        return False
    elif any(name == proc.name().lower() for name in names):
        return True
    elif proc.parent() is not None and proc.pid == proc.parent().pid:
        # a process that is its own parent (seen for some system/root
        # processes) would recurse forever — stop the walk here
        return False
    else:
        return names_match_process_or_parents(proc.parent(), names) |
def bytes_to_str(value):
    """Convert a bytes value to str; anything else passes through unchanged."""
    if not isinstance(value, bytes) or isinstance(value, str):
        return value
    # map each byte through chr() (equivalent to a latin-1 decode)
    return "".join(chr(byte) for byte in value)
def IPRange(first, last):
    """
    Generate a list of IP addresses
    Args:
        first: the first IP in the range
        last: the last IP in the range
    Returns:
        A list of IPs from first to last, inclusive (list of str)
    """
    # `first`/`last` only need to support <=, += 1 and str() —
    # e.g. ipaddress objects or plain integers both work
    addresses = []
    current = first
    while current <= last:
        addresses.append(str(current))
        current += 1
    return addresses
def tround(fpt: tuple):
    """Return a 2-D point tuple with both coordinates passed through round()."""
    x_coord, y_coord = fpt
    return (round(x_coord), round(y_coord))
def nameAndVersion(s):
    """
    Splits a string into the name and version numbers:
    'TextWrangler2.3b1' becomes ('TextWrangler', '2.3b1')
    'AdobePhotoshopCS3-11.2.1' becomes ('AdobePhotoshopCS3', '11.2.1')
    'MicrosoftOffice2008v12.2.1' becomes ('MicrosoftOffice2008', '12.2.1')
    """
    for idx, ch in enumerate(s):
        if ch in "0123456789":
            candidate = s[idx:]
            # a digit starts the version only if the remainder looks like
            # a pure version string (no separators or a 'v' marker)
            if not any(sep in candidate for sep in (" ", "_", "-", "v")):
                return (s[:idx].rstrip(" .-_v"), candidate)
    # no version number found: original string plus empty version
    return (s, '')
def partition(thelist: list, start_index: int, end_index: int) -> int:
    """
    Partitions a list into 2 and returns the pivot index
    :param thelist The list to divide into 2 partitions to get the pivot index.
    This uses the last element as the pivot of the list
    :type thelist list
    :param start_index
    :type start_index int
    :param end_index
    :type end_index int
    :returns pivot index
    :rtype int
    """
    # Hoare-style two-pointer scan: left walks right, right walks left,
    # out-of-place pairs get swapped; the pivot stays at end_index until
    # the pointers cross, then it is swapped into its final slot.
    pivot = thelist[end_index]
    left_index = start_index
    right_index = end_index - 1
    while left_index <= right_index:
        # walk until we find something on the left side that belongs on the right (less than the pivot)
        while left_index <= end_index and thelist[left_index] < pivot:
            left_index += 1
        # walk until we find something on the right side that belongs on the left(greater than or equal to the pivot)
        while right_index >= start_index and thelist[right_index] >= pivot:
            right_index -= 1
        # swap the items at the left_index and right_index, moving the element that's smaller than the pivot to the left
        # half and the element that's larger than the pivot to the right half
        if left_index < right_index:
            thelist[right_index], thelist[left_index] = thelist[left_index], thelist[right_index]
        # unless we have looked at all the elements in the list and are done partitioning. In that case, move the pivot element
        # into it's final position
        else:
            thelist[end_index], thelist[left_index] = thelist[left_index], thelist[end_index]
    return left_index |
def ConvertStringToValue(string, vartyp):
    """ Convert string to value
    :param str string: a string
    :param str vartyp: type of value, 'str','int','float', or 'bool'
    :return: value (of the requested type) or None if a type error occurred.
    """
    if vartyp == 'int':
        try:
            return int(string)
        except (TypeError, ValueError):
            return None
    if vartyp == 'float':
        try:
            return float(string)
        except (TypeError, ValueError):
            return None
    if vartyp == 'bool':
        # fix: the old branch called str(string) and therefore never
        # produced a bool; interpret common truthy/falsy spellings instead
        text = str(string).strip().lower()
        if text in ('true', '1', 'yes', 'on'):
            return True
        if text in ('false', '0', 'no', 'off'):
            return False
        return None
    if vartyp == 'str':
        return string
    # unknown vartyp: preserve the original behavior of returning None
    return None
def rain(walls):
    """Algorithm that calculates water retained
    Args:
        walls (list): list of walls and their width
    Returns:
        number: quantity of water retained
    """
    # fix: the original guard used `and`, so a non-list input fell through
    # to len() and could crash; `or` rejects bad input before touching it
    if not isinstance(walls, list) or len(walls) < 2:
        return 0
    if not all(isinstance(n, int) for n in walls):  # any non-integer
        return 0
    n = len(walls)
    # precompute running maxima from both ends (O(n) instead of O(n^2))
    left_max = [0] * n
    left_max[0] = walls[0]
    for i in range(1, n):
        left_max[i] = max(left_max[i - 1], walls[i])
    right_max = [0] * n
    right_max[-1] = walls[-1]
    for i in range(n - 2, -1, -1):
        right_max[i] = max(right_max[i + 1], walls[i])
    # water above each interior wall is bounded by the lower of the two maxima
    return sum(min(left_max[i], right_max[i]) - walls[i] for i in range(1, n - 1))
def replace_backslashes(path: str):
    """
    Replaces the backslashes of string-paths with forward slashes
    :param path: a random path that might contain backslashes
    :type path: str
    :return: A string-path with forward slashes
    :rtype: str
    """
    # fix: the arguments were reversed — the old code replaced forward
    # slashes WITH backslashes, the opposite of the documented intent
    return path.replace("\\", "/")
def NetInvIncTax(e00300, e00600, e02000, e26270, c01000,
                 c00100, NIIT_thd, MARS, NIIT_PT_taxed, NIIT_rt, niit):
    """
    Computes Net Investment Income Tax (NIIT) amount assuming that
    all annuity income is excluded from net investment income.
    # NOTE(review): the eNNNNN/cNNNNN names follow Tax-Calculator's IRS
    # variable conventions (interest, dividends, capital gains, etc.) —
    # confirm meanings against that project's variable documentation.
    # NIIT_thd is indexed by filing status (MARS is 1-based, hence -1);
    # NIIT_rt is the tax rate; the incoming `niit` value is overwritten.
    """
    modAGI = c00100  # no foreign earned income exclusion to add
    if not NIIT_PT_taxed:
        # exclude the e26270 pass-through component from e02000
        NII = max(0., e00300 + e00600 + c01000 + e02000 - e26270)
    else:  # do not subtract e26270 from e02000
        NII = max(0., e00300 + e00600 + c01000 + e02000)
    # tax the lesser of net investment income and AGI above the threshold
    niit = NIIT_rt * min(NII, max(0., modAGI - NIIT_thd[MARS - 1]))
    return niit |
def dumper(obj):
    """
    JSON serialize an object.
    Parameters
    ----------
    obj : object
        The object to be serialized.
    Returns
    -------
    JSON encodable version of the passed object: the result of obj.toJSON()
    if available, else the instance's attribute dict, else str(obj).
    """
    try:
        return obj.toJSON()
    except Exception:
        try:
            # fix: the old code called obj.__dict__() — invoking a dict —
            # which always raised, so this branch was dead; vars(obj)
            # returns the attribute dict as intended
            return vars(obj)
        except TypeError:
            return str(obj)
def get_h_index(counts):
    """Return the h-index of *counts* (must be sorted in descending order).

    h is the largest number such that the first h entries are all >= h.
    """
    h = 0
    for citation_count in counts:
        if citation_count < h + 1:
            break
        h += 1
    return h
def _make_type (vendor, field):
"""
Takes an NXM vendor and field and returns the whole type field
"""
return (vendor << 7) | field |
def fiveplates_design_file(field, designID):
    """
    string representation of targets file for field within
    fiveplates_design_files zip file.
    Parameters
    ----------
    field : str
        identifier of field, e.g. 'GG_010'
    designID : design identifier embedded in the filename
    """
    return '{0}_des{1}_targets.txt'.format(field, designID)
def is_valid_file(ext, argument):
    """Check whether *ext* is an accepted file format for *argument*.

    Raises KeyError for unknown argument names (same as before).
    """
    accepted = {
        'input_dataset_path': ['csv'],
        'output_model_path': ['pkl'],
        'input_model_path': ['pkl'],
        'output_results_path': ['csv'],
        'output_plot_path': ['png'],
    }
    allowed_formats = accepted[argument]
    return ext in allowed_formats
def print_block_string(value: str, minimize: bool = False) -> str:
    """Print a block string in the indented block form.
    Prints a block string in the indented block form by adding a leading and
    trailing blank line. However, if a block string starts with whitespace and
    is a single-line, adding a leading blank line would strip that whitespace.

    With minimize=True the leading/trailing newlines are only added when a
    trailing quote/backslash or common indentation forces them.
    """
    if not isinstance(value, str):
        value = str(value)  # resolve lazy string proxy object
    escaped_value = value.replace('"""', '\\"""')
    # Expand a block string's raw value into independent lines.
    lines = escaped_value.splitlines() or [""]
    num_lines = len(lines)
    is_single_line = num_lines == 1
    # If common indentation is found,
    # we can fix some of those cases by adding a leading new line.
    force_leading_new_line = num_lines > 1 and all(
        not line or line[0] in " \t" for line in lines[1:]
    )
    # Trailing triple quotes just looks confusing but doesn't force trailing new line.
    has_trailing_triple_quotes = escaped_value.endswith('\\"""')
    # Trailing quote (single or double) or slash forces trailing new line
    has_trailing_quote = value.endswith('"') and not has_trailing_triple_quotes
    has_trailing_slash = value.endswith("\\")
    force_trailing_new_line = has_trailing_quote or has_trailing_slash
    print_as_multiple_lines = not minimize and (
        # add leading and trailing new lines only if it improves readability
        not is_single_line
        or len(value) > 70
        or force_trailing_new_line
        or force_leading_new_line
        or has_trailing_triple_quotes
    )
    # Format a multi-line block quote to account for leading space.
    skip_leading_new_line = is_single_line and value and value[0] in " \t"
    before = (
        "\n"
        if print_as_multiple_lines
        and not skip_leading_new_line
        or force_leading_new_line
        else ""
    )
    after = "\n" if print_as_multiple_lines or force_trailing_new_line else ""
    return f'"""{before}{escaped_value}{after}"""' |
def vect3_length_sqrd(v):
    """
    Squared length of a 3d vector.
    v (3-tuple): 3d vector
    return (float): squared length
    """
    x, y, z = v[0], v[1], v[2]
    return x * x + y * y + z * z
def get_figure_height(number_categories: int) -> int:
    """
    Given a number of categories to plot gets an appropriate figure height.
    :param number_categories: The number of categories to plot.
    :return: A figure height to be used by plotly.
    """
    # stepped heights: <10 -> 400, <20 -> 500, <30 -> 600, otherwise 800
    for upper_bound, height in ((10, 400), (20, 500), (30, 600)):
        if number_categories < upper_bound:
            return height
    return 800
def histogram_intersect(h1, h2):
    """Element-wise intersection of two histograms.

    :param h1: first histogram (sequence of bin values)
    :param h2: second histogram (sequence of bin values)
    :return: list with the minimum of each aligned bin pair
    """
    return [min(a, b) for a, b in zip(h1, h2)]
def sth_else_conditions(node_conditions):
    """True when the node's conditions include an "else" branch.

    :param node_conditions: container of condition labels
    :return: bool
    """
    has_else = "else" in node_conditions
    return has_else
def ensure_str(val):
    """Converts the argument to a string if it isn't one already.

    bytes/bytearray are decoded with the default (UTF-8) codec; any other
    non-str type raises ValueError.
    """
    if isinstance(val, (bytes, bytearray)):
        return val.decode()
    if isinstance(val, str):
        return val
    raise ValueError('Expected bytes or string')
def is_valid_data(dewF):
    """
    Naive validity check for API data: the dewpoint temperature must not be
    the -9999 sentinel that Wunderground uses for missing values.
    @param {dewF} the response object from Wunderground
    """
    return dewF != -9999
def greet(name: str) -> str:
    """Create a greeting for *name*."""
    return 'Hello, ' + name + '!'
def is_sorted(index):
    """Check whether the ffindex lines are sorted by their first field.
    Args:
        index (list): All lines in the ffindex
    Returns:
        bool: True when every adjacent pair is in non-decreasing order.
    """
    return all(a[0] <= b[0] for a, b in zip(index, index[1:]))
def calc_rso(ra, elevation):
    """Clear-sky solar radiation [MJ m-2 day-1].
    Parameters
    ----------
    ra: pandas.Series, optional
        Extraterrestrial daily radiation [MJ m-2 d-1]
    elevation: float, optional
        the site elevation [m]
    Returns
    -------
    pandas.Series containing the calculated Clear-sky solar radiation
    Notes
    -----
    Based on equation 37 in [allen_1998]_.
    """
    clearness = 0.75 + (2 * 10 ** -5) * elevation
    return clearness * ra
def _slow_but_somewhat_general_hash(*args, **kwargs):
    """
    Attempts to create a hash of the inputs, recursively resolving the most common hurdles (dicts, sets, lists)
    Returns: A hash value for the input
    # NOTE(review): the doctest's set ordering relies on CPython's small-int
    # set iteration order; the tuple built from a set is not guaranteed to be
    # stable across interpreters.
    >>> _slow_but_somewhat_general_hash(1, [1, 2], a_set={1,2}, a_dict={'a': 1, 'b': [1,2]})
    ((1, (1, 2)), (('a_set', (1, 2)), ('a_dict', (('a', 1), ('b', (1, 2))))))
    """
    # single positional argument: unwrap it and convert to a hashable form
    if len(kwargs) == 0 and len(args) == 1:
        single_val = args[0]
        if hasattr(single_val, 'items'):
            # mapping: recurse into values, keep keys as-is
            return tuple(
                (k, _slow_but_somewhat_general_hash(v)) for k, v in single_val.items()
            )
        elif isinstance(single_val, (set, list)):
            # NOTE(review): elements are NOT recursed here — nested lists
            # inside a list would remain unhashable; confirm callers never
            # pass doubly-nested containers
            return tuple(single_val)
        else:
            return single_val
    else:
        # general case: hashable tuple of (positional, keyword) parts
        return (
            tuple(_slow_but_somewhat_general_hash(x) for x in args),
            tuple((k, _slow_but_somewhat_general_hash(v)) for k, v in kwargs.items()),
        ) |
def count_bits_better(x):
    """Return the number of set bits in a word.

    Uses Kernighan's trick (x &= x - 1 clears the lowest set bit), so the
    runtime is O(k) where k is the number of set bits. Python ints never
    wrap, so clearing rather than shifting avoids sign-extension issues.
    """
    if x < 0:
        raise ValueError('x must be nonnegative!')
    set_bits = 0
    while x:
        x &= x - 1  # drop the lowest set bit
        set_bits += 1
    return set_bits
def extract_text(struct):
    """Extract the text for a post struct, prefixing the excerpt when present."""
    body = struct.get('description', '')
    excerpt = struct.get('post_excerpt')
    if not excerpt:
        return body
    return u'<intro>%s</intro>\n%s' % (excerpt, body)
def is_close(x, y, thresh=1e-8):
    """
    Tests if x is close to y: True when |x - y| is strictly below thresh.
    """
    delta = x - y
    return -thresh < delta < thresh
def calc_peak(motion):
    """Calculate the peak absolute response of *motion*."""
    lowest, highest = min(motion), max(motion)
    return max(abs(lowest), highest)
def data_dump(*args):
    """Generic data dump format: space-separated string forms of all args."""
    return ' '.join(f'{arg}' for arg in args)
def _set_status(status):
"""_set_status
set notification title w.r.t. status.
In addition, color and status icon which is used at slack notification is set, too.
:param status:
"""
title = ''
status_icon = ''
if status == 0:
title = 'Process Succeeded'
color = 'good'
status_icon = ':ok_woman:'
else:
title = 'Process Failed'
color = 'danger'
status_icon = ':no_good:'
return title, color, status_icon |
def gen_output(json_dct, *args):
    """Prep json_dct to be stored in Mongo.

    Merges the positional *args into json_dct under a fixed sequence of
    keys — the args must arrive in the order given by `field_names` below.
    'num_stars' isn't necessarily expected to be passed (everything else is).
    Falsy values are skipped rather than stored.

    Args:
        json_dct: dict to be extended in place (and returned).
        *args: values to add, in field order.
    Return: dct
    """
    field_names = ('job_title', 'location', 'date', 'company', 'num_stars')
    for name, value in zip(field_names, args):
        if value:
            json_dct[name] = value
    return json_dct
def _validate_interval_type(interval_type):
"""Validate the given `interval_type` value.
Warnings
--------
This method is intended for internal use only.
"""
if interval_type in [None, "range"]:
return interval_type
if interval_type.startswith("CI="):
interval_type = float(interval_type[3:]) / 100
if 0 <= interval_type <= 1:
return interval_type
raise ValueError("Invalid `interval_type`") |
def letter_score(letter):
    """Returns the Scrabble score of a letter.
    Args:
        letter: a single character string (case-insensitive)
    Raises:
        TypeError if a non-Scrabble character is supplied
    """
    score_map = {
        1: ["a", "e", "i", "o", "u", "l", "n", "r", "s", "t"],
        2: ["d", "g"],
        3: ["b", "c", "m", "p"],
        4: ["f", "h", "v", "w", "y"],
        5: ["k"],
        8: ["j", "x"],
        10: ["q", "z"],
    }
    lowered = letter.lower()
    for score, letters in score_map.items():
        if lowered in letters:
            return score
    # fix: the original raised TypeError("...%s", letter) — the comma made
    # the message a tuple instead of interpolating the letter
    raise TypeError("Invalid letter: %s" % letter)
def sortbyfilenames(spectra, fnames):
    """Return *spectra* reordered by ascending filename in *fnames*."""
    order = sorted(range(len(fnames)), key=lambda pos: fnames[pos])
    return [spectra[pos] for pos in order]
def relpath(file):
    """
    Always locate to the correct relative path.
    :param file: The wanted-to-get file location
    :return: An absolute path to the file requested
    """
    from sys import _getframe
    from pathlib import Path
    # _getframe(1) is the CALLER's frame, so the path is resolved relative
    # to the file that invoked relpath(), not this module.
    # NOTE(review): sys._getframe is CPython-specific; this will not work
    # on interpreters that don't expose frame introspection.
    frame = _getframe(1)
    curr_file = Path(frame.f_code.co_filename)
    return str(curr_file.parent.joinpath(file).resolve()) |
def get_new_sol_file(test_nr):
    """Return the path of the new solution file for test *test_nr*."""
    return f"test/{test_nr}-new.sol"
def to_string(byte_array):
    """
    Converts the given byte array to a String.
    :param byte_array: The byte array to be converted (UTF-8), or None.
    :return: The decoded string, or None when the input is None.
    """
    if byte_array is None:
        return None
    return byte_array.decode("utf-8")
def bisection_search(input_lines, elemtype1, numel, global_first_elem_idx):
    """
    Returns the 0-indexed first index where the element type changes
    # NOTE(review): assumes input_lines is partitioned — a run of elemtype1
    # lines followed by a run of a different type; the element type is read
    # from the second whitespace-separated token of each line.
    # NOTE(review): the recursion slices input_lines and re-derives indices
    # from the slice; confirm against test data that global_first_elem_idx
    # bookkeeping stays aligned with the original (unsliced) list.
    """
    # single element left: the change point is the accumulated global index
    if numel == 1:
        return global_first_elem_idx
    half_idx = numel//2
    etype = int(input_lines[half_idx - 1].split()[1])  # 0-indexed
    if etype == elemtype1:
        # midpoint still has the first type: the change is in the right half
        global_first_elem_idx += half_idx
        return bisection_search(input_lines[half_idx:], elemtype1, numel-half_idx, global_first_elem_idx)
    elif etype != elemtype1:
        # midpoint already has the new type: the change is in the left half
        return bisection_search(input_lines[:half_idx], elemtype1, half_idx, global_first_elem_idx) |
def Checkmissingligands(Lig_list, Lig_aux):
    """It checks the ligand PDB files that are missing
    PARAMETERS
    ----------
    Lig_list : List with the ligand PDB filenames
    Lig_aux : List with the ligand names
    RETURNS
    -------
    Missing_lig : List of the names of the missing ligand PDB files
    """
    return [name for name in Lig_aux if name not in Lig_list]
def ixor(f, g):
    """Bitwise XOR of two ISPs (incompletely specified functions).

    f and g are given as [on, off] bit-set pairs; the result's on-set is
    where exactly one input is on, and its off-set where both agree.
    """
    on_f, off_f = f[0], f[1]
    on_g, off_g = g[0], g[1]
    on_set = (on_f & off_g) | (off_f & on_g)
    off_set = (on_f & on_g) | (off_f & off_g)
    return [on_set, off_set]
def moffat_r(r, alpha, beta):
    """
    Moffat profile evaluated at radius r.
    :param r: radial coordinate
    :param alpha: core width parameter
    :param beta: power-law slope parameter
    :return: profile amplitude at r
    """
    normalization = 2. * (beta - 1) / alpha ** 2
    return normalization * (1 + (r / alpha) ** 2) ** (-beta)
def is_mobile(phone_number):
    """Determine whether the given telephone number belongs to a mobile
    (cell) phone — i.e. its first digit is 7, 8 or 9."""
    first_digit = phone_number[:1]
    return first_digit in ('7', '8', '9')
def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all occurrences of pattern in text,
    or an empty list if not found.
    Runtime: O(t*p) needs to go through each letter in text and pattern to check
    if pattern occurs and if it also occurs multiple times.
    Space complexity: O(t)
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
    pattern_index = []
    # NOTE(review): `flag` is never used — dead variable
    flag = None
    # NOTE(review): `first_pattern` is also never used
    first_pattern = 0
    index_stop = len(text) - len(pattern) + 1 #to not include indexes that will not be compared
    if pattern == '': #all strings contain empty string
        for i in range(len(text)): #worst case for space complexity
            pattern_index.append(i) #appends all index
        return pattern_index
    for i in range(index_stop):
        if text[i] == pattern[0]:
            if len(pattern) == 1:
                pattern_index.append(i)
            for j in range(1, len(pattern)):
                if i+j >= len(text):
                    break
                elif text[i+j] == pattern[j]:
                    # NOTE(review): this increment has NO effect on the loop —
                    # range(index_stop) was already materialized above; it looks
                    # like a leftover from a while-loop version. Confirm intent.
                    index_stop += 1
                    if len(pattern)-1 == j: #check if we're at the end of the pattern
                        pattern_index.append(i)
                else:
                    break
    return pattern_index |
def get_pattern_precise_quicker(guess: str, solution: str):
    """generates the patterns for a guess

    Wordle-style hint string, one char per guess letter:
    'g' = right letter in the right spot, 'y' = right letter elsewhere,
    'b' = letter absent (or all its copies already accounted for).
    """
    hint = ""
    for index, letter in enumerate(guess):
        if not letter in solution:
            hint += "b"
        else:
            if letter == solution[index]:
                hint += "g"
            else:
                # only color yellow if not already marked in other yellow or any green
                # budget = occurrences of this letter in the solution
                letter_solution = solution.count(letter)
                # copies of this letter already consumed earlier in the guess
                letter_guess_already = guess[:index].count(letter)
                if letter_solution > letter_guess_already:
                    # copies of this letter that are exact (green) matches anywhere
                    letter_green = sum([l == letter and l == solution[i] for i, l in enumerate(guess)])
                    if letter_solution > letter_guess_already + letter_green:
                        hint += "y"
                    else:
                        hint += "b"
                else:
                    hint += "b"
    return hint |
def _clean(split_list):
"""Strips and attempts to convert a list of strings to floats."""
def try_float(s):
try:
return float(s)
except ValueError:
return s
return [try_float(s.strip()) for s in split_list if s] |
def cluster_hierarchically(active_sites):
    """
    Cluster the given set of ActiveSite instances using a hierarchical algorithm. #
    Input: a list of ActiveSite instances
    Output: a list of clusterings
            (each clustering is a list of lists of Sequence objects)
    """
    # Fill in your code here!
    # NOTE(review): this is an unimplemented stub — it ignores active_sites
    # and always returns an empty clustering list.
    return [] |
def _all_pages(topfolder):
"""Return list of all page dictionaries in a folder and its sub folders
Where topfolder is the folder dictionary"""
# search through folders
page_list = []
if "folders" in topfolder:
for folder in topfolder['folders'].values():
page_list.extend(_all_pages(folder))
if "pages" in topfolder:
for page in topfolder['pages'].values():
page_list.append(page)
return page_list |
def safe_cast(value, to_type, default=None):
    """Cast a value to another type safely.
    :param value: The original value.
    :param type to_type: The destination type.
    :param default: Returned when the cast raises ValueError or TypeError.
    """
    try:
        result = to_type(value)
    except (ValueError, TypeError):
        return default
    return result
def is_url_belong(url, baseurl):
    """
    is the url belong to the baseurl.
    the check logic is strict string prefix match.
    """
    return url.startswith(baseurl)
def version_str(version):
    """
    Convert a version tuple (or list) or string to a string.
    Will return major.minor.release kind of format.

    Returns None for any other input type (preserving the original
    fall-through behavior, now made explicit).
    """
    if isinstance(version, str):
        return version
    # generalized: lists of version components are accepted alongside tuples
    if isinstance(version, (tuple, list)):
        return '.'.join(str(int(part)) for part in version)
    return None
def sanitize_title(title):
    """Sanitize the title so that a valid API request can be made.
    Discards everything after the first ':' character and removes '.'
    characters.
    Returns:
        lowercase version of the title.
    """
    # Discard everything after the colon
    title = title.split(':')[0]
    # fix: str.replace returns a new string — the old code discarded the
    # result, so periods were never actually removed
    title = title.replace('.', '')
    return title.lower()
def get_data_dict(value_dict):
    """Make json line data: a skeleton record filled from value_dict.

    Only the known keys below may appear in value_dict; an unknown key
    trips the assertion. Missing keys stay None.
    Returns:
        dict with the fixed key set.
    """
    record = {
        'file_name': None,
        'is_train': None,
        'scale': None,
        'parts': None,
        'visibility': None,
        'num_parts': None,
        'obj_pos': None,
    }
    for key, value in value_dict.items():
        assert key in record, "No Key - '{}'".format(key)
        record[key] = value
    return record
def checkSyntax(In):
    """Parse a dice-roll expression like '2d6' into [count, dice_type].

    In: input string from the end user — optional leading digits (the
    number of dice, default 1) followed by a dice type ('d4'..'d20',
    case-insensitive on the leading d).
    Returns:
        [number_of_dice, dice_type] on success, the string 'error' otherwise.
    """
    valid_dice = ('d4', 'd6', 'd8', 'd10', 'd12', 'd20',
                  'D4', 'D6', 'D8', 'D10', 'D12', 'D20')
    count_text = ''
    dice_type = ''
    for pos, ch in enumerate(In):
        if ch in '1234567890':
            count_text += ch
        elif ch in ('d', 'D'):
            # everything from the 'd' onward is the dice-type token
            dice_type = In[pos:]
            break
        else:
            break
    if count_text == '':
        count_text = '1'
    try:
        count = int(count_text)
    except ValueError:
        return 'error'
    if dice_type in valid_dice:
        return [count, dice_type]
    return 'error'
def sum_series(n, n0=0, n1=1):
    """
    compute the nth value of a summation series.
    :param n0=0: value of zeroth element in the series
    :param n1=1: value of first element in the series

    Generalizes fibonacci() and lucas(): each element is the sum of the
    previous two, seeded by n0 and n1. sum_series(n, 0, 1) == fibonacci(n)
    and sum_series(n, 2, 1) == lucas(n); other seeds (e.g. 3, 2) generate
    unnamed series. Defaults give the fibonacci series.
    """
    previous, current = n0, n1
    if n == 0:
        return previous
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current
def validate_repository(repo):
    """
    Sanity check to validate the repository string.
    Args:
        repo (string): Repository path
    Returns:
        isvalid (bool): True when repo is a non-empty absolute path
        (starts with '/' after stripping whitespace).
    """
    if repo is None:
        return False
    return repo.strip().startswith("/")
def StripJSONComments(stream):
    """Strips //-style comments from a stream of JSON. Allows un-escaped //
    inside string values.
    """
    # Previously we used json_minify to strip comments, but it seems to be pretty
    # slow and does more than we need. This implementation does a lot less work -
    # it just strips comments from the beginning of the '//' delimiter until end
    # of line, but only if we're not inside a string. For example:
    #
    #   {"url": "http://www.example.com"}
    #
    # will work properly, as will:
    #
    #   {
    #     "url": "http://www.example.com" // some comment
    #   }
    #
    # Character-by-character state machine with three pieces of state:
    # whether we're inside a string, inside a comment, and the previous
    # character (to detect the two-character '//' delimiter).
    result = ""
    last_char = None
    inside_string = False
    inside_comment = False
    buf = ""
    for char in stream:
        if inside_comment:
            if char == '\n':
                inside_comment = False
            else:
                continue
        else:
            if char == '/' and not inside_string:
                # hold back this '/' — it may start a comment; if the previous
                # held-back char was also '/', we are now inside a comment
                if last_char == '/':
                    inside_comment = True
                last_char = char
                continue
            else:
                # a lone held-back '/' turned out not to start a comment;
                # emit it before the current character
                if last_char == '/' and not inside_string:
                    result += '/'
                if char == '"':
                    inside_string = not inside_string
        last_char = char
        result += char
    return result |
def get_fno(obj):
    """
    Try to get the best fileno of a obj:
    * None stays None.
    * An integer is returned as-is.
    * An object with a callable fileno() method yields fileno()'s result.
    Anything else raises TypeError.
    """
    if obj is None:
        return None
    if isinstance(obj, int):
        return obj
    fileno = getattr(obj, "fileno", None)
    if callable(fileno):
        return fileno()
    raise TypeError("Expected None, int or fileobject with fileno method")
def get(obj, field):
    """ Get the field from the given dict using dot notation;
    returns None as soon as any path segment is missing. """
    current = obj
    for segment in field.split('.'):
        if not current or segment not in current:
            return None
        current = current[segment]
    return current
def q(s):
    """ Wrap the string form of the given value in single quotes """
    text = str(s)
    return "'%s'" % text
def new_struct(line):
    """
    Determines if a new structure begins on line in question.
    Currently only works for multiple structure files containing these key
    words in their header.
    Convention of Connect format (ct format) is to include 'ENERGY = value'
    (value left blank if not applicable)
    Support for additional formats will be added as needed
    """
    header_keywords = ('Structure', 'dG', 'ENERGY')
    return any(keyword in line for keyword in header_keywords)
def fip(data, constant):
    """Fielding Independent Pitching (FIP).

    :param data: mapping with 'hr', 'bb', 'hbp', 'so' and 'ip' stats
    :param constant: league FIP constant added to the ratio
    :returns: (13*HR + 3*(BB+HBP) - 2*SO) / IP + constant
    """
    numerator = 13 * data["hr"] + 3 * (data["bb"] + data["hbp"]) - 2 * data["so"]
    return numerator / data["ip"] + constant
def _check_extension(file_path, file_type: str) -> bool:
""" Check the correct file type has been selected.
Args:
file_path (file): The path to the file containing two columns of data, 1 period and 1 data-point for 1 sku.
file_type (str): specifying 'csv' or 'text'
Returns:
bool:
"""
if file_path.endswith(".txt") and file_type.lower() == "text":
flag = True
elif file_path.endswith(".csv") and file_type.lower() == "csv":
flag = True
else:
flag = False
return flag
# rewrite all of the to deal with database tables and rows instead of csv files. |
def divide_list_into_equal_chunks(alist, chunks):
    """
    Divide a list into consecutive chunks of size *chunks*
    (the final chunk may be shorter).
    :param alist: list
    :param chunks: int
    :return: list of lists
    """
    result = []
    for start in range(0, len(alist), chunks):
        result.append(alist[start:start + chunks])
    return result
def quotestrip(word):
    """Remove quotes and/or double quotes around identifiers.

    Matching quote pairs are peeled off repeatedly; empty/falsy input
    yields None.
    """
    if not word:
        return None
    for quote in ("'", '"', "'", '"'):
        while len(word) >= 1 and word.startswith(("'", '"')) and word.endswith(word[0]):
            word = word[1:-1]
    return word
def pv(rate, nper, pmt, fv):
    """Calculate the present value of an asset.
    Parameters
    ----------
    rate : float
        Interest rate per period.
    nper : int
        Number of payment periods.
    pmt : float
        Constant payment made each period.
    fv : float
        Future value, i.e., balance after the last
        payment is made.
    Returns
    -------
    float
    """
    # zero-rate case avoids division by zero: payments simply accumulate
    if rate == 0:
        return -(fv + pmt * nper)
    growth = (1 + rate) ** nper
    annuity = pmt * (growth - 1) / rate
    return -(fv + annuity) / growth
def find_homology(long, short):
    """
    :param long: str, the long sequence to be compared
    :param short: str, the short sequence to be compared
    :return: str, the window of *long* best matching *short*

    Slides a len(short)-wide window across *long*, scoring each window by
    the number of positions where the characters agree; the earliest window
    with the highest score wins.
    """
    window_size = len(short)
    total_windows = len(long) - window_size + 1

    def match_score(window):
        # count of aligned positions with identical characters
        return sum(1 for a, b in zip(window, short) if a == b)

    best_window = long[:window_size]
    best_score = match_score(best_window)
    for start in range(1, total_windows):
        window = long[start:start + window_size]
        score = match_score(window)
        # strict improvement only, so ties keep the earliest window
        if score > best_score:
            best_score = score
            best_window = window
    return best_window
def varintSize(value):
    """Compute the size of a varint value.

    Each varint byte carries 7 payload bits, so the n-byte limit is
    2**(7*n) - 1; anything above the 9-byte limit takes 10 bytes.
    """
    limits = (
        0x7f,
        0x3fff,
        0x1fffff,
        0xfffffff,
        0x7ffffffff,
        0x3ffffffffff,
        0x1ffffffffffff,
        0xffffffffffffff,
        0x7fffffffffffffff,
    )
    for nbytes, limit in enumerate(limits, start=1):
        if value <= limit:
            return nbytes
    return 10
def generate_primes(n: int) -> list:
    """Generate the list of primes up to and including n.

    Uses a genuine sieve of Eratosthenes (boolean array, cross off
    multiples).  The previous version re-filtered the whole candidate list
    once per prime, which is far slower, and its docstring's "sieve" claim
    did not match the code.

    :param n: upper bound (inclusive); values below 2 yield an empty list
    :return: list of primes in ascending order
    """
    if n < 2:
        return []
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Start at p*p: smaller multiples were crossed off by smaller primes.
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i in range(2, n + 1) if is_prime[i]]
def mask_percentage_text(mask_percentage):
    """
    Generate a formatted string representing the percentage that an image is masked.

    Args:
        mask_percentage: The mask percentage.

    Returns:
        The mask percentage formatted as a string (two decimal places, '%' suffix).
    """
    return f"{mask_percentage:3.2f}%"
def camel_case(text: str, delimiter: str = ' ') -> str:
    """
    Capitalize every delimiter-separated word and join them together.

    :param text: text to convert
    :param delimiter: delimiter to split words, defaults to the space character
    :return: camel case string
    """
    capitalized_words = (word.capitalize() for word in text.split(delimiter))
    return ''.join(capitalized_words)
def fast_label_binarize(value, labels):
    """Faster version of label binarize

    `label_binarize` from scikit-learn is slow when run 1 label at a time.
    `label_binarize` also is efficient for large numbers of classes, which is not
    common in `megnet`

    Args:
        value: Value to encode
        labels (list): Possible class values
    Returns:
        ([int]): List of integers
    """
    if len(labels) == 2:
        # Binary case: a single indicator for the first label suffices.
        return [1 if value == labels[0] else 0]
    encoded = [0] * len(labels)
    try:
        encoded[labels.index(value)] = 1
    except ValueError:
        # Unknown value: keep the all-zero vector.
        pass
    return encoded
def parse_rank(r):
    """ Used to parse the rank of one's standing.
    """
    # Missing "rangNr" keys default to rank 0.
    raw_rank = r.get("rangNr", 0)
    return int(raw_rank)
def list_inventory(inventory):
    """
    :param inventory: dict - an inventory dictionary.
    :return: list of tuples - (key, value) pairs for entries with a positive count.
    """
    return [(name, count) for name, count in inventory.items() if count > 0]
def option_name_to_variable_name(option: str):
    """
    Convert an option name like `--ec2-user` to the Python name it gets mapped to,
    like `ec2_user`.
    """
    # partition() drops only the FIRST '--' (matching replace('--', '', 1)),
    # then the remaining dashes become underscores.
    head, sep, tail = option.partition('--')
    return (head + tail).replace('-', '_')
def _new_board(num_rows: int, num_cols: int, bombs: list) -> tuple:
"""Create a board with bombs and a mask that is all False."""
board = []
mask = []
for r in range(num_rows):
row_b = []
row_m = []
for c in range(num_cols):
row_m.append(False)
if [r, c] in bombs or (r, c) in bombs:
row_b.append(".")
else:
row_b.append(0)
board.append(row_b)
mask.append(row_m)
return board, mask |
def _epsilon(i, j, k):
"""
Levi-Civita tensor
"""
assert i>=0 and i<3, "Index i goes from 0 to 2 included"
assert j>=0 and j<3, "Index j goes from 0 to 2 included"
assert k>=0 and k<3, "Index k goes from 0 to 2 included"
if (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]:
return +1
if (i, j, k) in [(2, 1, 0), (0, 2, 1), (1, 0, 2)]:
return -1
return 0 |
def ship_shoot(ship, x: float, y: float):
    """Make a ship fire at the designated coordinates
    ship: The ship which to move
    x: X coordinate
    y: Y coordinate
    """
    return f"{ship} shoots at {x},{y}"
def hass_to_hue_brightness(value):
    """Convert hass brightness 0..255 to hue 1..254 scale."""
    # Rescale, then clamp the bottom end so Hue never receives 0.
    scaled = round((value / 255) * 254)
    return scaled if scaled > 1 else 1
def get_GeoArrayPosition_from_boxImYX(boxImYX):
    """Return row_start,row_end,col_start,col_end and assumes boxImYX as [UL_YX,UR_YX,LR_YX,LL_YX)."""
    upper_left = boxImYX[0]
    lower_right = boxImYX[2]
    row_start, col_start = upper_left
    row_stop, col_stop = lower_right
    # The LR corner is exclusive, so the inclusive end indices are one less.
    return row_start, row_stop - 1, col_start, col_stop - 1
def is_pass_transistor(pip_json):
    """ Returns boolean if pip JSON indicates pip is a pass transistor.

    Always returns False if database lacks this information.
    """
    try:
        raw = pip_json['is_pass_transistor']
    except KeyError:
        # Older databases omit the key entirely.
        return False
    return bool(int(raw))
def parseFields(fields, output):
    """ Take a string of fields encoded as
        key1=value1|key2=value2|...
    and add the keys and values to the output dict.

    Values that parse as integers are stored as ints; everything else is
    kept as the raw string. Returns the (mutated) output dict.

    Note: the original docstring claimed comma-separated fields, but the
    code has always split on '|'.
    """
    for field in fields.split('|'):
        key, value = field.split('=')
        try:
            value = int(value)
        except ValueError:
            # Not an integer -> keep the raw string value.
            # (Was a bare `except:` which also swallowed KeyboardInterrupt.)
            pass
        output[key] = value
    return output
def get_coord_value(coord, model_coord_range):
    """
    find coord index for a given latitude or longitude array
    """
    if not -360 <= coord <= 360:
        print("** Satellite coordinate outside range -360 - 360 degrees")
        return 0
    # Index of the grid value closest to coord; ties go to the first match.
    distances = [abs(grid_value - coord) for grid_value in model_coord_range]
    return distances.index(min(distances))
def max_precision(values):
    """
    Given a series of values (such as a :class:`.Column`) returns the most
    significant decimal places present in any value.

    :param values:
        The values to analyze. Non-None entries are expected to be
        :class:`decimal.Decimal` instances (they must support
        ``normalize().as_tuple()``).
    :return: int, the maximum number of decimal places seen (never negative).
    """
    max_places = 0
    for value in values:
        if value is None:
            continue
        # normalize() strips trailing zeros, so Decimal("1.10") counts as 1
        # place; negating the exponent converts it into a place count.
        # (The original looped with enumerate() but never used the index.)
        places = -value.normalize().as_tuple().exponent
        if places > max_places:
            max_places = places
    return max_places
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.