content stringlengths 42 6.51k |
|---|
def fix_month_format(element):
    """
    Convert a Spanish month abbreviation inside a string into its
    zero-padded month number.

    Args:
        element (:obj:`str`): text containing a Spanish month abbreviation
            ('ene', 'feb', ..., 'dic').
    Returns:
        :obj:`str`: the text with the abbreviation replaced by the
        two-digit month number (e.g. '05' for 'may', '12' for 'dic').
    """
    # BUG FIX: 'sept', 'oct' and 'nov' all mapped to 8 in the original
    # table; corrected to the real calendar numbers 9, 10 and 11.
    meses = {'ene': 1, 'feb': 2, 'mar': 3, 'abr': 4, 'may': 5, 'jun': 6,
             'jul': 7, 'ago': 8, 'sept': 9, 'oct': 10, 'nov': 11, 'dic': 12}
    for word, numero in meses.items():
        # zfill keeps single-digit months as '0X' without producing the
        # three-character '012' that "'0' + str(12)" used to emit.
        element = element.replace(word, str(numero).zfill(2))
    return element
def dot(v1, v2):
    """Dot product of two 2-D vectors encoded as complex numbers.

    Args:
        v1 (complex): First vector.
        v2 (complex): Second vector.
    Returns:
        double: the real dot product v1.x*v2.x + v1.y*v2.y.
    """
    product = v1 * v2.conjugate()
    return product.real
def enforce_int(addr):
    """
    Normalize an I2C device address to an int.

    :param addr: (str) device address in hex, e.g. '0x48',
                 (int) if already an integer
    :return: the address as an int
    :raises ValueError: if *addr* is neither an int nor a hex string
    """
    # isinstance is the idiomatic type check; type(x) == T rejects
    # subclasses for no benefit here.
    if isinstance(addr, int):
        return addr
    if isinstance(addr, str) and '0x' in addr:
        return int(addr, 16)
    raise ValueError('device address must be int or hex string')
def load_assemble(payload):
    """Assemble a payload string from a list of string fields.

    :param payload: iterable of string fragments.
    :return: the fragments concatenated in order.
    """
    # str.join is linear; the original "+=" loop is worst-case quadratic
    # and non-idiomatic.
    return "".join(payload)
def nest_map_structure(
    func,
    *structures,
    mapping_type=dict,
    sequence_type=(tuple, list),
):
    """
    Calls func(*leaves) on each corresponding group of leaf elements of the
    given structures and rebuilds the same nested structure around the results.

    See tensorflow.nest.map_structure.

    Args:
        func: callable applied at the leaves; receives one leaf per structure.
        *structures: one or more nested structures with identical layout.
        mapping_type: type(s) recursed into by key; pass something falsy to
            treat mappings as leaves.
        sequence_type: type(s) recursed into by position; pass something falsy
            (e.g. None) to treat sequences as leaves.

    Returns:
        A structure with the same nesting as the inputs, leaves replaced by
        func's results.

    >>> structure = {'a': [1, 2, (3, 4)], 'b': [5, (6,)]}
    >>> nest_map_structure(lambda e: e + 10, structure)
    {'a': [11, 12, (13, 14)], 'b': [15, (16,)]}
    >>> nest_map_structure(lambda e: e + 10, {'a': 11, 'b': 12})
    {'a': 21, 'b': 22}
    >>> nest_map_structure(lambda e: e + 10, {'a': 11, 'b': [13, 14]})
    {'a': 21, 'b': [23, 24]}
    >>> nest_map_structure(lambda e: e * 2, structure, sequence_type=None)
    {'a': [1, 2, (3, 4), 1, 2, (3, 4)], 'b': [5, (6,), 5, (6,)]}
    >>> nest_map_structure(lambda a, b: a + b, structure, structure)
    {'a': [2, 4, (6, 8)], 'b': [10, (12,)]}
    >>> nest_map_structure(lambda a, b: a + b, structure, {'a': 2, 'b': 4})
    Traceback (most recent call last):
    ...
    AssertionError: ([<class 'list'>, <class 'int'>], ([1, 2, (3, 4)], 2))
    """
    types = {type(s) for s in structures}
    if mapping_type and isinstance(structures[0], mapping_type):
        # All structures must share one concrete type to recurse in lockstep.
        assert len(types) == 1, ([type(s) for s in structures], structures)
        return structures[0].__class__({
            k: nest_map_structure(
                func,
                *[
                    s[k]
                    for s in structures
                ],
                mapping_type=mapping_type,
                sequence_type=sequence_type,
            )
            for k in structures[0].keys()
        })
    elif sequence_type and isinstance(structures[0], sequence_type):
        assert len(types) == 1, ([type(s) for s in structures], structures)
        return structures[0].__class__([
            nest_map_structure(func, *args, mapping_type=mapping_type, sequence_type=sequence_type)
            for args in zip(*structures)
        ])
    else:
        # Leaf case: apply func to the aligned leaves.
        return func(*structures)
def event_values(event, variables):
    """Return a tuple of the values of variables in event.

    >>> event_values({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
    (8, 10)
    >>> event_values((1, 2), ['C', 'A'])
    (1, 2)
    """
    already_tuple = isinstance(event, tuple) and len(event) == len(variables)
    if already_tuple:
        return event
    return tuple(event[name] for name in variables)
def svm_model(r3d_kpc, n0, r_c, beta, r_s=1.0, gamma=3.0, epsilon=0.0, alpha=0.0):
    """
    Compute a Simplified Vikhlinin Model (SVM) profile.

    Parameters
    ----------
    - r3d_kpc: array of radius in kpc
    - n0 : normalization
    - r_c : core radius parameter (kpc)
    - beta : slope of the profile
    - r_s : characteristic radius parameter (kpc)
    - gamma : slope of the profile
    - epsilon : slope of the profile
    - alpha : inner-slope parameter

    Outputs
    --------
    - SVM model profile evaluated at the input radius vector
    """
    x_c = r3d_kpc / r_c
    x_s = r3d_kpc / r_s
    core = n0 * (1 + x_c**2) ** (-3.0 * beta / 2.0)
    inner_slope = x_c ** (-alpha / 2.0)
    outer_steepening = (1 + x_s**gamma) ** (-epsilon / 2.0 / gamma)
    return core * inner_slope * outer_steepening
def extract_substring(string, left, right, right_to_left=False):
    """Return the substring of *string* between two bound markers.

    Parameters
    ----------
    string : str
        The string to be parsed.
    left : str
        Marker for the left bound of the target substring.
    right : str
        Marker for the right bound of the target substring.
    right_to_left : bool, optional
        Search from the right end of the string instead of the left.

    Returns
    -------
    str
        The substring between the specified bounds.

    Examples
    --------
    >>> extract_substring('dup key : { "123" }', ':', '}')
    ' { "123" '
    >>> extract_substring('$_id_1', '$', '_', True)
    '_id'
    """
    finder = string.rfind if right_to_left else string.find
    start = finder(left) + len(left)
    stop = finder(right)
    return string[start:stop]
def fill_answer(recommend_farms, result):
    """
    Append one {'farm_id', 'description'} entry per recommended farm.

    Mutates *result* in place and returns it.
    """
    result.extend(
        {'farm_id': farm_id, 'description': 'recommended farm'}
        for farm_id in recommend_farms
    )
    return result
def get_mapping_pfts(mapping):
    """Collect the sorted, de-duplicated PFT names from all mapping values."""
    names = set()
    for entry in mapping.values():
        names |= set(entry["pfts"])
    return sorted(names)
def get_block_id(color, id_tag="00"):
    """Build a block identifier from a color name.

    Args:
        color (string): string representation of the color
        id_tag (string, optional): prefix for the object. Defaults to "00".
    Returns:
        string: the identifier "<id_tag>_block_<color>"
    """
    return "{}_block_{}".format(id_tag, color)
def calculate_compensated_size(frame_type, size, dts=None):
    """
    Compensate the sizes reported by ffmpeg, which include AUDs, SPS and PPS.

    frame_type: "I" or anything else ("Non-I", "P", "B", ...)
    size: size in bytes from the VFI file
    dts: DTS from VFI, or None to assume a regular frame within the file

    From internal tests, the reported size exceeds the real payload by
    roughly 11 bytes for non-I frames, ~50-55 bytes for I frames, and
    ~786-791 bytes for the first frame of a PVS.

    Returns the compensated frame size, floored at 0.
    NOTE(review): the original docstring said "in KB", but the arithmetic
    subtracts byte-sized constants from the byte-sized input — confirm units.
    """
    raw = int(size)
    if dts is not None and int(dts) == 0:
        # first frame of an entire PVS has SPS/PPS mixed into it
        adjusted = raw - 800
    elif frame_type == "I":
        adjusted = raw - 55
    else:
        adjusted = raw - 11
    return max(adjusted, 0)
def fix_vidshape(res1, res2):
    """Compare two resolutions and compute the centring offsets.

    :param res1: (width, height) of the source.
    :param res2: (width, height) of the target.
    :return: (xmin, ymin) pixel offsets needed to centre the
        aspect-preserving scaled source inside the target.
    """
    xmin, ymin = 0, 0
    xmult = res2[0] / res1[0]
    ymult = res2[1] / res1[1]
    if xmult > ymult:
        # width has spare room: centre horizontally
        xmin = int((res2[0] - (res1[0] * ymult)) / 2)
    if ymult > xmult:
        # BUG FIX: the original used the x-axis terms here
        # (res2[0] - res1[0] * xmult), which equals 0 by definition of
        # xmult, so ymin was always 0. Use the y axis.
        ymin = int((res2[1] - (res1[1] * xmult)) / 2)
    return xmin, ymin
def _inverse_move(move):
"""Invert a move"""
if '\'' in move:
return move.strip('\'')
return move+'\'' |
def extract_cookies_from_headers(headers):
    """Extract all cookies from the response headers.

    Args:
        headers (Dict[str, List[Dict[str, str]]]): the request/response
            headers; each "cookie" entry carries a "value" string of
            ';'-separated "name=value" pairs.

    Returns:
        Dict[str, str]: cookie name -> cookie value (may be empty strings).
    """
    cookies = {}
    for entry in headers.get("cookie", []):
        for fragment in entry.get("value", "").split(";"):
            # partition keeps any '=' inside the value intact
            name, _, value = fragment.partition("=")
            cookies[name.strip()] = value.strip()
    return cookies
def create_files_dict(file_name, metadata_content, bitstream_content):
    """Build the list of files (metadata JSON + PDF bitstream) to upload to S3."""
    contents = ((".json", metadata_content), (".pdf", bitstream_content))
    return [
        {"file_name": "{}{}".format(file_name, ext), "file_content": content}
        for ext, content in contents
    ]
def possible_sums(interval):
    """Set of all sums a+b over pairs from *interval* where a != b."""
    sums = set()
    for a in interval:
        for b in interval:
            if a != b:
                sums.add(a + b)
    return sums
def find_my_y(your_x, data_x, data_y, logged_data=False):
    """Takes an input x, linearly interpolates the data and produces the corresponding y(s).
    Parameters
    ----------
    your_x : float
        A single number, of which you want the corresponding y value(s) through linear interpolation of the data
        given (data_x, data_y).
    data_x : 1-d list/array
        This is the original set of x values.
    data_y : 1-d list/array
        This is the original set of y values.
    logged_data : Bool
        If the data is logged (base 10) coming in and you want linear values back out set this to True.
        Default: False
    Returns
    -------
    A list of the y value(s) corresponding to the input your_x, or None
    (after printing a message) when your_x lies outside data_x — there is
    no extrapolation.
    """
    your_x_between = []
    #search for every x interval that contains your point
    for dx in range(len(data_x)-1):
        if dx == 0: #so the first one isnt completely discounted
            # first interval: both endpoints inclusive
            if data_x[dx] <= your_x <= data_x[dx+1]:
                #append the coordinates of the range
                your_x_between.append([[data_x[dx], data_y[dx]], [data_x[dx+1], data_y[dx+1]]])
        else:
            # both orderings are checked so decreasing data_x also works
            if (data_x[dx] < your_x <= data_x[dx+1]) or (data_x[dx] > your_x >= data_x[dx+1]):
                your_x_between.append([[data_x[dx], data_y[dx]], [data_x[dx+1], data_y[dx+1]]])
    #no extrapolation, if your_x is not within the set of x's given (data_x) then this won't work
    if your_x_between == []:
        print('Your x is out of range of this data_x.')
        return
    #make a straight line between the endpoints of each interval and plug your x value in
    found_y = []
    for coords in your_x_between:
        coord1 = coords[0]
        coord2 = coords[1]
        grad = (coord1[1] - coord2[1]) / (coord1[0] - coord2[0])
        _found_y = grad * (your_x - coord1[0]) + coord1[1]
        found_y.append(_found_y)
    #return all the y's found; no guarantee that there is a one-to-one mapping
    if logged_data == True:
        # undo the base-10 log so linear values go back out
        return [10**y for y in found_y]
    else:
        return found_y
def _append_dicts(x, y):
"""python2 compatible way to append 2 dictionaries
"""
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z |
def _format_exception(e):
"""Returns a human readable form of an exception.
Adds the maximum number of interesting information in the safest way."""
try:
out = repr(e)
except Exception:
out = ''
try:
out = str(e)
except Exception:
pass
return out |
def build_lfn_unique_entry_name_order(entities: list, lfn_entry_name: str) -> int:
    """
    Compute the numeric-tail character for a short (8.3) directory entry.

    The short entry keeps only the first 6 characters of the long name, so
    names sharing that prefix must be disambiguated with '~1', '~2', ...
    E.g. 'thisisverylongfilenama.txt' becomes 'THISIS~1TXT'; adding
    'thisisverylongfilenamax.txt' yields 'THISIS~2TXT'.

    Returns the code point of the order character: ord('1') plus the number
    of existing entities whose name shares the 6-character prefix.
    """
    prefix = lfn_entry_name[:6]
    clashes = sum(1 for entity in entities if entity.name[:6] == prefix)
    return clashes + ord('1')
def parse_bigg_response(res, ms, bi, log):
    """
    Parse the BiGG API response (plain JSON) for KEGG Reaction IDs or EC numbers.

    :param res: API JSON response
    :type res: dict
    :param ms: ModelSEED reaction ID
    :type ms: str
    :param bi: BiGG reaction ID
    :type bi: str
    :param log: Log output file handle
    :type log: File
    :return: KO IDs and EC numbers under keys 'KO' and 'EC'
    :rtype: dict
    """
    data = {'KO': set(), 'EC': set()}  # Data to return
    # Check if any database info exists
    db_info = res['database_links']
    if len(db_info) == 0:
        log.write('No database info for BiGG ' + bi + '\n')
    # Check for KEGG
    elif 'KEGG Reaction' in db_info:
        # May have multiple KO identifiers
        for item in db_info['KEGG Reaction']:
            if 'id' not in item:
                log.write('KEGG reaction found but no ID for BiGG '
                          + bi + ' and ModelSEED ' + ms + ' \n')
                # BUG FIX: the original fell through and raised KeyError
                # on item['id']; skip ID-less entries after logging.
                continue
            data['KO'].add(item['id'])
    # Check for EC number if KEGG does not exist
    elif 'EC Number' in db_info:
        # May have multiple EC numbers
        for item in db_info['EC Number']:
            if 'id' not in item:
                log.write('EC number found but no ID for BiGG '
                          + bi + ' and ModelSEED ' + ms + ' \n')
                continue
            data['EC'].add(item['id'])
    # No KEGG or EC
    else:
        log.write('No KEGG Reaction or EC Number for BiGG '
                  + bi + ' and ModelSEED ' + ms + ' \n')
    return data
def _get_param_in_exp(param, bounds):
"""
Convert a log scaled parameter back to linear scale
:param param: The log scaled parameter
:param bounds: The parameter bounds in linear scale
:return: The log scaled parameter scaled back
"""
# assert len(bounds) == 2
# assert bounds[0] >= 0.0
# assert bounds[1] >= 0.0
# log_bounds = [math.log(bounds[0]), math.log(bounds[1])]
# param_in_log = _lerp(param, log_bounds)
# return math.exp(param_in_log)
return param |
def split_float(x):
    """
    If str(x) is in e-notation, return (significand, exponent) as
    (float, int). Otherwise — including for ints — return (x, 0).
    """
    text = str(x)
    if "e" in text:
        mantissa, _, power = text.partition("e")
        return float(mantissa), int(power)
    return x, 0
def parse_gcov_line(l: str) -> tuple:
    """Parse one line of a gcov report.

    Parameter
    ----------
    l : str
        A gcov line of the form "hits: lineno: content".

    Returns
    -------
    tuple (str, int, str)
        (hit-count text, line number, content with trailing whitespace
        removed; colons inside the content are preserved).
    """
    hits_field, lineno_field, *content_fields = l.split(':')
    return (
        hits_field.strip(),
        int(lineno_field.strip()),
        ':'.join(content_fields).rstrip(),
    )
def multisplit(s, seps):
    """Split the string *s* on every separator in *seps*, applied in order."""
    pieces = [s]
    for sep in seps:
        next_pieces = []
        for piece in pieces:
            next_pieces.extend(piece.split(sep))
        pieces = next_pieces
    return pieces
def check_yr(yr, low, high):
    """Verify *yr* is a whole year within [low, high].

    Decimal years (e.g. 1982.5) are not allowed and return False.
    """
    year = float(yr)
    is_whole = year == int(year)
    in_range = low <= year <= high
    return is_whole and in_range
def get_m2m(obj, model_field_name):
    """
    Return an iterator of related-object PKs (as ints) for the named
    relation manager on *obj*.

    Returns None when no such relation (with a get_query_set attribute)
    exists.
    """
    manager = getattr(obj, model_field_name, None)
    if not (manager and hasattr(manager, "get_query_set")):
        return None
    return map(int, manager.values_list("pk", flat=True))
def rastrigin_bounds(d):
    """
    Search-space bounds for the d-dimensional Rastrigin function.

    Parameters
    ----------
    d : int
        dimension

    Returns
    -------
    list of 2-tuples
        d copies of (-5.12, 5.12)
    """
    return [(-5.12, 5.12)] * d
def _format_cmd_shorty(cmd):
"""Get short string representation from a cmd argument list"""
cmd_shorty = (' '.join(cmd) if isinstance(cmd, list) else cmd)
cmd_shorty = '{}{}'.format(
cmd_shorty[:40],
'...' if len(cmd_shorty) > 40 else '')
return cmd_shorty |
def lists_view(lists):
    """
    Render the user's lists as a numbered message.

    :param lists: sequence of list names
    :return: formatted string, or a placeholder when *lists* is empty/falsy
    """
    if not lists:
        return "No lists yet :("
    numbered = ("*{}.* {}".format(pos, name)
                for pos, name in enumerate(lists, start=1))
    return "*Your lists:*\n\n{}".format("\n\n".join(numbered))
def is_numeric(val):
    """Check if a value can be interpreted as a number.

    Arguments:
        val {str} -- potentially stringified number (any type accepted)

    Returns:
        bool -- whether float(val) succeeds
    """
    try:
        float(val)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string/non-number inputs such as None or
        # lists, which the original let propagate as a crash.
        return False
def kf_derivative_wrt_density(kf, n):
    """Derivative of the Fermi momentum with respect to density: kf / (3 n).

    Parameters
    ----------
    kf : array-like
        The fermi momentum in fm^-1
    n : array-like
        The density in fm^-3

    Returns
    -------
    d(kf)/d(n) : array-like
        In units of fm^2
    """
    three_n = 3 * n
    return kf / three_n
def link(content, target):
    """Render ``[content](target)`` markup as an HTML anchor.

    :param content: HTML that goes inside the tag.
    :param target: a full URL, or a local ``filename.html#subtitle`` URL
    """
    return '<a href="{}">{}</a>'.format(target, content)
def getRankedDict(dict):
    """Assign competition-style ("1224") ranks to (key, score) pairs.

    NOTE(review): iteration is `for key, num in dict`, so the argument must
    be an iterable of (key, score) 2-tuples already ordered by score (e.g.
    sorted dict items) — a plain dict would only yield its keys. Also note
    the parameter shadows the builtin `dict`; renaming it would change the
    keyword interface, so it is left as-is.

    Equal consecutive scores share a rank; the next distinct score receives
    a rank that skips the tied positions.
    """
    rank, count, previous, result = 0, 0, None, {}
    for key, num in dict:
        count += 1
        if num != previous:
            # new score value: advance the rank past all tied entries
            rank += count
            previous = num
            count = 0
        result[key] = rank
    return result
def find_closure_group(input_string, start, group):
    """Find the first balanced closure group in a string.

    If group = ["(", ")"], finds the first balanced parentheses; if
    group = ["{", "}"], the first balanced brackets.

    Given an input string, a starting position, and the group type,
    returns the positions of group[0] and its matching group[1] as a
    tuple, or (None, None) when no balanced group exists.

    Example:
        find_closure_group("(hi)", 0, ["(", ")"])
    Returns:
        0, 3
    """
    opener, closer = group
    depth = 0
    open_pos = -1
    for idx in range(start, len(input_string)):
        ch = input_string[idx]
        if ch == opener:
            if depth == 0:
                open_pos = idx
            depth += 1
        elif ch == closer and depth > 0:
            depth -= 1
            if depth == 0:
                return open_pos, idx
    return None, None
def rising_factorial(x: int, n: int) -> int:
    """
    Return the rising factorial x(x+1)...(x+(n-1)).

    :param x: The number to take the rising factorial of
    :param n: The height to which to take the rising factorial
    :return: The rising factorial of the given number to the given height
    """
    out = 1
    term = x
    for _ in range(n):
        out *= term
        term += 1
    return out
def _is_unicode_u_value(name):
"""Return whether we are looking at a uXXXX value."""
return name.startswith("u") and all(
part_char in "0123456789ABCDEF" for part_char in name[1:]
) |
def _get_total_dataset_task(name_task):
"""
:param name_task: name task, this can be : dialect, ro, md
:return: total size of dataset fo train, dev, train
"""
if name_task == 'dialect':
return 21719, 5921, 5924
if name_task == 'ro':
return 11751, 3205, 3205
if name_task == 'md':
return 9968, 2716, 2719
return None, None, None |
def save_file(mode):
    """Ask the user whether the processed text should be saved.

    :param mode: "E" (encrypted text) or "D" (decrypted text); any other
        value prints an error and returns None.
    :return: "S" when the user chooses to save, otherwise None.
    """
    # The two mode branches were byte-for-byte duplicates except for one
    # word in the prompt; fold them into a single loop.
    kinds = {"E": "encrypted", "D": "decrypted"}
    if mode not in kinds:
        print("Wrong input! Please try again.\n")
        return None
    prompt = "Enter S to save {} text to file or N not to save:".format(kinds[mode])
    while True:
        choice = input(prompt)
        if choice == "S":
            return "S"
        if choice == "N":
            return None
        print("Wrong input! Please try again.\n")
def single(a, fn):
    """
    Return the single item of *a* that satisfies *fn*.

    Example: single([3, 4, 5, 6], lambda x: x > 5)

    :param a: iterable of candidate items
    :param fn: predicate applied to each item
    :return: the unique matching item
    :raises ValueError: if no item, or more than one item, matches
    """
    matches = [x for x in a if fn(x)]
    # A list comprehension can never be None, so the original
    # "result is None" test was dead code; truthiness covers emptiness.
    if not matches:
        raise ValueError("sequence contains no element")
    if len(matches) > 1:
        raise ValueError("sequence contains more than one element")
    return matches[0]
def convert_to_index_name(s):
    """Convert a string to a valid Elasticsearch index name.

    For the index-name restrictions, see
    https://github.com/DataBiosphere/data-explorer-indexers/issues/5#issue-308168951
    Elasticsearch allows single quotes in index names, but they cause other
    problems (e.g. "curl -XDELETE http://localhost:9200/nurse's_health_study"
    fails), so single quotes are replaced as well.
    """
    # One C-level pass replaces every prohibited character with '_'.
    translation = str.maketrans({c: '_' for c in ' "*\\<|,>/?\''})
    s = s.translate(translation).lower()
    # Remove leading underscores (lstrip is a no-op when there are none).
    s = s.lstrip('_')
    print('Index name: %s' % s)
    return s
def _append(so_far, item):
""" Appends an item to all items in a list of lists. """
for sub_list in so_far:
sub_list.append(item)
return so_far |
def create_graph(edge_num: int, edge_list: list) -> dict:
    """
    Build an undirected graph as an adjacency list.

    :param edge_num: number of vertices (keys 0 .. edge_num-1)
    :param edge_list: 1-based (a, b) vertex pairs
    :return: dict mapping each 0-based vertex to the set of its neighbours
    """
    adjacency = {vertex: set() for vertex in range(edge_num)}
    for a, b in edge_list:
        adjacency[a - 1].add(b - 1)
        adjacency[b - 1].add(a - 1)  # mirror the edge: undirected graph
    return adjacency
def json_parsing(obj, key, index=0):
    """Recursively pull values of the specified key from nested JSON.

    :param obj: nested dict/list structure (parsed JSON).
    :param key: dict key whose values should be collected.
    :param index: which match to return (default: the first).
    :return: the index-th matching value, or the empty result list when
        nothing matched.
    """
    arr = []
    def extract(obj, arr, key):
        """Append every value stored under `key` (depth-first) to `arr`."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                # Recurse into dicts and into lists that hold containers.
                # NOTE(review): a container-valued entry under `key` is
                # recursed into rather than collected — confirm intended.
                if isinstance(v, (dict)) or (isinstance(v, (list)) and v and isinstance(v[0], (list, dict))):
                    extract(v, arr, key)
                elif k == key:
                    arr.append(v)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr
    results = extract(obj, arr, key)
    return results[index] if results else results
def join(li):
    """:returns: flat list of the objects from a list of iterables.

    >>> join([[1,2], [3], [4,5]])
    [1, 2, 3, 4, 5]
    """
    return [obj for iterable in li for obj in iterable]
def get_ls(num):
    """Return the first *num* entries of a fixed cycle of matplotlib line styles."""
    styles = [
        ':', '--', '-.', '-',
        ':', '--', '-.', '-',
        ':', ':', '--', '-.', '-',
        ':', '--', '-.', '-', ':',
    ]
    return styles[:num]
def read_nrevbytes(buffer: bytearray, length: int) -> bytes:
    """Consume the first *length* bytes of *buffer* and return them reversed."""
    chunk = bytes(buffer[:length][::-1])
    del buffer[:length]  # consume what was read
    return chunk
def as_name(upi, taxid):
    """
    Build the display name of an RNA sequence from its UPI and taxid.
    """
    return "Unique RNA Sequence {}_{}".format(upi, taxid)
def array(num_elements, element_func, *element_func_args):
    """
    Return a list of *num_elements* items, each produced by a fresh call to
    element_func(*element_func_args).
    """
    result = []
    for _ in range(num_elements):
        result.append(element_func(*element_func_args))
    return result
def is_valid_para(para_type, type_table):
    """Check whether *para_type* is a known destination type in the type table.

    :param para_type: candidate parameter type
    :param type_table: mapping whose values are the known destination types
    :return: True when found, False otherwise
    """
    # BUG FIX: the fallthrough returned True, so the function succeeded
    # unconditionally; unknown types must return False.
    return para_type in type_table.values()
def sol(arr, n):
    """
    Return the length of the longest run of strictly consecutive (+1-step)
    values in the sorted version of *arr* (n = len(arr)).
    """
    ordered = sorted(arr)
    longest = 1
    current = 1
    for idx in range(n - 1):
        if ordered[idx + 1] == ordered[idx] + 1:
            current += 1
        else:
            current = 1
        longest = max(longest, current)
    return longest
def word_search(document, keyword):
    """
    Take a list of documents (each document a string) and a keyword.
    Return the index values into the original list for all documents
    containing the keyword as a whole word (case-insensitive, with
    trailing '.' and ',' stripped from tokens).

    Example:
        doc_list = ["The Learn Python Challenge Casino.", "They bought a car", "Casinoville"]
        >>> word_search(doc_list, 'casino')
        [0]
    """
    # Debug print() calls removed: they polluted stdout on every call.
    target = keyword.lower()
    indices = []
    for index, doc in enumerate(document):
        normalized = [token.rstrip('.,').lower() for token in doc.split()]
        if target in normalized:
            indices.append(index)
    return indices
def gradX(x, y):
    """
    Evaluates the X-gradient of the Beale function at (x, y)
    @ In, x, float, value
    @ In, y, float, value
    @ Out, gradX, float, X-gradient of Beale
    """
    consts = (1.5, 2.25, 2.625)
    total = 0
    for power, const in enumerate(consts, start=1):
        residual = y**power - 1
        total += 2 * residual * (x * residual + const)
    return total
def py1(k, kr, rho, cp, r):
    """
    Calculate the pyrolysis number Py = k / (kr * rho * cp * r^2).

    Parameters
    ----------
    k = thermal conductivity, W/mK
    kr = rate constant, 1/s
    rho = density, kg/m^3
    cp = heat capacity, J/kgK
    r = radius, m

    Returns
    -------
    py = pyrolysis number, -
    """
    denominator = kr * rho * cp * r**2
    return k / denominator
def fmla_for_filt(filt):
    """
    Transform a dict of column filters like {'varX': ['lvl1', 'lvl2'], ...}
    into an R selector expression like 'varX %in% c("lvl1","lvl2") & ...'.
    Scalar values are wrapped as a single-element c(...).
    """
    clauses = []
    for var, levels in filt.items():
        if type(levels) == list:
            quoted = ','.join('"%s"' % lvl for lvl in levels)
        else:
            quoted = '"%s"' % levels
        clauses.append('{var} %in% c({lvls})'.format(var=var, lvls=quoted))
    return ' & '.join(clauses)
def calculate_evaluation_metrics(confusion_matrix):
    """
    Calculate precision, recall and F1 from a confusion-matrix dict.

    :param confusion_matrix: dict with (a subset of) the keys 'TP', 'FP',
        'FN'; missing keys default to 1, as in the original implementation.
    :return: dictionary with 'precision', 'recall' and 'f1_score'.
    """
    tp = confusion_matrix.get('TP', 1)
    fp = confusion_matrix.get('FP', 1)
    fn = confusion_matrix.get('FN', 1)
    metrics = dict()
    # Guard every denominator: with TP = 0 present in the matrix the
    # original raised ZeroDivisionError on the f1 computation.
    metrics['precision'] = tp / (tp + fp) if (tp + fp) else 0.0
    metrics['recall'] = tp / (tp + fn) if (tp + fn) else 0.0
    p_plus_r = metrics['precision'] + metrics['recall']
    metrics['f1_score'] = (
        2 * metrics['precision'] * metrics['recall'] / p_plus_r if p_plus_r else 0.0
    )
    return metrics
def get_lats_longs(road_map):
    """
    Return (latitude, longitude, next_latitude, next_longitude) tuples
    pairing each city with the next city in the route, wrapping around at
    the end. Latitude is city[2], longitude is city[3].
    """
    lats = [city[2] for city in road_map]
    longs = [city[3] for city in road_map]
    count = len(road_map)
    next_lats = [lats[(i + 1) % count] for i in range(count)]
    next_longs = [longs[(i + 1) % count] for i in range(count)]
    return list(zip(lats, longs, next_lats, next_longs))
def updatelibindex(library):
    """
    Update the index of the sequence in the library to be continuous
    After narrowing down the dictionary through the various steps of the algorithm, certain terms are deleted. Thus, the
    dictionary is no longer continuous, and could be ordered "ASD1" and the next term "ASD9". This function changes the
    "ASD9" into an "ASD2"

    NOTE(review): assumes the library holds matched 'ASD<i>'/'SD<i>' key
    pairs (hence len(library) / 2 pairs) — confirm with callers. The dict
    is mutated in place and also returned.

    :param library: The dictionary whose keys need to be updated.
    :return: The dictionary with updated keys.
    """
    liblen = len(library) / 2
    n = 1
    while (n < liblen + 1):
        # find the next surviving index at or after n
        testidx = n
        while 'ASD' + str(testidx) not in library:
            testidx = testidx + 1
        # re-key both members of the pair to the compacted index n
        library['ASD{0}'.format(n)] = library.pop('ASD{0}'.format(testidx))
        library['SD{0}'.format(n)] = library.pop('SD{0}'.format(testidx))
        n = n + 1
    return library
def make_normal_action(atype, label, i18n_labels=None):
    """
    Create a camera, camera-roll, or location action payload.

    reference
    - `Common Message Property <https://developers.worksmobile.com/jp/document/1005050?lang=en>`_

    :param atype: action's type
    :param label: display label for the action
    :param i18n_labels: optional localized labels
    :return: dict describing the action
    """
    action = {"type": atype, "label": label}
    if i18n_labels is not None:
        action["i18nLabels"] = i18n_labels
    return action
def v8_tail(iterable, n):
    """Return the last n items of the given iterable as a list.

    A single slice object, chosen once before the loop, keeps only the
    window of items needed so far — slice(-(n-1), None) keeps the last
    n-1 collected items and the current item completes the window.
    Python builds exactly this kind of slice object whenever the
    sequence[-n:] notation is used.
    """
    if n <= 0:
        return []
    keep = slice(0, 0) if n == 1 else slice(-(n - 1), None)
    collected = []
    for element in iterable:
        collected = [*collected[keep], element]
    return collected
def parameter_id_to_index(parameter_id):
    """Convert a SMIRNOFF parameter id (e.g. 'n3', 'n15') to a 0-based index."""
    # nonbonded parameters only (ids start with 'n')
    assert parameter_id[0] == 'n'
    numeric_part = int(parameter_id[1:])
    return numeric_part - 1
def repr_fcall(fname, args, kwargs):
    """Nice string representation for a function call.

    :param fname: function name
    :param args: positional arguments (tuple/list)
    :param kwargs: keyword arguments (dict)
    :return: e.g. "f(1, 2, x=3)"
    """
    parts = [repr(a) for a in args]
    parts.extend('%s=%r' % item for item in kwargs.items())
    # BUG FIX: the original concatenated the two joined strings directly,
    # losing the ', ' between the last positional and the first keyword
    # argument ("f(1, 2x=3)"). Join everything once instead.
    return '%s(%s)' % (fname, ', '.join(parts))
def get_start_of_field_of_study(field_of_study):
    """
    Return the start year offset of a field of study.

    Codes 0/100 (others), 1 (bachelor), 40 (social) and 90 (international)
    start at year 0; 10-30 (master) at year 3; 80 (phd) at year 5.
    Unknown codes return -1.
    """
    if field_of_study in (0, 100, 1, 40, 90):
        return 0
    if 10 <= field_of_study <= 30:
        return 3
    if field_of_study == 80:
        return 5
    return -1
def _get_hou_version_filter(latest):
"""
Get the right peace of sql query to filter by houdini versions, latest and
previous
"""
if latest:
return """hmc.houdini_major_version = '{{ latest_hou }}' """
else:
return """hmc.houdini_major_version <= '{{ previous_hou }}' """ |
def cdf(r, N0=1.0):
    """
    Cumulative distribution function of the number of point sources,
    following the r^{1/4} law (de Vaucouleurs 1948).
    """
    quarter_power = r ** 0.25
    return N0 * quarter_power
def years_descriptors(actual_year, open_year, close_year):
    """Return (years since opening, years until closing) for *actual_year*."""
    since_open = actual_year - open_year
    until_close = close_year - actual_year
    return since_open, until_close
def check_more(response):
    """Return True when the paginated response advertises a non-null next page."""
    return response.get("next") is not None
def add_malicious_key(entity, verdict):
    """Add a 'Malicious' key to *entity* when ANYRUN's verdict says so.

    Parameters
    ----------
    entity : dict
        File or URL object.
    verdict : dict
        Task analysis verdict for a detonated file or url.

    Returns
    -------
    dict
        The entity, with a 'Malicious' entry added when the verdict text
        is 'malicious activity' (case-insensitive); otherwise unchanged.
    """
    threat = verdict.get('threatLevelText', '')
    if threat.casefold() == 'malicious activity':
        entity['Malicious'] = {'Vendor': 'ANYRUN', 'Description': threat}
    return entity
def schema_input_type(schema):
    """Classify a schema's input type.

    :param schema: parsed schema value
    :return: 'list' when the schema is a list, otherwise 'simple'
    """
    return 'list' if isinstance(schema, list) else 'simple'
def calcBounds(array):
    """Calculate the bounding rectangle of a 2D points array.

    Args:
        array: A sequence of 2D tuples.

    Returns:
        A four-item tuple ``(xMin, yMin, xMax, yMax)``; all zeros for an
        empty sequence.
    """
    if not array:
        return 0, 0, 0, 0
    xs, ys = zip(*array)
    return min(xs), min(ys), max(xs), max(ys)
def _sorteditems(d, orderby):
""" return items from a dict of dict, sorted by the orderby item of the dict """
s = sorted([(i[orderby], k) for k, i in d.items()])
return [(k, d[k]) for i, k in s] |
def base10_to_base16_alph_num(base10_no):
    """Convert a base-10 integer to a base-16 hexadecimal string.

    Utility for writing pdb/psf files with more than 9999 atoms and
    999 residues.

    Parameters
    ----------
    base10_no: int
        The integer to convert to the base-16 hexadecimal system

    Returns
    -------
    str
        The converted base-16 string (hex() output with its '0x' prefix
        sliced off)

    See Also
    --------
    mbuild.conversion._to_base: Helper function to perform a base-n conversion
    """
    value = int(base10_no)
    return hex(value)[2:]
def ignore_answer(answer):
    """
    Should this answer be disregarded?

    True for the literal "<Invalid>" marker or any "<Redundant with..." tag.
    """
    if answer == "<Invalid>":
        return True
    return answer.startswith("<Redundant with")
def show_graphs(file_name):
    """Hide the plots when no file is selected (name is None or empty)."""
    return file_name in (None, '')
def asCurrency(amount, decimals: bool = True) -> str:
    """Format *amount* as a dollar string, e.g. 1234.5 -> '$1,234.50'.

    Negative amounts carry the minus sign outside the '$'.
    Ref: https://stackoverflow.com/questions/21208376/converting-float-to-dollars-and-cents

    :param amount: numeric value to format
    :param decimals: include two decimal places when True
    """
    spec = ",.2f" if decimals else ",.0f"
    if amount >= 0:
        return f"${amount:{spec}}"
    # Fixed: the original chained a no-op str.format(-amount) call onto
    # the already-formatted f-string here.
    return f"-${-amount:{spec}}"
def gripper_joint2gripper_l_finger_joint_values(gripper_joint_vals):
    """
    Scale gripper-joint values to %s_gripper_l_finger_joint%lr values.

    Only the %s_gripper_l_finger_joint%lr can be controlled (this is the joint
    returned by robot.GetManipulator({"l":"leftarm", "r":"rightarm"}[lr]).GetGripperIndices());
    the remaining gripper joints (like %s_gripper_joint%lr) are mimicked,
    with a fixed ratio of 5.0, and cannot be commanded directly.
    """
    scale = 5.0
    return scale * gripper_joint_vals
def make_label(label_text):
    """
    Return a label object conforming to the API specs for the given name.
    """
    label = {'messageListVisibility': 'show'}
    label['name'] = label_text
    label['labelListVisibility'] = 'labelShow'
    return label
def sum_of_digits(number):
    """
    What comes in: An integer.
    What goes out: The sum of the digits in the given integer.
    Side effects: None.
    Example:
        If the integer is 83135,
        this function returns (8 + 3 + 1 + 3 + 5), which is 20.
    """
    # ------------------------------------------------------------------
    # Students:
    #   Do NOT touch this function - it has no TO DO in it.
    #   Do NOT copy code from this function.
    #
    #   Instead, ** CALL ** this function as needed in the other problems.
    #
    #   Ask for help if you are unsure what it means to CALL a function.
    #   The ONLY part of this function that you need to understand is
    #   the doc-string above. Treat this function as a black box.
    # ------------------------------------------------------------------
    # Work on the absolute value so negative inputs sum their digits too.
    if number < 0:
        number = -number
    digit_sum = 0
    while True:
        if number == 0:
            break
        # Peel off the last decimal digit each iteration.
        digit_sum = digit_sum + (number % 10)
        number = number // 10
    return digit_sum
def which(program):
    """Locate *program* on the user's PATH.

    Contributed by Jay at http://stackoverflow.com/a/377028

    :args program: a bare executable name, or a path to one
    :return: the full path when found and executable, otherwise None
    """
    import os

    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was given: accept it only if it is executable.
        return program if _executable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _executable(candidate):
            return candidate
    return None
def constrain(value, minv, maxv):
    """Clamp *value* into the closed interval [minv, maxv]."""
    floored = max(value, minv)   # raise to the lower bound first
    return min(floored, maxv)    # then cap at the upper bound
def generateFWGList (minI = 7, maxI = 13, minJ = 3, maxJ = 23):
    """ Generate a list of fwgrid host names that can be used.

    Hosts are named fwg-c<i>-<j> for i in [minI, maxI) and j in [minJ, maxJ).
    """
    hosts = []
    for i in range(minI, maxI):
        for j in range(minJ, maxJ):
            hosts.append(f"fwg-c{i}-{j}")
    return hosts
def convert_where_clause(clause: dict) -> str:
    """
    Convert a dictionary of clauses to a string for use in a query.

    Every value is double-quoted and each entry is followed by a comma
    (including the last, which the query language tolerates).

    Parameters
    ----------
    clause : dict
        Dictionary of clauses

    Returns
    -------
    str
        A string representation of the clauses
    """
    fragments = [f'{key}: "{value}",' for key, value in clause.items()]
    return "{" + "".join(fragments) + "}"
def wears_jacket_with_if(temp, raining):
    """
    >>> wears_jacket_with_if(90, False)
    False
    >>> wears_jacket_with_if(40, False)
    True
    >>> wears_jacket_with_if(100, True)
    True
    """
    # Inverted guard: no jacket only when it is warm AND not raining.
    if temp >= 60 and raining != True:
        return False
    return True
def compute_counts(corpus):
    """Compute the word counts and probs for a given corpus.

    corpus: list of sentences
    returns: dict mapping each token to {'count': int, 'prob': float}
    """
    words = {}
    total_tokens = 0
    # First pass: tally every whitespace-separated token.
    for sentence in corpus:
        for token in sentence.split():
            entry = words.setdefault(token, {'count': 0})
            entry['count'] += 1
            total_tokens += 1
    # Second pass: probabilities need the final corpus size.
    for info in words.values():
        info['prob'] = info['count'] / total_tokens
    return words
def _unprime_any_primed(model):
"""Trim any primed variables."""
d = dict(model)
suffix = "'"
for k in list(d.keys()):
if k.endswith(suffix):
s = k[:-1]
d[s] = d.pop(k)
return d |
def is_fully_meth(methfreq, eps=1e-5, cutoff_fully_meth=1.0):
    """
    Check if the freq is fully-methylated, can be 1.0, or 0.9
    :param methfreq: methylation frequency, expected in [0, 1]
    :param eps: float tolerance applied at both bounds
    :param cutoff_fully_meth: threshold above which the site counts as fully methylated
    :return: True when methfreq is within eps of (or above) the cutoff
    """
    # Reject values outside [0, 1 + eps] as data errors.
    if not (0 <= methfreq <= 1.0 + eps):
        raise Exception(f'detect error value for freq={methfreq}')
    return methfreq > cutoff_fully_meth - eps
def parse_input(input_data, include_myself):
    """
    Returns an array of unique people and a dictionary of the happiness between them.

    Each line of the input data must be in the format:
    Alice would gain 54 happiness units by sitting next to Bob.
    """
    people_arr = []
    seen = set()
    happiness_dict = {}
    for line in input_data.splitlines():
        tokens = line.split()
        # token 0 = subject, token 10 = neighbour (trailing '.'), token 3 = amount
        person1 = tokens[0]
        person2 = tokens[10][:-1]
        sign = 1 if tokens[2] == "gain" else -1
        happiness_dict[person1 + person2] = sign * int(tokens[3])
        if person1 not in seen:
            seen.add(person1)
            people_arr.append(person1)
    if include_myself is True:
        # "me" is neutral towards everyone, in both directions.
        for person in people_arr:
            happiness_dict['me' + person] = 0
            happiness_dict[person + 'me'] = 0
        people_arr.append("me")
    return people_arr, happiness_dict
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck |
def find_min(l):
    """
    Return the smallest number in a list together with its index.

    Scans the list once via min(enumerate(...)) instead of the original
    min-then-index double pass; ties resolve to the first occurrence,
    matching list.index semantics. (Also drops the stray semicolons and
    the unreachable `pass` after `return`.)

    :param l: non-empty list of numbers
    :return: tuple (minimum value, index of its first occurrence)
    :raises ValueError: if l is empty
    """
    min_index, min_value = min(enumerate(l), key=lambda pair: pair[1])
    return (min_value, min_index)
def first(iterable, default=None):
    """
    Returns the first item in the iterable that is truthy.
    If none, then return 'default'.
    """
    return next((item for item in iterable if item), default)
def seq_get(seq, index, default=None):
    """!
    @brief Safely fetch ``seq[index]``, falling back to ``default``.
    (Fallback also covers non-int or out-of-range indices.)

    @param seq Sequence.
    @param index Candidate index: must be an int in ``[0, len(seq))``.
    @param default Fallback default value.
    @return ``seq[index]`` when the index is valid, otherwise ``default``.
    """
    if isinstance(index, int) and 0 <= index < len(seq):
        return seq[index]
    return default
def should_sync_locations(last_sync, location_db):
    """
    Determine if any locations (already filtered to be relevant
    to this user) require syncing.

    Always syncs when there is no prior sync record (or it has no date);
    otherwise syncs if any location lacks a last_modified stamp or was
    modified at/after the last sync date.
    """
    if not last_sync or not last_sync.date:
        return True
    return any(
        not loc.last_modified or loc.last_modified >= last_sync.date
        for loc in location_db.by_id.values()
    )
def reverse(str):
    """Return the input string with its characters in reverse order."""
    # NOTE(review): parameter shadows the builtin ``str``; name kept for
    # backward compatibility with keyword callers.
    return "".join(reversed(str))
def count(a, val, c):
    """
    count(a,val,c)
    c is the number of occurrences of val in array a.

    Returns a single-element list holding the truth of that constraint.
    """
    occurrences = sum(item == val for item in a)
    return [c == occurrences]
def VLBAAIPSName( project, session):
    """
    Derive AIPS Name. AIPS file name will be project+session with project
    truncated to fit in 12 characters.

    * project = project name
    * session = session code
    """
    ################################################################
    # Fixed the redundant double assignment ("Aname = Aname=..."):
    # a single binding is equivalent and clearer.
    Aname = project.strip()[0:12 - len(session)] + session
    return Aname
    # end VLBAAIPSName
def parse_host_port(h):
    """Parses strings in the form host[:port].

    Returns a (host, port) tuple with port as an int; the port defaults
    to 80 when absent. (Previously the with-port branch returned a
    *list* while the default branch returned a tuple — the return type
    is now a tuple in both cases.)

    :raises ValueError: if the text after the first ':' is not an integer
    """
    host, sep, port = h.partition(":")
    if not sep:
        # No ':' present — assume the default HTTP port.
        return (h, 80)
    return (host, int(port))
def capitalize(s):
    """Upper-case only the first character of *s*.

    Unlike str.capitalize, the remaining characters are left untouched.
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
def isPalindrome(s):
    """Assumes s is a str
    Returns True if letters in s form a palindrome; False
    otherwise. Non-letters and capitalization are ignored."""
    # Keep only lowercase ASCII letters, then compare to the reversal.
    letters = [c for c in s.lower() if c in 'abcdefghijklmnopqrstuvwxyz']
    return letters == letters[::-1]
def turn_to_list(input_string):
    """
    Helper function to question1, which turns a string into a list of
    its lowercased characters.
    """
    return [character for character in input_string.lower()]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.