content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def lineincustcols (inlist, colsizes):
    """
    Returns a string composed of elements in inlist, with each element
    right-aligned in a column of width specified by a sequence colsizes. The
    length of colsizes must be greater than or equal to the number of columns
    in inlist.

    Usage:   lineincustcols (inlist,colsizes)
    Returns: formatted string created from inlist

    Fixes: items longer than their column are now truncated to exactly
    colsizes[i] characters (the old slice [0:colsizes[i]+1] overflowed the
    column by one); string is assembled with join instead of quadratic +=.
    """
    pieces = []
    for i in range(len(inlist)):
        # Coerce non-string elements to their string representation.
        item = inlist[i] if isinstance(inlist[i], str) else str(inlist[i])
        width = colsizes[i]
        if len(item) <= width:
            pieces.append(item.rjust(width))
        else:
            pieces.append(item[:width])
    return ''.join(pieces)
|
76ba1196e726f2549bae7dd43e0325efca07257b
| 60,100
|
import logging
def calc_lcoe_capex(capex, fcf, cf=1, basis="perkw"):
    """
    Calculate the capex component of LCOE.

    :param capex: Capex in the form of CURR/MW
    :param fcf: Plant life cost inflation corrector
    :param cf: capacity factor, default is 1
    :param basis: Default is per MW, open to other values in the future
    :return: LCOE CAPEX component in CURR per KWh, or None for an
        unsupported basis
    """
    if basis != "perkw":
        logging.warning("Currently there is no other basis implemented")
        return None
    # Spread the (inflation-corrected) capex over the produced hours.
    produced_hours = cf * 8760
    return capex * fcf / produced_hours
|
71df16af4ac12982c7b969503e06d197909e4a1d
| 60,102
|
def u_global(name, prefix='u_', suffix=''):
    """Returns the uncertainty corresponding to a column. For example, the
    column ('roi_atoms', 'Fz', '', '') has uncertainties in the column
    ('roi_atoms', 'u_Fz', '', '')
    """
    # Exact-type checks kept on purpose: subclasses (e.g. namedtuples)
    # are deliberately not handled the same way.
    if type(name) is tuple:
        # Find the last non-empty component and decorate it.
        last = len(name) - 1
        while not name[last]:
            last -= 1
        decorated = prefix + name[last] + suffix
        return name[:last] + (decorated,) + name[last + 1:]
    if type(name) is str:
        return prefix + name + suffix
    return None
|
047ef38d2031b98c5f759481c006e92ab47d4523
| 60,103
|
import inspect
def get_from_classes(clazz, class_var):
    """
    Get a list of class variables with the given name in clazz and all its
    superclasses. The values are returned in mro order.

    :param clazz: the class which is being queried
    :param class_var: class variable name
    :return: list of values
    """
    collected = []
    # Walk the MRO from the most basic class down to clazz itself.
    for base in reversed(inspect.getmro(clazz)):
        try:
            value = getattr(base, class_var)
        except AttributeError:
            continue
        if isinstance(value, (list, tuple)):
            collected.extend(value)
        else:
            collected.append(value)
    return collected
|
fffaf2c3fe7e92769666a9e72f6e818e809c43d9
| 60,104
|
def name_is_all(name: str) -> bool:
    """
    Return True if deck name is all, else False.
    Used to re-prompt user in check_deck_name.
    """
    # Comparison already yields a bool; no if/return-True/False ladder needed.
    return name == "all"
|
c1874b7bfb51e36b219d787b705357b6d7e744b1
| 60,106
|
def _get_meas_sds(params, info):
"""Create the array of standard deviations of the measurement errors."""
return params[info["meas_sds"]]
|
b70eda2492af41994c9891032a0b47388491ae17
| 60,108
|
from pathlib import Path
from typing import Dict
from typing import Any
import jinja2
def get_final_script(script_path: Path, variables: Dict[str, Any]) -> str:
    """Return the script with replaced placeholders.

    Args:
        script_path: Path to the script.
        variables: Placeholders to be replaced.

    Returns:
        The final script.

    Raises:
        TypeError if the script cannot be processed.
    """
    try:
        source = script_path.read_text()
    except UnicodeDecodeError:
        # Binary / non-UTF-8 files cannot be templated.
        raise TypeError(f"can't read the script {script_path}") from None
    return jinja2.Template(source).render(**variables)
|
4a7f3b130a5818163fda73a63f4cc20c88c7d0bc
| 60,109
|
def get_concat_input_op_slices(concat_ops, op_reg_manager):
    """Returns OpSlice for concat input ops to concatenate.

    For concat, all input OpSlice should be stacked to align with the concat
    OpSlice.  Also, the last input is the axis which should be omitted.

    Args:
      concat_ops: List of tf.Operation which provide inputs to the concat op.
      op_reg_manager: OpRegularizerManager that tracks the slicing.

    Returns:
      List of list of OpSlice, where the outer list only has 1 element, and the
      inner list is the concatenation of input OpSlice.
    """
    flattened = [
        op_slice
        for concat_op in concat_ops
        for op_slice in op_reg_manager.get_op_slices(concat_op)
    ]
    return [flattened]
|
b9009e9fe632170c89d92b2734bfc6621eeafff6
| 60,110
|
def GetAllAcceptSequences(root):
  """Return all the byte sequences that are accepted by a trie.

  Args:
    root: the root of the trie to generate accepting sequences for.

  Returns:
    sequences: a list of tuples containing information about accepts.
    Each tuple is a pair of AcceptInfo and a list of the bytes that is accepted.

  Raises:
    ValueError: if a node has no children but is not accepting
      (malformed trie).
  """
  accept_sequences = []
  def AddAcceptSequences(node, context):
    if node.accept_info is None and not node.children:
      raise ValueError('Node has no children but is not accepting', context)
    if node.accept_info is not None:
      accept_sequences.append((node.accept_info, context))
    # .items() replaces the Python-2-only dict.iteritems(), which raises
    # AttributeError on Python 3.
    for key, value in sorted(node.children.items()):
      AddAcceptSequences(value, context + [key])
  AddAcceptSequences(root, [])
  return accept_sequences
|
cba90112e99a91653395d63d62066adc2e11775e
| 60,118
|
import re
import warnings
def string_to_dms(coord):
    """Converts well formed coordinate string to its components.

    Example: 41°24'12.2" -> (41, 24, 12.2)

    :param str coord: coordinate string formatted as <DD°MM'SS.S[E|N]>
    :rtype: (int, int, float), or None if the string is malformed
    """
    pattern = r"^\d{1,2}°\d{1,2}\'\d{1,2}(\.\d+)?\"[EN]?$"
    if re.match(pattern, coord) is None:
        warnings.warn("Argument value <{}> does not match pattern <DD°MM'SS.S\">".format(coord))
        return
    # Split on the unit symbols; a trailing hemisphere letter (if any)
    # ends up past index 2 and is ignored.
    degrees, minutes, seconds = re.split(r"[°'\"]+", coord)[:3]
    return int(degrees), int(minutes), float(seconds)
|
0b6d404ef4c14359a64ab49172ddc7c7351ce4e7
| 60,122
|
import warnings
def deprecated(func):
    """A simple decorator to mark functions and methods as deprecated.

    Emits a DeprecationWarning (stacklevel=2, so it points at the caller)
    every time the wrapped callable is invoked.
    """
    import functools

    # functools.wraps copies __name__, __doc__ and __dict__ like the old
    # manual code did, and additionally __module__, __qualname__ and
    # __wrapped__, so introspection tools see through the wrapper.
    @functools.wraps(func)
    def wrapper(*fargs, **kw):
        warnings.warn("%s is deprecated." % func.__name__,
                      category=DeprecationWarning, stacklevel=2)
        return func(*fargs, **kw)
    return wrapper
|
20c987facf6d6a6db597039ce90eebe50f59faf5
| 60,124
|
from typing import Union
def enumerate_oxford(x: Union[list, str]) -> Union[None, str]:
    """
    Return as string with Oxford comma.

    Args:
        x (Union[list, str]): List or string to be formated.

    Returns:
        Union[None, str]: Formated string. None if x is empty.
    """
    if isinstance(x, str):
        # Strings are stripped of spaces and commas and then treated as a
        # sequence of characters.
        items = x.replace(" ", "").replace(",", "")
    else:
        items = [str(element) for element in x]
    if not items:
        return None
    if len(items) >= 3:
        return ", ".join(items[:-1]) + ", and " + items[-1]
    return ", ".join(items)
|
4f3030e35eb15caf71adae3b97124176800b71c7
| 60,125
|
def countNotes(root):
    """
    Count how many nodes are in a binary tree.
    Iterative depth-first traversal with an explicit stack
    (equivalent to the classic divide & conquer recursion).
    """
    if root is None:
        return 0
    count = 0
    pending = [root]
    while pending:
        node = pending.pop()
        count += 1
        if node.left is not None:
            pending.append(node.left)
        if node.right is not None:
            pending.append(node.right)
    return count
|
f1a3941388aec034c4192466cdba378870772484
| 60,129
|
def select_one(possibilities, description):
    """
    Asks the user to select one of the possibilities after displaying description as prompt
    """
    print(description)
    # Show every option together with the index the user must type.
    for index, (object_description, _) in enumerate(possibilities):
        print(f"{index}: {object_description}")
    # Keep prompting until a usable index is entered.
    while True:
        try:
            selection = int(input("Please enter the number for your selection\n"))
        except ValueError:
            # Non-numeric input: remind the user and ask again.
            print("Invalid selection! Please enter an Integer")
            continue
        if 0 <= selection < len(possibilities):
            break
        # Numeric but out of range.
        print("Invalid selection! Please enter one of the listed options")
    (_, chosen) = possibilities[selection]
    return chosen
|
e504f000b8294b6b039ee756ce777b557041b032
| 60,132
|
def sort(ref, *args):
    """Returns sorted arrays based on a reference array.

    Args:
        ref (list or array): 1D array.
        *args: arrays to sort.

    Returns:
        list of sorted arrays (one per element of *args)."""
    results = []
    for values in args:
        # Stable sort on the reference keys only; ties keep original order.
        ordered_pairs = sorted(zip(ref, values), key=lambda pair: pair[0])
        results.append([value for _, value in ordered_pairs])
    return results
|
6a5bc7c0608f9b36de39d4864792c72f5c07cd5e
| 60,133
|
import re
def abbreviate(words):
    """
    Return the acronym of a string: the upper-cased first character of
    every word (words are separated by anything that is not a letter,
    digit, or apostrophe).
    """
    tokens = (token for token in re.split("[^a-zA-Z0-9']", words) if token)
    return "".join(token[0].upper() for token in tokens)
|
ec9c809fadd2aa9b28a9910580114233ebffb3c7
| 60,134
|
def decimal_to_dot(x_dec):
    """Test if `Decimal` value has enough precision that it is defined to dot,
    i.e., its eps is <= 1.

    Parameters
    ----------
    x_dec : Decimal
        Input value in decimal.

    Returns
    -------
    y : bool
        True if `x_dec` defined to dot.

    Examples
    --------
    >>> decimal_to_dot(Decimal('1.23E+1'))
    True
    >>> decimal_to_dot(Decimal('1.23E+2'))
    True
    >>> decimal_to_dot(Decimal('1.23E+3'))
    False
    """
    # NaN / infinity are never defined to dot.
    if not x_dec.is_finite():
        return False
    return x_dec.as_tuple().exponent <= 0
|
17dea4c25dd8a4c2a9cb2b79ea256dc043e694dd
| 60,136
|
def invert_dict(d):
    """Returns a new dict with keys as values and values as keys.

    Parameters
    ----------
    d : dict
        Input dictionary. If one value of the dictionary is a list or a tuple,
        each element of the sequence will be considered separately.

    Returns
    -------
    dict
        The new dictionary with d keys as values and d values as keys.
        In the case of duplicated d values, the value of the resulting key
        of the new dictionary will be a list with all the corresponding d keys.

    Examples
    --------
    >>> from secml.utils.dict_utils import invert_dict
    >>> a = {'k1': 2, 'k2': 2, 'k3': 1}
    >>> print(invert_dict(a))
    {1: 'k3', 2: ['k1', 'k2']}
    >>> a = {'k1': 2, 'k2': [2,3,1], 'k3': 1}
    >>> print(invert_dict(a))
    {1: ['k2', 'k3'], 2: ['k1', 'k2'], 3: 'k2'}
    """
    def as_list(value):
        return list(value) if isinstance(value, (list, tuple)) else [value]

    inverted = {}
    for key, value in d.items():
        for new_key in as_list(value):
            if new_key in inverted:
                # Key already present: collect all originating keys in a list.
                inverted[new_key] = as_list(inverted[new_key]) + [key]
            else:
                # First occurrence stays a scalar.
                inverted[new_key] = key
    return inverted
|
5378d64bb3c2257f1eb08e63c371717401210529
| 60,142
|
import json
def get_inputs_from_file(filename=""):
"""
Get the specified JSON input file's contents as a dict
:param filename: name of the JSON input file
:return: json/python dictionary
:rtype: dict
"""
with open(filename) as input_text:
json_obj = json.load(input_text)
return json_obj
|
5c40e3266946ce71cb7c62453c092bf44ca4bcb2
| 60,146
|
def get_batch_ids(stensor):
    """Get batch ids of sparse tensor (unique values of the first index column)."""
    batch_column = stensor.indices[:, 0]
    return batch_column.unique()
|
1bb448b8854b34af9ee137f11ca36fbd6ca9fcff
| 60,148
|
import pickle
def from_pickle(fname):
"""
deserializes an object from a pickle file
"""
with open(fname, "rb") as inf:
return pickle.load(inf)
|
9cb099e2204053718d7022a9a71d0cba51e55e5a
| 60,149
|
def _FormatFloat(number):
"""Formats float with two decimal points."""
if number:
return '%.2f' % number
else:
return '0.00'
|
754563562dc34fc6137ccf5c856fe961dc52b713
| 60,151
|
def myfunction(arg1, arg2, kwarg='whatever.'):
    """
    Does nothing more than demonstrate syntax.

    Demonstration function used as doxypypy/Doxygen input-filter example.

    Args:
        arg1: A positional argument (must be an int).
        arg2: Another positional argument (must be <= 23).
    Kwargs:
        kwarg: A keyword argument appended to the result.
    Returns:
        A string '<arg1+arg2> - <arg1/arg2>, <kwarg>'.
    Raises:
        AssertionError: if arg1 is not an int.
        ValueError: if arg2 > 23.
        ZeroDivisionError: if arg2 == 0.
    """
    assert isinstance(arg1, int)
    if arg2 > 23:
        raise ValueError
    total = arg1 + arg2
    ratio = arg1 / arg2  # may raise ZeroDivisionError
    return f'{total} - {ratio}, {kwarg}'
|
34bab79c03a8adf02d5f3d8d5db410ecdd8a3516
| 60,155
|
def get_chapter(exercise: str) -> str:
    """Extract chapter name from exercise name.

    Drops the final two characters (the dot and exercise number).

    :param exercise: exercise name (e.g. "1.3.1").
    :return: chapter name (e.g. "1.3").
    """
    return exercise[:len(exercise) - 2]
|
03d93455a70c7c212d8566ad932efe223600ef6b
| 60,157
|
def hhmmss_to_seconds_since_midnight(time_int):
    """
    Convert HH:MM:SS into seconds since midnight.

    For example "01:02:03" returns 3723. The leading zero of the hours may be
    omitted. HH may be more than 23 if the time is on the following day.

    :param time_int: HH:MM:SS string. HH may be more than 23 if the time is on the following day.
    :return: int number of seconds since midnight
    """
    # Collapse the colons and read the result as one HHMMSS integer,
    # then peel the fields back off with divmod.
    packed = int(time_int.replace(':', ''))
    rest, second = divmod(packed, 100)
    hour, minute = divmod(rest, 100)
    return (hour * 60 + minute) * 60 + second
|
837116c13d8c1b653c8f9f2ff1ad97c462fa06d6
| 60,158
|
def format_elapsed_time(elapsed: float, precision: int = 2) -> str:
    """
    Format the elapsed time in seconds to a human readable string.

    Parameters
    ----------
    elapsed : `float`
        The elapsed time in seconds.
    precision : `int`
        The number of decimal places to use, defaults to 2.

    Returns
    -------
    `str`
        The formatted elapsed time.
    """
    milliseconds = elapsed * 1e3
    # Pick the largest unit that keeps the value >= 1.
    if milliseconds >= 1e3:
        value, unit = milliseconds / 1e3, 's'
    elif milliseconds >= 1:
        value, unit = milliseconds, 'ms'
    else:
        value, unit = milliseconds * 1e3, 'μs'
    return f'{value:.{precision}f}{unit}'
|
e8cd1ceb24b62936bdf317e0b1584b80ba363da3
| 60,162
|
import math
def mag(v):
    """
    Returns the magnitude (Euclidean norm) of the 3-vector v.
    """
    x, y, z = v
    sum_of_squares = x * x + y * y + z * z
    return math.sqrt(sum_of_squares)
|
ae4a8080ad530256e2a0673a43fb9f9ac1ee2eb4
| 60,166
|
def read_bool(data):
    """Reads a string and outputs a boolean.

    Takes a string of the form 'true' or 'false' (any case, surrounding
    whitespace ignored), and returns the appropriate boolean.

    Args:
        data: The string to be read in.

    Raises:
        ValueError: Raised if the string is not 'true' or 'false'.

    Returns:
        A boolean.
    """
    # Normalize once instead of re-running strip().upper() per branch.
    normalized = data.strip().upper()
    if normalized == "TRUE":
        return True
    if normalized == "FALSE":
        return False
    raise ValueError(data + " does not represent a bool value")
|
8729567f262e2c8d5376a9fe54dbf10060d0c59c
| 60,174
|
def normalized_length_tolerance() -> float:
    """Tolerance to be used when comparing normalized vectors."""
    tolerance = 1e-4
    return tolerance
|
98721148ecf862d32751f7de42430ed5ba385cd7
| 60,179
|
def gchp_metname(prior_to_13):
    """
    Determines the correct collection name for GCHP StateMet data
    (the collection was renamed in version 13).
    """
    return "StateMet_avg" if prior_to_13 else "StateMet"
|
77eab1163e8a010c1d74dccdfdb12fffd09d059b
| 60,183
|
def post_process_states(input_t_str, states, pos_tag_sentence):
    """
    Post-processes the states (either lstm cell states or probabilities of
    a byte belonging to a POS tag) of input_t_str.

    If a unicode char of input_t_str has more than one byte in utf-8, take the
    state associated with the last byte.

    Args:
        input_t_str (str): text to parse.
        states (list): list of neuron activations for each byte in
                       input_t_str, or probabilities of each byte belonging
                       to a POS tag.
        pos_tag_sentence (list): pos tag for each byte of input_t_str.

    Returns:
        selected_states (list): one state per character (last byte's state).
        selected_tags (list): one pos tag per character (last byte's tag).
    """
    selected_states = []
    selected_tags = []
    offset = 0
    for ch in input_t_str:
        width = len(ch.encode('utf-8'))
        # Index of the last byte belonging to this character.
        pick = offset + width - 1
        selected_states.append(states[pick])
        selected_tags.append(pos_tag_sentence[pick])
        offset += width
    return selected_states, selected_tags
|
07bba597ec2e527594518b5b73a080e5a2651fdd
| 60,185
|
def _alternating_signs(n):
    """
    Return (-1.0)**n as a float:
    1.0 if n is zero or even, -1.0 if n is odd.

    :param n: zero or a positive integer.  NOTE(review): floats also pass
        the guard; a non-integer float produces a complex result — confirm
        callers only pass integers.
    :raises ValueError: if n is negative.
    """
    if (n < 0.0):
        raise ValueError("Argument has to be zero or a positive integer")
    return (-1.0)**n
|
237bcfc82e3947ca7bc01d57c17480a9ae89dbb2
| 60,187
|
import json
def get_performance(pos: str, wordform: str, source: str):
"""
Given a `pos` and `wordform`, return the list of modules that support the corresponding conversion,
also return a dictionary mapping wordforms to a list of accuracies. Each accuracy score corresponds
to a module in the list of modules
"""
sources = {
"celex": "app/static/data/celex_performance.json",
"celex_word": "app/static/data/celex_word_performance.json",
"celex_collocation": "app/static/data/celex_collocation_performance.json",
"agid": "app/static/data/agid_performance.json",
"wiktionary": "app/static/data/wiktionary_performance.json",
"wiktionary_word": "app/static/data/wiktionary_word_performance.json",
"wiktionary_collocation": "app/static/data/wiktionary_collocation_performance.json",
}
with open(sources[source], "r") as f:
data = json.load(f)
performance_dict = data[pos][wordform]
module_names = list(next(data.keys() for data in performance_dict.values()))
n_terms = sum(performance_dict[list(performance_dict.keys())[0]][module_names[0]].values())
return (
module_names,
{
key: [
performance_dict[key][module]["correct"] * 100 / (performance_dict[key][module]["correct"] + performance_dict[key][module]["incorrect"])
for module in module_names
]
for key in performance_dict
},
n_terms
)
|
6e3edfd373570095520f3931732c449acfb488c1
| 60,188
|
def db_list_sensors(c):
    """ Get list of temperature and humidity sensors from database

    :param c: an open DB-API cursor.
    :return: list of rows, one (name,) tuple per distinct sensor.
    """
    c.execute("SELECT DISTINCT name FROM sensors")
    return c.fetchall()
|
f5ade4ca43769f5ad714b15a11c5acd74acf4159
| 60,189
|
import torch
def reserve_nn_scores(similarities, nn_num):
"""Reserver top-k nearest neighbors' similarity scores
Args:
similarities (Matrix): test_num * train_num
Returns:
nn_scores (Matrix): only keep the scores of the top-k nearest neighbors
"""
scores = torch.FloatTensor(similarities)
# sorted each row, and get the index
# ascending order
sorted_scores, sorted_index = torch.sort(scores, 1, descending=True)
nn_scores = sorted_scores[:, 0:nn_num]
nn_index = sorted_index[:, 0:nn_num]
# only accept float32
nn_scores = torch.zeros(scores.size()).scatter_(1, nn_index, nn_scores)
# convert float32 to float64
return nn_scores.numpy().astype(float)
|
b3da2563c0ac56c9428167f7f6bb992556d826ce
| 60,195
|
import math
def degrees_to_radians(deg):
"""Converts degrees to radians.
Args:
deg: degrees
Returns:
radians
"""
return deg * math.pi / 180
|
df10298108e85dd92d962351ad0d763821626759
| 60,198
|
from typing import Counter
def count_oov(tokens, vocab):
"""
计算oov的单词
:param tokens: 所有token单词
:param vocab: 生成的单词表中的单词
:return: int, int, 返回所有单词数和所有不在vocab中的单词数
"""
c = Counter(t for t in tokens)
total = sum(c.values())
matched = sum(c[t] for t in vocab)
return total, total-matched
|
b69c3efb49ea697d6d81e398e7a22ce03ceaa838
| 60,200
|
from pathlib import Path
import re
def nearest_img_files(files, fmt='png'):
"""Get a list of file paths to closest images (type as specified) in dataset """
def file_png2svg(x):
p = Path(x)
n = p.name
n = re.sub(r"\.png", ".svg", n)
p = p.parent.parent
p = p / 'transformed_svgs' / n
return str(p)
def file_png2gif(x):
p = Path(x)
n = p.name
n = re.sub(r"-\d+\.png", ".gif", n)
p = p.parent.parent
p = p / 'icons' / 'icon_gif' / n
return str(p)
if fmt == 'png':
pass
elif fmt == 'svg':
files = list(map(lambda x: file_png2svg(x), files))
elif fmt == 'gif':
files = list(map(lambda x: file_png2gif(x), files))
return files
|
d2271c2024e53fa2a12c25b40d1b7a82d3a44a8a
| 60,203
|
import pprint
def phone_partial_clean(phones = {}):
    """ Takes dictionary of phone numbers to be fixed (as key).
        Removes parenthesis and inserts hyphen (-) in place of blanks
        saving partially cleaned number as value.
        Function is used prior to cleaning and reduces the number of
        manual corrections needed in the update_phones mapping dictionary.

        NOTE(review): the mutable default argument `{}` is shared across
        calls; callers always hitting the built-in sample set below are
        unaffected, but passing and mutating a dict is side-effecting.
        The cleaned dict is pprint-ed and returned.
    """
    # Built-in sample set of malformed numbers used when no dict is given.
    if not phones:
        phones = {'(512) 246-7941': 'fix_number',
         '+1 (512) 469-7000': 'fix_number',
         '+1 (512) 759-5900': 'fix_number',
         '+1 512 218 5062': 'fix_number',
         '+1 512 218 9888': 'fix_number',
         '+1 512 238 0820': 'fix_number',
         '+1 512 244 3737': 'fix_number',
         '+1 512 248 7000': 'fix_number',
         '+1 512 252 1133': 'fix_number',
         '+1 512 255 7000': 'fix_number',
         '+1 512 255 7530': 'fix_number',
         '+1 512 258 8114': 'fix_number',
         '+1 512 277 6959': 'fix_number',
         '+1 512 310 7600': 'fix_number',
         '+1 512 310 7678': 'fix_number',
         '+1 512 324 4000': 'fix_number',
         '+1 512 341 1000': 'fix_number',
         '+1 512 362 9525': 'fix_number',
         '+1 512 402 7811': 'fix_number',
         '+1 512 528 7000': 'fix_number',
         '+1 512 532 2200': 'fix_number',
         '+1 512 600 0145': 'fix_number',
         '+1 512 637 6890': 'fix_number',
         '+1 512 733 9660': 'fix_number',
         '+1 512 990 5413': 'fix_number',
         '+1 512)351 3179': 'fix_number',
         '+1 512-244-8500': 'fix_number',
         '+1 512-260-5443': 'fix_number',
         '+1 512-260-6363': 'fix_number',
         '+1 512-310-8952': 'fix_number',
         '+1 512-338-8805': 'fix_number',
         '+1 512-341-7387': 'fix_number',
         '+1 512-421-5911': 'fix_number',
         '+1 512-535-5160': 'fix_number',
         '+1 512-535-6317': 'fix_number',
         '+1 512-733-6767': 'fix_number',
         '+1 512-851-8777': 'fix_number',
         '+1 737 757 3100': 'fix_number',
         u'+1-737-484\u20110700': 'fix_number',
         '+1512-413-9671': 'fix_number',
         '+1512-909-2528': 'fix_number',
         '+15123885728': 'fix_number',
         '+15124282300': 'fix_number',
         '+15124648382': 'fix_number',
         '1+512-696-5209': 'fix_number'}
    # Drop parentheses and replace blanks with hyphens; store the partially
    # cleaned form as the value for each original (key) number.
    for key in phones:
        phone_update_value = key.replace('(', '').replace(')', '').replace(' ', '-')
        phones[key] = phone_update_value
    pprint.pprint(phones)
    return phones
|
05a5b8d36b9fcc700106fc4f78930cc41aef8abd
| 60,207
|
def find_cluster(stats, m1, m2):
    """Checks if m1 and m2 appear together in a true or predicted cluster.

    Returns (in_true_cluster, in_pred_cluster) booleans.
    """
    def together(clusters):
        # True when any single cluster contains both mentions.
        return any(m1 in cluster and m2 in cluster
                   for cluster in clusters.values())

    return together(stats['true_clusters']), together(stats['pred_clusters'])
|
3919fb15a169dbe8a933813f9cb60bbc01467136
| 60,223
|
def calcMetrics(tp, n_std, n_test):
    """Calculate precision, recall and f1.

    Default precision and recall are 1 because an empty test corresponding
    to an empty standard should count as a correct answer.
    """
    precision = tp / float(n_test) if n_test > 0 else 1
    recall = tp / float(n_std) if n_std > 0 else 1
    denominator = precision + recall
    f1 = (2 * precision * recall / denominator) if denominator > 0 else 0.0
    return (precision, recall, f1, tp, n_std, n_test)
|
548aebd314a12d49f1617372fb4f70b2412f61f8
| 60,228
|
import re
def remove_junos_group(src):
"""
Remove XML attribute junos:group from the given string.
:param str src: Source string
:returns: String with junos:group occurrences removed.
"""
return re.sub(r' junos:group="[^"]+"', '', src)
|
311b335085b686e433a94da42abc22cbe959ef82
| 60,229
|
def get_arrival(event, pickid):
    """Find the arrival object in a Catalog Event corresponding to input pick id.

    Args:
        event (Event): Obspy Catalog Event object.
        pickid (str): Pick ID string.

    Returns:
        Arrival: Obspy Catalog arrival object, or None if no arrival
        matches pickid.
    """
    # Scan origins in order and return the first matching arrival.
    for origin in event.origins:
        for arrival in origin.arrivals:
            if arrival.pick_id == pickid:
                return arrival
    if pickid is None:
        return None
|
c4f8e47b0b6483fc372f374b68984f433304875b
| 60,230
|
def assign_reference_name(config):
    """
    Assigns bucket name to reference name if reference name doesn't exist
    (or is explicitly None).

    Args:
        config(dict)
    Returns:
        dict: formatted config (mutated in place and returned)
    """
    for bucket in config["buckets"]:
        reference = bucket.get("referenceName")
        if reference is None:
            bucket["referenceName"] = bucket["name"]
    return config
|
1023d7b2fb8d45d3e8bb6ce3890b46fd4495302d
| 60,233
|
def is_type_subclass(type_: type) -> bool:
    """Check if the given type field is subclass of ``type``."""
    # type(type) is simply ``type``; compare the argument's metaclass to it.
    metaclass = type(type_)
    return issubclass(metaclass, type(type))
|
6b25f1d67cf30d8c8996a3dfb08cf80af6167e67
| 60,237
|
def getClientIP(request):
    """
    Pull the requested client IP address from the X-Forwarded-For request
    header. If there is more than one IP address in the value, it will return
    the first one.

    For more info, see: 'def access_route' in
    https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/wrappers.py

    :param request:
    :return str: The client IP address, or none if neither the X-Forwarded-For
        header, nor REMOTE_ADDR are present in the environment.
    """
    # Bug fix: `request.access_route > 0` compared a list to an int, which
    # raises TypeError on Python 3 (and was always True on Python 2).
    # Test the list's truthiness instead.
    if request.access_route:
        return request.access_route[0]
    return None
|
4ed0d2693e805aab782a304dd0ab9a62bc1f91d4
| 60,242
|
def running_mean_estimate(estimate,
                          batch_estimate,
                          num_samples,
                          batch_size):
  """Update a running mean estimate with a mini-batch estimate.

  Blends the two means weighted by their respective sample counts.
  """
  total = float(num_samples + batch_size)
  return (estimate * (float(num_samples) / total)
          + batch_estimate * (float(batch_size) / total))
|
95b2d1911a49978812461323ea2ddbaf56be967e
| 60,243
|
import math
def cart2pol(x, y):
"""
Transform a point from cartesian coordinates to polar coordinates.
:param x: x coordinate value of the point
:param y: y coordinate value of the point
:return: rho (distance from the pole (origin) to the point),
theta (the angle between the polar-axis and the line connecting the pole and the point, in radians)
"""
return math.hypot(x, y), math.atan2(y, x)
|
87f5b68646f135cf321a844afef64b11fe8dc948
| 60,249
|
def getList(x):
    """
    Convert any input object to list: lists pass through, strings and
    non-iterables become single-element lists, other iterables are expanded.
    """
    if isinstance(x, str):
        return [x]
    if isinstance(x, list):
        return x
    try:
        return list(x)
    except TypeError:
        # Not iterable: wrap it.
        return [x]
|
06f888a5e689e9e19e5b3530fb9e9f8d7c63d670
| 60,252
|
def read_blast_file(location):
    """
    :param location: String containing the exact relative filepath to
        the BLAST file to be read.
    :return blast_result: A 2D-list; contains lists with a variety of
        Int, float and string elements detailing all results of a
        single BLAST result record. Comment lines ('#'-prefixed) are skipped.
    """
    blast_result = []
    with open(location, 'r') as infile:
        for line in infile:
            if not line.startswith("#"):
                blast_result.append(line.rstrip("\n").split("\t"))
    return blast_result
|
cb2ee234a4f11f3ad436c18263836586e591d5c1
| 60,258
|
def is_dynamic_buffer_enabled(config_db):
    """Return whether the current system supports dynamic buffer calculation"""
    metadata = config_db.get_entry('DEVICE_METADATA', 'localhost')
    return metadata.get('buffer_model') == 'dynamic'
|
ad7bf220327f711910a8cac781b78daf063a03a9
| 60,260
|
import struct
def decode_timestamp(ts_bytes):
    """
    Decode timestamp tuple from values saved in FITS file.

    The timestamp is originally encoded as a series of 32bit little-endian
    unsigned integers, plus a pair of chars for the number of satellites
    tracked and a sync status.  Storing it as fake FITS pixels mangles the
    data: FITS assumes 16bit signed big-endian integers and subtracts 32768
    from each 16bit half.  Concretely the mangling is:

    - decode the byte string as two 16bit little-endian unsigned integers;
    - subtract 32768 from each integer;
    - encode as two 16bit signed integers in BIG ENDIAN format.

    This routine reverses that process (note the byte-pair swap that falls
    out of the endian changes: 27 = \\x1b\\x00\\x00\\x00 becomes
    \\x00\\x1b, \\x00\\x00):

    - decode the timestamp string as big endian 16bit signed integers;
    - add 32768 to each value;
    - re-encode as little endian 16bit unsigned integers;
    - reinterpret the buffer as 32bit little-endian unsigned integers.

    The final two status values are decoded as signed chars instead, and
    the last two bytes discarded.

    Parameters
    ----------
    ts_bytes: bytes
        the timestamp bytes as written in the FITS file (36 bytes).

    Returns
    --------
    timestamp : tuple
        (frameCount, timeStampCount, years, day_of_year, hours, mins,
         seconds, nanoseconds) followed by the two status values.
    """
    # Undo the FITS mangling: big-endian signed -> offset -> little-endian unsigned.
    signed_halves = struct.unpack('>' + 'h' * 18, ts_bytes)
    unsigned_halves = [value + 32768 for value in signed_halves]
    buf = struct.pack('<' + 'H' * 18, *unsigned_halves)
    # First 32 bytes are eight uint32 fields; next two bytes are status chars.
    timestamp_fields = struct.unpack('<' + 'I' * 8, buf[:-4])
    status_fields = struct.unpack('bb', buf[-4:-2])
    return timestamp_fields + status_fields
|
df5fc7d7b5becc2cbbbb61db32f8a874536eb4dc
| 60,263
|
def strip_leading_numbers(s):
    """
    Return a string removing leading words made only of numbers.
    """
    words = s.split()
    index = 0
    # Skip over the run of purely-numeric words at the front.
    while index < len(words) and words[index].isdigit():
        index += 1
    return u' '.join(words[index:])
|
8ab28c80de808837152874b56fbbfce96c7eeafd
| 60,265
|
def split(text):
    """Split a text on ';' or ',', taking backslash escapes into account.

    An escaped character is emitted verbatim; a trailing backslash stands
    for itself.  See https://stackoverflow.com/a/21882672
    """
    ESCAPE = '\\'
    parts = []
    buffer = []
    chars = iter(text)
    for ch in chars:
        if ch == ESCAPE:
            # Consume the escaped character; at end-of-string keep the
            # backslash itself.
            buffer.append(next(chars, ESCAPE))
        elif ch in (';', ','):
            # Separator: flush the current piece.
            parts.append(''.join(buffer))
            buffer = []
        else:
            buffer.append(ch)
    if buffer:
        parts.append(''.join(buffer))
    return parts
|
32584df378cfdbfbce18595356f46b484918976a
| 60,266
|
def _whichever(x, y):
"""Returns whichever of `x` and `y` is not `None`.
If both `x` and `y` are not `None`, returns `x`.
If both `x` and `y` are `None`, returns `None`.
"""
return y if x is None else x
|
b2a0a7ffa3eccd445119700bb668872ca8f67d2d
| 60,268
|
def rescale(x, new_min, new_max, old_min, old_max):
    """
    Rescales a value from an old range [A,B] to a new range [C,D] using the equation:

    x' = C(1 - (x-A)/(B-A)) + D((x-A)/(B-A))

    :param x: value to be scaled
    :param new_min: new range minimum (C)
    :param new_max: new range maximum (D)
    :param old_min: old range minimum (A)
    :param old_max: old range maximum (B)
    :return: rescaled value
    """
    # Fractional position of x inside the old range.
    fraction = (x - old_min) / (old_max - old_min)
    return new_min * (1 - fraction) + new_max * fraction
|
ebd988a2c740376c2fa3a2f5349f913e41e5b4e8
| 60,270
|
def select_pivot_column(z):
    """
    Pick one variable that increases the objective.

    Scans ALL coefficients and returns the 1-based index of the first
    positive one; returns None when no coefficient is positive (the
    objective cannot be increased).  The previous if/else form could bail
    out with None on the very first non-positive coefficient.
    """
    for i, zi in enumerate(z):
        if zi > 0:
            return i + 1
    return None
|
d36d56adf21038e94beb6a069960e4319630f1cf
| 60,271
|
def n_missing(s):
    """Get the number of missing feature subsets.

    Parameters
    ----------
    s : pandas.Series
        Records of feature subsets, indexed by a MultiIndex with one level
        per feature.

    Returns
    -------
    int
        Number of non-empty feature subsets (2**n_features - 1) that have
        no record in `s`.

    See also
    --------
    subrela.records.iterate_missing : Iterate missing feature subsets.
    """
    n_features = s.index.nlevels
    possible_subsets = 2 ** n_features - 1  # excludes the empty subset
    return possible_subsets - len(s)
|
3a7b642f9e15e4abd251eab364f700b19d5115f4
| 60,273
|
def td2days(td):
    """Convert datetime.timedelta to (fractional) days."""
    seconds = td.total_seconds()
    return seconds / 3600 / 24
|
3d50a326aaef6dd0d570e2a118f0a8cfd44830b5
| 60,276
|
import re
def cpg_map(seq):
""" Return tuple of C/G/N.
>>> cpg_map('CGCGTAGCCG')
'CGCGNNNNCG'
"""
starts = (x.start() for x in re.finditer('CG', ''.join(['N', seq, 'N'])))
cpgs = ['N'] * (len(seq) + 2)
for start in starts:
cpgs[start] = 'C'
cpgs[start+1] = 'G'
return ''.join(cpgs[1:-1])
|
795ca667ba93fa3fb50576ec87f00d1672b95d63
| 60,280
|
import logging
def GetLogFromViewUrl(base_log, http_client):
"""Gets a log from it's view url.
Args:
base_log(str): View url in the format
like https://{host}/logs/{project}/{path}
http_client (FinditHttpClient): http_client to make the request.
Returns:
log (str or None): Requested log.
"""
log_url = '{base_log}?format=raw'.format(base_log=base_log)
status_code, log, _ = http_client.Get(log_url)
if status_code != 200 or not log:
logging.error('Failed to get the log from %s: status_code-%d, log-%s',
log_url, status_code, log)
return None
return log
|
e9c41a0b6df978f9583838334bd5f37e3b914bcd
| 60,281
|
def percent2decimal(percent):
    """Take a percentage, such as 3.5, and convert it to a decimal, such as 0.035"""
    decimal_value = percent / 100
    return decimal_value
|
12fb184f19968c7d7e9bb59b3ad47d92e1b3d2d1
| 60,285
|
def find_two_sum(numbers, target_sum):
    """
    :param numbers: (list of ints) The list of numbers.
    :param target_sum: (int) The required target sum.
    :returns: (a tuple of 2 ints) The indices of the two elements whose sum is
        equal to target_sum, or None when no such pair exists.
    """
    seen = {}  # value -> index where it was first encountered
    for idx, current in enumerate(numbers):
        complement = target_sum - current
        if complement in seen:
            # Later index first, matching the original ordering.
            return idx, seen[complement]
        seen[current] = idx
    return None
|
ac940ae2a0a763dee5c316fa610d3e06ad6b1d25
| 60,286
|
import json
def normalize_single_model_value(model):
    """Standardize a single plane model string.
    Args:
        model (str) - a plane model name
    Returns:
        normalized_model_value (str) - a standardized model name, or None
        when the model has no entry in the lookup table
    """
    # JSON lookup table: raw plane model string -> standardized model name.
    with open("plane_model_dict.json", "r") as json_file:
        lookup = json.load(json_file)
    return lookup.get(model)
|
319e9faf119e75adedb4e5942affd07fc765a67e
| 60,288
|
def fmt_float_list(ctx, num=1):
    """ Return a comma separated list of float formatters to the required number
    of decimal places. For instance:
    fmt_float_list(ctx.decimals=4, num=3) == "%.4f, %.4f, %.4f"
    """
    single_fmt = "%%.%df" % ctx.decimals
    return ', '.join([single_fmt] * num)
|
3654c6ef50cd4cc27c4baaa5ce5a9947db499ae7
| 60,289
|
def A000326(n: int) -> int:
    """Pentagonal numbers: a(n) = n*(3*n-1)/2."""
    return (3 * n * n - n) // 2
|
e3f6bf814b17c996a7e659e197981f9852d9d3ab
| 60,291
|
from typing import Any
import torch
def tensor_in_device(data: Any, device: str = "cpu", **kwargs) -> torch.Tensor:
    """Create tensor in device.
    Args:
        data (Any): data on the tensor.
        device (str, optional): string of the device to be created in.
            Defaults to "cpu".
    Returns:
        torch.Tensor: tensor in device.
    """
    tensor = torch.Tensor(data, **kwargs)
    return tensor.to(device)
|
768550efb9596f66d065bc111922beea1daa0ad6
| 60,293
|
def _face_landmarks_to_list(face_landmarks):
"""Takes a dict of the face landmarks and turns it into a single list
of tuples."""
res = []
for area in ['chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge', 'nose_tip',
'left_eye', 'right_eye', 'top_lip', 'bottom_lip']:
for lm in face_landmarks[area]:
res.append(lm)
return res
|
f66e91d8d9ca40ac6940e8a126098592e865bd8e
| 60,299
|
import torch
import itertools
def tensors_to_tensor(tensors, shapes):
    """
    Restore N equally sized tile tensors into one image tensor.
    args:
        tensors: tensors [nb, ch, row, col]
        shapes: [nb_rows, nb_cols] grid layout of the original tiles
    return: tensor of shape [1, ch, row*nb_rows, col*nb_cols]
    """
    batch, channels, rows, cols = tensors.shape
    grid_rows, grid_cols = shapes[0], shapes[1]
    assert batch == (grid_rows * grid_cols), 'Number of tensor should be equals.'
    # Blank canvas that the tiles are pasted onto.
    canvas = torch.zeros(size=(1, channels, rows * grid_rows, cols * grid_cols),
                         dtype=torch.float32)
    for r in range(grid_rows):
        for c in range(grid_cols):
            canvas[0, :,
                   r * rows:(r + 1) * rows,
                   c * cols:(c + 1) * cols] = tensors[r * grid_cols + c]
    return canvas
|
3a9f5d6229cb4eca6e8f37058b15979d36ca813a
| 60,300
|
def tags2dict(tags):
    """Convert a tag list to a dictionary.
    Example:
        >>> t2d([{'Key': 'Name', 'Value': 'foobar'}])
        {'Name': 'foobar'}
    """
    if tags is None:
        return {}
    return {item['Key']: item['Value'] for item in tags}
|
1b5caaf51ad45110d7065ef24625f02daa034267
| 60,303
|
def get_tables(c, verbose=False):
    """Return all table-name rows from the sqlite_master catalogue."""
    c.execute("SELECT name FROM sqlite_master WHERE type='table';")
    table_rows = c.fetchall()
    if verbose:
        print(table_rows)
    return table_rows
|
ac8fab6c628319b236b7d6491bd87344c3a77a9f
| 60,304
|
import hashlib
import json
def job_id(conf):
    """Create a unique id for a schedule item in config.

    The config is serialised canonically (sorted keys) so equal configs
    always hash to the same sha1 hex digest.
    """
    canonical = json.dumps(conf, sort_keys=True).encode('utf-8')
    return hashlib.sha1(canonical).hexdigest()
|
51962143a0e304b48b30324fd357562c2bc9bbd2
| 60,305
|
def _get_mime_type(content_type):
"""Parse mime type.
Args:
content_type: content-type of response.
Returns:
returns string parsed to a media type.
If content_type is None, returns none value.
"""
if content_type is None:
return None
if ";" in content_type:
i = content_type.index(";")
content_type = content_type[:i]
fields = content_type.split("/")
for i in range(len(fields)):
fields[i] = fields[i].strip().lower()
mime_type = "/".join(fields)
return mime_type
|
d9dbdc55c03ea7830ef1fba0d7d5be7f09d4bee2
| 60,306
|
def sum_of_squares(*nums: int) -> int:
    """Sum of the squares of `nums`."""
    total = 0
    for value in nums:
        total += value * value
    return total
|
e0c4e7d7953a7fc01fe2cb97806d0e2cbe1fbb33
| 60,307
|
def convert_to_list_of_words(lyrics):
    """Returns a list of words
    Parameters:
        lyrics (string)
    Returns:
        list: lower-cased words with commas removed
    """
    cleaned = lyrics.replace(',', '').lower()
    return cleaned.strip().split()
|
2e64bc89cd22dd9b0447c7f4f7dd421825cc4d32
| 60,315
|
def clear(favourites_list):
    """Wipe every saved entry from favourites_list.

    Returns:
        A (success, error) pair; always (True, None).
    """
    favourites_list.clear()
    return True, None
|
567650c46ce23465e4adccffa350db19b98dbe04
| 60,316
|
def diffun(ctx, f, n=1, **options):
    """
    Given a function f, returns a function g(x) that evaluates the nth
    derivative f^(n)(x)::
        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> cos2 = diffun(sin)
        >>> sin2 = diffun(sin, 4)
        >>> cos(1.3), cos2(1.3)
        (0.267498828624587, 0.267498828624587)
        >>> sin(1.3), sin2(1.3)
        (0.963558185417193, 0.963558185417193)
    The function f must support arbitrary precision evaluation.
    See :func:`diff` for additional details and supported
    keyword options.
    """
    # The zeroth derivative is the function itself.
    if n == 0:
        return f

    def nth_derivative(x):
        return ctx.diff(f, x, n, **options)

    return nth_derivative
|
dc217df65c8c1cbbf1839f1e9a1de15752aa003a
| 60,317
|
import pyarrow
def get_hdfs_connector(host: str, port: int, user: str):
    """ Initialise a connector to HDFS
    Parameters
    ----------
    host: str
        IP address for the host machine
    port: int
        Port to access HDFS data.
    user: str
        Username on Hadoop.
    Returns
    ----------
    fs: pyarrow.hdfs.HadoopFileSystem
    """
    connector = pyarrow.hdfs.connect(host=host, port=port, user=user)
    return connector
|
320732bc106961e8c7bc926af7995c91d3ad6248
| 60,323
|
def getNumber(card):
    """
    Get the value of a card.
    Parameters
    ----------
    card : str
        The card we are looking at; the last character is the suit.
    Returns
    -------
    val : int
        The value of the card (ace = 11, face cards = 10).
    """
    rank = card[:-1]
    if rank == 'A':
        return 11
    if rank in ('J', 'Q', 'K'):
        return 10
    return int(rank)
|
8da6c1fba849dc7d9294c29b71a21114d1ea85a8
| 60,326
|
import csv
def get_analytics_account_properties_dict_from_csv(csv_file_path: str) -> dict:
    """
    We use this dict to gather the properties by accounts.
    :param csv_file_path: path to the CSV file where Google Analytics
        Properties have previously been dumped.
    :return: a dict representation of the Analytics Properties, mapping each
        account to the list of its (non-empty) properties.
    """
    account_to_properties = {}
    with open(csv_file_path, "r") as csv_file:
        for row in csv.DictReader(csv_file):
            account_property = row["Properties"]
            if account_property == '':
                # Rows without a property contribute nothing.
                continue
            account_to_properties.setdefault(row["Account"], []).append(account_property)
    return account_to_properties
|
18356f5569e2f56ba1864995a7a7e7420106b457
| 60,328
|
from typing import Counter
def count_vector(text):
    """Creates a vector of unique words and how many times they appear."""
    return Counter(text)
|
28316a62df7c7cc2d44b4033ce8f42ea11ded466
| 60,330
|
def select_trajectories(trajectories, trajectories_cost, trajectories_nb):
    """
    Return the trajectories associated with the smallest costs.
    Inputs:
        - trajectories: list of pd.DataFrame
            Each element of the list is a trajectory
        - trajectories_cost: ndarray
            Array containing the cost of the trajectories
        - trajectories_nb: int
            Number of trajectories to keep
    Ouput:
        - best_trajectories: list of pd.DataFrame
            List containing the best trajectories
    """
    # Rank indices by increasing cost (stable, so ties keep input order).
    ranked = sorted(range(len(trajectories)),
                    key=lambda idx: trajectories_cost[idx])
    # Keep only the cheapest trajectories_nb of them.
    return [trajectories[idx] for idx in ranked[:trajectories_nb]]
|
459271c972ab36938dedf7db8a4a792f6f591a74
| 60,332
|
import random
import string
def rand_name(len_ = 4):
    """generates a random string for naming recording sessions"""
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()  # OS entropy, not the seeded PRNG
    return ''.join(rng.choice(alphabet) for _ in range(len_))
|
438040d8333c1ac8586242f70974929675923aed
| 60,334
|
def get_repository_and_tag_from_image_uri(image_uri):
    """
    Return the name of the repository holding the image and its tag.

    Splits on the *last* ':' and '/', so registries that include a port
    (e.g. ``host:5000/repo:tag``) and nested namespaces
    (``host/ns/repo:tag``) are handled; the previous plain ``split`` raised
    ValueError for those.

    :param image_uri: URI of the image
    :return: (<str> repository name, <str> tag)
    """
    # rsplit(maxsplit=1) keeps the original ValueError when no ':' exists.
    repository_uri, tag = image_uri.rsplit(":", 1)
    # Last path component is the repository name.
    repository_name = repository_uri.rsplit("/", 1)[-1]
    return repository_name, tag
|
d5f405c2197163f5dcf26c0373ff283059ac107b
| 60,335
|
import glob
def list_simulations(pathname):
    """
    pathname = /mypath/element_name
    example: pathname = './tail_*' to list all the elements starting for "tail_"
    """
    # glob does the shell-style wildcard expansion for us.
    return glob.glob(pathname)
|
1b5eb3d5cae709f678f4facd3d4e382c0d806fb4
| 60,338
|
from typing import Sequence
from typing import List
def shell_sort_iter(seq: Sequence) -> List:
    """
    Return a sorted copy of *seq*, built with a Shell-sort style
    gapped exchange sweep.
    Parameters
    ----------
    seq : Sequence
    Returns
    -------
    List
    """
    items = list(seq)  # work on a copy so the caller's sequence is untouched
    length = len(items)
    gap = length // 2
    while gap > 0:
        for left in range(length):
            for right in range(left + gap, length, gap):
                if items[left] > items[right]:
                    items[left], items[right] = items[right], items[left]
        gap //= 2
    return items
|
b66736cb32e99415698ec8e0ab190b3d33e0022c
| 60,345
|
import re
def camelToTitle(text):
    """Convert camelCase `text` to Title Case
    Example:
        >>> camelToTitle("mixedCase")
        "Mixed Case"
        >>> camelToTitle("myName")
        "My Name"
        >>> camelToTitle("you")
        "You"
        >>> camelToTitle("You")
        "You"
    """
    # Insert a space before interior capitals (camelCase boundaries),
    # then title-case the result.
    boundary = re.compile(r"((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))")
    spaced = boundary.sub(r" \1", text)
    return spaced.title()
|
e22bea1b0d0969ee1e8f0e5eb78134de4d71ec47
| 60,349
|
def GetDistToWrappedLimits(value, start_limit, end_limit,
                           wrap_left, wrap_right):
    """Returns value for min distance from value to limits on wrapped scale.
    Arguments:
        value: Value to be evaluated. Can be list-like or single value. Values
            must be between wrap_left and wrap_right.
        start_limit: The beginning of a range on wrapped scale.
        end_limit: The end of a range on wrapped scale.
        wrap_left: Minimum value for wrapping scale.
        wrap_right: Maximum value for wrapping scale.
    Returns:
        Minimum distance that value is from range limits.
        Positive values indicate value is between range specified by start_limit
        and end_limit. Negative values indicate value is outside of range.

    Note: previously a list argument was mutated in place; a copy is now made
    so the caller's sequence is left untouched.
    """
    wrap_range = wrap_right - wrap_left
    if not hasattr(value, '__iter__'):
        value = [value]
    else:
        # Copy so we never clobber the caller's list with our results.
        value = list(value)
    # Unwrap end limit if needed so limits are in order.
    if end_limit < start_limit:
        end_limit_ordered = end_limit + wrap_range
    else:
        end_limit_ordered = end_limit
    for ii, v in enumerate(value):
        assert v >= wrap_left and v <= wrap_right, (
            'Values must be between wrap_left and wrap_right.')
        if end_limit < start_limit and v < end_limit:
            # If limits go around wrap and value was in limits before wrap,
            # unwrap value.
            v += wrap_range
        if v > start_limit and v < end_limit_ordered:
            # If inside the bad range, give positive value
            value[ii] = min(abs(v - start_limit),
                            abs(v - end_limit_ordered))
        else:
            # If outside bad range, give negative value.
            value[ii] = -min(abs(v - start_limit),
                             abs(v - end_limit_ordered),
                             # Also check wrapped values to limits.
                             abs(v + wrap_range - end_limit_ordered),
                             abs(v - wrap_range - start_limit))
    if len(value) == 1:
        return value[0]
    else:
        return value
|
1d966cef67d9d60dee909c3b01e3578b70d3369f
| 60,351
|
def get_asset_dict(
    source_location,
    source_name,
    target_location="",
    target_name=None,
    copy_method="copy",
):
    """Helper function to generate asset for a particular file
    Args:
        source_location (str): path to directory containing source file
        source_name (str): filename of source file
        target_location (str, optional): sub-directory to which file will
            be written, relative to run directory root. Defaults to empty
            string (i.e. root of run directory).
        target_name (str, optional): filename to which file will be written.
            Defaults to None, in which case source_name is used.
        copy_method (str, optional): flag to determine whether file is
            copied ('copy') or hard-linked ('link'). Defaults to 'copy'.
    Returns:
        dict: an asset dictionary
    """
    return {
        "source_location": source_location,
        "source_name": source_name,
        "target_location": target_location,
        # Fall back to the source filename when no target name is given.
        "target_name": source_name if target_name is None else target_name,
        "copy_method": copy_method,
    }
|
82c66e29df295483dfa134125c72058a03d538e9
| 60,353
|
def user_client(client, user):
    """Return *client* after authenticating it as *user* via force_login."""
    client.force_login(user)
    return client
|
4b5fbb3d41fc5bfb54c6848c698b50fb087b4638
| 60,354
|
def esc_regex(string: str) -> str:
    """Escape non-literal regex characters in a string.

    Escaping is done in a single pass over the input. The previous chained
    ``str.replace`` version escaped ``\\`` *after* ``[`` and ``]``, which
    doubled the just-inserted escapes (``"["`` became ``"\\\\["``).

    :param string: the string to escape characters from
    :return: the string with escaped characters
    """
    # Same metacharacter set as before, each escaped exactly once.
    specials = set('\\[]^$.|?*+()')
    return ''.join('\\' + ch if ch in specials else ch for ch in string)
|
e23514628ce2a8f634e2dc4b3d991e8fb9830873
| 60,356
|
def create_fileters(*exts):
    """
    Create filters for file dialog
    Parameters
    ----------
    exts : list of list
        The list of file extension. The extension must be not included '.'
        [(description, extension),...]
    Returns
    -------
    list of str
        The filters for file dialog
    """
    return ['{} (*.{})'.format(*entry) for entry in exts]
|
ee41f743748919e9dd9bd0e0263db4dffa18364d
| 60,357
|
import random
def sample(value, n: int = 1):
    """Draw *n* distinct elements from *value* uniformly at random."""
    return random.sample(population=value, k=n)
|
243ed6553a48805879553413d1660363111a42d5
| 60,359
|
import csv
def get_detected_amplicons(file):
    """
    Parse the negative control report and obtain a set of detected amplicons.
    """
    amplicons = set()
    with open(file, 'r') as handle:
        for record in csv.DictReader(handle, delimiter='\t'):
            detected = record['amplicons_detected']
            if detected:
                # Column holds a comma-separated amplicon list.
                amplicons.update(detected.split(','))
    return amplicons
|
5c0c738ad508f7af0e38e086eadc53c9cd7e9019
| 60,361
|
import csv
def write_csv(file_path, data, delimiter=","):
    """Writes a nested list to a csv file
    Args:
        file_path (string): Absolute path of the file. Example:"C:/Users/Noctsol/Documents/somefile.csv"
        data (<list<list>>): Nested list of data. Example: [["header1", "header2"], ["val1", "val2"]]
        delimiter (str, optional): Delimiter of the the csv file. Defaults to ",".
    Returns:
        bool: True on success.
    """
    with open(file_path, 'w', encoding='UTF8', newline='') as handle:
        csv.writer(handle, delimiter=delimiter).writerows(data)
    return True
|
bb334e830c125e2da0a83d8c42bc37c99cfe3017
| 60,362
|
def swift_escape_string(s):
    """
    Escape backslashes and double quotes in string, so it can be
    embedded in a literal swift string when generatig swift source code.
    """
    # Backslashes first, so the escapes we add for quotes stay single.
    return s.replace("\\", "\\\\").replace("\"", "\\\"")
|
bb4184adc46b1e3289939298733fd88075b88aa2
| 60,366
|
def status_put_response_ok(devid, status):
    """Return status change response json.

    Built with json.dumps instead of string concatenation so quotes and
    other special characters in *devid* are escaped correctly; output is
    byte-identical to the old concatenation for plain ids.
    """
    return json.dumps({"id": devid, "status": str(status)})
|
f15c4435ce5031bd5bfb12e247f4c5a9a8f7ed43
| 60,369
|
def trapped_rain_water(arr):
    """
    For an array of positive numeric values, each element of the array
    represents the height of terrain capable of blocking water.
    :param arr: an array describing terrain height
    :return: the volume of water trapped within the terrain described by arr
    """
    if not arr:
        return 0
    n = len(arr)
    # Highest terrain at or to the right of each cell, computed right-to-left.
    right_max = [0] * n
    running = 0
    for i in range(n - 1, -1, -1):
        running = max(running, arr[i])
        right_max[i] = running
    # Sweep left-to-right: water at a cell is bounded by the lower of the
    # tallest terrain on either side, minus the cell's own height.
    volume = 0
    left_max = 0
    for i, height in enumerate(arr):
        left_max = max(left_max, height)
        volume += min(left_max, right_max[i]) - height
    return volume
|
19b35b93a7bb4004c4caa52d80f6409ba2cd0c8f
| 60,372
|
def title_invalidates_entry(title):
    """
    Determines if the title contains phrases that indicate that the book is invalid
    >>> from gender_novels.corpus_gen import title_invalidates_entry
    >>> title_invalidates_entry("Index of the Project Gutenberg Works of Michael Cuthbert")
    True
    >>> title_invalidates_entry("Pride and Prejudice")
    False
    :param title: str
    :return: boolean
    """
    # Any of these phrases marks an index, compilation, or translation.
    bad_phrases = (
        "index of the project gutenberg ",
        "complete project gutenberg",
        "translated by ",
    )
    lowered = title.lower()
    return any(phrase in lowered for phrase in bad_phrases)
|
aa5e326a6aa7a9723cbc9bc905c40574611bc873
| 60,373
|
from typing import Union
from pathlib import Path
def basename_sh_util(path: Union[Path, str]) -> str:
    """
    clone of unix's "basename":
    return only the part after the last '/' of the path; a path with no
    '/' is returned unchanged.
    """
    # rpartition yields ('', '', s) when no '/' exists, so [2] covers
    # both the slash and no-slash cases.
    return str(path).rpartition('/')[2]
|
63423fd38171e135ad15b2f43357f66c67b8b367
| 60,374
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.