content
stringlengths 42
6.51k
|
|---|
def is_age(age):
    """
    A simple age validator.

    Valid input is a string whose trimmed text is exactly an integer
    (so "007", "1.5" or "abc" are rejected). Leading/trailing spaces
    are ignored.

    :param age: str candidate age
    :return: True when age is a plain integer string, else False
    """
    age_stripped = age.strip()
    try:
        # Round-tripping through int() rejects padding, signs quirks
        # and leading zeros in one comparison.
        return str(int(age_stripped)) == age_stripped
    except ValueError:
        # Non-numeric text is simply invalid (the original crashed here).
        return False
|
def _to_Hex(color):
"""
Converts (r,g,b) tuple into #RRGGBB hex code
"""
return "".join(f"{component:02x}" for component in color)
|
def is_dataclass_type(cls):
    """Return True when cls is a class produced by @dataclass."""
    if not isinstance(cls, type):
        return False
    # Dataclasses are recognized by the marker attribute the decorator adds.
    return hasattr(cls, "__dataclass_fields__")
|
def _GetAttribute(obj, attr_path):
"""Gets attributes and sub-attributes out of an object.
Args:
obj: The object to extract the attributes from.
attr_path: str, The dotted path of attributes to extract.
Raises:
AttributeError: If the attribute doesn't exist on the object.
Returns:
The desired attribute or None if any of the parent attributes were None.
"""
if attr_path:
for attr in attr_path.split('.'):
try:
if obj is None:
return None
obj = getattr(obj, attr)
except AttributeError:
raise AttributeError(
'Attribute path [{}] not found on type [{}]'.format(attr_path,
type(obj)))
return obj
|
def price_to_colour(price: float) -> str:
    """Map a unit price (p/kWh, including VAT) onto a pixel colour level.

    Edit the threshold table with care: bands must stay contiguous or
    prices could fall through the gaps.
    """
    # (exclusive lower bound, colour), highest band first.
    bands = (
        (28, 'level6'),
        (17, 'level5'),
        (13.5, 'level4'),
        (10, 'level3'),
        (5, 'level2'),
        (0, 'level1'),
    )
    for lower_bound, colour in bands:
        if price > lower_bound:
            return colour
    if price <= 0:
        return 'plunge'
    # Unreachable for real numbers; NaN lands here.
    raise SystemExit("Can't continue - price of " + str(price) + " doesn't make sense.")
|
def geom_mean_long_sun(juliancentury: float) -> float:
    """Geometric mean longitude of the sun, in degrees within [0, 360)."""
    t = juliancentury
    # Horner form of the standard polynomial in Julian centuries.
    mean_long = 280.46646 + t * (36000.76983 + 0.0003032 * t)
    return mean_long % 360.0
|
def counts_in_out_packets(packets):
    """
    Count incoming and outgoing packets.

    A packet is a ``(time, direction)`` tuple; a negative direction
    counts as "in", a positive one as "out" (zero counts as neither).

    @param packets is a list of packets, structured as follows: `[(time, direction)]`
    @return tuple `(num_packets_in, num_packets_out)`
    """
    num_in = 0
    num_out = 0
    # Plain counters: the original built two throwaway lists just to len() them.
    for packet in packets:
        direction = packet[1]
        if direction < 0:
            num_in += 1
        elif direction > 0:
            num_out += 1
    return (num_in, num_out)
|
def stratification(height, value_mean, height_top, value_top):
    """
    Linear stratification profile for temperature or relative humidity.

    :param height: height at which the stratified value is wanted, in m.
    :param value_mean: mean value of the quantity.
    :param height_top: height at the top of the boundary, in m.
    :param value_top: value of the quantity at the top of the boundary.
    :return: value at the desired height.
    """
    delta = value_mean - value_top
    return value_mean - 2 * height * delta / height_top
|
def count_diffing_subsystems(subsystems):
    """
    Count all subsystems in a nested subsystem mapping.

    Each value of *subsystems* is itself a dict of sub-subsystems; the
    result is the total number of keys across every nesting level.
    """
    total = len(subsystems)
    for child in subsystems.values():
        total += count_diffing_subsystems(child)
    return total
|
def substract(x, y):
    """Subtract y from x after coercing both values to int.

    Args:
        x (any): minuend, anything accepted by int()
        y (any): subtrahend, anything accepted by int()
    Returns:
        [integer]: An integer holding the result
    """
    minuend = int(x)
    subtrahend = int(y)
    return minuend - subtrahend
|
def _get_nibbles(char):
"""
A helper function for the decoding functions
:return: The first ans second nibble of the ascii value of the char
"""
try:
x, y = hex(ord(char))[2:]
except:
# We get here with chars like \t
# that translate to 0x9 (they "don't have" first and second nibble")
x = '0'
y = hex(ord(char))[2:]
return x, y
|
def json_dict_copy(json_object, property_list, defaultValue=None):
    """
    Copy selected properties out of a JSON-style dict.

    Each entry of *property_list* names a target property and optional
    source aliases, e.g.::

        property_list = [
            { "name":"name", "alternateName": ["name","title"]},
            { "name":"birthDate", "alternateName": ["dob","dateOfBirth"] },
            { "name":"description" }
        ]

    The first alias with a non-None value wins; otherwise the property's
    own name is tried, then defaultValue (when not None).
    """
    result = {}
    for prop in property_list:
        target = prop["name"]
        for alias in prop.get("alternateName", []):
            value = json_object.get(alias)
            if value is not None:
                result[target] = value
                break
        if target not in result:
            if target in json_object:
                result[target] = json_object[target]
            elif defaultValue is not None:
                result[target] = defaultValue
    return result
|
def ilc_index(ndim):
    """Return the einsum index string for a cinv array of rank *ndim*.

    A rank-3 cinv holds 1-D power spectra (single index "p"); rank 4
    holds 2-D k-space matrices (index pair "ij").

    Raises:
        ValueError: for any other rank.
    """
    if ndim == 3:
        return "p"
    if ndim == 4:
        return "ij"
    # Same exception type as before, now with a diagnostic message.
    raise ValueError("cinv must have ndim 3 or 4, got {}".format(ndim))
|
def GetExtAttrs(filepath):
    """No-op stand-in for the platform-specific extended-attribute reader.

    Kept for signature compatibility with the other platforms'
    implementations; this variant always yields nothing.

    Args:
      filepath: Unused.
    Returns:
      An empty list.
    """
    # Deliberately discard the argument: unused on Windows.
    del filepath
    return []
|
def split_pair(pair_string):
    """Split an option of the form "(val1, val2)" into [val1, val2].

    Parentheses and spaces are stripped out before splitting on commas.
    """
    cleaned = pair_string
    for junk in ("(", ")", " "):
        cleaned = cleaned.replace(junk, "")
    return cleaned.split(",")
|
def computeMaxRowSpan(lCells):
    """
    compute maxRowSpan for Row 0
    ignore cells for which rowspan = #row

    Cells expose string attributes via ``.get`` (e.g. lxml elements or
    dicts): 'row' (0-based row index) and 'rowSpan'. Cells whose
    rowSpan equals the full number of rows (nbRows + 1, since nbRows is
    the largest index) are excluded. Returns 1 when row 0 has no
    qualifying cell — max() over an empty sequence raises ValueError.
    """
    # Largest 0-based row index present among the cells.
    nbRows = max(int(x.get('row')) for x in lCells)
    try:
        # Only row-0 cells count; skip those spanning the whole table.
        return max(int(x.get('rowSpan')) for x in filter(lambda x: x.get('row') == "0" and x.get('rowSpan') != str(nbRows+1), lCells))
    except ValueError :
        return 1
|
def get_bridge_interfaces(yaml):
    """Collect every interface that is a member of some bridgedomain.

    :param yaml: parsed config dict, possibly without a 'bridgedomains' key
    :return: flat list of member interface names (may be empty)
    """
    members = []
    for iface in yaml.get("bridgedomains", {}).values():
        members.extend(iface.get("interfaces", []))
    return members
|
def seconds(dhms):
    """Convert a time string "[[[DD:]HH:]MM:]SS" to total seconds.

    Raises ValueError when more than four colon-separated components
    are supplied (or a component is not an integer).
    """
    parts = [int(piece) for piece in dhms.split(':')]
    missing = 4 - len(parts)
    if missing < 0:
        raise ValueError('Too many components to match [[[DD:]HH:]MM:]SS')
    # Left-pad with zeros so the weights always line up.
    parts = [0] * missing + parts
    weights = (86400, 3600, 60, 1)
    return sum(weight * value for weight, value in zip(weights, parts))
|
def left_of_line(point, p1, p2):
    """Return True if *point* lies to the left of the directed line p1 -> p2."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    px, py = point[0], point[1]
    if x1 == x2:
        # Vertical line: "left" depends on whether the line points up or down.
        if y1 < y2:
            return px < x1
        return px > x1
    # Slope of the line and its y-value at the point's x-position.
    slope = (y2 - y1) / (x2 - x1)
    line_y = slope * (px - x1) + y1
    # Travelling rightwards, left is above the line; leftwards, below.
    if x1 < x2:
        return py > line_y
    return py < line_y
|
def split_blocks(b: bytes, k_len: int) -> tuple:
    """Transpose a buffer into k_len interleaved blocks.

    Block i collects the bytes at positions i, i+k_len, i+2*k_len, ... —
    i.e. every byte a repeating key of length k_len would pair with the
    same key byte.

    :param b: The input buffer (must hold at least k_len bytes).
    :param k_len: The key length.
    :returns: A tuple of k_len byte buffers.
    """
    assert len(b) >= k_len
    blocks = []
    for start in range(k_len):
        # Extended slicing picks every k_len-th byte starting at `start`.
        blocks.append(bytes(b[start::k_len]))
    return tuple(blocks)
|
def _str2bool(string):
"""Converts either 'true' or 'false' (not case-sensitively) into a boolean."""
if string is None:
return False
else:
string = string.lower()
if string == 'true':
return True
elif string == 'false':
return False
else:
raise ValueError(
'String should either be `true` or `false`: got {}'.format(string))
|
def _get_bikes_available(sta):
"""Given a GBFS station status blob, return the number of bikes"""
# 'num_ebikes_available" is not part of the GBFS spec, but it appears
# in the Divvy API response
return sta['num_bikes_available'] + sta.get('num_ebikes_available', 0)
|
def inclusion(first_x, second_x):
    """
    Check if a list is included in another.

    Parameters
    ----------
    first_x : list
        List to evaluate.
    second_x : list
        Reference list to compare with.

    Returns
    -------
    bool
        True if every element of first_x appears in second_x.
    """
    for elem in first_x:
        if elem not in second_x:
            return False
    return True
|
def divPropre(nombre: int) -> list:
    """Return the proper divisors of *nombre* (divisors smaller than it).

    Example: returns [1, 2, 3] for nombre = 6.

    :param nombre: integer whose proper divisors are wanted
    :return: sorted list of proper divisors; empty for nombre < 2
        (the original wrongly returned [1] for 0 and 1, which have no
        proper divisor)
    """
    if nombre < 2:
        return []
    # Each i in 2..nombre-1 is unique, so no membership check is needed.
    return [1] + [i for i in range(2, nombre) if nombre % i == 0]
|
def _str_to_ord(content, weights, alphabet):
    """Converts a string to its lexicographical order.
    Args:
        content: the string to convert. Of type str.
        weights: weights from _get_weights.
        alphabet: ordered sequence of characters content may contain;
            a character's rank is its index in this sequence.
    Returns:
        an int or long that represents the order of this string. "" has order 0.
    """
    ordinal = 0
    for i, c in enumerate(content):
        # NOTE(review): precedence makes this
        # (weights[i] * alphabet.index(c)) + 1 per character, i.e. the
        # +1 is NOT weighted — confirm against _get_weights that this
        # is the intended encoding.
        ordinal += weights[i] * alphabet.index(c) + 1
    return ordinal
|
def get_dot_cmd(filetype="png", dpi=300):
    """Build the graphviz `dot` argv list for subprocess calls.

    :param filetype: output format passed via -T
    :param dpi: resolution passed via -Gdpi
    """
    flags = ["-T%s" % filetype, "-Gdpi=%d" % dpi]
    return ["dot"] + flags
|
def SortFiles(items, sorteditems, filenames):
    """
    Reorder filenames to match a sorted list of thumbnails.

    :param items: original list of L{Thumb} objects, parallel to filenames;
    :param sorteditems: the same L{Thumb} objects in sorted order;
    :param filenames: a list of image filenames.
    :return: filenames rearranged into the sorted order.
    """
    return [filenames[items.index(thumb)] for thumb in sorteditems]
|
def HTML_color_to_RGB(html_color):
    """Convert an HTML colour string (e.g. '#4422aa') into [r, g, b]
    with each channel in 0-255. The leading '#' is optional.
    """
    if html_color[0] == '#':
        html_color = html_color[1:]
    channels = (html_color[0:2], html_color[2:4], html_color[4:])
    return [int(channel, 16) for channel in channels]
|
def bond_quatinty(price, investment, minimum_fraction=0.1):
    """
    Quantity of bonds purchasable for a given investment.

    :param price: Price of bond per unit
    :param investment: Amount of money that will be invested
    :param minimum_fraction: Minimum fraction of a bond that can be purchased
    :return: [quantity of bonds purchased, total value invested, error %]
    """
    # How many whole minimum-fractions the investment covers.
    whole_fractions = int(investment / (minimum_fraction * price))
    quantity = whole_fractions * minimum_fraction
    value = quantity * price
    error = (investment - value) / value * 100
    return [quantity, value, error]
|
def get_fsed(num):
    """ Map a uniform random draw in [0, 1) onto an f_sed value using
    the distribution provided by Mark Marley:
    f_sed      Frequency
    0.000000   0.099
    0.010000   0.001
    0.030000   0.005
    0.100000   0.010
    0.300000   0.025
    1.000000   0.280
    3.000000   0.300
    6.000000   0.280

    Input num is a uniform random value between 0 and 1.
    """
    # (cumulative upper bound, f_sed); anything >= 0.72 maps to 6.
    cumulative = (
        (.099, 0),
        (.1, .01),
        (.105, .03),
        (.115, .1),
        (.14, .3),
        (.42, 1),
        (.72, 3),
    )
    for upper_bound, f_sed in cumulative:
        if num < upper_bound:
            return float(f_sed)
    return float(6)
|
def definition_display_name(def_name):
    """Derive display name and variant from a dotted definition name.

    Names ending in .CREATE/.UPDATE/.GET/.PATCH keep their last two
    segments and expose the lowercased verb as the variant; all other
    names reduce to their final segment with an empty variant.

    :return: (def_name, display_name, variant)
    """
    crud_suffixes = (".CREATE", ".UPDATE", ".GET", ".PATCH")
    if def_name.endswith(crud_suffixes):
        base, verb = def_name.rsplit(".", 2)[-2:]
        variant = verb.lower()
        display_name = "{} ({})".format(base, variant)
        def_name = "{}.{}".format(base, verb)
    else:
        last = def_name.rsplit(".", 1)[-1]
        display_name = last
        def_name = last
        variant = ""
    return def_name, display_name, variant
|
def get_snapshot(module, array):
    """Look up a volume snapshot named "<name>.<suffix>".

    Args:
        module: Ansible-style module object whose params supply 'name'
            and 'suffix'.
        array: storage array client exposing get_volume().
    Returns:
        True when a snapshot named "<name>.<suffix>" exists on the
        volume, False when the lookup raises any exception, and —
        implicitly — None when the volume has no matching snapshot.
        NOTE(review): despite the original summary "Return Snapshot or
        None", this returns booleans/None — confirm callers only
        truth-test the result.
    """
    try:
        snapname = module.params["name"] + "." + module.params["suffix"]
        for snaps in array.get_volume(module.params["name"], snap=True, pending=False):
            if snaps["name"] == snapname:
                return True
    except Exception:
        # Any failure (bad params, API error) is treated as "not found".
        return False
|
def _get_grade_value(course_grade):
"""
Get the user's course grade as a percent, or an empty string if there is no grade
"""
if course_grade:
return course_grade.percent
return ''
|
def _ite_mk(bdd, v, t, e):
"""
Special mk method for the ite operator.
(Could possibly be improved by passing in a minimum
variable index so the entire var_order list doesn't have to be
traversed each time.
"""
#Get the index
i = bdd["var_order"].index(v) + 1
#Have we seen it before?
if (i,t,e) in bdd["h_table"]:
return bdd["h_table"][(i,t,e)]
#Make new Node
u = bdd["u"] + 1
bdd["h_table"][(i,t,e)] = u
bdd["t_table"][u] = (i,t,e)
bdd["u"] = u
return u
|
def gzipped(content):
    """
    Test whether content is gzipped, by magic number.

    The first two bytes of a gzip stream are 0x1F and 0x8B; the third
    byte is the compression method, which is always 8 (deflate) today.
    """
    if content is None or len(content) <= 10:
        return False
    magic_ok = ord(content[0:1]) == 31 and ord(content[1:2]) == 139
    return bool(magic_ok and ord(content[2:3]) == 8)
|
def title_case(sentence):
    """
    Convert string to title case.

    Title case means that the first character of every space-separated
    word is capitalized, otherwise lowercase.

    Parameters
    --------------
    sentence: string
        Sentence to be converted to title case

    Returns:
    --------------
    ret: string
        Input string in title case ('' for empty input)

    Examples
    ---------
    >>> title_case('ThIs iS a StrIng tO be ConVerted')
    'This Is A String To Be Converted'
    """
    if not sentence:
        # Guard: the original indexed sentence[0] and raised on ''.
        return sentence
    chars = [sentence[0].upper()]
    # Each character's case is decided by the ORIGINAL previous character.
    for previous, current in zip(sentence, sentence[1:]):
        chars.append(current.upper() if previous == ' ' else current.lower())
    return ''.join(chars)
|
def as_list(x):
    """ Coerce the argument to a list and return it.

    Iterables become list(x); non-iterable scalars become a one-element
    list. Useful for function arguments that may be scalar or sequence.
    """
    try:
        return list(x)
    except TypeError:
        # Not iterable: wrap the scalar.
        return [x]
|
def GetLapicSpuriousVectorFields(reg_val):
    """ Helper for DoLapicDump that formats the fields of the LAPIC
    spurious vector register.

    Params:
        reg_val: int - the value of the spurious vector register to print
    Returns:
        string showing the fields, e.g. "[VEC=255 ENABLED=1]"
    """
    vector = reg_val & 0xff          # bits 0-7: vector number
    enabled = (reg_val >> 8) & 0x1   # bit 8: enable flag
    return "[VEC={:3d} ENABLED={:d}]".format(vector, enabled)
|
def _isviewer(target):
    """Return true if target is a viewer.

    Matches by name: the class is literally called 'Viewer', or one of
    its *direct* base classes mentions 'lavavu.Viewer' in its repr.
    """
    #Can't use isinstance without class in scope
    #return not hasattr(target, "parent")
    return target.__class__.__name__ == 'Viewer' or any(['lavavu.Viewer' in str(b) for b in target.__class__.__bases__])
|
def cross_2d(v1, v2):
    """Z-component of the cross product of two 2-D vectors."""
    ax, ay = v1[0], v1[1]
    bx, by = v2[0], v2[1]
    return ax * by - ay * bx
|
def get_mock_case_body(mock_case_id):
    """
    Placeholder for building a custom mockCase response body.

    :param mock_case_id: unique key for looking up the mock data
    :return: always None until a real lookup is implemented
    """
    # TODO: fetch/build the mockCase response body for mock_case_id.
    return None
|
def strip_chars(chars, sequence):
    """
    Strip the specified chars from anywhere in the text.

    :param chars: An iterable of single character tokens to be stripped out.
    :param sequence: An iterable of single character tokens.
    :return: Text string concatenating all tokens in sequence which were
        not stripped.
    """
    # Materialize once: the original re-iterated `chars` per token, which
    # is O(n*m) and silently broke when chars was a one-shot iterator.
    banned = set(chars)
    return ''.join(token for token in sequence if token not in banned)
|
def compute_same_padding(filter_size, in_size, stride):
    """Total amount of 'SAME' padding a convolution uses.

    Computation based on https://stackoverflow.com/a/44242277
    """
    out_size = -(-in_size // stride)  # ceiling division
    needed = (out_size - 1) * stride + filter_size - in_size
    return max(needed, 0)
|
def pool_offer(config, lower=False):
    # type: (dict, bool) -> str
    """Get Pool offer
    :param dict config: configuration object
    :param bool lower: lowercase return
    :rtype: str
    :return: pool offer
    """
    offer = config['pool_specification']['offer']
    if lower:
        return offer.lower()
    return offer
|
def byte_notation(size: int, acc=2, ntn=0):
    """Decimal Notation: take an integer, convert it to a string with the
    requested decimal accuracy, and append single (default), double,
    or full word character notation.

    - Args:
        - size (int): the size to convert
        - acc (int, optional): number of decimal places to keep. Defaults to 2.
        - ntn (int, optional): notation name length (0=single char,
          1=two chars, 2=full word). Defaults to 0.
    - Returns:
        - [tuple]: 0 = original size int unmodified; 1 = string for printing
    """
    size_dict = {
        1: ['B', 'B', 'bytes'],
        1000: ['k', 'kB', 'kilobytes'],
        1000000: ['M', 'MB', 'megabytes'],
        1000000000: ['G', 'GB', 'gigabytes'],
        1000000000000: ['T', 'TB', 'terabytes']
    }
    for key, value in size_dict.items():
        if (size / key) < 1000:
            # Stop at the FIRST unit that fits. The original had no
            # break, so every larger unit overwrote the result and all
            # sizes were rendered in terabytes.
            return size, f'{size / key:,.{acc}f} {value[ntn]}'
    # Beyond ~1000 TB: fall back to the largest unit instead of ''.
    key = 1000000000000
    return size, f'{size / key:,.{acc}f} {size_dict[key][ntn]}'
|
def force_dict(value):
    """
    Coerce the input value to a dict.

    Dicts — including dict subclasses such as OrderedDict — pass
    through unchanged; anything else collapses to an empty dict.
    """
    # isinstance instead of `type(value) == dict` so subclasses survive.
    return value if isinstance(value, dict) else {}
|
def _make_xml_string(s):
"""Enclose s in <String>...</String>."""
return '<String>'+s+'</String>'
|
def get_permutations(sequence):
    """
    Recursively enumerate all permutations of a given string.

    sequence (string): an arbitrary non-empty string to permute.

    Returns: a sorted list of all permutations of sequence (duplicates
    are kept when characters repeat).

    Example:
    >>> get_permutations('abc')
    ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
    """
    if len(sequence) == 1:
        return [sequence]
    head, tail = sequence[0], sequence[1:]
    perms = []
    for sub_perm in get_permutations(tail):
        # Weave the head character into every possible position.
        for cut in range(len(sub_perm) + 1):
            perms.append(sub_perm[:cut] + head + sub_perm[cut:])
    return sorted(perms)
|
def parse_log_level_names(str_log_level_names):
    """Parse "20:INFO,30:WARN,40:ERROR,50:FATAL" into {20: "INFO", ...}.

    Returns {} for an empty or None input string.
    """
    if not str_log_level_names:
        return {}
    pairs = (item.split(":") for item in str_log_level_names.split(","))
    return {int(level): name for level, name in pairs}
|
def find_if(predicate, seq):
    """If there is an element of seq that satisfies predicate; return it.
    >>> find_if(callable, [3, min, max])
    <built-in function min>
    >>> find_if(callable, [1, 2, 3])
    """
    return next((x for x in seq if predicate(x)), None)
|
def get_value(list, name, default=0):
    """
    :param list: must be structured as: [(a, b), (c, d), ..]
    :param name: name to filter on, e.g.: if name == a, it returns b
    :param default: returned if the name was not found in the list
    :return value corresponding to the first matching name in the list.
    """
    # (The parameter name shadows builtins.list but is public API, so kept.)
    matches = (value for key, value in list if key == name)
    return next(matches, default)
|
def insert_cpf(a):
    """
    Cast a string of digits to the formatted 000.000.000-00 CPF standard.
    """
    return '{}.{}.{}-{}'.format(a[:3], a[3:6], a[6:9], a[9:])
|
def format_api_url_with_limit_offset(api_url):
    """Build the paged file-listing request URLs from an API config dict.

    Args:
        api_url: dict expected to carry "baseUrl", "limit", "offset",
            "userName" and "apiKey".
    Returns:
        (base_url, request_url_without_api_key, request_url); the
        middle form carries no credentials and is safe for logging.
    """
    # Normalize so joining never yields a double slash before /api.
    base_url = api_url.get("baseUrl", "").rstrip("/")
    # NOTE(review): int('') raises ValueError when "limit" is missing —
    # confirm upstream always supplies it, or give a numeric default.
    limit = int(api_url.get("limit", ""))
    offset = api_url.get("offset", "")
    user_name = api_url.get("userName")
    api_key = api_url.get("apiKey", "")
    request_url_without_api_key = "{}/api/v2/file/?limit={}&offset={}".format(
        base_url, limit, offset
    )
    # Credentialed variant: keep out of logs.
    request_url = "{}&username={}&api_key={}".format(
        request_url_without_api_key, user_name, api_key
    )
    return base_url, request_url_without_api_key, request_url
|
def error_response(msg: str = ""):
    """Build the JSON error body sent back to a client.

    :param msg: A message indicating that the request has errors.
    :return: dict with 'valid' False and the message under 'msg'.
    """
    return {
        'valid': False,
        'msg': msg,
    }
|
def removeSpecialSubjects(courseNums):
    """
    Return a set of course numbers that contain no uppercase letters.

    '18.100' is re-added unconditionally: it is a central subject in the
    math department offered in 4 lettered versions, combined here.
    """
    uppercase = set('QWERTYUIOPASDFGHJKLZXCVBNM')
    filtered = {course for course in courseNums
                if not uppercase.intersection(course)}
    filtered.add('18.100')
    return filtered
|
def _compute_fans_for_keras_init_v1_v2(shape):
""" Making keras VarianceScaling initializers v1 & v2 support dynamic shape.
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
|
def _strip_environments(text, environments, verbose=0):
    """Remove environments in the ``environments`` list from the text.

    Args:
        text: document text to filter.
        environments: iterable of (begin_marker, end_marker) pairs.
        verbose: >0 prints a summary per stripped environment; >1 also
            prints each intermediate split state.
    Returns:
        text with every begin..end span (markers included) removed.
    Raises:
        ValueError: when an entry of environments is not a pair.
    """
    # Note: this stripping does not work well for !bc and !bt envirs,
    # because a phrase like `!bc pycod` in running text gives a split...
    for item in environments:
        if len(item) != 2:
            raise ValueError(
                '%s in environments to be stripped is wrong' % (str(item)))
        begin, end = item
        if not begin in text:
            continue
        # Split on the begin marker; for each piece, drop everything up
        # to and including its first end marker.
        parts = text.split(begin)
        text = parts[0]
        for part in parts[1:]:
            subparts = part.split(end)
            # end.join restores literal later occurrences of the marker.
            text += end.join(subparts[1:])
            if verbose > 1:
                print('\n============ split %s <-> %s\ntext so far:' % (begin, end))
                print(text)
                print('\n============\nSkipped:')
                print(subparts[0])
        if verbose > 0:
            print('split away environments: %s %s\nnew text:\n' % (begin, end))
            print(text)
            print('\n==================')
    return text
|
def _NodeInfoPreProc(node, args):
"""Prepare the storage_units argument for node_info calls."""
assert len(args) == 2
# The storage_units argument is either a dictionary with one value for each
# node, or a fixed value to be used for all the nodes
if isinstance(args[0], dict):
return [args[0][node], args[1]]
else:
return args
|
def list_subtract(lis1, lis2):
    """ Subtract the numbers in lis2 from the corresponding numbers in
    lis1 and return the resulting tuple. Called by find_rel_prop.

    :param lis1: sequence of numbers (minuends)
    :param lis2: sequence of numbers of the same length (subtrahends)
    :return: tuple of pairwise differences
    """
    # zip pairs the sequences directly instead of range(len()) indexing;
    # callers are expected to pass equal-length sequences.
    return tuple(a - b for a, b in zip(lis1, lis2))
|
def check_confusion_matrix_metric_name(metric_name: str):
    """
    Normalize a confusion-matrix metric name to its canonical short form.

    Many confusion-matrix metrics have several (sometimes long) aliases;
    spaces are treated as underscores and case is ignored.

    Returns:
        Simplified metric name.
    Raises:
        NotImplementedError: when the metric is not implemented.
    """
    metric_name = metric_name.replace(" ", "_").lower()
    # alias -> canonical short name; a dict lookup replaces the former
    # 17-branch if-chain.
    aliases = {
        "sensitivity": "tpr", "recall": "tpr", "hit_rate": "tpr",
        "true_positive_rate": "tpr", "tpr": "tpr",
        "specificity": "tnr", "selectivity": "tnr",
        "true_negative_rate": "tnr", "tnr": "tnr",
        "precision": "ppv", "positive_predictive_value": "ppv", "ppv": "ppv",
        "negative_predictive_value": "npv", "npv": "npv",
        "miss_rate": "fnr", "false_negative_rate": "fnr", "fnr": "fnr",
        "fall_out": "fpr", "false_positive_rate": "fpr", "fpr": "fpr",
        "false_discovery_rate": "fdr", "fdr": "fdr",
        "false_omission_rate": "for", "for": "for",
        "prevalence_threshold": "pt", "pt": "pt",
        "threat_score": "ts", "critical_success_index": "ts",
        "ts": "ts", "csi": "ts",
        "accuracy": "acc", "acc": "acc",
        "balanced_accuracy": "ba", "ba": "ba",
        "f1_score": "f1", "f1": "f1",
        "matthews_correlation_coefficient": "mcc", "mcc": "mcc",
        "fowlkes_mallows_index": "fm", "fm": "fm",
        "informedness": "bm", "bookmaker_informedness": "bm", "bm": "bm",
        "markedness": "mk", "deltap": "mk", "mk": "mk",
    }
    try:
        return aliases[metric_name]
    except KeyError:
        raise NotImplementedError("the metric is not implemented.")
|
def combinaciones(l):
    """Return the power set of list *l* as a list of lists.

    Every combination (subset) of the elements is produced, including
    the empty list.
    """
    if not l:
        return [[]]
    # Power set of the prefix, then the same subsets with the last
    # element appended.
    without_last = combinaciones(l[:-1])
    last = l[-1]
    return without_last + [subset + [last] for subset in without_last]
|
def get_deposition_url(sandbox: bool):
    """Get the Zenodo API URL for requesting an upload.

    The host depends on whether the sandbox or the live Zenodo instance
    is being targeted."""
    if sandbox:
        host = 'https://sandbox.zenodo.org/api/'
    else:
        host = 'https://zenodo.org/api/'
    return f'{host}deposit/depositions'
|
def get_angle_diff(angle1, angle2):
    """Return (|diff|, diff) where diff = angle2 - angle1, normalized
    into [-pi, pi].

    Fix: uses math.pi instead of the truncated literal 3.1415, whose
    error accumulated for angles far outside the principal range.
    """
    import math
    diff = angle2 - angle1
    while diff < -math.pi:
        diff += 2 * math.pi
    while diff > math.pi:
        diff -= 2 * math.pi
    return abs(diff), diff
|
def recovery_data_key_function(item):
    """
    Sort key for entries of `RecoveryDialog.data`.

    Sorts first by the original file's name, then by the autosave
    file's name; entries without an original file collect at the end
    (their leading 1 outranks the 0 of named entries).
    """
    orig_dict, autosave_dict = item
    if not orig_dict:
        return (1, 0, autosave_dict['name'])
    return (0, orig_dict['name'], autosave_dict['name'])
|
def tuple_to_bool(value):
    """
    Evaluate a (concentration, decay, threshold) triplet as a boolean.

    Truth value = concentration > threshold / decay.
    """
    concentration, decay, threshold = value[0], value[1], value[2]
    return concentration > threshold / decay
|
def ptbescape(token):
    """Escape brackets according to PTB convention in a single token."""
    if token is None:
        return ''
    # Curly/square brackets are only escaped when they ARE the token.
    bracket_tokens = {'{': '-LCB-', '}': '-RCB-', '[': '-LSB-', ']': '-RSB-'}
    if token in bracket_tokens:
        return bracket_tokens[token]
    # Round brackets are escaped wherever they occur inside the token.
    return token.replace('(', '-LRB-').replace(')', '-RRB-')
|
def _is_arraylike(x):
"""Returns whether the input is array-like."""
return hasattr(x, "__len__") or hasattr(x, "shape") or hasattr(x, "__array__")
|
def _is_rst_file(path: str):
"""
Returns `True` if path looks like a ReStructuredText file.
"""
path = path.lower()
return path.endswith('rst') or path.endswith('rest')
|
def generate_success(
    default_package_count, dev_package_count=0, pipfile=False
):  # type: (int, int, bool) -> str
    """
    Build the success message shown after setup.py generation.

    :param default_package_count: The number of updated default packages
    :param dev_package_count: The number of updated dev packages
    :param bool pipfile: indicate that Pipfile was used to update setup.py
    """
    src = "Pipfile" if pipfile else "Pipfile.lock"
    string = (
        "setup.py was successfully generated"
        "\n%d default packages synced from %s to setup.py"
        % (default_package_count, src)
    )
    if dev_package_count != 0:
        # Bug fix: this line previously interpolated default_package_count.
        string += "\n%d dev packages from %s synced to setup.py" % (
            dev_package_count,
            src,
        )
    string += "\nPlease edit the required fields in the generated file"
    return string
|
def _nn_filename(name, epoch):
"""Gets the filename for the trained weights."""
if epoch is None:
return "pretrain/{:s}.npz".format(name)
else:
return "pretrain/{:s}-epoch{:04d}.npz".format(name, epoch)
|
def get_triangle_vertex_order(face):
    """returns an anticlockwise ordering on the vertices of a triangle
    consistent with our orientation convention

    Args:
        face: int in 0..3 — the index removed from the vertex cycle
            (presumably the tetrahedron face opposite this triangle —
            confirm against the callers' convention).
    Returns:
        list of the three remaining vertex indices.
    """
    # Even and odd faces start from mirrored 4-cycles so the surviving
    # triangles keep a consistent orientation.
    if face % 2 == 0:
        triangle_verts = [0,1,2,3]
    else:
        triangle_verts = [0,3,2,1]
    triangle_verts.remove(face)
    return triangle_verts
|
def introspect_file(file_path, re_term):
    """
    Search file_path line by line for the regex re_term.

    Matching uses re.match, i.e. the pattern must match at the START of
    a line. Returns True on the first matching line, False when nothing
    matches or the file cannot be opened.
    """
    import re
    pattern = re.compile(re_term)  # compile once, not per line
    try:
        # `with` guarantees the handle is closed (the original leaked it),
        # and OSError replaces the old bare except around open().
        with open(file_path, "r") as ifile:
            for line in ifile:
                if pattern.match(line):
                    return True
    except OSError:
        return False
    return False
|
def session_num_pageviews(session):
    """Number of pageviews in session.

    Counts records (assumed dict-like) whose 'is_pageview' field is the
    string 'true'.
    """
    # sum over a generator: the original built a throwaway list for len().
    return sum(1 for r in session if r.get('is_pageview') == 'true')
|
def calc_target_value_by_linear_model(ref_value: float, slope: float, offset: float):
    """Apply a linear model: target = ref_value * slope + offset.

    :rtype: np.float64
    """
    scaled = ref_value * slope
    return scaled + offset
|
def mkcols(l, rows):
    """
    Split *l* into columns of height *rows*.

    When the list is longer than one column, it is padded with ""
    entries until its length divides evenly, then chopped into
    consecutive slices of `rows` items.

    Bug fix: padding now happens on a copy, so the caller's list is no
    longer mutated.
    """
    items = list(l)
    while len(items) > rows and len(items) % rows != 0:
        items.append("")
    return [items[start:start + rows] for start in range(0, len(items), rows)]
|
def convert_data(datain):
    """converts the output of the ADC into a voltage

    Arguments
    ----------
    datain : list
        A list of bytes (the raw output of the adc), MSB first.

    Returns
    -------
    float
        a float indicating a voltage between 2.5v and -2.5v - This will
        not be adjusted for PGA gain. If you use a voltage reference other
        than midsupply (2.5v) which is what is used by the internal
        reference then you will need to adjust this accordingly
    """
    high, mid_hi, mid_lo, low = datain[0], datain[1], datain[2], datain[3]
    # Reassemble the 32-bit word, most significant byte first.
    raw = (((high << 8 | mid_hi) << 8 | mid_lo) << 8) | low
    # Two's-complement sign extension of the 32-bit value.
    if raw & (1 << 31):
        raw -= 1 << 32
    return float(raw * (2.5 / 2 ** 31))
|
def shift_cities(road_map):
    """ Rotate the circuit by one position, in place: the last city
    becomes the first and every other city moves up one slot.
    :param road_map: list of four-tuples: [(state, city, latitude, longitude), ...]
    :return: the same list object, rotated
    """
    last_city = road_map.pop()
    road_map.insert(0, last_city)
    return road_map
|
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Update parameters using gradient descent with Momentum.

    Arguments:
    parameters -- dict of parameters: parameters['W'+str(l)], parameters['b'+str(l)]
    grads -- dict of gradients: grads['dW'+str(l)], grads['db'+str(l)]
    v -- dict of current velocities: v['dW'+str(l)], v['db'+str(l)]
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- dict of updated parameters (mutated in place)
    v -- dict of updated velocities (mutated in place)
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            grad_key = "d" + prefix + str(layer)
            param_key = prefix + str(layer)
            # Exponentially weighted average of past gradients...
            v[grad_key] = beta * v[grad_key] + (1 - beta) * grads[grad_key]
            # ...then step the parameter along the velocity.
            parameters[param_key] = parameters[param_key] - learning_rate * v[grad_key]
    return parameters, v
|
def clean_string(str_in):
    """Normalize spreadsheet-cell content that is hard to deal with
    (mainly newlines).

    Numbers pass through untouched; bytes are decoded as UTF-8; newline
    and carriage-return characters are replaced with '; '.
    """
    # isinstance replaces the `type(x) in [...]` anti-pattern; note that
    # bools (a subclass of int) are now returned unchanged where the
    # original crashed on them. The leftover debug print was removed.
    if isinstance(str_in, (int, float)):
        return str_in
    if isinstance(str_in, bytes):
        str_in = str_in.decode("utf-8")
    str_out = str_in
    for find_str, rep_str in (('\n', '; '), ('\r', '; ')):
        str_out = str_out.replace(find_str, rep_str)
    return str_out
|
def check_normality(statistic, p_value, alpha=0.05):
    """ Report whether a normality test suggests a Gaussian sample.

    Args:
        statistic (float): test statistic from the normality test
        p_value (float): p-value of the test
        alpha (float, optional): significance level. Defaults to 0.05.
    Returns:
        (boolean): True if Normal else False
    """
    print('Statistics=%.3f, p_value=%.3f' % (statistic, p_value))
    seems_normal = p_value > alpha
    if seems_normal:
        print('Sample looks Gaussian (fail to reject H0)')
    else:
        print('Sample does not look Gaussian (reject H0)')
    return seems_normal
|
def argument_checker(
    Csin,
    Csout,
    Crin,
    Crout,
    Sin,
    Rin,
    precision_value,
):
    """Check that the arguments are of correct form.

    The four channel capacities must lie strictly between 1024 and
    64000, the two port numbers must differ, and the precision value
    must be in [0, 1). Prints a diagnostic and returns False on the
    first violation, True otherwise.
    """
    # Validate each capacity the same way; the fix: the first message
    # previously said 'Cin' instead of 'Csin'.
    capacities = (
        (Csin, 'Csin'),
        (Csout, 'Csout'),
        (Crin, 'Crin'),
        (Crout, 'Crout'),
    )
    for value, label in capacities:
        if not 1024 < value < 64000:
            print('ERROR: %s of wrong size' % label)
            return False
    if Sin == Rin:
        print('ERROR: Port numbers are the same')
        return False
    if not 0 <= precision_value < 1:
        print('ERROR: precision value of wrong size')
        return False
    return True
|
def reverse1(snippet):
    """Reverse first sequence.
    This does not change the calculated value.

    Toggles the snippet between its "ascending" and "descending" forms:
    when the literal digit run '0123456789;' is present, it and two
    regex-looking text fragments are rewritten to their reversed
    counterparts; otherwise the inverse substitutions are applied.

    NOTE(review): the r'...' arguments are replaced as LITERAL text —
    backslashes included — not applied as regular expressions; confirm
    the snippets really contain these exact substrings.
    """
    text = snippet
    if '0123456789;' in text:
        text = text.replace('0123456789;', '9876543210;')
        text = text.replace(r';\d*(\1\d*);', r';(\d*\1)\d*;') # [a9] to [9a] len = 10 - a
        text = text.replace(r';(\d*)\1\d*;', r';\d*\1(\d*);') # [0a[ to ]a0] len = a
    else:
        text = text.replace('9876543210;', '0123456789;')
        text = text.replace(r';(\d*\1)\d*;', r';\d*(\1\d*);') # [9a] to [a9] len = 10 - a
        text = text.replace(r';\d*\1(\d*);', r';(\d*)\1\d*;') # ]a0] to [0a[ len = a
    return text
|
def counts_to_flux(counts, dcounts, magzp, wavelength):
    """ converts counts (and their optional errors) into flux (erg/s/cm2/A) """
    def _convert(c):
        # Zero-point and wavelength^2 scaling, identical for flux and error.
        return c * 10 ** (-(2.406 + magzp) / 2.5) / (wavelength ** 2)
    flux = _convert(counts)
    dflux = _convert(dcounts) if dcounts is not None else None
    return flux, dflux
|
def format_qnode(qnode: str) -> str:
    """Formats a qnode with the required prefix.

    "wd:" is prepended for qnodes (identifiers starting with "Q");
    "wdt:" is prepended for pnodes (everything else).

    Args:
        qnode: Unformatted qnode.

    Returns:
        Formatted qnode.
    """
    prefix = "wd:" if qnode.startswith("Q") else "wdt:"
    return prefix + qnode
|
def _fully_qualified_name(t: type) -> str:
"""Retrieves the fully qualified name of the provided type.
Args:
t (type): The type whose fully qualified name shall be retrieved.
Returns:
str: The fully qualified name of ``t``.
Raises:
TypeError: If ``t`` is not an instance of ``type``.
"""
if not isinstance(t, type):
raise TypeError(
"The parameter <t> has to be a type, but is an instance of {}!".format(_fully_qualified_name(type(t)))
)
prefix = ""
if hasattr(t, "__module__"):
prefix = t.__module__ + "."
return prefix + t.__name__
|
def _decode_field(s, prev=None):
"""
Decode a single field according to the Lightstreamer encoding rules.
1. Literal '$' is the empty string.
2. Literal '#' is null (None).
3. Literal '' indicates unchanged since previous update.
4. If the string starts with either '$' or '#', but is not length 1,
trim the first character.
5. Unicode escapes of the form uXXXX are unescaped.
Returns the decoded Unicode string.
"""
if s == "$":
return ""
elif s == "#":
return None
elif s == "":
return prev
elif s[0] in "$#":
s = s[1:]
return s.decode("unicode_escape")
|
def concat(datasets):
    """Merge dictionaries containing lists for each key.

    Returns a new dict; each value is a freshly built list, so neither
    the input dicts nor their lists are mutated or aliased.
    """
    res = {}
    for dataset in datasets:
        for k, v in dataset.items():
            # setdefault+extend appends in-place, avoiding the quadratic
            # cost of rebuilding the list with `res.get(k, []) + v`.
            res.setdefault(k, []).extend(v)
    return res
|
def tokenization(tweet):
    """
    DESCRIPTION:
            Tokenizes a tweet into words
    INPUT:
            tweet: a tweet as a python string
    OUTPUT:
            list of tweet's tokens (words), split on any whitespace
    """
    # str.split() already returns a list; the list() wrapper was redundant.
    return tweet.split()
|
def find(string, char):
    """
    Looks for a character in a string and returns its indexes.

    Unlike string.find(), this returns ALL the indexes, not only the first one.
    """
    positions = []
    for idx, current in enumerate(string):
        if current == char:
            positions.append(idx)
    return positions
|
def encode_strings(strings, encode_format="utf8"):
    """Converts an array/list of strings into their encoded byte form
    (utf8 by default).
    """
    encoded = []
    for text in strings:
        encoded.append(text.encode(encode_format))
    return encoded
|
def parse_identifier(s):
    """Split off the last '/'-separated segment of an identifier.

    A single trailing '/' is ignored, so 'a/b/' yields 'b'.
    """
    segments = s.split('/')
    last = segments[-1]
    return last if last else segments[-2]
|
def human(s):
    """Return a human readable time rounded to the tenth of second.

    Seconds-only up to 60s, then 'XmSS.Ss', then 'XhMMmSS.Ss'.
    """
    if s <= 60:
        return '%.1fs' % s
    # BUG FIX: the minutes must be floor-divided, not rounded —
    # round(90/60) == 2 used to print 90s as "2m30.0s".
    m = int(s // 60)
    if m <= 60:
        return '%dm%04.1fs' % (m, s % 60)
    # m // 60 keeps the hour count integral (m / 60 was a float under %d).
    return '%dh%02dm%04.1fs' % (m // 60, m % 60, s % 60)
|
def get_camelcase_name_chunks(name):
    """
    Given a camelCase name, get its lowercase parts.
    E.g: maxCount -> ["max", "count"]
    """
    chunks = []
    current = ""
    for char in name:
        if not char.isupper():
            current += char
            continue
        # An uppercase letter starts a new chunk; flush the previous one
        # (if any) and begin the next with its lowercase form.
        if current:
            chunks.append(current)
        current = char.lower()
    chunks.append(current)
    return chunks
|
def _vector_scalar_mask(form):
"""The vsmask marks buffers as either of length niter items or as
pseudo buffers containing scalars which should not be length checked.
"""
vsmask = "{ "
for f in form:
if f == "s":
vsmask += "1, "
else:
vsmask += "0, "
vsmask = vsmask[:-2] + " }"
return vsmask
|
def append_to_keys(adict, preffix):
    """
    Return a copy of a dict whose keys are stringified and prefixed.

    @param adict: mapping whose keys will be stringified and prefixed
    @param preffix: string prepended to each stringified key
    @return: new dict with the same values under the prefixed keys
    """
    # The original conditional `value if isinstance(value, dict) else value`
    # was a no-op (both branches yield `value`), so it is dropped; the
    # list() around .items() was likewise unnecessary.
    return {preffix + str(key): value for key, value in adict.items()}
|
def _xfrm_bttv(data):
"""Helper for load_bttv - parse a BTTV-format JSON file"""
template = data["urlTemplate"].replace("{{image}}", "1x")
if template.startswith("//"): template = "https:" + template
return {em["code"]: template.replace("{{id}}", em["id"])
for em in data["emotes"]}
|
def recursive_get_using_string(collection, key):
    """
    Given a collection and a key in the format of x.y.z.a, return collection
    [x][y][z][a].

    Purely-numeric segments are converted to int so list indexes work.
    """
    current = collection
    for part in key.split('.'):
        index = int(part) if part.isdigit() else part
        current = current[index]
    return current
|
def B0_rel_diff(v0w, b0w, b1w, v0f, b0f, b1f, prefact, weight_b0, weight_b1):
    """
    Returns the relative difference in the bulk modulus.
    THE SIGNATURE OF THIS FUNCTION HAS BEEN CHOSEN TO MATCH THE ONE OF ALL THE OTHER FUNCTIONS
    RETURNING A QUANTITY THAT IS USEFUL FOR COMPARISON, THIS SIMPLIFIES THE CODE LATER.
    Even though several inputs are useless here.
    """
    # Same floating-point operation order as the original expression.
    numerator = prefact * 2 * (b0w - b0f)
    return numerator / (b0w + b0f)
|
def v3_is_anagram(word1, word2):
    """Return True if the given words are anagrams.

    Uses the built-in sorted(): anagrams sort to the same character sequence.
    """
    canonical1 = sorted(word1)
    canonical2 = sorted(word2)
    return canonical1 == canonical2
|
def for_insert(index_row, row, attributes):
    """
    Checks whether one row of an uploaded file satisfies the database
    constraints, i.e. whether it is suitable for insert.

    Takes three parameters:
     - "index_row" (index of the row being checked; used in the error
       message, 1-based where the first row is the header)
     - "row" (dictionary with the row's data, keyed by column name)
     - "attributes" (dictionary describing each column's constraints:
       'required', 'type', and for DropDown columns 'options')

    Returns two values:
     - a boolean: True if the row is suitable for insert, False otherwise
     - a string: '' when no problem was found, otherwise a description of
       every violated constraint, prefixed with the row number.
    """
    errors = []
    for key, cell in row.items():
        # Columns unknown to the database schema are ignored.
        if key not in attributes:
            continue
        constraint = attributes[key]
        empty = cell == '' or cell is None
        if constraint['required'] is True and empty:
            errors.append(f'value in column "{str(key)}" is missing')
        # Empty cells never trigger type errors (only the required check).
        kind = constraint['type']
        if kind == 'DropDown':
            if not empty and cell not in constraint['options']:
                errors.append(
                    f'value in column "{str(key)}" is not allowed (it should be one of the predefined values)'
                )
        elif kind == 'Decimal':
            if not empty and not isinstance(cell, (int, float)):
                errors.append(f'value in column "{str(key)}" is not allowed (it should be a decimal number)')
        elif kind == 'Integer':
            if not empty and not isinstance(cell, int):
                errors.append(f'value in column "{str(key)}" is not allowed (it should be a whole number)')
    if errors:
        return False, f'Row {str(index_row)}: ' + ', '.join(errors) + '.'
    return True, ''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.