content stringlengths 42 6.51k |
|---|
def get_words_from_string(line):
    """Split *line* into a list of lowercase words.

    A word is a maximal run of alphanumeric characters; every other
    character acts as a separator.
    """
    words = []
    current = ''
    for ch in line:
        if ch.isalnum():
            current += ch
        elif current:
            words.append(current.lower())
            current = ''
    # Flush a trailing word that ran to the end of the line.
    if current:
        words.append(current.lower())
    return words
def pp(pre, name):
    """Build a prefix-appended name.

    :param pre: prefix string
    :param name: base name
    :return: the string ``prefix_name``
    """
    return f'{pre}_{name}'
def image(value, width='', height=''):
    """
    Accepts a URL and returns an HTML image tag ready to be displayed.
    Optionally, you can set the height and width with keyword arguments.

    :param value: image URL placed in the ``src`` attribute
    :param width: optional CSS width (e.g. ``'10px'``)
    :param height: optional CSS height (e.g. ``'20px'``)
    :return: ``<img>`` tag string
    """
    declarations = []
    if width:
        declarations.append("width:%s" % width)
    if height:
        declarations.append("height:%s" % height)
    # Join with ';' — the original concatenated the two declarations with no
    # separator, producing invalid CSS like "width:10pxheight:20px".
    data_dict = dict(src=value, style=";".join(declarations))
    return '<img src="%(src)s" style="%(style)s">' % data_dict
def postprocess_clusters(dict_clusters):
    """
    Merge adjacent clusters with the same value.

    :param dict_clusters: dictionary mapping prop -> list of (t0, t1, u)
        tuples, where [t0, t1] is a time interval and u a node.
    :return: dictionary with the same keys, where intervals that are
        contiguous in time (t0 of one equals t1 of the previous) on the
        same node u have been merged into a single interval.
    """
    # 1. For each prop value sort the cluster by (node,t0)
    # 2. Browse the [(t0,t1,u),..] and keep a min(t0),max(t1) until encountering a t1!=t0
    dict_clusters_pp = {k: [] for k in dict_clusters}
    for k, cluster in dict_clusters.items():
        # Sort by node then start time so mergeable intervals are adjacent.
        # NOTE(review): assumes each cluster list is non-empty — cluster[0]
        # below would raise IndexError otherwise; confirm with callers.
        cluster = sorted(cluster, key=lambda x: (x[2], x[0]))
        t0_current, t1_current, prev_u = cluster[0]
        for t0, t1, u in cluster[1:]:
            if u == prev_u and t0 == t1_current:  # previous temporal node equals to current one
                # Contiguous interval on the same node: extend the open one.
                t1_current = t1
            else:
                # Gap or node change: close the open interval, start a new one.
                dict_clusters_pp[k].append((t0_current, t1_current, prev_u))
                prev_u = u
                t0_current, t1_current = t0, t1
        # Flush the last open interval for this key.
        dict_clusters_pp[k].append((t0_current, t1_current, prev_u))
    return dict_clusters_pp |
def median(data_sorted):
    """Return the median of *data_sorted*.

    :param data_sorted: the values to take the median of; must already be
        sorted.
    """
    n = len(data_sorted)
    mid = n // 2
    if n % 2:
        # Odd count: the middle element is the median.
        return data_sorted[mid]
    # Even count: average the two middle elements.
    return (data_sorted[mid - 1] + data_sorted[mid]) / 2
def a(v, dfs_data):
    """The ancestor function: parent of vertex *v* per the DFS parent lookup."""
    parents = dfs_data['parent_lookup']
    return parents[v]
def strip(value):
    """Strip surrounding whitespace; falsy values (None, '') pass through."""
    if not value:
        return value
    return value.strip()
def sha1_of_file(filepath):
    """
    Get sha1 of file
    :param filepath: File to hash
    :return: hex sha1 digest of the file contents, or None when the file
        cannot be opened or read
    """
    import hashlib
    try:
        with open(filepath, 'rb') as file_to_hash:
            return hashlib.sha1(file_to_hash.read()).hexdigest()
    except OSError:
        # Narrowed from a bare except: this keeps the best-effort behavior
        # for I/O failures without swallowing KeyboardInterrupt/SystemExit.
        return None
def get_file_content(filename):
    """
    Get the content of a non-binary file.
    :param filename: file name
    :return: file.readlines() output (list of lines with newlines kept)
    """
    # `with` guarantees the handle is closed even if readlines() raises;
    # the original leaked the file object on exception.
    with open(filename, "r") as f:
        return f.readlines()
def getModelFNameFromHyperPars(pref, *hyperParsList, suff=None):
    """Generate a model filename from hyperparameter entries.

    Each entry in *hyperParsList* is either a dict of hyperparameter
    values, or a (hyperpars_dict, default_hyperpars_dict) pair; in the
    second case only values differing from the default contribute.
    The result has the form ``pref_key1_val1_key2_val2...`` (dots in
    values become underscores), with ``_suff`` appended when given.
    """
    parts = [pref]
    for entry in hyperParsList:
        if isinstance(entry, (tuple, list)):
            pars, defaults = entry
        else:
            pars, defaults = entry, {}
        for key in sorted(pars):
            if pars[key] != defaults.get(key):
                parts.append('_{}_{}'.format(key, pars[key]).replace('.', '_'))
    name = ''.join(parts)
    if suff:
        name += '_' + suff
    return name
def do_remap(build_id):
    """
    >>> do_remap('1815548680a59ffa')
    '86541518A580FA9F0000000000000000'
    """
    build_id = build_id.upper()
    # Crashpad stores the ELF build id with the leading fields byte-swapped;
    # see https://crashpad.chromium.org/bug/229 and
    # https://source.chromium.org/chromium/chromium/src/+/main:third_party/crashpad/crashpad/snapshot/elf/module_snapshot_elf.cc;l=157-167;drc=81cc8267d3a069163708f3ac140d0d940487c137
    # Each entry is the offset of the next two-hex-digit byte to emit.
    byte_order = (6, 4, 2, 0, 10, 8, 14, 12)
    swapped = ''.join(build_id[i:i + 2] for i in byte_order)
    # Zero-pad to the fixed 32-character debug-id width.
    return swapped + '0' * (32 - len(build_id))
def generate_experiment_histories_file_path(experiment_path_prefix):
    """Append the ``-histories.npz`` suffix to *experiment_path_prefix*."""
    return experiment_path_prefix + "-histories.npz"
def _decode_positive_integer(bits):
"""
:param bits:
:return:
"""
return int(bits, 2) |
def create_dictionary(timestamp, original_sentence, sequence_switched, err_message, suggestion_list):
    """Create Dictionary Function

    Generates and exports a dictionary object with relevant data for
    website interaction to take place.

    :param timestamp: when the sentence was processed
    :param original_sentence: the sentence as submitted
    :param sequence_switched: the masked variant of the sentence
    :param err_message: description of the suspected error
    :param suggestion_list: candidate corrections
    :return: populated dict, or {} when there are no suggestions
    """
    if not suggestion_list:
        return {}
    # The original also built an unused "Possible error: ..." string here;
    # that dead assignment has been removed.
    return {
        "timestamp": timestamp,
        "original_sentence": original_sentence,
        "masked_sentence": sequence_switched,
        "err_message": err_message,
        "possible_corrections": suggestion_list
    }
def rollback_delete_rule(table_name):
    """Build the SQL that drops the delete-protect rule, allowing deletes
    from the given ecommerce table again."""
    return "DROP RULE delete_protect ON ecommerce_{}".format(table_name)
def isvalidIp(ip_addr):
    """Check if *ip_addr* is a valid dotted-quad IPv4 address string."""
    try:
        octets = [int(part) for part in ip_addr.split('.')]
    except ValueError:
        # A non-numeric component means it cannot be a valid address.
        return False
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
def isLambdaWithOneArgument(x):
    """Returns whether 'x' is a function (e.g. a lambda) taking exactly one
    positional argument; False for anything without a code object."""
    try:
        # `__code__` is the Python 3 attribute name; the original used the
        # Python-2-only `func_code`, which always raised AttributeError on
        # Python 3 and made this function return False for every input.
        return x.__code__.co_argcount == 1
    except AttributeError:
        return False
def validate_message(message):
    """Validate a received MIDAS beacon message.

    :param message: semicolon-separated message string
    :return: dict with the beacon's name/type/id/address/status fields,
        or None when the message is not a MIDAS-framework message.
    """
    fields = message.split(';')
    if fields[0] != 'midas':
        return None
    keys = ['name', 'type', 'id', 'address', 'status']
    return dict(zip(keys, fields[1:]))
def ind_complement(inds, n):
    """Return the indices below ``n`` not contained in ``inds``."""
    return tuple(index for index in range(n) if index not in inds)
def decode_roman_numeral(roman):
    """Calculate the numeric value of a Roman numeral (in capital letters).

    :param roman: numeral such as 'XIV'; an empty string yields 0 (the
        original raised IndexError on empty input).
    :raises KeyError: on characters that are not Roman digits
    """
    # CREDITS: 200_success, https://codereview.stackexchange.com/questions/141402/converting-from-roman-numerals-to-arabic-numbers
    trans = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    if not roman:
        return 0
    values = [trans[r] for r in roman]
    # Subtractive notation: a digit smaller than its successor is negative.
    total = values[-1]
    for val, next_val in zip(values, values[1:]):
        total += val if val >= next_val else -val
    return total
def Lt(D, Lc):
    """
    Calculates the length way of liquid.

    Parameters
    ----------
    D : float
        The diameter of the column, [m]
    Lc : float
        The drain perimeter, [m]

    Returns
    -------
    Lt : float
        The length way of liquid, [m]
    """
    return (D * D - Lc * Lc) ** 0.5
def iterable(arg):
    """Make an argument iterable.

    :param arg: any value
    :return: *arg* unchanged when it is already a list or tuple, otherwise
        a single-element list wrapping it.
    """
    return arg if isinstance(arg, (list, tuple)) else [arg]
def equal_dicts(d1: dict, d2: dict, ignore_keys: list):
    """Check two dictionaries for equality while ignoring specific keys.

    Source idea: https://stackoverflow.com/a/10480904/6942666

    Args:
        d1 (dict): the first dictionary
        d2 (dict): the second dictionary
        ignore_keys (list): keys excluded from the comparison

    Returns:
        bool: whether the dicts are equal apart from the ignored keys
    """
    def _without_ignored(d):
        return {key: value for key, value in d.items() if key not in ignore_keys}

    return _without_ignored(d1) == _without_ignored(d2)
def data_binarizer(ratings, threshold):
    """Map each rating to 0 (at or below *threshold*) or 1 (above it)."""
    return [0 if rating <= threshold else 1 for rating in ratings]
def is_nonzero(delta):
    """Return True if any element of the 2D list *delta* is nonzero.

    :param delta: iterable of rows, each an iterable of numbers
    """
    # any() short-circuits on the first nonzero element; the original kept
    # scanning the whole matrix even after finding one.
    return any(element != 0 for row in delta for element in row)
def vectorized_get(dictionary, key):
    """Look up *key* in *dictionary*, returning -1 when the key is absent."""
    if key in dictionary:
        return dictionary[key]
    return -1
def reverse(text: str):
    """Return *text* with its characters in reverse order."""
    return ''.join(reversed(text))
def get_maximum_row_lengths(rows):
    """Finds the longest lengths of fields per column in a collection of rows."""
    column_count = len(rows[0])
    return [
        max(len(row[column]) for row in rows)
        for column in range(column_count)
    ]
def string_from_list(l):
    """Join the distinct strings of *l* (first occurrences, original order)
    with ',\\t\\t' separators.

    :param l: non-empty list of strings
    :return: the joined string
    """
    # Seed with the first element so an empty list still raises IndexError,
    # matching the original behavior.
    unique = [l[0]]
    for index in range(1, len(l)):
        candidate = l[index]
        # Keep only first occurrences.
        if candidate not in l[:index]:
            unique.append(candidate)
    return ',\t\t'.join(unique)
def remove_list(l1, l2):
    """Remove from list *l1* elements which are in list *l2*.

    Note: duplicates in *l1* are collapsed and order is not preserved,
    since the result comes from a set difference.
    """
    return list(set(l1).difference(l2))
def omit_all(poles, special_poles, var):
    """Return the product of (var - p) over every pole p of *poles* that is
    not listed in *special_poles*."""
    product = 1
    for pole in poles:
        if pole in special_poles:
            continue
        product *= var - pole
    return product
def genPacket(DATA, ERRS):
    """
    Build a structured packet string to send to the client.

    Format: ``DATA:ERRS`` where each side is ``code,result;code,result``
    and result entries are space separated. When a side is empty it is
    rendered as ``202,None``.

    :param DATA: list of [code, list_of_result_strings] pairs
    :param ERRS: list of [code, [str, list_of_strings]] entries.
        WARNING: entries of ERRS are flattened IN PLACE (their nested
        lists are joined into strings) as a side effect of this call.
    :return: the packet string
    """
    # searcha yes,test hello,searcha no -- test data
    sdata = ""
    if len(DATA) >= 1:
        for i in DATA:
            # Join the result strings with spaces, e.g. ['a','b'] -> 'a b'.
            datadelist = " ".join(i[1])
            sdata += "{},{};".format(i[0], "".join(datadelist))
        # Drop the trailing ';' left by the loop.
        sdata = sdata[:-1]
    else:
        sdata += "202,None"
    sdata += ":"
    if len(ERRS) >= 1:
        for i in ERRS:
            # ['300', ['test', ['hello']]]
            # Flatten the innermost list, then the middle one (mutates ERRS).
            i[1][1] = " ".join(i[1][1])
            i[1] = " ".join(i[1])
            sdata += "{},{};".format(i[0], i[1])
        # Drop the trailing ';' left by the loop.
        sdata = sdata[:-1]
    else:
        sdata += "202,None"
    return sdata |
def convert_idx(text, tokens):
    """
    Convert token indices to character spans.

    :param text: the original text
    :param tokens: tokens in the order they appear in *text*
    :return: list of (start, end) character offsets, one per token
    :raises ValueError: when a token cannot be located in *text*
    """
    current = 0
    spans = []
    for token in tokens:
        current = text.find(token, current)
        if current < 0:
            # Raise with the message instead of print + bare Exception(),
            # so the failure reason travels with the exception.
            raise ValueError('Token {} cannot be found'.format(token))
        spans.append((current, current + len(token)))
        current += len(token)
    return spans
def C_to_F(C):
    """
    Convert Celsius to Fahrenheit.

    Args:
        C : (float, or array of floats) temperature in degrees Celsius
    Returns:
        The input temperature in degrees Fahrenheit
    """
    return 32 + C * 9 / 5.0
def complement(s):
    """Flip each binary digit character of *s*.

    >>> complement('01100')
    '10011'
    """
    flip = {'0': '1', '1': '0'}
    return ''.join([flip[bit] for bit in s])
def _parse_path(url):
    """Return the path component of a URL string, or None for None input."""
    if url is None:
        return None
    # NOTE(review): `urlparse` used as a module is the Python 2 name; under
    # Python 3 this relies on a file-level `import urllib.parse as urlparse`
    # (or equivalent) — confirm against the file's imports.
    parsed = urlparse.urlsplit(url)
    return parsed.path |
def interpret_qmcpack_fname(fname):
    """Extract metadata about a QMCPACK output file from its filename.

    QMCPACK names files with a fixed period-separated suffix; the last
    3-4 segments are interpreted here. Examples:
        qmc.s000.scalar.dat
        qmc.g000.s000.stat.h5
        qmc.g161.s000.config.h5
        qmc.g005.s001.cont.xml

    Args:
        fname (str): filename ending in one of ['dat','h5','qmc','xml'].
    Returns:
        dict: metadata with keys id/group/series/category/ext/grouped.
    Raises:
        RuntimeError: when the extension is not recognized.
    """
    known_extensions = {'dat', 'h5', 'qmc', 'xml'}
    tokens = fname.split('.')
    ext = tokens[-1]
    if ext not in known_extensions:
        raise RuntimeError('unable to interpret %s' % fname)
    # Category, e.g. scalar/stat/config/random/qmc.
    cate = tokens[-2]
    # Series index token, e.g. 's000' -> 0.
    series_token = tokens[-3]
    series = int(series_token.replace('s', ''))
    # Optional group token, e.g. 'g005'; otherwise this is the prefix.
    group_token = tokens[-4]
    suffix_tokens = [series_token, cate, ext]
    if group_token.startswith('g') and len(group_token) == 4:
        group = int(group_token.replace('g', ''))
        grouped = True
        suffix_tokens.insert(0, group_token)
    else:
        # No group index: single (ungrouped) run.
        group = 0
        grouped = False
    # The project id is the filename with the interpreted suffix removed.
    suffix = '.' + '.'.join(suffix_tokens)
    prefix = fname.replace(suffix, '')
    return {
        'id': prefix, 'group': group, 'series': series,
        'category': cate, 'ext': ext, 'grouped': grouped
    }
def sphere(phenome):
    """The bare-bones sphere function: sum of squared components."""
    total = 0
    for component in phenome:
        total += component * component
    return total
def first(lst):
    """Return the first element of *lst*, or None when it is empty."""
    if lst:
        return lst[0]
    return None
def _filter_cols_ps(dataset, seed):
"""Mapping function.
Filter columns for propensity score batch.
Args:
dataset: tf.data.Dataset with several columns.
seed: int
Returns:
dataset: tf.data.Dataset with two columns (X,T).
"""
t_name = f'image/sim_{seed}_pi/value'
return dataset['image/encoded'], dataset[t_name] |
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Test approximate equality of floats, PEP 485 style.

    See https://docs.python.org/3/whatsnew/3.5.html#pep-485-a-function-for-testing-approximate-equality
    """
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
def actions(board):
    """
    Returns set of all possible actions (i, j) available on the board.

    A cell is available when it holds None; the column range comes from
    the first row's width, matching the original implementation.
    """
    # `is None` replaces the original `== None` identity-vs-equality bug;
    # a cell object overriding __eq__ could previously misreport.
    return {
        (row, col)
        for row in range(len(board))
        for col in range(len(board[0]))
        if board[row][col] is None
    }
def score_calc_5(sumI, n, beta_a_b, beta_aB, beta_c_d, beta_w_x, beta_y_z_P, gamma, L, sumI_all):
    """Scoring function with 5 ion series.

    Computes ``sumI(matched) / sumI(all) * n / L * (1 + sum of betas)``
    rounded to 3 decimals, where n is the number of matching MS2 ions,
    the beta_* terms reward consecutive matches per ion-series group, and
    L normalizes by the number of theoretically predicted ions.

    Note: *gamma* is accepted for interface compatibility but unused.
    """
    beta_total = 1 + beta_a_b + beta_aB + beta_c_d + beta_w_x + beta_y_z_P
    score = sumI * n * beta_total / (L * sumI_all)
    return round(score, 3)
def remove_all(word: str, to_replace: tuple) -> str:
    """Delete every substring listed in *to_replace* from *word*, then
    strip surrounding whitespace — leaving only the term of interest
    (e.g. the word to search on Wikipedia)."""
    result = word
    for fragment in to_replace:
        result = result.replace(fragment, '')
    return result.strip()
def load_peripheral(pdata, templates=None):
    """Load a peripheral from a dict.

    This loads a peripheral with support for templates, as used in the
    board definition file format.

    Args:
        pdata: A dict containing the peripheral definition. NOTE: this
            dict is aliased (not copied) and its 'electrodes' entry is
            rewritten in place.
        templates: A dict mapping types to template definitions.
    Returns:
        The peripheral dict (the same object as *pdata*) with each
        electrode merged with its template entry, if one exists.
    Raises:
        ValueError: when *pdata* has no 'type' field.
    """
    if not 'type' in pdata:
        raise ValueError("Peripheral definition requires a type field")
    template = None
    if templates is not None and pdata['type'] in templates:
        template = templates[pdata['type']]
    # Alias, not a copy: mutations below are visible to the caller.
    periph = pdata
    # Override electrodes with fields from template
    def map_electrode(e):
        eid = e['id']
        if template is None:
            return e
        # Find the template electrode with the same id, if any.
        e_template = next((x for x in template['electrodes'] if x['id'] == eid), None)
        if e_template is None:
            return e
        # Merge dicts, with values in e taking priority in case of duplicate keys
        return {**e_template, **e}
    periph['electrodes'] = [map_electrode(e) for e in periph['electrodes']]
    return periph |
def iterative_sum(n):
    """
    Sums the integers from 1 to n; returns 0 when n < 1 (matching the
    empty-range behavior of the original loop).
    n = 2 => 1 + 2
    Alejandro AS
    """
    if n < 1:
        return 0
    # Gauss closed form replaces the O(n) accumulation loop.
    return n * (n + 1) // 2
def same_issn(obj1, obj2):
    """Check if two objects have the same non-null ISSN."""
    issn1, issn2 = obj1['issn'], obj2['issn']
    if issn1 is None or issn2 is None:
        return False
    return issn1 == issn2
def cmake_cache_string(name, value, comment=""):
    """Generate a ``set(... CACHE STRING ...)`` line for a CMake cache
    variable, newline terminated."""
    return f'set({name} "{value}" CACHE STRING "{comment}")\n'
def filesize(path):
    """Return size of file `path` in bytes."""
    import os
    # os.stat(...).st_size replaces the legacy stat.ST_SIZE tuple indexing
    # (and drops the now-unneeded `stat` import).
    return os.stat(path).st_size
def empty_dropper(item):
    """Recursively clean empty strings out of nested list/dict data.

    Dict values that are '' become None; list items that are '' are
    dropped entirely; a bare '' becomes None; anything else is returned
    unchanged.
    """
    if isinstance(item, dict):
        return {key: empty_dropper(value) for key, value in item.items()}
    if isinstance(item, list):
        return [empty_dropper(value) for value in item if value != '']
    if item == '':
        return None
    return item
def bubble_sort(arr):
    """Sort *arr* in place with bubble sort and return it.

    Repeatedly swaps adjacent out-of-order elements; stops early as soon
    as a full pass makes no swap.
    """
    n = len(arr)
    if n == 0:
        # An empty list is trivially sorted.
        return arr
    for done in range(n):
        swapped = False
        for j in range(n - done - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break
    return arr
def unit_limit(x):
    """Clamp a value to the closed interval [-1, 1].

    Parameters
    ----------
    x : float, int
        The value to adjust.

    Returns
    -------
    float
        The adjusted value.
    """
    return max(-1., min(x, 1.))
def decode_0x03(data):
    """
    Decode the bit-encoding of Mode 01, PID 03 and return the matching
    description string. Although bit-encoded, only one bit may be set at
    a time, so the value is matched against single-bit codes; anything
    else yields 'NO DATA'.
    """
    codes = (
        (1, '01: Open loop due to insufficient engine temperature'),
        (2, '02: Closed loop, using oxygen sensor feedback to determine fuel mix'),
        (4, '04: Open loop due to engine load OR fuel cut due to deceleration'),
        (8, '08: Open loop due to system failure'),
        (16, '16: Closed loop, using at least one oxygen sensor but there is a fault in the feedback system'),
    )
    for code, message in codes:
        if data == code:
            return message
    return 'NO DATA'
def challenge(authentication, realm):
    """Construct the value sent in the WWW-Authenticate header."""
    return u'{0} realm="{1}"'.format(authentication, realm)
def find_first_extremum(pccf_vec):
    """
    Find the first extremum location: the index of the first element that
    is followed by a smaller value.

    :param pccf_vec: sequence of comparable values
    :return: index of the first local maximum, or None when the sequence
        never decreases (matching the original implicit None return).
    """
    # Start at 1 instead of the original `continue` on i == 0.
    for i in range(1, len(pccf_vec)):
        if pccf_vec[i] < pccf_vec[i - 1]:
            return i - 1
    return None
def drop_adjacent_duplicates(l: list) -> list:
    """
    Collapse runs of equal adjacent elements, keeping the original order.

    Examples:
        1. [0, 0, 1, 1, 1, 2, 3, 4, 4, 5] -> [0, 1, 2, 3, 4, 5]
        2. [] -> []
    """
    if len(l) <= 1:
        # Nothing to collapse; return the list object unchanged.
        return l
    result = []
    for element in l:
        if not result or result[-1] != element:
            result.append(element)
    return result
def find_sum(alist):
    """Find the normalised difficulty score from a subject's grade-count list.

    Counts of Ex are weighted 0, A's 1, B's 2 and so on (i.e. each count
    is multiplied by its position), and the weighted sum is returned.
    """
    # enumerate() replaces the manual csum/factor counters.
    return sum(weight * count for weight, count in enumerate(alist))
def issue_follow_doc_template_values(url_root):
    """
    Show documentation about issueFollow.

    Builds the template-values dict (required/optional parameter lists,
    potential status codes, and a sample JSON response body) used to
    render the issueFollow API documentation page.

    :param url_root: root URL used for links in the rendered page
    :return: dict of template variables for the documentation template
    """
    # Parameters the caller must supply.
    required_query_parameter_list = [
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
        {
            'name': 'issue_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'The unique identifier for the issue that the voter wants to follow.',
        },
        {
            'name': 'follow',
            'value': 'boolean',  # boolean, integer, long, string
            'description': 'Voter wants to follow or stop following this issue.',
        },
        {
            'name': 'ignore',
            'value': 'boolean',  # boolean, integer, long, string
            'description': 'Voter wants to ignore this issue.',
        },
    ]
    # No optional parameters are documented yet; kept as a placeholder.
    optional_query_parameter_list = [
        # {
        #     'name': '',
        #     'value': '',  # boolean, integer, long, string
        #     'description': '',
        # },
    ]
    # Status codes the endpoint may return.
    potential_status_codes_list = [
        {
            'code': 'VALID_VOTER_DEVICE_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
        },
        {
            'code': 'VALID_VOTER_ID_MISSING',
            'description': 'Cannot proceed. A valid voter_id was not found.',
        },
        # {
        #     'code': '',
        #     'description': '',
        # },
    ]
    try_now_link_variables_dict = {
        # 'organization_we_vote_id': 'wv85org1',
    }
    # Sample JSON response body shown on the documentation page.
    api_response = '{\n' \
                   '  "status": string,\n' \
                   '  "success": boolean,\n' \
                   '  "voter_device_id": string (88 characters long),\n' \
                   '}'
    template_values = {
        'api_name': 'issueFollow',
        'api_slug': 'issueFollow',
        'api_introduction':
            "",
        'try_now_link': 'apis_v1:issueFollowView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values |
def addClass(classes, className, grade, credits):
    """Adds className, grade, and credits to classes.

    Class names are stored uppercased; a class is only added when its
    uppercased name is not already present.

    .. note::
        credits parameter shadows built-in function credits(). If not
        mandated by assignment instructions, parameter name would be
        altered to numCredits.

    :param dict classes:
        The class information. Format:
        classes = {className: {"grade": grade, "credits": credits}}
    :param str className:
        The name of the class to be added to classes.
    :param str grade:
        The grade received for className.
    :param int credits:
        The number of credits className is worth.
    :return:
        Returns True if className added to classes, else False.
    :rtype: bool
    """
    # Normalize BEFORE the membership test. The original checked the raw
    # name but stored the uppercased one, so adding 'math' after 'MATH'
    # passed the check and silently overwrote the existing entry.
    className = className.upper()
    if className in classes:
        return False
    classes[className] = {"grade": grade.upper(), "credits": int(credits)}
    return True
def dhash_hamming_distance(dhash1, dhash2):
    """
    Calculate the hamming distance between two dhash values.

    :param dhash1: str, hex dhash of an image returned by `calculate_dhash`
    :param dhash2: str, hex dhash of an image returned by `calculate_dhash`
    :return: int, number of differing bits between the two hashes
    """
    xor = int(dhash1, 16) ^ int(dhash2, 16)
    return bin(xor).count('1')
def get_colors(use=True):
    """
    Return the ANSI terminal color escape codes as a dict. Pass False to
    get the same keys with every value replaced by an empty string
    (useful to disable coloring without changing call sites).
    """
    colors = {
        "BLACK": "\033[0;30m",
        "DARK_GRAY": "\033[1;30m",
        "RED": "\033[0;31m",
        "LIGHT_RED": "\033[1;31m",
        "GREEN": "\033[0;32m",
        "LIGHT_GREEN": "\033[1;32m",
        "BLUE": "\033[0;34m",
        "LIGHT_BLUE": "\033[1;34m",
        "MAGENTA": "\033[0;35m",
        "LIGHT_MAGENTA": "\033[1;35m",
        "CYAN": "\033[0;36m",
        "LIGHT_CYAN": "\033[1;36m",
        "LIGHT_GRAY": "\033[0;37m",
        "WHITE": "\033[1;37m",
        "DEFAULT_COLOR": "\033[00m",
        "ENDC": "\033[0m",
    }
    if use:
        return colors
    return {name: '' for name in colors}
def convertTimeToSeconds(hours, minutes, seconds, milliseconds):
    """Convert an h/m/s/ms quadruple into a total number of seconds."""
    whole_seconds = int(hours) * 3600 + int(minutes) * 60 + int(seconds)
    return whole_seconds + milliseconds / 1000
def is_good_version(version):
    """
    Check whether *version* follows the semantic X.Y.Z format
    (see http://semver.org) with numeric components.

    :param version: version string given by the user
    :return: True when it matches, False otherwise
    """
    parts = version.split('.')
    return len(parts) == 3 and all(part.isdigit() for part in parts)
def EnumDistance(caseAttrib, queryValue, arrayList, weight): # stores enum as array
    """
    Implements EnumDistance local similarity function.

    Builds an Elasticsearch function_score query whose painless script
    scores documents by the normalized distance between the positions of
    the query value and the document value inside *arrayList*:
    1 - |indexOf(query) - indexOf(doc)| / len(arrayList).

    :param caseAttrib: document attribute to compare against
    :param queryValue: value to compare; must be convertible to float
    :param arrayList: ordered enum values (defines the distance metric)
    :param weight: boost applied to the function score
    :return: the query dict, or None (implicitly) when queryValue is not
        numeric
    """
    try:
        queryValue = float(queryValue)
        # build query string
        queryFnc = {
            "function_score": {
                "query": {
                    "match_all": {}
                },
                "script_score": {
                    "script": {
                        "params": {
                            "lst": arrayList,
                            "attrib": caseAttrib,
                            "queryValue": queryValue
                        },
                        "source": "1 - ( Math.abs(lst.indexOf(params.queryValue) - lst.indexOf(doc[params.attrib].value)) / lst.length )"
                    }
                },
                "boost": weight,
                "_name": "interval"
            }
        }
        return queryFnc
    except ValueError:
        # NOTE(review): this message and the "_name": "interval" above look
        # copy-pasted from a sibling Interval() function — confirm whether
        # they should say "EnumDistance" instead.
        print("Interval() is only applicable to numbers") |
def tweet_type(tweet: dict) -> str:
    """Classify a tweet object by its reference type.

    :param tweet: Full tweet object
    :return: 'retweet with comment', 'retweet without comment', 'reply',
        or 'original tweet'
    :raises Exception: on an unknown reference type
    """
    if not tweet["referenced_tweets"]:
        return "original tweet"
    # A tweet can reference several tweets, e.g.
    # [{'id': ..., 'type': 'quoted'}, {'id': ..., 'type': 'replied_to'}];
    # only the first reference is considered here.
    reference_type = tweet["referenced_tweets"][0]["type"]
    type_names = {
        "quoted": "retweet with comment",
        "retweeted": "retweet without comment",
        "replied_to": "reply",
    }
    if reference_type in type_names:
        return type_names[reference_type]
    # Somehow there is an unknown reference type.
    raise Exception(
        f"Unknown tweet type with reference type {reference_type} occurred"
    )
def spin_words(sentence: str) -> str:
    """Reverse every word of five or more letters in *sentence*.

    >>> spin_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        word[::-1] if len(word) >= 5 else word
        for word in sentence.split()
    )
def get_cell(caves, row, col):
    """Get the (row, col) cell of the *caves* grid."""
    cave_row = caves[row]
    return cave_row[col]
def dp_lcs_iter(s1: str, s2: str):
    """Length of the longest common subsequence of s1 and s2 — O(M*N) DP."""
    m, n = len(s1), len(s2)
    # Row/column 0 stay zero: an LCS with an empty prefix is empty,
    # so the loops can start at 1 instead of special-casing inside.
    table = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if s1[i - 1] == s2[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[m][n]
def utf8_to_str(d):
    """
    Decode a sequence of per-character UTF-8 byte elements (as returned
    by rogue variables) into a single Python string.

    Args
    ----
    d : sequence
        Sequence whose elements each decode to one character.

    Returns
    -------
    str
        The string assembled from the decoded elements of d.
    """
    decoded = [str(element, encoding='UTF-8') for element in d]
    return ''.join(decoded)
def format_torch_name(name):
    """Format a torch layer name into a caffe2 blob name: dots become
    underscores, and 'bias'/'weight' suffixes shorten to 'b'/'w'."""
    blob = name.replace('.', '_')
    if blob.endswith('bias'):
        blob = blob[:-len('bias')] + 'b'
    if blob.endswith('weight'):
        blob = blob[:-len('weight')] + 'w'
    return blob
def make_urls_list(ids_list):
    """
    Build full YouTube watch URLs from video ids, skipping None entries.

    :param list ids_list: youtube video id list
    :return list: list of urls
    """
    base_url = 'https://www.youtube.com/watch?v='
    urls = []
    for video_id in ids_list:
        if video_id is not None:
            urls.append(base_url + video_id)
    return urls
def apply_rotations(size, step, rotations):
    """
    Apply the "rotations" to step and return the step. This is used by
    compress_solution() to remove all of the whole cube rotations from
    the solution.

    :param size: cube size (NxNxN); determines how many leading digits to
        strip from each rotation token (e.g. "4U" -> "U")
    :param step: a move string whose face letter gets remapped
    :param rotations: iterable of whole-cube rotation tokens
    :raises Exception: when a stripped rotation is not a recognized
        whole-cube rotation
    """
    if step in ("CENTERS_SOLVED", "EDGES_GROUPED"):
        return step
    if step.startswith("COMMENT"):
        return step
    # Each whole-cube rotation permutes the faces; faces absent from a
    # cycle dict are unchanged by that rotation. This table replaces the
    # six near-identical elif pyramids of the original.
    face_cycles = {
        "U": {"L": "F", "F": "R", "R": "B", "B": "L"},
        "U'": {"L": "B", "F": "L", "R": "F", "B": "R"},
        "F": {"U": "L", "L": "D", "R": "U", "D": "R"},
        "F'": {"U": "R", "L": "U", "R": "D", "D": "L"},
        "R": {"U": "F", "F": "D", "B": "U", "D": "B"},
        "R'": {"U": "B", "F": "U", "B": "D", "D": "F"},
    }
    # Rotating the opposite face the opposite way is the same whole-cube
    # rotation: D' ~ U, D ~ U', B' ~ F, B ~ F', L' ~ R, L ~ R'.
    aliases = {"D'": "U", "D": "U'", "B'": "F", "B": "F'", "L'": "R", "L": "R'"}
    for rotation in rotations:
        # remove the number at the start of the rotation...for a 4x4x4 cube
        # there might be a 4U rotation (to rotate about the y-axis) but we
        # don't need to keep the '4' part.
        if size <= 9:
            rotation = rotation[1:]
        elif size <= 99:
            rotation = rotation[2:]
        else:
            rotation = rotation[3:]  # For a 100x or larger cube!!
        key = aliases.get(rotation, rotation)
        if key not in face_cycles:
            raise Exception("%s is an invalid rotation" % rotation)
        cycle = face_cycles[key]
        # Like the original elif chain, only the FIRST face letter found
        # in the step (searched in U, L, F, R, B, D order) is remapped.
        for face in "ULFRBD":
            if face in step:
                if face in cycle:
                    step = step.replace(face, cycle[face])
                break
    return step
def join_regex(regexes):
    """Combine a list of regexes into one alternation that matches any of
    them, wrapping each in a non-capturing group."""
    return "|".join(f"(?:{pattern})" for pattern in regexes)
def lunToScsiDiskName(lun, partnum):
    """Map a LUN and partition number to a SCSI device path.

    LUN 0 is '/dev/sdc'; each further LUN advances one letter.
    """
    disk_letter = chr(ord('c') + int(lun))
    return '/dev/sd' + disk_letter + str(partnum)
def ignore_some_combinations(testcase, clock_domains, ft_clock, fifo_clock, data_width):
    """Special function to ignore some fixture combinations.

    Returns True for combinations to skip; otherwise returns None
    (falls through), matching the original implicit behavior.
    """
    single_domain = 'SINGLE' in clock_domains
    if single_domain and '48' not in fifo_clock:
        # No need to use multiple fifo clock values in single-domain mode.
        return True
    return None
def duplicar(valores):
    """Return a list with each element doubled via ``2 * element``.

    Works for numbers (arithmetic doubling) and strings (repetition).

    >>> duplicar([1, 2, 3, 4])
    [2, 4, 6, 8]
    >>> duplicar([])
    []
    >>> duplicar(['a', 'b', 'c'])
    ['aa', 'bb', 'cc']
    """
    # Keep the int-first `2 * valor` operand order so the TypeError raised
    # for unsupported elements matches the original ('int' and 'NoneType').
    return [2 * valor for valor in valores]
def utf8ize(x):
    """
    Encode *x* as UTF-8 bytes; None passes through unchanged.

    :param x: string or None
    :return: bytes or None
    """
    return None if x is None else x.encode('utf-8')
def input_require_int(raw_input):
    """
    Utility that requires the given input to be an integer greater than
    zero. If the input is not an integer, or is zero or negative, the
    user is prompted again until a valid value is entered.

    :param raw_input: the initial user input
    :type raw_input: str
    :return: a number greater than zero
    :rtype: int
    """
    while True:
        try:
            number = int(raw_input)
            if number > 0:
                return number
        except ValueError:
            pass
        # Either non-numeric or <= 0: ask again.
        raw_input = input("Please input a valid number\n")
def _concat(*lists):
"""Concatenates the items in `lists`, ignoring `None` arguments."""
concatenated = []
for list in lists:
if list:
concatenated += list
return concatenated |
def year_check(year, b_year, e_year):
    """True when *year* is a 4-character string whose numeric value lies
    in the inclusive range [b_year, e_year]."""
    return len(year) == 4 and b_year <= int(year) <= e_year
def _parse_module_size(description):
"""Return a string size of the memory module in description.
>>> _parse_module_size('4GB')
'4'
>>> _parse_module_size('8 GB')
'8'
>>> _parse_module_size('16GB (2x8GB)')
'2x8'
>>> _parse_module_size('(2 x 8GB) 16GB')
'2x8'
Args:
description: String description of a RAM module.
Returns:
String size of the memory module in description.
"""
if not isinstance(description, str):
raise TypeError('description must be a string.')
sizes = []
end = -1
while True:
end = description.find('GB', end + 1)
if end <= 0:
break
start = end - 1
# Skip any space between the size and "GB"
while (start >= 0) and description[start].isspace():
start -= 1
# Move to the start of the size
while (start >= 0) and description[start].isdigit():
start -= 1
start += 1
try:
sizes.append(int(description[start:end].strip()))
except ValueError:
pass
if len(sizes) <= 0:
return None
if len(sizes) == 1:
return str(sizes[0])
else:
if sizes[0] == sizes[1]:
return str(sizes[0])
else:
numerator = max(*sizes[:2])
denominator = min(*sizes[:2])
return '{}x{}'.format(numerator // denominator, denominator) |
def is_all_upper(s):
    """Returns True if the whole of the string s is upper case."""
    return all(ch.isupper() for ch in s)
def get_namespace_as_kwargs(namespace):
    """Build keyword arguments for the given namespace.

    Lets the same call site handle namespaced and non-namespaced
    resources: a falsy namespace yields no keyword arguments at all.

    Args:
        namespace (str): the given namespace, or None/"".

    Returns:
        dict[str, str]: {"namespace": namespace} when set, else {}.
    """
    if not namespace:
        return {}
    return {"namespace": namespace}
def second_help_calculate_lp(score, number_of_sub_graphs):
    """Scores-calculation helper: divide every entry of *score* by
    *number_of_sub_graphs* and return the normalised list."""
    normalised = []
    for value in score:
        normalised.append(value / number_of_sub_graphs)
    return normalised
def get_checkpoint_steps(checkpoint):
    """Extract the step count encoded in a checkpoint file name.

    Expects names like ``model.ckpt-1500`` or ``ckpt-1500.index``: the
    integer after the last ``-`` and before any ``.`` extension.
    """
    after_dash = checkpoint.rsplit("-", 1)[-1]
    step_text = after_dash.partition(".")[0]
    return int(step_text)
def setdefaults(kwargs, defaults):
    """Fill *kwargs* in place with any entries from *defaults* it is
    missing, then return *kwargs* (existing keys are never overwritten)."""
    for key in defaults:
        if key not in kwargs:
            kwargs[key] = defaults[key]
    return kwargs
def two_sum(nums, target):
    """Find two entries of *nums* that sum to *target*.

    Single pass with a value -> index map (O(n) time, O(n) space).

    :param nums: sequence of numbers
    :param target: required sum of the pair
    :return: ``[current_index, earlier_index]`` for the first pair found
        (so the first index is always the larger one), or ``None`` when
        no pair sums to *target*.
    """
    seen = {}  # value -> index where that value was last seen
    for i, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [i, seen[complement]]
        seen[value] = i
def overlaps(period1, period2):
    """Do periods intersect?  Each period is an inclusive (start, end) pair;
    touching endpoints count as overlapping."""
    latest_start = period1[0] if period1[0] >= period2[0] else period2[0]
    earliest_end = period1[1] if period1[1] <= period2[1] else period2[1]
    return latest_start <= earliest_end
def strip_sandbox_prefix(path: str, marker: str) -> str:
    """Drop everything before the first occurrence of *marker* in *path*.

    Used to remove the absolute execution-sandbox prefix that `go` bakes
    into paths.  The marker is needed because the prefix's exact shape
    depends on the execution environment (local vs remote).  The marker
    itself is kept; a path without the marker is returned unchanged.
    """
    idx = path.find(marker)
    return path if idx == -1 else path[idx:]
def set_support_dimensions(loading_span=10e-3, support_span=20e-3):
    """Build the jig-dimension dictionary.

    inputs:
        loading_span: loading span in metres (default 10mm)
        support_span: support span in metres (default 20mm)

    Returns a dict with both spans plus their ratio under 'spanratio'.
    """
    return {
        'loading_span': loading_span,
        'support_span': support_span,
        'spanratio': loading_span / support_span,
    }
def downsample(signal, fs, fmax):
    """Decimate *signal* by an integer factor when fs allows it.

    The factor is ``int(fs / (2 * fmax))``, i.e. how many times fs exceeds
    the Nyquist rate for the highest frequency of interest.  When the
    factor is 1 the original generator and rate are handed back untouched.

    Parameters
    ----------
    signal : generator
        Yields 1D chunks (e.g. np.array) of data samples.
    fs : int
        Sampling rate of the unmodified signal.
    fmax : int
        Highest frequency of interest; content above it may be lost.

    Returns
    -------
    ds_signal : generator
        Chunks decimated by the factor (or the original generator).
    ds_fs : int
        The decimated sampling rate (or the original one).

    Raises
    ------
    ValueError
        If fs is below 2 * fmax.
    """
    if fs < 2 * fmax:
        raise ValueError('Sampling frequency fs must be at least 2 * fmax')
    factor = int(fs / (2 * fmax))
    if factor == 1:
        # Nothing to do: pass the stream through unchanged.
        return signal, fs
    decimated = (chunk[::factor] for chunk in signal)
    return decimated, int(fs // factor)
def optional_value(data):
    """Convert *data* to a string so all values can be stored uniformly.

    ``None`` maps to the empty string.  Bug fix: the original stringified
    first and compared against the literal ``"None"``, which also blanked
    out a genuine ``"None"`` string value; we test object identity instead.

    :param data: any value, possibly ``None``
    :return: ``str(data)``, or ``""`` when data is ``None``
    """
    return "" if data is None else str(data)
def Not(value):
    """Return the logical opposite of *value*, which must be a real bool."""
    if isinstance(value, bool):
        return not value
    # Reject truthy/falsy non-bools (including 0/1 ints) explicitly.
    raise ValueError('Expected a boolean value. Got "%s"' % value)
def x_ian(x, word):
    """
    Given a string x, returns True if all the letters in x are
    contained in word in the same order as they appear in x.
    >>> x_ian('eric', 'meritocracy')
    True
    >>> x_ian('eric', 'cerium')
    False
    >>> x_ian('john', 'mahjong')
    False
    >>> x_ian('aa', 'a')
    False
    x: a string
    word: a string
    returns: True if word is x_ian, False otherwise
    """
    # if we've eaten up the whole word, we must have found all chars
    # in word
    if len(x) == 0:
        return True
    # check if the next character can be found in rest of string
    pos_x_i = word.find(x[0])
    if pos_x_i == -1:
        return False
    # Bug fix: recurse on word[pos_x_i + 1:], not word[pos_x_i:], so the
    # matched character cannot be reused for a repeated letter in x
    # (the original wrongly accepted x_ian('aa', 'a')).
    return x_ian(x[1:], word[pos_x_i + 1:])
def get_attributes_dict(attr_entries):
    """
    Convert Protobuf AttributeList.entries (defined below) into a plain
    dictionary, picking the value field that matches each entry's type.
    syntax = "proto2";
    message Attribute {
    enum Type { STRING = 1; DOUBLE = 2; INT = 3; }
    required Type type = 1;
    required string key = 2;
    optional string str_val = 3;
    optional double double_val = 4;
    optional int32 int_val = 5;
    }
    message AttributeList {
    repeated Attribute entries = 1;
    }
    """
    # Type code -> name of the field that carries the value; anything
    # other than STRING/DOUBLE falls back to the int field, as before.
    value_fields = {1: 'str_val', 2: 'double_val'}
    attributes = {}
    for entry in attr_entries:
        field = value_fields.get(entry.type, 'int_val')
        attributes[entry.key] = getattr(entry, field)
    return attributes
def is_release_package(data):
    """
    Returns whether the data is a release package.
    A release package has a required ``releases`` field; its other required
    fields are shared with record packages, so the absence of ``ocid``
    (which only records carry) is what rules out a record.
    """
    if 'ocid' in data:
        return False
    return 'releases' in data
def split_first(s, delims):
    """
    .. deprecated:: 1.25
    Split *s* on whichever single-character delimiter from *delims*
    appears earliest, returning the two parts and the matched delimiter.
    If no delimiter is found, the first part is the full input string.
    Example::
        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)
    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        found = s.find(delim)
        if found < 0:
            continue
        if best_idx is None or found < best_idx:
            best_idx = found
            best_delim = delim
    if best_idx is None:
        return s, "", None
    return s[:best_idx], s[best_idx + 1:], best_delim
def ResolveResidueName(res):
    """ Split the FU residue string 'resnam:resnmb:chain' into its parts.
    :param str res: full residue name used in FU ('resnam:resnmb:chain')
    :return: resnam(str),resnmb(int),chain(str) - residue name, residue number, chain name
    :seealso: lib.PackResDat()
    """
    resnam = ''
    resnmb = -1
    chain = ''
    if res:
        parts = res.split(':')
        resnam = parts[0]
        if len(parts) > 1:
            # A leading '*' means "any residue number" -> keep -1.
            resnmb = -1 if parts[1][0] == '*' else int(parts[1])
        if len(parts) > 2:
            chain = parts[2]
    return resnam, resnmb, chain
def collate_question(query, template, slot):
    """
    Collate a clarification question from a known template and a slot
    value; '<QUERY>' as the slot means "use the query itself".
    Raises ValueError for an unrecognised template.
    """
    # Template patterns (compared verbatim against `template`).
    fixed_choice = "select one to refine your search"
    about = "what (do you want | would you like) to know about (.+)?"
    which_mean = "(which | what) (.+) do you mean?"
    looking_for = "(what | which) (.+) are you looking for?"
    do_with = "what (do you want | would you like) to do with (.+)?"
    shopping = "who are you shopping for?"
    trying = "what are you trying to do?"
    specific = "do you have any (specific | particular) (.+) in mind?"
    if slot == "<QUERY>":
        slot = query
    if template in (fixed_choice, shopping, trying):
        # Templates with no slot are emitted verbatim.
        return template
    if template == about:
        if slot == query:
            return "what do you want to know about %s?" % slot
        return "what do you want to know about this %s?" % slot
    if template == which_mean:
        return "which %s do you mean?" % slot
    if template == looking_for:
        return "what %s are you looking for?" % slot
    if template == do_with:
        return "what do you want to do with %s?" % slot
    if template == specific:
        return "do you have any specific %s in mind?" % slot
    raise ValueError("Error of template!")
def get_file_version(path):
    """Scan a text file for a '#VERSION ' header line.

    :param path: path of the file to scan
    :return: the text after the first '#VERSION ' prefix, or None when the
        file cannot be read or contains no version line
    """
    try:
        with open(path, "rt") as fp:
            for line in fp:
                line = line.rstrip('\r\n')
                if line.startswith('#VERSION '):
                    return line[9:]  # len('#VERSION ') == 9
    except (OSError, UnicodeDecodeError):
        # Only swallow expected read/decoding failures; the original bare
        # `except:` also hid KeyboardInterrupt and programming errors.
        pass
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.