content stringlengths 42 6.51k |
|---|
def is_rotation_slicing(s1, s2):
    """Return True if s2 is a rotation of s1 (slice-and-compare approach)."""
    if not s1:
        # Empty s1 is never considered a rotation (matches original contract).
        return False
    length = len(s1)
    if length != len(s2):
        return False
    # A rotation is some suffix of s1 followed by the matching prefix.
    return any(s1[k:] + s1[:k] == s2 for k in range(length))
def expected_hamming(p1, p2=None):
    """Expected Hamming distance between Bernoulli random vectors X and Y.

    X_i ~ Bernoulli(p1) and Y_i ~ Bernoulli(p2); p2 defaults to p1.
    Assumptions (both empirically shaky, but they yield a closed form):
    1. (X_i, Y_i) pairs are identically jointly distributed across i —
       all data points equally easy (or hard) to learn.
    2. X_i and Y_i are conditionally independent given i — predictions of
       two learned models on the same test example are independent.
    Then P(X_i != Y_i) = p1 + p2 - 2*p1*p2, which reduces to 2*p1*(1-p1)
    when p2 == p1.
    """
    if p2 is None:
        return 2 * p1 * (1 - p1)
    return p1 + p2 - 2 * p1 * p2
def index_format(FORMAT):
    """Map each colon-separated FORMAT field name to its position index."""
    return {field: position for position, field in enumerate(FORMAT.split(":"))}
def avoidhexsingularity(rotation):
    """Nudge rotations sitting on exact multiples of 15 degrees.

    Exact multiples of 15 deg produce NaNs in hextransformEE(), so such
    values get epsilon = 1.0e-12 degrees added; others pass through.

    Parameters
    ----------
    rotation : int or float
        Rotation in degrees.

    Returns
    -------
    Replacement value for rotation, with epsilon added when needed.
    Precondition before using rotation degrees in Affine2d for hex geometries.
    """
    epsilon = 1.0e-12
    # Fractional part of rotation/15; ~0 means an exact multiple of 15.
    fractional = rotation / 15.0 - int(rotation / 15.0)
    if abs(fractional) < epsilon / 2.0:
        return rotation + epsilon
    return rotation
def add_trailing_slash(string):
    """Return `string` with a '/' appended unless empty or already present."""
    if string and not string.endswith('/'):
        return string + '/'
    return string
def replace_operators(str):
    """Returns string with mathematical operators put back."""
    # Undo the placeholder encoding of +, -, * and /.
    for placeholder, operator in (('_p_', '+'), ('_m_', '-'),
                                  ('_t_', '*'), ('_d_', '/')):
        str = str.replace(placeholder, operator)
    return str
def flatten_list(alst):
    """Flatten nested lists only, leaving other iterables (tuples, ...) intact.

    FIX: the original doctest example was malformed (`[0, (2, 3), [4])`).

    >>> flatten_list([0, (2, 3), [4]])
    [0, (2, 3), 4]
    """
    def _recur(blst):
        for elem in blst:
            if type(elem) is list:
                # Recurse only into real lists; everything else is a leaf.
                yield from _recur(elem)
            else:
                yield elem
    return list(_recur(alst))
def not_0(upper, lower):
    """Fill both values with whichever is not equal to 0, or leave in place.

    Comparison is against 0 explicitly (not truthiness): inputs may be any
    value, including an empty list, which must count as nonzero here.
    """
    if upper == 0:
        upper = lower
    elif lower == 0:
        lower = upper
    return upper, lower
def exit_H_CUST(state):
    """Handle leaving the ``H_CUST`` state.

    Pops the in-progress customer record from `state` and, when truthy,
    appends it to the ``customers`` list (created on demand).
    """
    customer = state.pop('current_customer', None)
    if customer:
        state.setdefault('customers', []).append(customer)
    return state
def html_end(f, body, links=None):
    """Write the optional links block, the body and the closing HTML tags.

    Parameters
    ----------
    f : file
        Open writable file-like object.
    body : str
        HTML body content.
    links : str, optional
        Link markup; when given it is followed by closing ``</ul>``/``<hr>``.

    Returns
    -------
    str
        The closing-tag string written last.
    """
    if links is not None:
        f.write(links)
        f.write('</ul>\n')
        f.write('<hr>\n')
    f.write(body)
    closing = '</body>\n</html>\n'
    f.write(closing)
    return closing
def gen_list_dict(active_relays):
    """Build a list of dictionaries, one per relay, with its display fields.

    @type active_relays: C{QuerySet}
    @param active_relays: Relays to be displayed (may be None or empty).
    @rtype: C{list}
    @return: One dict of display fields per relay; empty list when none.
    """
    list_dict = []
    if active_relays:
        for relay in active_relays:
            # Boolean flags are normalised to 1/0 for the template layer.
            relay_dict = {
                'country': relay.country,
                'longitude': relay.longitude,
                'latitude': relay.latitude,
                'nickname': relay.nickname,
                'bandwidthkbps': str(relay.bandwidthkbps) + " KB/s",
                'uptime': str(relay.uptimedays) + " d",
                'address': relay.address,
                #'hostname': relay.hostname,
                'hibernating': 1 if relay.ishibernating else 0,
                'orport': relay.orport,
                'dirport': relay.dirport,
                # FIX: 'isbadexit' appeared twice in the original dict
                # literal (identical values); kept a single entry.
                'isbadexit': 1 if relay.isbadexit else 0,
                'isnamed': 1 if relay.isnamed else 0,
                'isexit': 1 if relay.isexit else 0,
                'isauthority': 1 if relay.isauthority else 0,
                'isfast': 1 if relay.isfast else 0,
                'isguard': 1 if relay.isguard else 0,
                'isstable': 1 if relay.isstable else 0,
                'isv2dir': 1 if relay.isv2dir else 0,
                'platform': relay.platform,
                'fingerprint': relay.fingerprint,
                'published': relay.published,
                'contact': relay.contact,
                'isbaddirectory': 1 if relay.isbaddirectory else 0,
            }
            list_dict.append(relay_dict)
    return list_dict
def parse_lambda_tags_from_arn(arn):
    """Generate the list of lambda tags based on the data in the arn

    Args:
        arn (str): Lambda ARN, e.g.
            arn:aws:lambda:us-east-1:172597598159:function:my-lambda[:optional-version]
    """
    pieces = arn.split(":")
    # Drop an optional trailing version/alias so the unpack below is exact.
    if len(pieces) > 7:
        pieces = pieces[:7]
    _, _, _, region, account_id, _, function_name = pieces
    return [
        "region:{}".format(region),
        "account_id:{}".format(account_id),
        # aws_account mirrors the tag used on aws.lambda CloudWatch metrics.
        "aws_account:{}".format(account_id),
        "functionname:{}".format(function_name),
    ]
def is_prime(n: int) -> bool:
    """
    :param n: integer (the sign is ignored: abs(n) is tested)
    :return: True if the number is prime, False if not
    """
    n = abs(n)
    if n < 3 or n % 2 == 0:
        # Covers 0, 1 and all even numbers; only 2 survives this filter.
        return n == 2
    # Odd n >= 3: trial division by odd candidates up to sqrt(n).
    for candidate in range(3, int(n ** 0.5 + 2), 2):
        if n % candidate == 0:
            return False
    return True
def parse_duration(s: str) -> int:
    """Convert an '[HH:]MM:SS' style duration string into total seconds.

    >>> parse_duration('00:01')
    1
    >>> parse_duration('03:33')
    213
    >>> parse_duration('01:14:00')
    4440
    """
    fields = s.split(':')
    total = int(fields[-1])
    if len(fields) > 1:
        total += 60 * int(fields[-2])
    if len(fields) == 3:
        total += 3600 * int(fields[0])
    return total
def decode(arr, input_type="int"):
    """Decode an array of varint 7-bit groups back into the original number.

    Groups are least-significant first; ``input_type == "hex"`` means the
    groups are hex strings rather than ints.

    Raises:
        ValueError: when `arr` is empty/falsy.
    """
    if not arr:
        raise ValueError("Could not decode varint, empty values passed")
    if input_type == "hex":
        arr = [int(group, 16) for group in arr]
    result = 0
    # Mask off each group's continuation (msb) bit and shift into place,
    # 7 bits per group — the inverse of the encoder's right-shifts.
    for position, group in enumerate(arr):
        result += (group & 0x7f) << (7 * position)
    return result
def scoreClass(scoreValue=0):
    """
    Returns the CSS class name based on the score performance.

    < 45 -> poor, 45-74 -> average, >= 75 -> good; anything that cannot
    be ordered (or compared at all) falls back to "".
    """
    try:
        if scoreValue < 45:
            return "pl-poorscore"
        if scoreValue < 75:
            return "pl-avgscore"
        if scoreValue > 74:
            return "pl-goodscore"
        # Unordered values (e.g. NaN) fail all three comparisons.
        return ""
    except Exception:
        # Comparison raised outright (e.g. a non-numeric type).
        return ""
def day_suffix(d: int) -> str:
    """Return the ordinal suffix 'st', 'nd', 'rd' or 'th' for a day of a month.

    For example, the 21st of August -> returns 'st'.

    Parameters
    ----------
    d: :class:`int`
        Day of the month, ranging 1-31(max).
    """
    # 11th/12th/13th are irregular; otherwise the last digit decides.
    if 11 <= d <= 13:
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(d % 10, 'th')
def convertAsterisk(text):
    """Convert asterisk emphasis in `text` to HTML tags.

    - Double asterisk: bold
    - Single asterisk: italic

    Implemented as a character-at-a-time state machine; backslash acts
    as an escape character.
    """
    # States: NONE (plain text), ESC (just saw '\'), SINGLE (just saw one
    # '*'), POSTSINGLE (first char after a single '*'), DOUBLE (saw '**').
    state = ["NONE", "ESC", "SINGLE", "POSTSINGLE", "DOUBLE"]
    # next_state = transitions[current][char], falling back to "default".
    # NOTE(review): the "*:" key in ESC looks like a typo for "*", but both
    # routes lead to NONE so behaviour is unaffected.
    transitions = { "NONE": {"*": "SINGLE", "\\": "ESC", "default": "NONE"}, # From NONE state
                    "ESC": {"*:": "NONE", "\\": "NONE", "default": "NONE"}, # From ESC state
                    "SINGLE": {"*": "DOUBLE", "\\": "POSTSINGLE", "default": "POSTSINGLE"}, # From SINGLE state
                    "POSTSINGLE": {"*": "SINGLE", "\\": "NONE", "default": "NONE"}, # From POSTSINGLE state
                    "DOUBLE": {"default": "NONE"} # From DOUBLE state
                    }
    outputText = []
    state = "NONE"
    boldFlag = False
    italicFlag = False
    for c in text:
        # Transition to new state
        if c in transitions[state]:
            state = transitions[state][c]
        else:
            state = transitions[state]["default"]
        # Perform the emit action for the new state
        if state == "NONE":
            outputText.append(c)
        if state == "ESC":
            outputText.append(c)
        if state == "POSTSINGLE":
            # First char after a single '*': toggle italics, then emit it.
            if not italicFlag: outputText.append("<em>")
            else: outputText.append("</em>")
            outputText.append(c)
            italicFlag = not italicFlag
        if state == "DOUBLE":
            # '**' seen: toggle bold.
            if not boldFlag: outputText.append("<b>")
            else: outputText.append("</b>")
            boldFlag = not boldFlag
        # NOTE(review): emits '</em>' for every lone '*' regardless of
        # italicFlag — looks suspicious; confirm against expected output.
        if state == "SINGLE": outputText.append("</em>")
    return "".join(outputText)
def fix(value):
    """Turn negative 32-bit numbers into positive numbers."""
    if value < 0:
        value += 2 ** 32
    return value
def strip_comments(script):
    """Split `script` (a list of lines) into code lines and '#' comments.

    Each line's comment (from the first '#' to the end of the line) is
    collected; every collected comment string is then removed from every
    line, and lines that end up empty are dropped.

    Returns:
        (raw_code, comments): cleaned lines and the comment strings.
    """
    comments = []
    for line in script:
        hash_pos = line.find('#')
        if hash_pos != -1:
            # Keep everything from the '#' to the end of the line.
            comments.append(str(line)[hash_pos:])

    def _strip(text, fragments, replacement=''):
        # Remove every collected fragment (no-op when a fragment is absent).
        for fragment in fragments:
            text = text.replace(fragment, replacement)
        return text

    cleaned = [_strip(line, comments) for line in script]
    # Drop lines that became empty strings once comments were removed.
    raw_code = list(filter(None, cleaned))
    return raw_code, comments
def get_insert_many_query(table_name):
    """Build a SQL query to insert a RDF triple into a PostgreSQL dataset.

    NOTE(review): `table_name` is interpolated directly into the SQL, so it
    must come from trusted code, never from user input.
    """
    columns = "(subject,predicate,object)"
    return (f"INSERT INTO {table_name} {columns} VALUES %s "
            f"ON CONFLICT {columns} DO NOTHING")
def _data_verification_and_normalize(data: dict) -> dict:
""""Check if data key exists and if value is valid. If true, replace data for new format.
Keyword arguments:
`data: dict` - Data dictionary for verification and format change
"""
data = data.copy()
if data.get('data', False):
is_valid = data['data'].split(' - ')[-1].lower() == 'verdadeiro'
data['data'] = is_valid
return data |
def hashable(x):
    """Report whether `x` is hashable (usually implying immutability).

    Parameters
    ----------
    x: object
        The item to check for hashability.

    Returns
    -------
    bool: True if `x` can be hashed (suggesting immutability), False otherwise.
    """
    try:
        hash(x)
    except TypeError:
        return False
    return True
def pluralize(container_type):
    """Convert a container type name to its plural form.

    Simplistic logic supporting: group, project, session, subject,
    acquisition, analysis, collection.
    """
    if container_type == 'analysis':
        return 'analyses'
    return container_type if container_type.endswith('s') else container_type + 's'
def deep_merge(a, b, overwrite=False, path=None):
    """
    Deep merges dict b into dict a (in place).

    Adapted from https://stackoverflow.com/a/7205107/1935553

    :param a: dict to merge into
    :param b: dict to merge
    :param overwrite: If true, conflicting leaf values from b overwrite
        those sharing the same key at the same level in a
    :param path: the key path so far - needed for error reporting
    :returns: deep merged dict (a)
    :raises Exception: on a conflicting leaf when `overwrite` is false

    BUG FIX: the original tested ``not overwrite or a[key] == b[key]``
    before the overwrite branch, which silently ignored genuine conflicts
    and made the raise unreachable.
    """
    if path is None:
        path = []
    for key in b:
        if key not in a:
            a[key] = b[key]
        elif isinstance(a[key], dict) and isinstance(b[key], dict):
            deep_merge(a[key], b[key], overwrite, path + [str(key)])
        elif a[key] == b[key]:
            pass  # same leaf value - nothing to do
        elif overwrite:
            a[key] = b[key]
        else:
            raise Exception("Conflict at '{}'.".format(path + [str(key)]))
    return a
def make_html_safe(s):
    """Escape '<' and '>' so ROUGE's HTML output renders them literally.

    BUG FIX: the checked-in version replaced "<" with "<" (a no-op) —
    almost certainly HTML-entity corruption of "&lt;"/"&gt;"; restored.
    """
    return s.replace("<", "&lt;").replace(">", "&gt;")
def weekday_name(weekday_num):
    """Return the name of a weekday given its number (Monday is 0, Sunday is 6)."""
    return ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
            'Friday', 'Saturday', 'Sunday')[weekday_num]
def text_summary(text, length=40):
    """
    Make a summary of `text`, at most `length` chars long.

    The middle is elided with '...' when needed; the head gets the shorter
    half when the available space is odd.
    """
    if len(text) <= length:
        return text
    head = (length - 3) // 2
    tail = length - 3 - head
    return "{}...{}".format(text[:head], text[-tail:])
def rf(x, n):
    """
    Rising factorial (Pochhammer symbol): x * (x+1) * ... * (x+n-1).

    Negative n is handled via the reciprocal reflection 1 / rf(x - |n|, |n|).
    """
    if n < 0:
        # x + n == x - abs(n) and -n == abs(n) for negative n.
        return rf(x + n, -n) ** (-1)
    product = 1
    for offset in range(n):
        product *= x + offset
    return product
def get_author_details(repo, branch: str, commit_branch_list):
    """Print per-author commit counts for the given branch's commit list.

    Counts are keyed by ``commit.author.name``. `repo` and `branch` are
    unused here but kept for interface compatibility with callers.

    FIX: the original ran two passes (one to zero the dict, one to count);
    a single pass with a default of 0 is equivalent.

    Returns:
        None (the statistics are printed, not returned).
    """
    author_details = {}
    for commit in commit_branch_list:
        name = commit.author.name
        author_details[name] = author_details.get(name, 0) + 1
    print(author_details)
    return None
def bernoulli_param(N, T, I_T, F_N):
    """Bernoulli parameter definition (Maximum Likelihood).

    Args:
        N   : Total number of people in the city
        T   : Number of people in the test sample
        I_T : Number of infected people in the test sample
        F_N : Total number of fatalities in the city

    Returns:
        (p_T, p_I, p_F): testing-rate, infection-rate and fatality
        Bernoulli parameters.
    """
    return T / N, I_T / T, F_N / N
def get_interface_type(interface):
    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...

    Returns the lowercase type string, or None for None/unrecognised names.

    FIX: hoists the repeated ``interface.upper()`` call of the original out
    of the elif chain and drives the match from a prefix table (original
    first-match order preserved).
    """
    if interface is None:
        return None
    upper = interface.upper()
    prefix_types = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )
    for prefix, iftype in prefix_types:
        if upper.startswith(prefix):
            return iftype
    return None
def trace2index_flat(shape, ntrace):
    """
    Calculate the multi-dimensional index of trace `ntrace` assuming a
    flat (row-major) structure.
    """
    # Peel remainders off against the trailing sizes (last dimension first);
    # the final quotient becomes the leading index.
    quotient = ntrace
    reversed_index = []
    for size in reversed(shape[1:]):
        quotient, remainder = divmod(quotient, size)
        reversed_index.append(remainder)
    reversed_index.append(quotient)
    return tuple(reversed(reversed_index))
def get_average_tuple(list_of_tuples):
    """Return the element-wise average (rounded) of a list of 3-tuples.

    BUG FIX: the original divided each sum by a hard-coded 9, which is only
    correct for exactly nine tuples; the divisor is now the list length.
    """
    count = len(list_of_tuples)
    sum_0 = sum(t[0] for t in list_of_tuples)
    sum_1 = sum(t[1] for t in list_of_tuples)
    sum_2 = sum(t[2] for t in list_of_tuples)
    return (round(sum_0 / count), round(sum_1 / count), round(sum_2 / count))
def pos_t(t, v):
    """Position at time t for initial velocity v: x(t) = -t**2/2 + (1/2 + v)*t."""
    return (0.5 + v) * t - 0.5 * t ** 2
def create_multidimensional_list(shape, element=None):
    """Create a nested list of the given `shape`, filled with `element`.

    For example shape = [2, 3] returns [[None, None, None], [None, None, None]].

    Remarks:
    . Unlike numpy arrays, the elements in the list can be of any type.
    . When `element` is itself a list, every slot receives its own shallow
      copy (via list()) so the slots stay independent; without the copy all
      slots would share a single list object, due to how Python treats
      list instances.
    """
    if not shape:
        # Innermost level reached: place the element (copied when a list).
        return list(element) if isinstance(element, list) else element
    # Recurse on the remaining dimensions for each slot of the first one.
    return [create_multidimensional_list(shape[1:], element)
            for _ in range(shape[0])]
def tau(zeta_2, zeta_1):
    """Transmission coefficient: 2*zeta_2 / (zeta_2 + zeta_1)."""
    numerator = 2 * zeta_2
    return numerator / (zeta_2 + zeta_1)
def compute_most_frequent(dict1, dict2):
    """
    Return the most frequent word(s) across two frequency dictionaries.

    The keys of dict1 and dict2 are all lowercase (no case handling needed).
    A word's combined frequency is the sum of its counts in both dicts;
    a word need not appear in both. All words tied for the highest combined
    frequency are returned in alphabetical order.

    Args:
        dict1: frequency dictionary for one text
        dict2: frequency dictionary for another text
    Returns:
        list of the most frequent word(s), alphabetically ordered

    FIX: the original rebuilt whole texts via string repetition and
    re-counted them word by word (quadratic and fragile for words
    containing spaces); summing the two counters directly is equivalent.
    """
    from collections import Counter
    combined = Counter(dict1)
    combined.update(dict2)
    top = max(combined.values())
    return sorted(word for word, count in combined.items() if count == top)
def argmax(xs: list) -> int:
    """Return the index of the largest value (first one on ties)."""
    index, _value = max(enumerate(xs), key=lambda pair: pair[1])
    return index
def normalizeRotationAngle(value):
    """
    Normalizes an angle.

    * Value must be a :ref:`type-int-float`.
    * Value must be between -360 and 360.
    * If the value is negative, it is normalized by adding 360 to it.
    * Returned value is a ``float`` between 0 and 360.
    """
    if not isinstance(value, (int, float)):
        raise TypeError("Angle must be instances of "
                        ":ref:`type-int-float`, not %s."
                        % type(value).__name__)
    if abs(value) > 360:
        raise ValueError("Angle must be between -360 and 360.")
    return float(value + 360 if value < 0 else value)
def calculate_rescue_time_pulse(very_distracting, distracting, neutral, productive, very_productive):
    """
    Productivity pulse per the RescueTime formula.

    Each parameter is the number of seconds spent in that category.
    Category weights run 0 (very distracting) through 4 (very productive);
    the weighted total is divided by 4x the total time so the result lands
    in [0, 1]. Zero logged time yields 0.
    http://help.rescuetime.com/kb/rescuetime-website/how-is-my-productivity-pulse-calculated
    """
    durations = (very_distracting, distracting, neutral, productive, very_productive)
    # Weight index i with i itself: 0, 1, 2, 3, 4.
    weighted = sum(weight * seconds for weight, seconds in enumerate(durations))
    scaled_total = sum(durations) * 4
    try:
        return weighted / scaled_total
    except ZeroDivisionError:
        return 0
def sort(pinyin_d):
    """Return the dict's (key, value) pairs as a list sorted by key.

    :rtype: list
    """
    pairs = list(pinyin_d.items())
    pairs.sort(key=lambda pair: pair[0])
    return pairs
def patch_strptime_cache_size(max_size=100):
    """Patch the max size of ``_strptime``'s internal regex cache.

    Returns True on success, False when the private module or either of
    its expected attributes is unavailable (other interpreters/versions).
    """
    try:  # pragma: nocover
        import _strptime
        if not hasattr(_strptime, "_CACHE_MAX_SIZE"):
            return False
        if not hasattr(_strptime, "_cache_lock"):
            return False
    except (ImportError, AttributeError):  # pragma: nocover
        return False
    # Mutate under the module's own lock, mirroring its internal usage.
    with _strptime._cache_lock:
        _strptime._CACHE_MAX_SIZE = max_size
    return True
def encode_headers(headers):
    """Encodes HTTP headers.

    Args:
        headers: Dictionary of HTTP headers.
    Returns:
        String of "name: value" lines joined by '\\n', sorted by name.
    """
    lines = ["%s: %s" % (name, headers[name]) for name in sorted(headers)]
    return '\n'.join(lines)
def make_matrix(num_rows, num_cols, entry_fn):
    """returns a num_rows x num_cols matrix
    whose (i,j)th entry is entry_fn(i, j)"""
    matrix = []
    for i in range(num_rows):
        row = [entry_fn(i, j) for j in range(num_cols)]
        matrix.append(row)
    return matrix
def titlecase(string: str):
    """Safely recast a string to title case; non-strings pass through."""
    if hasattr(string, 'title'):
        return string.title()
    return string
def left_color_min_max(objects):
    """Returns min or max number on the balls depending on the leftmost color.

    Green/blue/purple leftmost -> minimum; yellow/red/orange -> maximum.
    The chosen extreme of the balls' numbers is returned minus one.

    Raises:
        ValueError: for an unsupported leftmost color.
    """
    leftmost = min(objects, key=lambda obj: obj["position"][0])
    leftmost_color = leftmost["color"]
    if leftmost_color in ("green", "blue", "purple"):
        chooser = min
    elif leftmost_color in ("yellow", "red", "orange"):
        chooser = max
    else:
        raise ValueError(f"Color '{leftmost_color}' is not supported.")
    return chooser(obj["number"] for obj in objects) - 1
def process_passport(passport):
    """Turn a passport list (lines of space-separated key:value pairs) into a dict."""
    joined = ' '.join(line.strip('\n') for line in passport)
    fields = joined.split(' ')
    return {key: entry for key, entry in (field.split(':') for field in fields)}
def part1(entries: list) -> int:
    """part1 solver: count 'nice' strings.

    Nice = at least three vowels, at least one doubled letter, and none
    of the forbidden substrings ab/cd/pq/xy.
    """
    def _is_nice(s):
        vowel_count = sum(1 for ch in s if ch in 'aeiou')
        has_double = any(a == b for a, b in zip(s, s[1:]))
        has_forbidden = any(bad in s for bad in ('ab', 'cd', 'pq', 'xy'))
        return vowel_count >= 3 and has_double and not has_forbidden
    return sum(1 for string in entries if _is_nice(string))
def create_correction_tree(correction_value, feature_to_split_on):
    """Creates new tree with the given correction amount.

    The tree is a trivial depth-0 split whose two leaves both carry the
    same correction, so every input receives `correction_value`.

    Parameters
    ----------
    correction_value : float
        leaf value for the new tree
    feature_to_split_on : string
        feature name for the new tree
    """
    leaves = [{"leaf": correction_value, "nodeid": node_id} for node_id in (1, 2)]
    return {
        "children": leaves,
        "depth": 0,
        "missing": 1,
        "no": 2,
        "nodeid": 0,
        "split": feature_to_split_on,
        "split_condition": 1,
        "yes": 1,
    }
def find_irreducible_prefix(brackets):
    """Split off the minimal balanced prefix of an 'L'/'R' string.

    Args:
        brackets: A string containing an equal number of (and only) 'L's and 'R's.
    Returns:
        A two-element tuple: the shortest non-empty prefix of `brackets`
        containing an equal number of 'L's and 'R's, and the rest of the
        string.
    Raises:
        ValueError: No irreducible prefix could be found, or `brackets` was empty.
    """
    depth = 0
    for position, ch in enumerate(brackets):
        depth += 1 if ch == 'L' else -1
        # The prefix must be non-trivial, hence the position > 0 guard.
        if depth == 0 and position > 0:
            cut = position + 1
            return brackets[:cut], brackets[cut:]
    raise ValueError('unbalanced or empty: %s' % ''.join(brackets))
def singleton(string):
    """
    Wrap the argument in a one-element list.

    Useful when collecting result of parsers chained by `seq`.
    (Interesting alternative names: capture, lift.)
    """
    result = [string]
    return result
def convert_training_examples_for_spacy(all_training_examples):
    """
    Convert examples to the spaCy training format:
    (text, {"entities": [(start, end, label), ...]}).
    """
    converted = []
    for example in all_training_examples:
        # example[0]: span tuples (start, end, ..., label); example[1]: text.
        entities = [(span[0], span[1], span[3]) for span in example[0]]
        converted.append((example[1], {"entities": entities}))
    return converted
def exchange_money(budget, exchange_rate):
    """
    Estimated value of the foreign currency you can receive.

    :param budget: float - amount of money you are planning to exchange.
    :param exchange_rate: float - unit value of the foreign currency.
    :return: float - estimated value of the foreign currency you can
        receive based on your budget and the current exchange rate.
    """
    amount = budget / exchange_rate
    return float(amount)
def CleanDate(date):
    """Remove square brackets, newlines and a trailing period from dates.

    BUG FIX: the original indexed ``date[-1]`` after the replacements,
    which raised IndexError when the cleaned string became empty
    (e.g. input '[]').
    """
    if not date:
        return ''
    date = date.replace('[', '').replace(']', '').replace('\n', ' ')
    if date.endswith('.'):
        date = date[:-1]
    return date
def pkcs7_pad(message, block_size):
    """Pad `message` (bytes) to a multiple of `block_size` per PKCS#7.

    BUG FIX: the original returned messages whose length already equalled
    the block size unpadded; PKCS#7 (RFC 5652 §6.3) requires a full extra
    block of padding in that case so unpadding stays unambiguous.
    """
    pad_len = block_size - len(message) % block_size
    return message + bytes([pad_len] * pad_len)
def sign_of_sequence_fast(sequence: list) -> int:
    """
    Sign of sequence equals to (-1)**(sequence inversions).
    Sequence must contain elements from 0 to len(sequence), i.e. be a
    permutation of range(len(sequence)).

    Walks each cycle of the permutation once: a cycle of length k
    contributes k-1 transpositions, so the sign flips k-1 times per
    cycle. Runs in O(n) instead of counting inversions pairwise.
    """
    sign = 1
    # elements_to_check[i] turns False once position i has been visited
    # as part of an already-processed cycle.
    elements_to_check = [True for _ in range(len(sequence))]
    for i in range(len(sequence)):
        if elements_to_check[i]:
            current_element = sequence[i]
            # Follow the cycle starting at i, flipping the sign for each
            # step until the cycle closes back at i.
            while current_element != i:
                elements_to_check[current_element] = False
                sign = -sign
                current_element = sequence[current_element]
    return sign
def write_to_file(sentences, filename):
    """Write each sentence to `filename`_sent_tok, one per line; return None."""
    with open(filename + '_sent_tok', 'w', encoding='utf8') as out:
        out.writelines('{}\n'.format(sentence) for sentence in sentences)
    return None
def unescape(s):
    """The inverse of cgi.escape().

    BUG FIX: the checked-in replacements were identity no-ops (e.g.
    replacing '"' with '"') — almost certainly HTML-entity corruption of
    the source; restored to decode &quot; &gt; &lt; and finally &amp;.
    """
    s = s.replace('&quot;', '"').replace('&gt;', '>').replace('&lt;', '<')
    # &amp; must be decoded last so it cannot create new entities to decode.
    return s.replace('&amp;', '&')
def digit(n, k, base):
    """Return the k-th digit (0 = least significant) of n in the given base.

    >>> digit(1234, 0, 10)
    4
    >>> digit(1234, 1, 10)
    3
    >>> digit(1234, 2, 10)
    2
    >>> digit(1234, 3, 10)
    1
    """
    shifted = n // base ** k
    return shifted % base
def distinct(l):
    """
    Given an iterable will return a list of all distinct values.

    param:
        l: an iterable (of hashable values)
    return:
        the list (element order is unspecified, as with the underlying set)
    """
    unique_values = set(l)
    return list(unique_values)
def replace_at_idx(tup, i, val):
    """Return a copy of tuple `tup` with the value at index `i` replaced by `val`.

    :param tup: tuple to be updated
    :param i: index at which the value should be replaced (negatives OK)
    :type i: integer
    :param val: new value at index i
    :type val: value
    :return: new tuple with replaced value
    """
    items = list(tup)
    items[i] = val
    return tuple(items)
def mask(val: str, preserve_length: int = 0) -> str:
    """Mask `val` with 'X', leaving only the last `preserve_length` chars visible."""
    if not val:
        return val
    if not preserve_length:  # None or 0 -> mask fully
        return 'X' * len(val)
    # Keep the tail, left-pad with 'X' back to the original length.
    return val[-preserve_length:].rjust(len(val), 'X')
def recursive_binary_search(items, target):
    """Binary search on a sorted list; returns an index of `target` or None.

    Note: slicing copies sublists, so despite the name each level does
    O(n) work — the recursion depth is still O(log n).
    """
    if not items:
        return None
    mid = len(items) // 2
    pivot = items[mid]
    if pivot == target:
        return mid
    if pivot < target:
        # Found index is relative to the right half; shift it back.
        sub = recursive_binary_search(items[mid + 1:], target)
        return None if sub is None else mid + 1 + sub
    return recursive_binary_search(items[:mid], target)
def add_atom(d, key, atom):
    """
    Append `atom` to the tuple stored at d[key], creating the entry when
    missing; returns the (mutated) dict d.
    """
    if key in d:
        d[key] = tuple(d[key] + (atom, ))
    else:
        d[key] = (atom, )
    return d
def has_symbols(password):
    """Return True if password has at least one symbol (non-alphanumeric char)."""
    for char in password:
        if not char.isalnum():
            return True
    return False
def is_none(value):
    """Convert a string indicating 'None' or missing value to None."""
    return None if value in ('NA', 'None') else value
def parse_in(line):
    """Parse an incoming IRC message into (prefix, command, args).

    The trailing parameter (everything after ' :') is appended to args as
    a single string. Empty lines print a warning and return None.
    """
    if not line:
        print("Bad IRC message: ", line)
        return None
    prefix = ''
    if line.startswith(':'):
        # A leading ':' marks the origin prefix; strip it off the line.
        prefix, line = line[1:].split(' ', 1)
    if line.find(' :') != -1:
        line, trailing = line.split(' :', 1)
        args = line.split()
        args.append(trailing)
    else:
        args = line.split()
    command = args.pop(0)
    return prefix, command, args
def kw2re(x):
    """Convert a list of keywords to an alternation regex like '(a|b|c)'.

    FIX: removed the redundant inner ``list()`` — sorted() accepts any
    iterable, including a set.

    NOTE(review): keywords are not re.escape()d, so regex metacharacters
    inside them keep their special meaning — unchanged from the original.
    """
    return r'(%s)' % '|'.join(sorted(set(x)))
def range_check(low, high):
    """\
    Verify that the given range has `low` strictly below `high`.

    Raises ValueError when the condition is not met; otherwise returns
    the two values converted to floats.
    """
    low = float(low)
    high = float(high)
    if low >= high:
        raise ValueError('low >= high')
    return low, high
def is_single_index(slc) -> bool:
    """Is the slice equivalent to a single index?

    True when both endpoints are given and the step (default 1) makes the
    slice cover at most one element.
    """
    step = 1 if slc.step is None else slc.step
    if slc.start is None or slc.stop is None:
        return False
    return slc.start + step >= slc.stop
def LeastGraded(adjList):
    """Search for the least graded node of a graph.

    Returns (grade, node): the smallest adjacency-list length found and the
    first node index achieving it. The grade starts at len(adjList), so an
    empty graph yields (0, 0).
    """
    grade, node = len(adjList), 0
    for index, neighbours in enumerate(adjList):
        degree = len(neighbours)
        if degree < grade:
            grade, node = degree, index
    return grade, node
def int_to_le_bytes(x: int, length: int, signed: bool):
    """Converts an integer to a little endian byte array (list of ints).

    Raises OverflowError when `x` does not fit in `length` bytes.

    FIX: the original had a second, unreachable return (a manual bit-shift
    fallback) after the first; removed as dead code.
    """
    return list(x.to_bytes(length, 'little', signed=signed))
def chunker(seq, size):
    """Split a sequence into `size`-length segments (the last may be shorter)."""
    return [seq[start:start + size] for start in range(0, len(seq), size)]
def slots_to_range(week_hours, end_hour, allowed_availabilities):
    """Convert per-hour availability flags into ranges format.

    For each day in `week_hours` (a sequence of per-hour values where 0
    means unavailable), produce a list of [start, stop) hour ranges whose
    values stayed within `allowed_availabilities`.

    NOTE(review): assumes `end_hour` equals the number of slots per day —
    confirm against callers.
    """
    week_ranges = []
    for day_hours in week_hours:
        day_ranges = []
        start = None  # start hour of the currently open range, if any
        for i, value in enumerate(day_hours):
            # Start of new range:
            if start is None and value != 0:
                start = i
                continue
            # End of range:
            # (A range will end if either the current slot is unavailable
            # (value 0) or if the current slot is the last one.)
            if start is not None:
                if value not in allowed_availabilities: # Unavailable
                    day_ranges.append([start, i])
                    start = None
                elif i == end_hour - 1: # Last slot closes the open range
                    day_ranges.append([start, end_hour])
                else:
                    continue
        week_ranges.append(day_ranges)
    return week_ranges
def equalsignore(s1, s2):
    """Case-insensitive string equality; None equals only None.

    BUG FIX: the original crashed with AttributeError when s1 was a string
    but s2 was None; both None cases are now handled symmetrically.

    :param s1:
    :param s2:
    :return: bool
    """
    if s1 is None or s2 is None:
        return s1 is None and s2 is None
    return s1.lower() == s2.lower()
def simple_to_detailed(templates):
    """
    Convert a simple taxes object into a more detailed data structure.

    Example input:
        {
            "France VAT 20%": {
                "account_name": "VAT 20%",
                "tax_rate": 20,
                "default": 1
            },
            "France VAT 10%": {
                "account_name": "VAT 10%",
                "tax_rate": 10
            }
        }
    """
    item_tax_templates = []
    sales_taxes = []
    for title, data in templates.items():
        account = {
            'account_name': data.get('account_name'),
            'tax_rate': data.get('tax_rate'),
        }
        item_tax_templates.append({
            'title': title,
            'taxes': [{'tax_type': account}],
        })
        sales_taxes.append({
            'title': title,
            'is_default': data.get('default', 0),
            # Copy so the two entries don't share one account dict.
            'taxes': [{'account_head': dict(account)}],
        })
    return {
        'chart_of_accounts': {
            '*': {
                'item_tax_templates': item_tax_templates,
                '*': sales_taxes,
            }
        }
    }
def sort_hyps(hyps):
    """Return the Hypothesis objects sorted by descending average log probability."""
    def _avg_log_prob(hyp):
        return hyp.avg_log_prob
    return sorted(hyps, key=_avg_log_prob, reverse=True)
def format_icd9_code(code):
    """Nicely format an ICD9 code into the form used in the bayes_hcup_ccs table."""
    stripped = code.replace('.', '')
    # Left-justified in a 5-wide field, wrapped in single quotes.
    return f"'{stripped:5}'"
def polygon_to_points(poly):
    """
    Plotting helper, which rearranges polygon vertices into lists
    of X and Y coordinates. The first point is duplicated at the end
    of each list, to make a closed path.
    :Parameters:
    poly: tuple of ((x1,y1),(x2,y2),...)
        The coordinates of the vertices of the polygon.
    :Returns:
    (xlist, ylist): list of 2 tuples
        ((x1, x2, ..., x1), (y1, y2, ..., y1))
    """
    xlist = [vertex[0] for vertex in poly]
    ylist = [vertex[1] for vertex in poly]
    # Close the path by repeating the first vertex.
    xlist.append(xlist[0])
    ylist.append(ylist[0])
    return (xlist, ylist)
def value_error_to_dict(obj):
    """Serializer function for ValueError."""
    serialized_args = list(map(str, obj.args))
    return {"__class__": "ValueError", "args": serialized_args}
def _captalize(arg1):
"""Returns the string with an initial capital"""
return str(arg1).title() |
def remove_duplicates(l):
    """
    Remove any duplicates from the original list.
    Return a list without duplicates; for each duplicated element the
    last occurrence is the one kept (same semantics as the original
    two-pass remove() implementation, and works with unhashable items).
    """
    result = []
    for idx, item in enumerate(l):
        # Keep an element only when no equal element appears later on.
        if item not in l[idx + 1:]:
            result.append(item)
    return result
def action2label(action):
    """Transform action into label

    Args:
        action: sequence of strings whose first element is the action
            type ('Shift' or 'Reduce').

    Returns:
        str: 'Shift' unchanged, or all 'Reduce' action parts joined
        with '-'.

    Raises:
        ValueError: for any other action type.
    """
    if action[0] == 'Shift':
        return action[0]
    if action[0] == 'Reduce':
        # join() takes any iterable of strings; wrapping in list() was redundant.
        return '-'.join(action)
    raise ValueError("Unrecognized parsing action: {}".format(action))
def to_bytes(obj):
    """Convert object to bytes"""
    if isinstance(obj, bytes):
        return obj
    # Non-strings go through str() first; everything is UTF-8 encoded.
    text = obj if isinstance(obj, str) else str(obj)
    return text.encode('utf-8')
def createList(n):
    """
    This function creates a list of given indices
    (reads n integers interactively from the user).
    """
    return [int(input("Enter num: ")) for _ in range(n)]
def fn_url_p(fn):
    """check if fn is a url

    Returns True when *fn* is a string starting with a known URL scheme;
    False for anything else, including non-string input (preserving the
    original best-effort behavior without a bare except).
    """
    url_schemes = ('http://', 'https://', 'ftp://', 'ftps://')
    try:
        # str.startswith accepts a tuple of prefixes: one call, no loop.
        return fn.startswith(url_schemes)
    except (AttributeError, TypeError):
        # None, bytes, ints, ... are simply "not a URL".
        return False
def get_previous_item(lst, item):
    """ Given an item, find its previous item in the list
    If the item appears more than once in the list, return the first index
    Args:
        lst: the list to search
        item: the item we want to find the previous item for
    Returns: The previous item or None if not found.
    """
    try:
        pos = lst.index(item)
    except ValueError:
        # Item not present at all.
        return None
    return lst[pos - 1] if pos > 0 else None
def make_divisible(v, divisor, min_val=None):
    """
    Round *v* to the nearest multiple of *divisor*, clamped below by
    *min_val* (defaults to *divisor*) and never more than 10% below *v*.
    ref: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    """
    floor = divisor if min_val is None else min_val
    nearest_multiple = int(v + divisor / 2) // divisor * divisor
    result = max(floor, nearest_multiple)
    # Make sure that round down does not go down by more than 10%.
    return result + divisor if result < 0.9 * v else result
def OSM_dict_from_other_tags(other_tags):
    """
    Creates a dict from the other_tags string of an OSM road segment
    Arguments:
        *other_tags* (string) : string containing all the other_tags data from the OSM road segment
    Returns:
        *dct* (dict) : mapping of tag keys to tag values parsed from the
        '"key"=>"value"' pairs (doc fix: the original docstring wrongly
        claimed an int was returned)
    """
    dct = {}
    if other_tags is not None:
        try:
            for entry in other_tags.split("\",\""):
                parts = entry.split('=>')
                # Same indexing as before: key is parts[0], value parts[1].
                key = parts[0].replace("\"", "")
                dct[key] = parts[1].replace("\"", "")
        except (IndexError, AttributeError):
            # Narrowed from a bare except: a missing '=>' (IndexError) or a
            # non-string input (AttributeError) aborts parsing; the partial
            # dict built so far is still returned, as before.
            print("Dict construction did not work for: {}".format(other_tags))
    return dct
def val_to_mrc(code, val):
    """
    Convert one single `val` to MRC.
    This function may be used for control fields in MARC records.
    Args:
        code (str): Code of the field.
        val (str): Value of the field.
    Returns:
        str: Correctly padded MRC line with field.
    """
    # Field codes are left-justified into a 3-character column.
    padded_code = str(code).ljust(3)
    return "%s L %s" % (padded_code, val)
def containsDuplicateB(nums):
    """
    :type nums: List[int]
    :rtype: bool
    """
    # A set gives the same O(1) membership test the original dict provided;
    # an empty input naturally falls through to False.
    seen = set()
    for num in nums:
        if num in seen:
            return True
        seen.add(num)
    return False
def convert_time_ranges(time_ranges, times):
    """ Convert a parsed list of month, season ranges consistent ranges. """
    # NOTE(review): `times` is unused here; kept for interface compatibility.
    return ['%s - %s' % pair for pair in time_ranges]
def pe10(n=2000000):
    """
    Sum of all primes <= n, via a sieve of Eratosthenes.

    >>> pe10()
    142913828922
    """
    # bytearray sieve: 1 = prime candidate, 0 = composite.
    is_prime = bytearray([1]) * (n + 1)
    is_prime[0] = is_prime[1] = 0
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Knock out multiples of p starting at p*p.
            count = len(range(p * p, n + 1, p))
            is_prime[p * p : n + 1 : p] = bytearray(count)
    return sum(i for i, flag in enumerate(is_prime) if flag)
def ct_lsb_prop_u8(val):
    """Propagate LSB to all 8 bits of the returned byte. Constant time."""
    # Branch-free: -(0) & 0xFF == 0x00, -(1) & 0xFF == 0xFF.
    return -(val & 0x01) & 0xFF
def bar1s(ep, ed):
    """
    Compute element force in spring element (spring1e).
    Parameters:
        ep = k spring stiffness or analog quantity
        ed = [u1 u2] element displacements
             u1, u2: nodal displacements
    Returns:
        es element force [N]
    """
    u1, u2 = ed[0], ed[1]
    # Hooke's law: force is stiffness times relative displacement.
    return ep * (u2 - u1)
def f2c(fahrenheit):
    """
    Convert Fahrenheit to Celsius
    :param fahrenheit: [float] Degrees Fahrenheit
    :return: [float] Degrees Celsius
    """
    # Keep the original evaluation order (5/9 first) for bit-identical floats.
    scale = 5 / 9
    return scale * (fahrenheit - 32)
def to_ascii(data):
    """Transform accentuated unicode symbols into ascii or nothing
    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.
    A better solution would be to use transliteration based on a precomputed
    unidecode map to be used by translate as explained here:
    http://stackoverflow.com/questions/2854230/
    """
    import unicodedata
    text = data.decode() if isinstance(data, bytes) else data
    # NFKD splits accented characters into base letter + combining mark;
    # the ASCII encode with 'ignore' then drops the marks.
    normalized = unicodedata.normalize('NFKD', text)
    return normalized.encode('ASCII', 'ignore').decode('ascii')
def filterTags(attrs):
    """ Convert some ShapeVIS attributes to OSM.

    Args:
        attrs (dict): raw ShapeVIS attributes; 'HAALPLTSMN' (stop name)
            is optional.
    Returns:
        dict: OSM tags; always contains 'public_transport'.
    """
    result = {}
    if 'HAALPLTSMN' in attrs:
        result['name'] = attrs['HAALPLTSMN']
        result['bus'] = 'yes'
    # Default
    result['public_transport'] = 'stop_position'
    # Bug fix: the original read result['name'] unconditionally and raised
    # KeyError whenever 'HAALPLTSMN' was missing.
    name = result.get('name', '').lower()
    if 'station' in name and 'centrum' in name:
        result['public_transport'] = 'station'
    return result
def aggregate(method, value, self):
    """ Aggregate record-style ``value`` for a method decorated with ``@one``. """
    spec = getattr(method, '_returns', None)
    if not spec:
        return value
    # value is a list of instances, concatenate them
    model, _, _ = spec
    if model == 'self':
        return sum(value, self.browse())
    if model:
        return sum(value, self.env[model])
    return value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.