content stringlengths 42 6.51k |
|---|
def convert_from_alphabet(a):
    """Encode a single character code point.

    :param a: one character, given as its integer code point
    :return: the encoded value: 1 for TAB, 97 for LF, ``a - 30`` for
        printable ASCII (32..126), and 0 for anything else
    """
    if a == 9:  # TAB
        return 1
    if a == 10:  # LF
        return 127 - 30
    if 32 <= a <= 126:  # printable ASCII
        return a - 30
    return 0  # everything else maps to the "unknown" bucket
def duplicate(N, Elem):
    """Return a list containing N copies of the term Elem.

    If Elem exposes a ``copy`` method, each entry is an independent
    copy; otherwise all entries reference the same object.

    Example (from Erlang's lists:duplicate,
    http://erldocs.com/17.3/stdlib/lists.html#duplicate):
        > lists:duplicate(5, xx).
        [xx,xx,xx,xx,xx]
    """
    if hasattr(Elem, "copy"):
        return [Elem.copy() for _ in range(N)]
    return [Elem] * N
def split_query_into_tokens(query):
    """
    Split a query string into tokens for parsing by 'tokenize_query'.
    Returns a list of strings.
    Rules:
      Split on whitespace, unless
      - inside enclosing quotes -> 'user:"foo bar"'
      - the last character of the previous word is ':' -> 'user: foo'
    Example:
      split_query_into_tokens('user:foo user: bar user:"foo bar" foo bar') =>
      ['user:foo', 'user: bar', 'user:"foo bar"', 'foo', 'bar']
    """
    tokens = []
    token = ""          # token currently being accumulated
    quote_enclosed = False
    quote_type = None   # which quote char opened the current quoted run
    end_of_prev_word = None  # last non-space char before a whitespace run
    for idx, char in enumerate(query):
        next_char = query[idx + 1] if idx < len(query) - 1 else None
        token += char
        # Remember the character that ends a word (used for the ':' rule).
        if next_char and not char.isspace() and next_char.isspace():
            end_of_prev_word = char
        # Whitespace terminates a token unless quoted or after 'key:'.
        if char.isspace() and not quote_enclosed and end_of_prev_word != ":":
            if not token.isspace():
                tokens.append(token.strip(" "))
            token = ""
        # Toggle quoting only for the same quote char that opened it.
        if char in ("'", '"'):
            if not quote_enclosed or quote_type == char:
                quote_enclosed = not quote_enclosed
            if quote_enclosed:
                quote_type = char
    # Flush the trailing token.
    # NOTE(review): when the query ends in whitespace, token is "" here and
    # "".isspace() is False, so an empty string is appended — presumably
    # unintended; confirm against callers before changing.
    if not token.isspace():
        tokens.append(token.strip(" "))
    return tokens
def is_sudoku_complete(output_grid):
    """
    Check that every square of the output grid is non-zero.

    :param output_grid: grid of found squares, a len-9 list of len-9
        lists of ints (0 marks an unsolved square)
    :return: True if complete, False if not
    """
    # all() short-circuits on the first zero square; this replaces the
    # original any(...)-then-invert construction with the direct idiom.
    return all(square != 0 for row in output_grid for square in row)
def pack_parameter_id(domain_id: int, unique_id: int, linear_index: int) -> bytearray:
    """Pack the 4-byte Parameter ID used in service 20 packets.

    Layout: byte 0 is the domain ID, byte 1 the unique ID, and bytes
    2-3 the big-endian linear index (non-zero when a parameter is not
    loaded from index 0).

    :param domain_id: one-byte domain ID
    :param unique_id: one-byte unique ID
    :param linear_index: two-byte linear index
    """
    return bytearray(
        [domain_id, unique_id, (linear_index >> 8) & 0xFF, linear_index & 0xFF]
    )
def _urldecode(input):
    """URL-decode metadata.

    Decodes %XX percent-escapes and treats '+' as a space
    (application/x-www-form-urlencoded); all other characters pass
    through unchanged. Returns the result decoded as UTF-8.
    """
    output = bytearray()
    nibbles = 0  # hex digits still expected from the current %XX escape
    value = 0    # accumulator for the current escape's byte value
    # Each input character
    for char in input:
        if char == '%':
            # Begin a percent-encoded hex pair.
            # NOTE(review): a '%' inside an unfinished escape restarts it.
            nibbles = 2
            value = 0
        elif nibbles > 0:
            # Parse the percent-encoded hex digits (non-hex chars inside
            # an escape contribute 0 and still consume a nibble).
            value *= 16
            if char >= 'a' and char <= 'f':
                value += ord(char) + 10 - ord('a')
            elif char >= 'A' and char <= 'F':
                value += ord(char) + 10 - ord('A')
            elif char >= '0' and char <= '9':
                value += ord(char) - ord('0')
            nibbles -= 1
            if nibbles == 0:
                output.append(value)
        elif char == '+':
            # Treat plus as space (application/x-www-form-urlencoded)
            output.append(ord(' '))
        else:
            # Preserve character
            output.append(ord(char))
    return output.decode('utf-8')
def lines_from_geometry(geo):
    """Convert an iterable of geometry objects to lists of coordinates.

    Suitable for passing directly to
    ``matplotlib.collections.LineCollection``.

    :param geo: iterable of geometry items; items that cannot be
        converted to a line are ignored
    :return: a list of coordinate lists
    """
    lines = []
    for item in geo:
        try:
            lines.append(list(item.coords))
        except (AttributeError, TypeError, NotImplementedError):
            # No usable .coords (shapely raises NotImplementedError for
            # some geometry types) — skip it. The original bare except
            # also swallowed KeyboardInterrupt/SystemExit.
            pass
    return lines
def get_hashtags(tokens):
    """Extract the hashtags (tokens starting with '#') from a set of tokens."""
    return list(filter(lambda token: token.startswith("#"), tokens))
def hex_str_to_bytes_str(hex_str):
    """Convert a hex string into the byte string it encodes.

    :type hex_str: str
    :param hex_str: the hex string representing a trace_id or span_id
    :rtype: bytes
    :returns: the decoded byte string (the original docstring claimed
        str, but the function has always returned bytes)
    """
    # bytes.fromhex replaces the redundant bytes(bytearray.fromhex(...))
    # round-trip with a single call; the result is identical.
    return bytes.fromhex(hex_str)
def map_atoms(indices, nres_atoms=1):
    """Map sub-system atom indices to residues of the full system.

    :param indices: indices of atoms to map with respect to the full system
    :param nres_atoms: number of atoms per residue
    :type indices: list
    :type nres_atoms: int
    :return: dict mapping residue number -> list of its atom indices
    """
    nres = len(indices) // nres_atoms
    return {
        res: indices[res * nres_atoms:(res + 1) * nres_atoms]
        for res in range(nres)
    }
def copy_state_dict(state_dict_1, state_dict_2):
    """Copy state_dict_1's values into state_dict_2 positionally.

    Why not load_state_dict? Because load_state_dict only copies weights
    whose keys match in both state dicts, even with strict=False. Here
    the i-th value of state_dict_1 is assigned to the i-th key of
    state_dict_2 regardless of key names.
    """
    source_keys = list(state_dict_1)
    target_keys = list(state_dict_2)
    for position, src_key in enumerate(source_keys):
        state_dict_2[target_keys[position]] = state_dict_1[src_key]
    return state_dict_2
def autodocument_from_superclasses(cls):
    """Fill in missing documentation on overridden methods.

    For each callable in cls.__dict__ that has no docstring, copy the
    docstring from the first direct base class that documents an
    attribute of the same name. Can be used as a class decorator;
    returns cls.
    """
    undocumented = []  # NOTE(review): collected nowhere — appears unused
    for name, attribute in cls.__dict__.items():
        # is it a method on the class that is locally undocumented?
        if hasattr(attribute, '__call__') and not attribute.__doc__:
            # don't muck with builtins (they lack __module__)
            if not hasattr(attribute, '__module__'):
                continue
            # find docs on a superclass; first documented base wins
            for supercls in cls.__bases__:
                try:
                    superdoc = getattr(supercls, name).__doc__
                    if superdoc:
                        setattr(attribute, '__doc__', superdoc)
                        break
                except (AttributeError, TypeError):
                    # base lacks the attribute, or __doc__ is read-only
                    pass
    return cls
def is_url(url: str) -> bool:
    """Return True if a string is a URL (i.e. it starts with "http").

    >>> is_url("")
    False
    >>> is_url(" ")
    False
    >>> is_url("http://example.com")
    True
    """
    return url[:4] == "http"
def format_limit(lim):
    """Format the 'LIMIT' keyword line for SPARQL queries.

    Returns the empty string when ``lim`` is None, otherwise a
    newline-prefixed LIMIT clause.
    """
    return "" if lim is None else "\nLIMIT %d" % lim
def repeatName(name, times):
    """Repeat a name a given number of times (string repetition)."""
    return name * times
def get_excluded_params(schema):
    """
    Get all params excluded in this schema.

    If "only" is provided on the schema instance, every declared param
    not listed in it is also treated as excluded.

    :param schema: schema instance, or a schema class
    :return: set of excluded param names
    """
    if isinstance(schema, type):
        # A bare class carries no per-instance exclude/only settings.
        return set()
    excluded = set(getattr(schema, "exclude", ()) or ())
    included = set(getattr(schema, "only", ()) or ())
    if included:
        excluded.update(
            str(field)
            for field in schema._declared_fields
            if field not in included
        )
    return excluded
def steps_f12(j=None, Xs=None):
    """Step size for the f update given the current state of Xs.

    ``j`` and ``Xs`` are accepted for interface compatibility but are
    unused: the Lipschitz constant here is always 2.
    """
    lipschitz_const = 2
    slack = 0.1  # was considered: 1.
    return slack / lipschitz_const
def insertion(l):
    """Insertion sort: sorts the given list in place and returns it."""
    for i in range(1, len(l)):
        current = l[i]
        pos = i
        # Shift larger elements right until current's slot is found.
        while pos > 0 and current < l[pos - 1]:
            l[pos] = l[pos - 1]
            pos -= 1
        l[pos] = current
    return l
def supportInterval(thing):
    """Lower and upper bounds on this value, if known.

    Delegates to thing.supportInterval() when the attribute exists;
    plain numbers bound themselves; anything else is unbounded.
    """
    if hasattr(thing, 'supportInterval'):
        return thing.supportInterval()
    if isinstance(thing, (int, float)):
        return thing, thing
    return None, None
def actionColor(status):
    """
    Get an action color based on the workflow status.

    'success' -> 'good', 'failure' -> 'danger', anything else -> 'warning'.
    """
    status_colors = {'success': 'good', 'failure': 'danger'}
    return status_colors.get(status, 'warning')
def estimate_sparse_size(num_rows, topK):
    """
    Estimate the memory footprint of a sparse matrix.

    Counts two 4-byte index entries plus one 8-byte value per stored cell.

    :param num_rows: rows (or columns) of the square matrix
    :param topK: number of stored elements per row
    :return: size in bytes
    """
    num_cells = num_rows * topK
    index_bytes = 4 * num_cells * 2
    data_bytes = 8 * num_cells
    return index_bytes + data_bytes
def radix_sort(arr, radix=10):
    """Sort a list of non-negative integers in place using radix sort.

    A type of bucket sort which processes keys digit by digit: on each
    pass, keys are distributed into buckets by the current digit (least
    significant first), then collected back. The algorithm runs for the
    number of digits the largest key has.

    Args:
        arr: list of integers. Note! these must be integers within a
            well defined interval.
        radix: base used to extract digits (default 10).
    Returns:
        list, the sorted array (same list object, sorted in place)
    """
    mask = 1
    max_length = False
    while not max_length:
        max_length = True
        buckets = [[] for _ in range(radix)]
        # Place keys in buckets corresponding to the masked digit.
        for key in arr:
            # BUG FIX: the original used key / mask, which is float
            # division on Python 3 and produces a float bucket index
            # (TypeError). Integer division restores the intended digit.
            tmp = key // mask
            buckets[tmp % radix].append(key)
            if max_length and tmp > 0:
                # Some key still has higher digits: another pass needed.
                max_length = False
        # Replace the input array with the contents of the buckets.
        write_pos = 0
        for bucket in buckets:
            for key in bucket:
                arr[write_pos] = key
                write_pos += 1
        mask *= radix
    return arr
def get_name_spaces(words):
    """Check number of spaces for a given set of words.

    NOTE(review): 'spaces' is ``len(word.split(' '))``, i.e. the number
    of space-separated segments (spaces + 1 for simple strings), not the
    raw space count — preserved as-is for compatibility.

    Args:
        words (list): A list of words
    Returns:
        dict: The data and summary results.
    """
    data = []
    for word in words:
        data.append({'word': word, 'spaces': len(word.split(r' '))})
    return {'data': data, 'summary': None}
def make_board(N):
    """
    Return a new N x N empty board; empty squares are represented by '*'.

    Arg N: integer board dimension, must be >= 1 (enforced with asserts,
    matching the original contract).
    """
    assert N >= 1, "Invalid board dimension"
    assert type(N) == int, "N must be an integer"
    return [["*"] * N for _ in range(N)]
def render_hunspell_word_error(
    data,
    fields=("filename", "word", "line_number", "word_line_index"),
    sep=":",
):
    """Render a misspelled word data dictionary as a string.

    A convenient way to render each misspelled word data dictionary,
    useful for printing in spell-checker command line interfaces.

    Args:
        data (dict): Misspelled word data, as yielded by
            :py:meth:`hunspellcheck.HunspellChecker.check`.
        fields (iterable): Fields to include in the response, in order.
            (The default is now a tuple — the original used a mutable
            list default, a classic Python pitfall.)
        sep (str): Separator string between each field value.

    Returns:
        str: Misspelled word data as a string; fields whose value is
        None or missing are skipped.
    """
    values = []
    for field in fields:
        value = data.get(field)
        if value is not None:
            values.append(str(value))
    return sep.join(values)
def f_path_rename(text):
    """Rename path columns: lowercase each name, replace spaces with
    underscores, and prefix with 'path_'. Returns the new names as a list."""
    return ['path_' + name.lower().replace(' ', '_') for name in text]
def _slice_required_len(slice_obj):
    """
    Calculate how many items must be in the collection to satisfy this slice.

    Returns `None` for slices whose requirement varies with the length of
    the underlying collection, such as `lst[-1]` or `lst[::]`.
    """
    # Non-unit steps make the requirement depend on the collection length.
    if slice_obj.step and slice_obj.step != 1:
        return None
    # (None, None, *) requires the entire list
    if slice_obj.start is None and slice_obj.stop is None:
        return None
    # Negative indexes are hard without knowing the collection length
    # NOTE(review): these truthiness tests treat start=0 / stop=0 like
    # None (0 is falsy) — e.g. slice(3, 0) falls through to start + 1.
    # Confirm whether that edge case matters to callers.
    if slice_obj.start and slice_obj.start < 0:
        return None
    if slice_obj.stop and slice_obj.stop < 0:
        return None
    if slice_obj.stop:
        # An empty slice (start beyond stop) needs nothing.
        if slice_obj.start and slice_obj.start > slice_obj.stop:
            return 0
        return slice_obj.stop
    # No stop: only the element at `start` is needed.
    return slice_obj.start + 1
def getRelevantInfoDict(dataDict):
    """Extract the relevant/useful fields from a parsed API JSON response.

    :param dataDict: dict parsed from the JSON object returned by the API
    :return: dict with location, country, temp, condition, wind and cloud data
    """
    info = {}
    info['locationName'] = dataDict['name']
    info['country'] = dataDict['sys']['country']
    info['temp'] = dataDict['main']['temp']
    info['condition'] = dataDict['weather'][0]['main']
    info['windSpeed'] = dataDict['wind']['speed']
    info['percentCloud'] = dataDict['clouds']['all']
    return info
def count(context, tag, needle):
    """
    *musicpd.org, music database section:*
    ``count {TAG} {NEEDLE}``
    Counts the number of songs and their total playtime in the db
    matching ``TAG`` exactly.

    NOTE(review): stub implementation — all arguments are ignored and
    zero songs / zero playtime are always reported.
    """
    return [('songs', 0), ('playtime', 0)]
def is_inverse(a, b) -> bool:
    """Check whether two direction codes are opposites of each other.

    Directions 0/1 form one opposite pair and 2/3 the other.
    """
    return (a, b) in ((0, 1), (1, 0), (2, 3), (3, 2))
def remove_empty(dictionary):
    """Remove entries whose value is None, mutating the dict in place.

    Returns the same dictionary object for convenience.
    """
    none_keys = [key for key, value in dictionary.items() if value is None]
    for key in none_keys:
        del dictionary[key]
    return dictionary
def distinct_words(corpus):
    """Determine the sorted list of distinct words in the corpus.

    Params:
        corpus (list of list of strings): corpus of documents
    Return:
        corpus_words (list of strings): distinct words across the corpus,
            sorted with the built-in ``sorted``
        num_corpus_words (integer): number of distinct words
    """
    vocabulary = set()
    for document in corpus:
        vocabulary.update(document)
    corpus_words = sorted(vocabulary)
    return corpus_words, len(corpus_words)
def orient(mag_azimuth, field_dip, or_con):
    """
    uses specified orientation convention to convert user supplied orientations
    to laboratory azimuth and plunge
    Parameters:
    ________________
    mag_azimuth: float
        orientation of the field orientation arrow with respect to north
    field_dip : float
        dip (or hade) or field arrow.
        if hade, with respect to vertical down
        if inclination, with respect to horizontal (positive down)
    or_con : int
        orientation convention : int
        Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.
        [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
        of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
        lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
        [2] Field arrow is the strike of the plane orthogonal to the drill direction,
        Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
        Lab arrow dip = -field_dip
        [3] Lab arrow is the same as the drill direction;
        hade was measured in the field.
        Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
        [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
        [5] Same as AZDIP convention explained below -
        azimuth and inclination of the drill direction are mag_azimuth and field_dip;
        lab arrow is as in [1] above.
        lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
        [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
    Returns:
    ___________
    azimuth and dip of lab arrow
        ("", "") when mag_azimuth is the -999 sentinel; None (after
        printing an error) when or_con is not recognized
    """
    or_con = str(or_con)
    # -999 is the "no measurement" sentinel.
    if mag_azimuth == -999:
        return "", ""
    if or_con == "1":  # lab_mag_az=mag_az; sample_dip = -dip
        return mag_azimuth, -field_dip
    if or_con == "2":
        return mag_azimuth - 90., -field_dip
    if or_con == "3":  # lab_mag_az=mag_az; sample_dip = 90.-dip
        return mag_azimuth, 90. - field_dip
    if or_con == "4":  # lab_mag_az=mag_az; sample_dip = dip
        return mag_azimuth, field_dip
    if or_con == "5":  # lab_mag_az=mag_az; sample_dip = dip-90.
        return mag_azimuth, field_dip - 90.
    if or_con == "6":  # lab_mag_az=mag_az-90.; sample_dip = 90.-dip
        return mag_azimuth - 90., 90. - field_dip
    # NOTE(review): convention "7" returns the same values as "6",
    # contradicting its inline comment (lab_mag_az=mag_az) — confirm
    # against the MagIC convention table before relying on it.
    if or_con == "7":  # lab_mag_az=mag_az; sample_dip = 90.-dip
        return mag_azimuth - 90., 90. - field_dip
    if or_con == "8":  # lab_mag_az=(mag_az-180)%360; sample_dip = 90.-dip
        return (mag_azimuth - 180.)%360, 90. - field_dip
    # Unrecognized convention: warn and implicitly return None.
    print("Error in orientation convention")
def convert_to_azimuth(angle):
    """Convert a 180..-180 bearing to an azimuth angle.

    Also normalizes the result to 0-360 when the intermediate value
    overshoots.

    @param: angle - angle denoted in terms of 180 to -180 degrees
    @returns angle - angle 0 to 360
    """
    if 90 < angle <= 180:
        azimuth_angles = 360.0 - (angle - 90)
    else:
        azimuth_angles = abs(angle - 90)
    if abs(azimuth_angles) > 360:
        # BUG FIX: the original computed `azimuth_angles % 360` but
        # discarded the result, so out-of-range inputs were never
        # normalized as documented.
        azimuth_angles %= 360
    return azimuth_angles
def roll_by_one(cups):
    """
    Roll the cups by one unit: the new current cup is the next cup in
    the clockwise direction.
    """
    head, tail = cups[:1], cups[1:]
    return tail + head
def get_events_by_ref_des(data, ref_des):
    """Return the events in ``data`` matching the given reference designator.

    NOTE(review): currently a stub — both arguments are ignored and an
    empty list is always returned.
    """
    result = []
    return result
def duplicatesRemoval(checked, node, path_penalty):
    """
    If A collaborated with B, B is also connected to A, so when checking B
    we would not need to add A again. With multiple paths, though, a node
    must still be (re)visited whenever the new path carries a higher
    penalty than any previously recorded one.

    Records ``path_penalty`` for ``node`` and returns True when the node
    is new or the new penalty is higher; returns False otherwise.
    """
    if node not in checked or checked[node] < path_penalty:
        checked[node] = path_penalty
        return True
    return False
def xmatch_score(a, b):
    """
    Simple scoring function: 1 when the two values are equal, else 0.
    """
    return 1 if a == b else 0
def var_lower_length(tabu_lenght, tabu_var):
    """
    Validate that ``tabu_var`` is not a higher number than ``tabu_lenght``.

    Parameters:
    -----------
    tabu_lenght: int
    tabu_var: int

    Returns ``tabu_var`` when valid; raises ValueError otherwise.
    (Parameter spelling kept for interface compatibility.)
    """
    if tabu_var > tabu_lenght:
        raise ValueError("tabu_var can't be higher than tabu_lenght")
    return tabu_var
def gcContent(seq):
    """Return the G/C content of a sequence as a rounded integer percent."""
    gc_count = sum(seq.count(base) for base in "CG")
    return int(round(100 * (gc_count / len(seq))))
def make_filter_gff_cmd(gff, baddies, newgff):
    """Build an inverse-grep command filtering unwanted locus tags from a gff.

    Note 2019-04-25: this is a ticking time bomb — the file operand
    (``gff``) precedes the options, which relies on GNU grep accepting
    options after operands.

    -f: read patterns from ``baddies``; -v: keep only non-matching lines.
    """
    return f"grep {gff} -f {baddies} -v > {newgff}"
def sigmoid_5params(x, a, b, c, d, g):
    """
    Five-parameter sigmoid curve: d + (a - d) / (1 + (x / c) ** b) ** g.

    :param x: independent variable
    :param a: response at the x -> 0 end of the curve
    :param b: slope coefficient
    :param c: inflection-point location
    :param d: response at the x -> infinity end of the curve
    :param g: asymmetry factor
    :return: the curve value at x
    """
    return d + ((a - d) / (1 + (x / c) ** b) ** g)
def decode_textfield_base64(content):
    """
    Decode the contents of a CIF textfield from Base64.

    :param content: a string with Base64-encoded contents
    :return: the decoded value (bytes)
    """
    from base64 import standard_b64decode
    return standard_b64decode(content)
def get_schemaloc_string(ns_set):
    """Build a "schemaLocation" string for every namespace in ns_set.

    Args:
        ns_set (iterable): Namespace objects. Entries whose
            schema_location is falsy (e.g. 'xsi') are skipped.
    """
    pairs = (
        "{0.name} {0.schema_location}".format(ns)
        for ns in ns_set
        if ns.schema_location
    )
    return " ".join(pairs)
def mock_install_repository(path: str):
    """
    Perform nothing, but return a result dict shaped like the other
    install functions:
    - success: True
    - path: the path passed as parameter
    - git: Github URL
    :return: dict
    """
    result = dict(success=True)
    result['path'] = path
    result['git'] = 'https://github.com'
    return result
def parse_pct(value):
    """
    Parse a percentage into a fraction (e.g. '50' -> 0.5).
    """
    as_float = float(value)
    return as_float / 100
def dict_to_boto3_tags(tag_dict):
    """
    Convert a dictionary to the boto3 tag-list format.

    :param tag_dict: A dictionary of str to str.
    :return: A list of {"Key": ..., "Value": ...} boto3 tags.
    """
    tags = []
    for key, value in tag_dict.items():
        tags.append({"Key": key, "Value": value})
    return tags
def ppmv2pa(x, p):
    """Convert a gas amount in ppmv to a partial pressure in Pa.

    Parameters
    ----------
    x : gas amount [ppmv]
    p : total air pressure [Pa]

    Returns
    -------
    partial pressure [Pa]
    """
    total_ppmv = 1e6 + x
    return x * p / total_ppmv
def dot(a, b):
    """Dot product of two TT-matrices or two TT-vectors.

    Delegates to a.__dot__(b) when available; ``None`` acts as the
    identity on the left; anything else is an error.
    """
    if hasattr(a, '__dot__'):
        return a.__dot__(b)
    if a is not None:
        raise ValueError(
            'Dot is waiting for two TT-vectors or two TT- matrices')
    return b
def are_vulnerabilities_equivalent(vulnerability_1, vulnerability_2):
    """
    Check whether two vulnerability JSON objects are equivalent.

    Two entries match when their name and severity agree and every
    attribute of the first (except package_version) also appears in the
    second's attributes.

    :param vulnerability_1: dict in the ECR Scan Tool format
    :param vulnerability_2: dict in the ECR Scan Tool format
    :return: bool, True if equivalent
    """
    same_identity = (
        vulnerability_1["name"] == vulnerability_2["name"]
        and vulnerability_1["severity"] == vulnerability_2["severity"]
    )
    if not same_identity:
        return False
    # package_version is deliberately not compared: it may reflect when
    # the CVE was first observed on the ECR Scan, so unrelated version
    # updates would cause a mismatch while the CVE still applies to both.
    relevant_attributes = [
        attribute
        for attribute in vulnerability_1["attributes"]
        if attribute["key"] != "package_version"
    ]
    return all(
        attribute in vulnerability_2["attributes"]
        for attribute in relevant_attributes
    )
def direction(a, b):
    """Sign of the step from a to b: +1 increasing, -1 decreasing, 0 equal.

    3, 5 => +1 ; 5, 3 => -1 ; 5, 5 => 0
    """
    if b > a:
        return 1
    if a > b:
        return -1
    return 0
def list2dict(lst):
    """Convert the flat key/value reply list from RedisAI into a dict.

    Keys are decoded and lowercased; byte values are decoded except when
    the key is "blob", whose payload is kept as raw bytes.
    """
    if len(lst) % 2 != 0:
        raise RuntimeError("Can't unpack the list: {}".format(lst))
    out = {}
    for raw_key, raw_val in zip(lst[::2], lst[1::2]):
        key = raw_key.decode().lower()
        if key != "blob" and isinstance(raw_val, bytes):
            raw_val = raw_val.decode()
        out[key] = raw_val
    return out
def get_vpo(values):
    """
    Shift values one index backwards, padding the tail with None.

    Day_1: m11, m12, *m13
    Day_2: m21, m22, *m23
    Day_3: m31, m32, *m33
    The starred value of each day is predicted from the previous day's
    metrics, so training pairs line up as:
      X: m11,m12,m13  Y: m23
      X: m21,m22,m23  Y: m33
    The first target (m13) has no X from Day_0 and is dropped by the
    shift; the last X row gets Y=None since Day_4 has no data.
    """
    shifted = list(values)
    del shifted[0]
    shifted.append(None)
    return shifted
def _cohort_cache_key(user_id, course_key):
"""
Returns the cache key for the given user_id and course_key.
"""
return f"{user_id}.{course_key}" |
def even(n):
    """Count the EVEN digits of a given integer/float.

    For unsupported types an error is printed and None is returned
    (original behavior preserved).
    """
    try:
        if type(n) not in (int, float):
            raise TypeError("Given input is not a supported type")
        count = 0
        for ch in str(n):
            if ch.isdigit() and int(ch) % 2 == 0:
                count += 1
        return count
    except TypeError as e:
        print("Error:", str(e))
def get_url(year_of_study, session):
    """
    Build the UTM timetable query URL.

    :param year_of_study: 1, 2, 3 or 4.
    :param session: Examples: 20199 is fall 2019. 20195 is summer 2019.
    :return: the timetable URL as a string
    """
    base = "https://student.utm.utoronto.ca/timetable/timetable"
    query = "?yos={0}&subjectarea=&session={1}&courseCode=&sname=&delivery=&courseTitle=".format(
        year_of_study, session)
    return base + query
def webotsToScenicPosition(pos):
    """Convert a Webots (x, y, z) position to a Scenic (x, -z) position.

    Webots' y component (elevation) is discarded; its z axis maps onto
    Scenic's negated second coordinate.
    """
    x, _elevation, z = pos
    return (x, -z)
def update_residual_model(residual_model, coefficients_to_add, delta, delta_old):
    """Update the linear and square terms of the residual model.

    Args:
        residual_model (dict): Parameters of the residual model, i.e.
            "intercepts", "linear_terms", and "square_terms".
        coefficients_to_add (dict): Coefficients used to update the
            residual model's parameters.
        delta (float): Trust region radius of the current iteration.
        delta_old (float): Trust region radius of the previous iteration.

    Returns:
        dict: Copy of the residual model with updated "linear_terms"
        and "square_terms".
    """
    ratio = delta / delta_old
    updated = residual_model.copy()
    updated["linear_terms"] = (
        coefficients_to_add["linear_terms"]
        + ratio * residual_model["linear_terms"]
    )
    updated["square_terms"] = (
        coefficients_to_add["square_terms"]
        + ratio ** 2 * residual_model["square_terms"]
    )
    return updated
def capitalize_word(string):
    """Title-case each word without using str.title.

    Only a leading ASCII lowercase letter is uppercased; the rest of
    each word is untouched. Words are re-joined with single spaces
    (so runs of whitespace collapse), matching the original behavior.

    Args:
        string (str): The string
    Returns:
        str: The capitalized string
    """
    capitalized = []
    for word in string.split():
        first = word[0]
        if "a" <= first <= "z":
            # Uppercase by shifting the ASCII code point down by 32.
            word = chr(ord(first) - 32) + word[1:]
        capitalized.append(word)
    return " ".join(capitalized)
def numRollsToTarget(d, f, target):
    """
    Throw d dice with faces numbered 1 to f; count the combinations of
    faces that sum to target (modulo 1e9+7).

    DP recurrence: if the nth die shows face j, then
    numWays(n, f, target) = numWays(n-1, f, target-j).
    """
    # dp[i][j] = number of ways i dice sum to j.
    dp = [[0]*(target+1) for _ in range(d+1)]
    dp[0][0] = 0
    # Base case for one die: dp[1][n] = 1 for each reachable face value.
    # NOTE(review): the loop variable deliberately(?) shadows the
    # parameter f; range(1, f+1) is evaluated first and f ends the loop
    # holding its original value, so the later loops are unaffected —
    # fragile but correct. Confirm before refactoring.
    for f in range(1, f+1):
        for n in range(1, target+1):
            if f == n:
                dp[1][n] = 1
    # Build up from 2 dice: add one die showing k to an (i-1)-dice sum.
    for i in range(2, d+1):
        for j in range(1, target+1):
            for k in range(1, f+1):
                if k > j:
                    continue
                else:
                    dp[i][j] += dp[i-1][j-k]
    return dp[d][target] % (10**9+7)
def fibbs(n):
    """
    Input: the index n.
    Output: the nth Fibonacci number, computed iteratively
    (fibbs(0) == 0, fibbs(1) == 1).

    The original built the whole sequence in a list; two rolling
    variables produce the same result in O(1) space.
    """
    current, following = 0, 1
    for _ in range(n):
        current, following = following, current + following
    return current
def inverse_interleave(a, b):
    """
    Given a coordinate where `a` has been interleaved and `b` hasn't,
    return the value that `a` would have at `b=0`.
    """
    if a % 2 == 0:
        offset = b % 2
        return a + offset
    offset = (b + 1) % 2
    return a - offset
def getLine(x1, y1, x2, y2):
    """Return a list of (x, y) integer tuples for every point on the line
    between (x1, y1) and (x2, y2), generated with the Bresenham algorithm.

    Args:
        x1 (int, float): The x coordinate of the line's start point.
        y1 (int, float): The y coordinate of the line's start point.
        x2 (int, float): The x coordinate of the line's end point.
        y2 (int, float): The y coordinate of the line's end point.
    Returns:
        [(x1, y1), (x2, y2), (x3, y3), ...]
    Example:
        >>> getLine(0, 0, 6, 6)
        [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]
        >>> getLine(0, 0, 3, 6)
        [(0, 0), (0, 1), (1, 2), (1, 3), (2, 4), (2, 5), (3, 6)]
        >>> getLine(3, 3, -3, -3)
        [(3, 3), (2, 2), (1, 1), (0, 0), (-1, -1), (-2, -2), (-3, -3)]
    """
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    steep = abs(y2 - y1) > abs(x2 - x1)
    if steep:
        # Walk along y instead of x for steep lines.
        x1, y1 = y1, x1
        x2, y2 = y2, x2
    swapped = x1 > x2
    if swapped:
        # Always iterate left-to-right; remember to restore order later.
        x1, x2 = x2, x1
        y1, y2 = y2, y1
    dx = x2 - x1
    dy = abs(y2 - y1)
    err = int(dx / 2)
    y_step = 1 if y1 < y2 else -1
    y = y1
    points = []
    for x in range(x1, x2 + 1):
        points.append((y, x) if steep else (x, y))
        err -= dy
        if err < 0:
            y += y_step
            err += dx
    # Reverse the list if the endpoints were swapped above.
    if swapped:
        points.reverse()
    return points
def get_util(maximize, p_dog, p_other, payoffs):
    """Expected utility from the payoff vector.

    When maximizing, combine the payoffs weighted by the given
    probabilities; otherwise the utility is simply p_dog * p_other.

    >>> get_util(True, 1.0, 1.0, [1,2,3,4])
    1.0
    >>> get_util(True, 1.0, 0.5, [1,2,3,4])
    1.5
    >>> get_util(True, 0.5, 0.5, [1,2,3,4])
    2.25
    >>> get_util(True, 0.0, 1.0, [1,2,3,4])
    4.0
    >>> get_util(True, 1.0, 0.0, [1,2,3,4])
    2.0
    >>> get_util(False, 1.0, 1.0, [1,2,3,4])
    1.0
    >>> get_util(False, 1.0, 0.5, [1,2,3,4])
    0.5
    >>> get_util(False, 0.5, 0.5, [1,2,3,4])
    0.25
    >>> get_util(False, 0.0, 1.0, [1,2,3,4])
    0.0
    >>> get_util(False, 1.0, 0.0, [1,2,3,4])
    0.0
    """
    if not maximize:
        return p_dog * p_other
    expected = 0
    expected += p_dog * p_other * payoffs[0]
    expected += (1 - p_dog) * p_other * payoffs[3]
    expected += (1 - p_other) * payoffs[1]
    return expected
def replace_domain_terms(text, domain_terms, replacement):
    """
    Replace every occurrence of each domain term within the text.

    :param text: the text to process
    :param domain_terms: the list of domain terms
    :param replacement: the replacement for the domain terms
    :return: the processed string
    """
    for term in domain_terms:
        text = text.replace(term, replacement)
    return text
def str2num(string):
    """
    --------------------------------------------------
    Try to interpret 'string' as a number.
    If 'string' is a str, returns:
        int(string) for integers
        float(string) for floats
        'string' otherwise
    If 'string' is already an int or float, it is returned unchanged.
    Otherwise 'string' is treated as a list/tuple and each entry is
    converted to int, float, or left as-is; a single-entry result is
    unwrapped. Returns a list in the multi-entry case.
    --------------------------------------------------
    """
    if isinstance(string,int):
        output = string
    elif isinstance(string,float):
        output = string
    elif not isinstance(string,str):
        # Iterable case: convert each entry independently.
        output = []
        for a in string:
            # NOTE(review): int(a) succeeds for float entries and
            # truncates them (int(3.7) -> 3) — confirm this is intended
            # before relying on it for float-bearing sequences.
            try:
                output.append(int(a))
            except:
                try:
                    output.append(float(a))
                except:
                    output.append(a)
        if len(output) == 1:
            # Unwrap single-entry sequences.
            output = output[0]
    else:
        # Plain string: try int first, then float, else keep the string.
        output = string
        try:
            output = int(string)
        except:
            try:
                output = float(string)
            except:
                pass
    return output
def parse_float(float_str, default=0):
    """Parse float_str and return the value if valid.

    Args:
        float_str: Value to parse as float.
        default: Value to return if float_str is not valid.
    Returns:
        Parsed float value if valid, or default.
    """
    try:
        return float(float_str)
    except (TypeError, ValueError):
        # TypeError covers None and other non-parseable types; the
        # original caught only ValueError and let those crash despite
        # the documented "return default if not valid" contract.
        return default
def render_bytes(source, *args):
    """Perform ``%`` formatting with bytes uniformly across Python 2/3.

    :class:`bytes` does not support ``%`` or ``{}`` formatting under
    (older) Python 3, so everything is converted to unicode (bytes are
    decoded as ``latin-1``), formatted, and the result re-encoded to
    ``latin-1``. ``render_bytes(source, *args)`` behaves roughly like
    ``source % args`` under Python 2.

    .. todo::
        python >= 3.5 added back limited support for bytes %;
        revisit when 3.3/3.4 is dropped.
    """
    def _as_text(value):
        return value.decode("latin-1") if isinstance(value, bytes) else value

    template = _as_text(source)
    rendered = template % tuple(_as_text(arg) for arg in args)
    return rendered.encode("latin-1")
def should_import(managedcluster):
    """
    Return True if the input managedCluster should be imported,
    False otherwise (i.e. when it already has a 'ManagedClusterJoined'
    status condition).

    :param managedcluster: the managedCluster object to inspect
    :return: bool
    """
    conditions = managedcluster['status'].get('conditions', [])
    return all(
        condition['type'] != 'ManagedClusterJoined'
        for condition in conditions
    )
def parse_path(path):
    """
    Parse a JSON-encoding path into (key, flags) steps, per
    http://www.w3.org/TR/2014/WD-html-json-forms-20140529/#dfn-steps-to-parse-a-json-encoding-path

    Returns the original string for a plain key with no brackets, the
    ``failure`` single-step list for malformed paths, or a list of
    (key, flags) tuples otherwise.
    """
    original = path
    # NOTE(review): 'type' here is the *class* object, while every other
    # step uses the string 'object' — presumably intentional as a failure
    # marker, but confirm before normalizing.
    failure = [(original, {'last': True, 'type': object})]
    steps = []
    try:
        # Everything before the first '[' is the leading object key.
        first_key = path[:path.index("[")]
        if not first_key:
            return original
        steps.append((first_key, {'type': 'object'}))
        path = path[path.index("["):]
    except ValueError:
        # No '[' at all: the whole path is a plain key.
        return failure
    while path:
        if path.startswith("[]"):
            # '[]' marks an append step and must terminate the path.
            steps[-1][1]['append'] = True
            path = path[2:]
            if path:
                return failure
        elif path[0] == "[":
            path = path[1:]
            try:
                key = path[:path.index("]")]
                path = path[path.index("]")+1:]
            except ValueError:
                # Unclosed bracket.
                return failure
            try:
                # Numeric keys index arrays; everything else keys objects.
                steps.append((int(key), {'type': 'array'}))
            except ValueError:
                steps.append((key, {'type': 'object'}))
        else:
            return failure
    # Each step's type describes the container the *next* step lives in.
    for i in range(len(steps)-1):
        steps[i][1]['type'] = steps[i+1][1]['type']
    steps[-1][1]['last'] = True
    return steps
def calc_x(f, t):
    """Evaluate the cubic interpolation polynomial for x at step t.

    :param f: interpolation coefficients 'a_x', 'b_x', 'c_x', 'd_x'
    :type f: dict
    :param t: step of interp
    :type t: int
    :return: x coordinate
    :rtype: float
    """
    # Terms are accumulated in the same order and grouping as the
    # original expression to keep float results bit-identical.
    x = f['a_x']
    x = x + f['b_x'] * t
    x = x + f['c_x'] * t * t
    x = x + f['d_x'] * t * t * t
    return x
def falling(n, k):
    """Compute the falling factorial of n to depth k.

    >>> falling(6, 3)  # 6 * 5 * 4
    120
    >>> falling(4, 3)  # 4 * 3 * 2
    24
    >>> falling(4, 1)  # 4
    4
    >>> falling(4, 0)
    1
    """
    product = 1
    for step in range(k):
        product *= n - step
    return product
def is_classifier(estimator):
    """
    Return True if the given estimator is (probably) a classifier.
    From: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py#L526
    """
    estimator_type = getattr(estimator, "_estimator_type", None)
    return estimator_type == "classifier"
def max_precision(term2rank, total_terms):
    """Compute the mean best-rank precision over the whole candidate list.

    Each term contributes 1 / (its best, i.e. lowest, rank); terms missing
    from ``term2rank`` contribute 0 because the sum is divided by the
    expected term count rather than by len(term2rank).

    Args:
        term2rank: A dict of source term to ranks (1-based, non-empty list)
            of good translation candidates.
        total_terms: The expected term count, used as the denominator.

    Returns:
        A float: sum over terms of 1/best_rank, divided by ``total_terms``.
    """
    # (Docstring previously named the parameter 'result2rank' and claimed a
    # dict was returned; the function returns a single float.)
    term2prec = {term: 1.0 / min(ranks) for term, ranks in term2rank.items()}
    return sum(term2prec.values()) / total_terms
def checksums2dict(checksums: list) -> dict:
    """
    Build a lookup table from checksum to the index of its first occurrence.

    :param checksums: sequence of checksums
    :return: dictionary of {checksum: first index}
    """
    lookup = {}
    for position, checksum in enumerate(checksums):
        # setdefault keeps the earliest index when a checksum repeats.
        lookup.setdefault(checksum, position)
    return lookup
def logistic_rhs(t, x, r=2., k=2.):
    """
    Right-hand side of the logistic ODE:
        f(t, x) = r * x * (1 - x/k)
    ``t`` is unused but kept for ODE-solver signature compatibility.
    """
    # Same operation order as r * x * (1. - x / k), so floats match exactly.
    capacity_term = 1. - x / k
    return r * x * capacity_term
def is_callable(value, **kwargs):
    """Indicate whether ``value`` is callable (like a function, method, or class).

    :param value: The value to evaluate.
    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`
    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
      keyword parameters passed to the underlying validator
    """
    # Use the builtin: callable() checks the *type* for __call__, which is
    # how Python resolves calls; hasattr(value, '__call__') wrongly accepted
    # instances that merely carry a __call__ instance attribute.
    return callable(value)
def parse_comma_separated_list(value):
    """Parse a comma-separated list.

    :param value:
        String or list of strings to be parsed and normalized.
    :returns:
        List of values with whitespace stripped.
    :rtype:
        list
    """
    if not value:
        return []
    # Strings get split on commas; lists/tuples are used as-is.
    items = value if isinstance(value, (list, tuple)) else value.split(',')
    return [item.strip() for item in items]
def dt2str(dt):
    """
    Render a datetime for display, with the microsecond part dropped.

    :param dt: datetime.datetime object, or None
    :return: str, or None
    """
    if dt is None:
        return None
    # Zeroing microseconds keeps str() from printing the fractional part.
    return str(dt.replace(microsecond=0))
def boyer_moore_majority_vote(arr):
    """
    Boyer-Moore majority vote: find the element occurring more than half the
    time in ``arr``, if one exists (otherwise the returned value is
    arbitrary and must be verified by the caller).

    Time complexity: O(n), n = length of the input sequence.
    Space complexity: O(1).
    """
    candidate = None
    votes = 0
    for item in arr:
        if votes == 0:
            # No current majority: adopt this element as the candidate.
            candidate, votes = item, 1
        else:
            # Matching elements support the candidate, others cancel it out.
            votes += 1 if item == candidate else -1
    return candidate
def human_list(l, separator="and"):
    """
    Formats a list for human readability.

    Parameters
    ----------
    l : sequence
        A sequence of strings
    separator : string, optional
        The word to use between the last two entries. Default:
        ``"and"``.

    Returns
    -------
    formatted_list : string
        The joined entries; an empty string for an empty sequence.

    Examples
    --------
    >>> human_list(["vanilla", "strawberry", "chocolate"], "or")
    'vanilla, strawberry or chocolate'
    """
    # Guard: the old code raised IndexError on an empty sequence.
    if not l:
        return ""
    if len(l) == 1:
        return l[0]
    return ', '.join(l[:-1]) + ' ' + separator + ' ' + l[-1]
def decode_extra_length(bits, length):
    """Decode extra bits for a DEFLATE match-length symbol (RFC 1951).

    :param bits: bit reader providing ``read(n)`` for n extra bits
        (only consulted for symbols >= 265, which carry extra bits)
    :param length: the length symbol, 257..285
    :return: the decoded match length, 3..258
    """
    if length == 285:
        # Symbol 285 is the fixed maximum length with no extra bits.
        return 258
    # Number of extra bits for this symbol. BUG FIX: this was '/', which in
    # Python 3 is true division and made ``extra`` a float, breaking
    # bits.read(extra) and the 2**(extra+2) arithmetic below.
    extra = (length - 257) // 4 - 1
    length = length - 254
    if extra > 0:
        ebits = bits.read(extra)
        length = 2**(extra+2) + 3 + (((length + 1) % 4) * (2**extra)) + ebits
    return length
def calc_categ_accur(g_truth, predicts):
    """Compute element-wise categorical accuracy between two equal-length
    sequences of labels.

    The two sequences must be the same size: one holds the ground-truth
    labels and the other the model predictions. Working over sequences keeps
    this reusable for many-to-many sequence models.

    BUG FIX: the old code compared the whole lists with ``==`` so the result
    was only ever 0.0 or 1.0; the counter/ratio structure shows a
    per-element accuracy was intended.

    :param g_truth: sequence of ground-truth labels
    :param predicts: sequence of predicted labels, same length
    :return: fraction of positions where prediction equals truth (1.0 for
        empty input, matching the old [] == [] behavior)
    """
    if not g_truth:
        return 1.0
    matches = sum(1 for truth, pred in zip(g_truth, predicts) if pred == truth)
    return matches / len(g_truth)
def conv_if_neg(x):
    """Return a pair (magnitude, was_negative): the absolute value of x and
    a flag saying whether x was negative."""
    return (-x, True) if x < 0 else (x, False)
def rectangles_intersect(r1, r2, shift1=(0, 0), shift2=(0, 0), extraSize=3):
    """
    Test whether two rectangles intersect.

    Each rectangle is a 4-tuple of integers (x_min, y_min, x_max, y_max);
    min/max are taken internally so unordered coordinates are tolerated.
    Optional @shift1/@shift2 move the boxes on a larger canvas (2d plane).
    @extraSize pads *both* rectangles by the given number of pixels on every
    side, forcing them to stay that far apart before being considered
    disjoint.

    Returns True if the (padded, shifted) rectangles intersect.
    """
    def padded_interval(lo, hi, shift, pad):
        # 1-D interval of one rectangle axis after shifting and padding.
        a = lo - pad + shift
        b = hi + pad + shift
        return min(a, b), max(a, b)

    # The rectangles intersect iff their intervals overlap on both axes;
    # axis 0/1 selects x/y (coords are at indices axis and axis+2).
    for axis in (0, 1):
        a_min, a_max = padded_interval(r1[axis], r1[axis + 2], shift1[axis], extraSize)
        b_min, b_max = padded_interval(r2[axis], r2[axis + 2], shift2[axis], extraSize)
        if a_min > b_max or a_max < b_min:
            return False
    return True
def bound(value, bound1, bound2):
    """
    Clamp ``value`` to the closed interval spanned by the two bounds
    (given in either order): returns value if it lies between them,
    otherwise the nearer bound.
    """
    lo, hi = min(bound1, bound2), max(bound1, bound2)
    return min(max(value, lo), hi)
def eps(i, d, N):
    """
    Dispersion for the singly occupied levels: N equally spaced levels with
    spacing d, centered symmetrically about zero.
    """
    center = (N - 1) / 2
    return d * (i - center)
def Hubble_convert(H_0):
    """
    Converts the Hubble parameter from km/s/Mpc to Myr^-1
    Parameters
    ----------
    H_0 : float
        The Hubble parameter in km/s/Mpc.
    Returns
    -------
    result : float
        The Hubble parameter in Myr^-1.
    """
    # Factor by factor:
    #   * 1000.0        -- km -> m
    #   * 3.1536*10**13 -- seconds per Myr (3.1536e7 s/yr * 1e6 yr)
    #   / 3.09*10**16   -- metres per parsec (rounded from 3.086e16)
    #   / 10**6         -- parsecs per Mpc
    # Matches the Myr^-1 unit convention used in this program.
    # Expression kept exactly as written: reordering would change float rounding.
    result = H_0*1000.0*3.1536*10**13/(3.09*10**16)/10**6
    return result
def get_column(data, index=0):
    """Extract one column from a dataset.

    Parameters:
        data: a list of rows; each row may be a list (integer index) or a
            dict (key index), etc.
            Given a = [{"k1":1, "k2":5},{"k1":3, "k2":5},{"k1":2, "k2":5}]
            get_column(a, "k1") will get the values of k1
        index: the column position or key to read from every row

    Returns:
        list: the values of that column, one per row
    """
    return [row[index] for row in data]
def genRunEntryStr(queryId, docId, rank, score, runId):
    """Generate one TREC-style run-file entry line.

    :param queryId: query id
    :param docId: document id
    :param rank: entry rank
    :param score: entry score
    :param runId: run id
    :return: the space-separated entry string
    """
    fields = (queryId, 'Q0', docId, rank, score, runId)
    return ' '.join(str(field) for field in fields)
def _one_or_both(a, b):
"""Returns f"{a}\n{b}" if a is truthy, else returns str(b).
"""
if not a:
return str(b)
return f"{a}\n{b}" |
def add(num1: int, num2: int):
    """
    Add two numbers and return the result (also prints a greeting,
    preserved from the original).
    """
    total = num1 + num2
    print("Good Day, World!")
    return total
def normalizeCUAddr(addr):
    """
    Normalize a cuaddr string: lower() it if it's a mailto:, and remove
    any trailing slash if it's a path or http(s) URL.

    @param addr: a cuaddr string to normalize
    @return: normalized string
    """
    lowered = addr.lower()
    if lowered.startswith("mailto:"):
        # mailto: addresses are case-insensitive, so keep the lowered form.
        addr = lowered
    # Note: the scheme check runs on ``addr`` (lowered only for mailto),
    # so e.g. 'HTTP://x/' is returned untouched -- original behavior.
    if addr.startswith(("/", "http:", "https:")):
        return addr.rstrip("/")
    return addr
def get_author_name(author_id, users, original=False):
    """Look up a username in the includes' users list.

    Arguments:
    - author_id: the author_id
    - users: the users part of the includes
    - original: when falsy, return the username whose id matches author_id;
      when truthy, return the first username whose id does NOT match
    Returns "" when no suitable entry is found.
    """
    for user in users:
        if 'id' not in user or 'username' not in user:
            continue
        # Exactly one of "is the author" / "wants someone else" must hold.
        if (user['id'] == author_id) != bool(original):
            return user['username']
    return ""
def edges_to_adj_list(edges):
    """
    Transform a set of edges into an adjacency list (a dictionary).
    For UNDIRECTED graphs: if v2 in adj_list[v1], then v1 in adj_list[v2].

    INPUT:
        - edges : a set or list of (v1, v2) edges
    OUTPUT:
        - adj_list: a dictionary keyed by vertex, each value a set of
          adjacent vertices.
    """
    adj_list = {}
    for v1, v2 in edges:
        # setdefault creates the vertex entry on first sight.
        adj_list.setdefault(v1, set()).add(v2)
        adj_list.setdefault(v2, set()).add(v1)
    return adj_list
def find_duplicate_number(array):
    """
    Find the single duplicated value in a list that contains the series
    [0, n-2] plus one extra (duplicate) entry.

    The series sum is the arithmetic sum count*(first + last)/2; subtracting
    it from the actual list sum leaves exactly the duplicated value.

    :param array: list holding 0..len(array)-2 plus one duplicate
    :return: the duplicate number, as an int
    """
    # Number of distinct terms: (len-2) - 0 + 1 == len(array) - 1.
    term_count = len(array) - 1
    last_term = len(array) - 2
    # term_count * last_term is a product of consecutive integers, hence
    # even, so floor division is exact -- and keeps the result an int
    # (the previous "/ 2" silently produced a float).
    expected_sum = term_count * (0 + last_term) // 2
    return sum(array) - expected_sum
def is_sorted(arr):
    """
    Return True when every adjacent pair is non-decreasing.
    An empty or None ``arr`` yields False (original behavior: only a
    truthy sequence can qualify).
    """
    if not arr:
        return False
    return all(left <= right for left, right in zip(arr, arr[1:]))
def position_shuffle(objs, saved=False):
    """
    Renumber the ``position`` attribute of each object to its list index.

    :param list objs: objects to be ordered (each with a ``position``
        attribute and, if ``saved`` is used, a ``save()`` method)
    :param bool saved: when True, call ``save()`` on every object

    code sample::
        position_shuffle( HomeBox.objects.all(), True)
    """
    if not objs:
        return []
    for new_position, obj in enumerate(objs):
        if obj.position != new_position:
            obj.position = new_position
        if saved:
            obj.save()
    return objs
def make_numbers_form_list(lis):
    """Concatenate the items of ``lis`` (as decimal text) into one integer."""
    return int("".join(str(item) for item in lis))
def getFibonacciRecursive(n: int) -> int:
    """
    Calculate the fibonacci number at position n recursively
    (tail recursion with an explicit accumulator pair; n <= 0 yields 0).
    """
    def _fib(remaining: int, current: int, following: int) -> int:
        # current/following are F(i) and F(i+1) for the current depth.
        if remaining <= 0:
            return current
        return _fib(remaining - 1, following, current + following)

    return _fib(n, 0, 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.