content stringlengths 42 6.51k |
|---|
def _tup_equal(t1,t2):
"""Check to make sure to tuples are equal
:t1: Tuple 1
:t2: Tuple 2
:returns: boolean equality
"""
if t1 is None or t2 is None:
return False
return (t1[0] == t2[0]) and (t1[1] == t2[1]) |
def toQt5Name(name):
    """Convert a module name from QtSomeName to Qt5SomeName.

    Names already carrying the "5" (e.g. "Qt5Core") and names that do not
    start with "Qt" are returned unchanged.
    """
    # Use startswith('Qt5') rather than indexing name[2] so a bare "Qt"
    # (len < 3) no longer raises IndexError.
    if name.startswith('Qt') and not name.startswith('Qt5'):
        return '%s5%s' % (name[:2], name[2:])
    return name
def transforma_em_lista(arquivo):
    """Transform the file contents into a list by splitting on newlines."""
    return arquivo.split("\n")
def check_ktriangles(L, now=0):
    """Check whether kinetic triangles are all linked up properly.

    :param L: iterable of kinetic triangles (project type exposing
        ``stops_at``, ``neighbours``, ``vertices`` and ``is_finite``)
    :param now: point in time at which the vertices are validated
    :returns: True when every live triangle passes the checks, else False
    """
    valid = True
    # check if neighbours are properly linked
    for ktri in L:
        # skip triangles that have already stopped existing
        if ktri.stops_at is not None:
            continue
        for ngb in ktri.neighbours:
            if ngb is not None:
                # neighbour links must be symmetric: ktri must appear in
                # its neighbour's own neighbour list
                if ktri not in ngb.neighbours:
                    print(("non neighbouring triangles:", id(ktri), "and", id(ngb)))
                    valid = False
        for v in ktri.vertices:
            if ktri.is_finite:
                # every vertex of a live finite triangle must itself be alive
                # at `now`: started at or before `now`, and either open-ended
                # or stopping strictly after `now`
                if not ((v.starts_at <= now and v.stops_at is not None and v.stops_at > now) or (
                        v.starts_at <= now and v.stops_at is None)):
                    print(("triangle", id(ktri), " with invalid kinetic"
                           " vertex", id(v), " for this time"))
                    print(("", v.starts_at, v.stops_at))
                    valid = False
    # check if the sides of a triangle share the correct vertex at begin / end
    # (disabled — see FIXME; `cw`/`ccw` are helpers defined elsewhere)
    if False:  # FIXME: enable!!!
        for ktri in L:
            for i in range(3):
                ngb = ktri.neighbours[i]
                if ngb is not None:
                    j = ngb.neighbours.index(ktri)
                    # shared edge i/j must reference the same vertex objects
                    if not ngb.vertices[cw(j)] is ktri.vertices[ccw(i)]:
                        print("something wrong with vertices 1")
                        valid = False
                    if not ngb.vertices[ccw(j)] is ktri.vertices[cw(i)]:
                        print("something wrong with vertices 2")
                        valid = False
    # FIXME: check orientation of triangles ????
    # -- could be little difficult with initial needle triangles at terminal
    # vertices of PSLG
    return valid
def _message_from_pods_dict(errors_dict):
"""Form a message string from a 'pod kind': [pod name...] dict.
Args:
- errors_dict: a dict with keys pod kinds as string and values
names of pods of that kind
Returns: a string message
"""
msg_list = [
"{0}: {1}".format(key, ", ".join(msg)) for key, msg in errors_dict.items()
]
return "; ".join(msg_list) |
def argument(*name_or_flags, **kwargs):
    """Helper to package flags and options for the subcommand decorator."""
    names = list(name_or_flags)
    return names, kwargs
def find_word(word, text):
    """Return a tuple of all indices at which `word` occurs in `text`.

    Overlapping occurrences are included. word and text must use the
    same encoding.
    @param word: word to search
    @param text: text in which to search for the word
    """
    positions = []
    hit = text.find(word)
    while hit != -1:
        positions.append(hit)
        # restart one past the last hit so overlapping matches are found
        hit = text.find(word, hit + 1)
    return tuple(positions)
def remove_dot_git(url_with_dot_git):
    """
    Remove a trailing ".git" from a repo name or url.

    :param url_with_dot_git: repo name/url, possibly ending in ".git"
    :return: the input without its trailing ".git"
    """
    # Only strip a *trailing* ".git"; the previous split('.git')[0]
    # also truncated urls that merely contain ".git" mid-string.
    if url_with_dot_git.endswith('.git'):
        return url_with_dot_git[:-4]
    return url_with_dot_git
def print_indent(t, indent=4):
    """
    Indent a text block (e.g. a python snippet) for HTML-style output.

    :param t: text (string possibly containing \n and \t)
    :param indent: indentation width in spaces
    :return: formatted text ('\n' becomes '<br>' + indent, '\t' becomes indent)
    """
    pad = indent * " "
    body = t.replace('\n', '<br>' + pad)
    return pad + body.replace('\t', pad)
def issafe(arg):
    """Returns False if arg contains ';' or '|'."""
    return ';' not in arg and '|' not in arg
def generate_dot_source(
        connections, node_conf, edge_conf, graph_conf):
    """Build graphviz "dot" source text for the given dependency graph.

    node_conf is a dictionary with keys being a possible type of a node.
    edge_conf is a dictionary with keys being a possible type of an edge.
    The values of the dictionaries are dictionaries with settings.
    connections is a list of Edge instances (each exposing node1/node2).
    """
    header = "digraph dependencies {"
    # graph-level settings are emitted on the header line
    for (key, value) in graph_conf.items():
        header += "{0}=\"{1}\"; ".format(key, value)
    footer = "}"

    def node(n):
        # one dot statement per node: quoted label plus [attr="value"] pairs
        label = '"' + n.label + '"'
        # the id attribute carries a JS activate() hook for interactive output
        label += '[id="activate(\'{0}\', \'{1}\')"]'.format(n.kind, n.label)
        # fall back to the 'default' settings when the node kind is unknown
        if n.kind in node_conf:
            label += "".join(["[{0}=\"{1}\"]".format(k, v) for
                              (k, v) in node_conf[n.kind].items()])
        else:
            label += "".join(["[{0}=\"{1}\"]".format(k, v) for
                              (k, v) in node_conf['default'].items()])
        return label

    def edge(e):
        line = '"{0}" -> "{1}"'.format(
            e.node1.label,
            e.node2.label)
        # edge settings are keyed by "kind1-kind2"
        kind = e.node1.kind + '-' + e.node2.kind
        # task->tags edges are suppressed entirely (empty statement)
        if kind == "task-tags":
            return ""
        if kind in edge_conf:
            line += "".join(["[{0}=\"{1}\"]".format(k, v) for
                             (k, v) in edge_conf[kind].items()])
        else:
            line += "".join(["[{0}=\"{1}\"]".format(k, v) for
                             (k, v) in edge_conf['default'].items()])
        return line

    res = [header]
    # edges (each edge also re-emits its two endpoint nodes)
    for e in connections:
        res.append(edge(e))
        res.append(node(e.node1))
        res.append(node(e.node2))
    res.append(footer)
    return "\n".join(res)
def blocksize_limit_for_txs(txps, txsize=226):
    """
    Convert a transactions-per-second figure into the equivalent bitcoin
    blocksize in megabytes (assuming 600 seconds per block).
    """
    bytes_per_second = txps * txsize
    megabytes_per_second = bytes_per_second / (1024 * 1024.0)
    return megabytes_per_second * 600.0
def unpack_row(row, cols):
    """Convert a suds row object into serializable format.

    Transform a row of results objects received from the DFP API's
    Publisher Query Language Service into a Python dict.
    Args:
      row: A row of suds object which includes an array of values.
      cols: An array of strings representing the column names.
    Returns:
      dict A serializable Python dict ({} when 'values' is absent).
    """
    try:
        # 'values' may be missing from the row object -> AttributeError
        extracted = (entry['value'] for entry in row['values'])
        return dict(zip(cols, extracted))
    except AttributeError:
        return {}
def rint(num):
    """
    Round the number (builtin banker's rounding) and return it as an int.

    :param num: input number
    :returns: integer rounded number
    """
    rounded = round(num)
    return int(rounded)
def list_metrics_to_list_bac(list_metrics):
    """
    Arg:
        list_metrics: A list of dictionaries, each with a "BAC" entry.
    Return:
        A list of floats (the first "BAC" value of each dict) with the
        same length as list_metrics.
    """
    return [metrics["BAC"][0] for metrics in list_metrics]
def qml(yi, yi1, dqml):
    """
    Maximum-likelihood transition probability q(y_i | y_i-1).

    :param yi: a label/state y_i
    :param yi1: a label/state y_i-1
    :param dqml: nested dict of pre-computed transition counts
    :return: count(yi1 -> yi) divided by the total count out of yi1
    """
    transitions = dqml[yi1]
    total = sum(transitions.values())
    return transitions[yi] / total
def get_commands_disable_feature(feature):
    """Get commands to disable feature.

    Args:
        feature (str): name of feature to disable
    Returns:
        list: ordered list of commands to disable the feature
              (empty when `feature` is falsy)
    Note:
        Specific for Ansible module(s). Not to be called otherwise.
    """
    if not feature:
        return []
    return ['no feature ' + feature]
def rev_comp(seq):
    """Generate the reverse complement of a DNA string."""
    conversion = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return ''.join(conversion[base] for base in reversed(seq))
def dur_attributes_to_dur(d_half, d_semiqvr):
    """
    Convert duration attributes d_hlf and d_sqv to a single duration d.
    - See eq. (2) of the paper: d = 8 * d_hlf + d_sqv.
    """
    return 8 * d_half + d_semiqvr
def postprocess_seq(seq, bos_idx, eos_idx, output_bos=False, output_eos=False):
    """
    Post-process a decoded sequence: truncate at the first EOS token and
    optionally drop BOS/EOS tokens from the result.
    """
    # default to keeping the whole sequence if no EOS is present
    cut = len(seq) - 1
    for pos, token in enumerate(seq):
        if token == eos_idx:
            cut = pos
            break
    kept = []
    for token in seq[:cut + 1]:
        if token == bos_idx and not output_bos:
            continue
        if token == eos_idx and not output_eos:
            continue
        kept.append(token)
    return kept
def kgtk_geo_coords(x):
    """Return True if 'x' is a KGTK geo coordinates literal."""
    # Assumes valid KGTK values, thus only tests the initial character:
    if not isinstance(x, str):
        return False
    return x.startswith('@')
def accuracy(conf_matrix):
    """
    Given a confusion matrix, returns the accuracy.
    Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
    """
    total = 0.0
    correct = 0.0
    for true_response, guess_dict in conf_matrix.items():
        for guess, count in guess_dict.items():
            total += count
            if guess == true_response:
                correct += count
    return correct / total
def get_octagonal_number(n: int) -> int:
    """Get the octagonal number ``O_n = n*(3n-2)`` for a given number ``n``."""
    return (3 * n - 2) * n
def _is_nak(msg):
"""Test if a message is a NAK from the modem."""
if hasattr(msg, "ack") and msg.ack.value == 0x15:
return True
return False |
def convert_bytes_string_to_string(bytes_str):
    """
    Decode a bytes value as utf-8; pass any other value through unchanged.

    Args:
        bytes_str (bytes): A bytes string (or any other value).
    Returns:
        str with utf-8 decoding when the input was bytes.
    """
    is_bytes = isinstance(bytes_str, bytes)
    return bytes_str.decode('utf-8') if is_bytes else bytes_str
def instance_dialogs(context, request, instance=None, instance_name=None, landingpage=False, start_form=None,
                     stop_form=None, reboot_form=None, terminate_form=None, associate_ip_form=None, disassociate_ip_form=None):
    """Modal dialogs for the Instance landing and detail page.

    Bundles the per-dialog forms into a template context dict; `context`
    and `request` are accepted but not used here.
    """
    return {
        'instance': instance,
        'instance_name': instance_name,
        'landingpage': landingpage,
        'start_form': start_form,
        'stop_form': stop_form,
        'reboot_form': reboot_form,
        'terminate_form': terminate_form,
        'associate_ip_form': associate_ip_form,
        'disassociate_ip_form': disassociate_ip_form,
    }
def getKey(dct, key):
    """
    Return dct[key] if the key is present, otherwise None.
    """
    # dict.get does the "value or None" lookup in one step, replacing the
    # `key in dct.keys()` + dct[key] double-lookup anti-pattern.
    return dct.get(key)
def sequence(start, end, factor):
    """Like `range()` but each term is `factor` * the previous term.

    `start` is inclusive, `end` is exclusive. Returns a list.
    """
    terms = []
    term = start
    while term < end:
        terms.append(term)
        term = term * factor
    return terms
def make_headers(user_agent):
    """Build HTTP headers from arguments.

    Args:
        user_agent (str): User-Agent for servername communication
    Returns:
        dict: HTTP headers (empty when user_agent is None)
    """
    if user_agent is None:
        return {}
    return {'User-Agent': user_agent}
def static_init(cls):
    """
    Class decorator: run the class's `static_init` hook (if defined and
    truthy) at class-construction time, then return the class unchanged.
    """
    initializer = getattr(cls, "static_init", None)
    if initializer:
        initializer()
    return cls
def train_test_split(filepaths_and_text, train_size):
    """
    Split dataset into train & test data.

    Parameters
    ----------
    filepaths_and_text : list
        List of samples
    train_size : float
        Percentage of entries to use for training (rest used for testing)

    Returns
    -------
    (list, list)
        List of train and test samples
    """
    cutoff = int(len(filepaths_and_text) * train_size)
    train_files = filepaths_and_text[:cutoff]
    test_files = filepaths_and_text[cutoff:]
    print(f"{len(train_files)} train files, {len(test_files)} test files")
    return train_files, test_files
def has_data(data: str) -> bool:
    """
    Check whether the input contains data (any non-whitespace characters).

    Parameters:
        data: (string) input to check
    Returns:
        True when `data` is non-empty and not all whitespace, else False.
    """
    if not data:
        return False
    return not data.isspace()
def is_sensible_roll(dice):
    """Check the dice results for sensible sizes.

    Returns a (bool, error-string) pair; the string is empty when valid.
    """
    count = int(dice[1])
    sides = int(dice[2])
    if count > 100:
        return False, "Attempted to roll more than 100 dice in a single group."
    if sides > 50000:
        return False, "Attempted to roll a dice with more than 50000 sides."
    return True, ""
def reverse_chars(s):
    """Return the string whose character order is the mirror image of s."""
    return s[::-1]
def convert_dcc_scale(scale):
    """
    Convert the given scale into a scale that a DCC can manage by swapping
    the Y and Z components.
    NOTE: tpDcc uses Y up coordinate axes as the base reference axis
    :param scale: list(float, float, float)
    :return: tuple(float, float, float)
    """
    x, y, z = scale[0], scale[1], scale[2]
    return x, z, y
def fix_saving_name(name):
    """Neutralize backslashes in Arch-Vile frame names (and strip NULs)."""
    trimmed = name.rstrip('\0')
    return trimmed.replace('\\', '`')
def find_note_index(scale, search_note):
    """Return the index of `search_note` in `scale`, or None when absent.

    Entries of `scale` may be a plain note (str) or a list of enharmonic
    equivalents; both forms are matched.
    """
    for i, note in enumerate(scale):
        # isinstance is the idiomatic (and subclass-friendly) type check,
        # replacing the `type(note) == list` comparisons
        if isinstance(note, list):
            if search_note in note:
                return i
        elif isinstance(note, str):
            if search_note == note:
                return i
    # explicit None (the previous version fell off the end implicitly)
    return None
def _format(addresses, expanded=False):
"""Adjust output format of the addresses depending on the verbosity requested
expanded=True means all the details about each address are given
expanded=False means only the list of IP addresses is returned
"""
if expanded:
return addresses
else:
address_list = []
for ip in addresses:
try:
address_list.append(ip['address'])
except Exception:
# IP is badly configured, missing address attribute for some reason
pass
return address_list |
def _is_string_same_case(input: str):
"""Returns flag indicating whether input string is a single case.
"""
return input == input.lower() or input == input.upper() |
def is_leap_year(year):
    """Return whether a given year was a leap year.

    A year is a leap year when divisible by four, except century years,
    which must also be divisible by 400 (Gregorian rule; see
    https://www.wwu.edu/astro101/a101_leapyear.shtml).
    """
    if year % 100 == 0:
        # century years require the additional divisible-by-400 check
        return year % 400 == 0
    return year % 4 == 0
def matched(s, patterns):
    """Check if string s contains any of the patterns."""
    # `patterns` may be None or empty; guard before iterating.
    if not patterns:
        return False
    # any() replaces the manual loop-and-flag idiom
    return any(p in s for p in patterns)
def sysv_hash(symbol):
    """sysv_hash(str) -> int

    Fallback hash function used in ELF files if .gnuhash is not present.
    """
    h = 0
    for ch in symbol:
        h = (h << 4) + ord(ch)
        high = h & 0xf0000000
        # fold the top nibble back in and clear it
        h = (h ^ (high >> 24)) & ~high
    return h & 0xffffffff
def trim(s):
    """Remove surrounding whitespace plus all carriage returns and newlines."""
    stripped = s.strip()
    return stripped.replace("\r", "").replace("\n", "")
def _split_camel(s: str, delimiter: str = '-') -> str:
"""Turn camel case name to snake-case-like name."""
return ''.join(delimiter + c.lower() if c.isupper() else c
for c in s).lstrip(delimiter) |
def wpzeros(t):
    """Working precision (bits) needed to compute higher zeros."""
    # thresholds are nested, so test from the largest down
    if t > 10**14:
        return 83
    if t > 10**11:
        return 70
    if t > 3 * 10**8:
        return 63
    return 53
def round(f_d, f_n):
    """
    Round the float ``f_d`` to ``f_n`` decimal places by shifting, adding
    0.5 and truncating.

    NOTE(review): shadows the built-in ``round``; because ``int()``
    truncates toward zero, negative values are not rounded to nearest —
    presumably callers only pass non-negative values, TODO confirm.
    """
    # shift the wanted decimals into the integer part
    f_d *= pow(10, f_n)
    # add one half so truncation rounds non-negative values to nearest
    f_d += .5
    f_d = int(f_d)
    # shift back to the original scale
    f_d /= pow(10, f_n)
    # return
    return f_d
def _lookup_module(modmap, name, obj, main_module): #FIXME: needs work
"""lookup name if module is imported"""
for modobj, modname in modmap[name]:
if modobj is obj and modname != main_module.__name__:
return modname |
def manhattan(permutation, comp=None):
    """A certain measure of sortedness for the list, based on Manhattan distance.

    >>> manhattan([0, 1, 2, 3, 4])  # sorted # doctest: +ELLIPSIS
    1.0...
    >>> manhattan([0, 1, 2, 5, 4, 3])  # almost sorted! # doctest: +ELLIPSIS
    0.777...
    """
    n = len(permutation)
    if n == 0:
        return 0
    if comp is None:
        # compare against the fully sorted version by default
        comp = sorted(permutation)
    total = sum(abs(ref - elem) for ref, elem in zip(comp, permutation))
    return 1 - 2 * total / n ** 2
def column(matrix, i):
    """Return column i from a list of lists (matrix)."""
    extracted = []
    for row in matrix:
        extracted.append(row[i])
    return extracted
def shorten_ip(ipaddr):
    """
    Return at most the first 128 characters of the "ip" response, which
    could be html if the page is flaking out.
    """
    # slicing already clamps at the string length, so no min() is needed
    return ipaddr[:128]
def trim(value):
    """Shorten excessively long string fields in the error log, recursing
    into dicts and lists."""
    if isinstance(value, dict):
        return {key: trim(item) for key, item in value.items()}
    if isinstance(value, list):
        return [trim(item) for item in value]
    if isinstance(value, str) and len(value) > 160:
        head = value[:77]
        tail = value[-80:]
        return head + '...' + tail
    return value
def left_shift(array, count):
    """Rotate `array` left by `count` positions and return a new list.

    The input list is never modified. Rotation wraps for
    count >= len(array); an empty list or non-positive count yields an
    unrotated copy.
    """
    if not array or count <= 0:
        # also avoids the IndexError the per-step version hit on []
        return array[:]
    # O(n) slice-based rotation instead of O(n * count) element shuffling
    k = count % len(array)
    return array[k:] + array[:k]
def update_in_alist(alist, key, value):
    """Update the second element of the matching entry in a list of 4-tuples."""
    updated = []
    for k, v1, v2, v3 in alist:
        if k == key:
            updated.append((key, value, v2, v3))
        else:
            updated.append((k, v1, v2, v3))
    return updated
def euler48(n=1000, nb_dig=10):
"""Solution for problem 48."""
mod = 10 ** nb_dig
return sum(pow(i, i, mod) for i in range(1, n + 1)) % mod |
def find_oedema_location_columns(columns):
    """Return the oedema location column names found in `columns`.

    A column qualifies when it contains 'oedema_' and the token after the
    first underscore is one of the known locations (face, feet, hands,
    pulmonary); severity-style columns are therefore excluded.
    .. warning: Include other, severe, severity?
    """
    locations = ('face', 'feet', 'hands', 'pulmonary')
    matches = []
    for col in columns:
        if 'oedema_' in col and col.split('_')[1] in locations:
            matches.append(col)
    return matches
def author(record):
    """
    Split the author field on the string ' and ' into a list of names.

    Parameters
    ----------
    record : dict
        the record

    Returns
    -------
    dict
        the given `record` with any updates applied (an empty/falsy
        author field is deleted instead of split)
    """
    if "author" in record:
        raw = record["author"]
        if raw:
            names = raw.replace('\n', ' ').split(" and ")
            record["author"] = [name.strip() for name in names]
        else:
            del record["author"]
    return record
def isnapoleon(word):
    """Only takes a six character string; True when it is three doubled chars."""
    pairs = (word[0] == word[1], word[2] == word[3], word[4] == word[5])
    return all(pairs)
def accuracy(gold, guess):
    """
    Calculate how often the top predicted answer matches the first gold answer.

    :param gold: jtr dataset with gold answers.
    :param guess: jtr dataset with predicted answers
    :return: accuracy (matches / total number of questions)
    """
    correct = 0
    total = 0
    for gold_instance, guess_instance in zip(gold['instances'], guess['instances']):
        paired = zip(gold_instance['questions'], guess_instance['questions'])
        for gold_question, guess_question in paired:
            gold_text = gold_question['answers'][0]['text']
            guess_text = guess_question['answers'][0]['text']
            correct += int(gold_text == guess_text)
            total += 1
    return correct / total
def categorize_by_damage(hurricanes):
    """Categorize hurricanes by damage and return a dictionary.

    Keys 0-5 are damage tiers; tier 0 also collects entries whose damage
    is the literal string "Damages not recorded".
    NOTE(review): any other non-numeric 'Damage' value would make the
    `>` comparisons raise TypeError — presumably the input is sanitized
    upstream, TODO confirm.
    """
    # upper bound (in dollars) of tiers 0-4; damage above tier 4 -> bucket 5
    damage_scale = {0: 0,
                    1: 100000000,
                    2: 1000000000,
                    3: 10000000000,
                    4: 50000000000}
    hurricanes_by_damage = {0: [], 1: [], 2: [], 3: [], 4: [], 5: []}
    for cane in hurricanes:
        total_damage = hurricanes[cane]['Damage']
        # unrecorded damage is treated like zero damage (bucket 0)
        if total_damage == "Damages not recorded":
            hurricanes_by_damage[0].append(hurricanes[cane])
        elif total_damage == damage_scale[0]:
            hurricanes_by_damage[0].append(hurricanes[cane])
        # each tier is a half-open interval: lower bound exclusive,
        # upper bound inclusive
        elif total_damage > damage_scale[0] and total_damage <= damage_scale[1]:
            hurricanes_by_damage[1].append(hurricanes[cane])
        elif total_damage > damage_scale[1] and total_damage <= damage_scale[2]:
            hurricanes_by_damage[2].append(hurricanes[cane])
        elif total_damage > damage_scale[2] and total_damage <= damage_scale[3]:
            hurricanes_by_damage[3].append(hurricanes[cane])
        elif total_damage > damage_scale[3] and total_damage <= damage_scale[4]:
            hurricanes_by_damage[4].append(hurricanes[cane])
        elif total_damage > damage_scale[4]:
            hurricanes_by_damage[5].append(hurricanes[cane])
    return hurricanes_by_damage
def all_nonzero(intlist):
    """Return True when no item in the list is zero.

    Arguments:
        intlist {list[int]} -- list of ints
    Returns:
        bool -- True iff intlist contains no 0 (vacuously True when empty)
    """
    # NOTE: the previous docstring described the inverse meaning
    # ("are there any 0s?"); the behavior here is unchanged.
    return all(item != 0 for item in intlist)
def get_field(sample, field_name, allowed_types=None, allow_none=True):
    """Gets the given sample field and optionally validates its type and value.

    Args:
        sample: a :class:`fiftyone.core.sample.Sample`
        field_name: the name of the field to get
        allowed_types (None): an optional iterable of
            :class:`fiftyone.core.labels.Label` types to enforce that the field
            value has
        allow_none (True): whether to allow the field to be None

    Returns:
        the field value

    Raises:
        ValueError if the field does not exist or does not meet the specified
        criteria
    """
    try:
        value = sample[field_name]
    except KeyError:
        # translate the mapping-style lookup failure into a descriptive error
        raise ValueError(
            "Sample '%s' has no field '%s'" % (sample.id, field_name)
        )
    if not allow_none and value is None:
        raise ValueError(
            "Sample '%s' field '%s' is None" % (sample.id, field_name)
        )
    if allowed_types is not None:
        # NOTE(review): this is an exact-type membership test, not
        # isinstance(), so subclasses of an allowed type are rejected —
        # confirm that is intentional
        field_type = type(value)
        if field_type not in allowed_types:
            raise ValueError(
                "Sample '%s' field '%s' is not a %s instance; found %s"
                % (sample.id, field_name, allowed_types, field_type)
            )
    return value
def jac(a, b):
    """Return the Jaccard similarity of two sets (coercing inputs to sets)."""
    set_a = a if isinstance(a, set) else set(a)
    set_b = b if isinstance(b, set) else set(b)
    overlap = len(set_a & set_b)
    # |A ∩ B| / |A ∪ B| via inclusion-exclusion on the denominator
    return overlap / float(len(set_a) + len(set_b) - overlap)
def mu(b, I, mu0, mu1):
    """
    Recovery rate, interpolated between mu0 and mu1 by bed availability.

    Parameters:
    -----------
    b
        hospital beds per 10,000 persons
    I
        number of infected
    mu0
        Minimum recovery rate
    mu1
        Maximum recovery rate
    """
    bed_fraction = b / (I + b)
    return mu0 + (mu1 - mu0) * bed_fraction
def get_xls_ir_opt_ir_generated_files(args):
    """Returns a list of filenames generated by the 'xls_ir_opt_ir' rule found in 'args'.

    Args:
        args: A dictionary of arguments.
    Returns:
        A single-element list holding the 'opt_ir_file' entry of `args`
        (None when the key is absent).
    """
    opt_ir_file = args.get("opt_ir_file")
    return [opt_ir_file]
def bin2hex(binbytes):
    """
    Convert a binary string (str or bytes) to a string of space-separated
    hexadecimal bytes.
    """
    # Iterating `bytes` yields ints in Python 3, where ord() would raise
    # TypeError; only str elements need the ord() conversion.
    return ' '.join('%02x' % (c if isinstance(c, int) else ord(c))
                    for c in binbytes)
def contains(pattern, dictionary):
    """Flexible lookup that searches keys for a substring `pattern`.

    Returns the value of the first matching key (in iteration order), or
    the empty string if the pattern is not found in any key.
    """
    for key in dictionary:
        if pattern in key:
            return dictionary[key]
    return ''
def to_python(repr_str):
    """Execute a python statement and return its result.

    WARNING: this calls exec() on the given string — never pass untrusted
    input, as arbitrary code will be executed.
    """
    ns = {}
    # evaluate the expression inside a throwaway namespace, then pull out
    # the bound result
    exec("val = (%s)" % repr_str, ns)
    return ns['val']
def get_searchable_attributes(attributes, cad_keys,
                              ca_definitions, content):
    """Get all searchable attributes for a given object that should be indexed.

    Args:
        attributes: Attributes that should be extracted from some model
        cad_keys: IDs of custom attribute definitions
        ca_definitions: Dictionary of "CAD ID" -> "CAD title"
        content: dictionary (JSON) representation of an object
    Return:
        Dict of "key": "value" from the object's revision
    """
    searchable = {name: content.get(name) for name in attributes}
    # a missing or falsy "custom_attributes" entry contributes nothing
    for cav in content.get("custom_attributes") or []:
        definition_id = cav["custom_attribute_id"]
        if definition_id in cad_keys:
            searchable[ca_definitions[definition_id]] = cav["attribute_value"]
    return searchable
def validatenodechancapacities(chancaps, minchanstiers):
    """
    Validate channel capacities against per-tier minimum-count filters.

    This allows more granular selection of candidates than total or
    average capacity.

    :param chancaps: iterable of channel capacities
    :param minchanstiers: filters like "100k2" / "1M1", read as "at least
        <count> channels of capacity >= <size> (k=1e3, M=1e6)"
    :returns: True when every tier filter is satisfied
    :raises RuntimeError: when a filter has no recognized size separator
    """
    for tierfilter in minchanstiers:
        if 'k' in tierfilter:
            ksize, mincount = tierfilter.split('k')
            size = int(ksize) * 1e3
        elif 'M' in tierfilter:
            msize, mincount = tierfilter.split('M')
            size = int(msize) * 1e6
        else:
            # fixed typo in the message: "seperator" -> "separator"
            raise RuntimeError('No recognized separator in minchannel filter')
        # sum of booleans counts the channels meeting the size threshold
        if sum(c >= size for c in chancaps) < int(mincount):
            return False
    return True
def get_y(pin_number: int, pin_count: int, spacing: float, grid_align: bool) -> float:
    """
    Return the y coordinate of the specified pin, keeping pins grid
    aligned if desired.

    The pin number is 1-index based; pin 1 is at the top and the middle
    pin lands at or near 0.
    """
    if grid_align:
        middle = float((pin_count + 1) // 2)
    else:
        middle = (pin_count + 1) / 2
    coord = -round(pin_number * spacing - middle * spacing, 2)
    # normalize negative zero so callers never see -0.0
    return 0.0 if coord == 0 else coord
def ss_version_reply(nick) -> str:
    """Build the CTCP VERSION reply text sent to the server.

    :param nick: nickname string the NOTICE is addressed to
    :returns: the raw NOTICE line as a str
    """
    prefix = "NOTICE " + nick
    payload = " :\x01VERSION \x02Trio-ircproxy.py\x02 5ioE.3 from \x1fhttps://ashburry.pythonanywhere.com\x1f\x01"
    return prefix + payload
def find_brute(T, P):
    """Return the lowest index of T at which substring P begins (or else -1)."""
    n, m = len(T), len(P)
    # try every potential starting index within T
    for start in range(n - m + 1):
        # compare P against T[start:start+m] character by character
        matched = 0
        while matched < m and T[start + matched] == P[matched]:
            matched += 1
        if matched == m:
            return start
    return -1
def FindPoint(x1: float, y1: float, x2: float, y2: float, x: float, y: float) -> bool:
    """
    Check if the point ``(x, y)`` lies strictly inside the rectangle
    spanned by ``(x1, y1)`` (minimum corner) and ``(x2, y2)`` (maximum
    corner).

    :param x1: minimum ``x`` coordinate of the rectangle vertices.
    :param y1: minimum ``y`` coordinate of the rectangle vertices.
    :param x2: maximum ``x`` coordinate of the rectangle vertices.
    :param y2: maximum ``y`` coordinate of the rectangle vertices.
    :param x: ``x`` coordinate of the point to be examined.
    :param y: ``y`` coordinate of the point to be examined.
    :return: ``True`` when strictly inside, ``False`` otherwise.
    """
    inside_x = x1 < x < x2
    inside_y = y1 < y < y2
    return inside_x and inside_y
def get_valid_keys(target, prefix):
    """List the valid keys to be used on a specific proto.

    Strips '<prefix>_' from each key of `target` and drops the
    'UNSPECIFIED' entry.
    """
    stripped = (key.replace(f'{prefix}_', '') for key in target.keys())
    return [key for key in stripped if key != 'UNSPECIFIED']
def get_mode_size(mode):
    """Convert a PIL image mode string into a dimension cardinality
    (the number of uppercase letters in the mode)."""
    return sum(1 for ch in mode if ch.isupper())
def lerp(t, p0, p1):
    """Linear interpolation between two 2D points at parameter t."""
    inv = 1 - t
    x = p0[0] * inv + p1[0] * t
    y = p0[1] * inv + p1[1] * t
    return (x, y)
def nD0_active(N, vzero, dr, L):
    """
    Return the product of particle density n = N/L^2 and the active
    diffusion constant D_0 = vzero^2/(2*dr).

    Parameters
    ----------
    N : int or float
        Number of particles.
    vzero : float
        Self-propelling velocity.
    dr : float
        Rotation diffusion constant.
    L : float
        Characteristic system length.

    Returns
    -------
    product : float
        n D_0
    """
    numerator = N * vzero**2
    denominator = 2 * dr * L**2
    return numerator / denominator
def updateDict(dictObj1, dictObj2):
    """
    Merge dictObj2 into dictObj1 in place and return dictObj1.
    """
    for key, value in dictObj2.items():
        dictObj1[key] = value
    return dictObj1
def format_path_and_attribute_for_batch(raw_path: str) -> tuple:
    """
    Prettify the batch attribute and path names.

    Returned formatted_path has any leading slash sliced off. Returned
    formatted_attribute will be attached to the final return of
    vehicle.batch(); the naming considers empty and nested paths.

    Args:
        raw_path: Raw path (minus the slash) to a smartcar endpoint
    Returns:
        (<formatted path>, <formatted attribute>)
        e.g.
        1. "EMPTY"  raw_path == '/'           -> ('', 'attributes')
        2. "NORMAL" raw_path == '/odometer'   -> ('odometer', 'odometer')
        3. "NESTED" raw_path == '/engine/oil' -> ('engine/oil', 'engine_oil')
    """
    mapper = {
        "battery/capacity": "battery_capacity",
        "engine/oil": "engine_oil",
        "tires/pressure": "tire_pressure",
        "": "attributes",
    }
    # startswith safely handles the empty string, where the previous
    # raw_path[0] indexing raised IndexError
    formatted_path = raw_path[1:] if raw_path.startswith("/") else raw_path
    formatted_attribute = mapper.get(formatted_path, formatted_path)
    return formatted_path, formatted_attribute
def parse_number(text):
    """Return the first parseable integer found in `text`, or -1.

    For strings, each space-separated token is tried in turn; for
    non-string inputs, a direct int() conversion is attempted.
    """
    # initialize up front: previously `number` could be unbound (raising
    # UnboundLocalError) when int(text) raised ValueError in the
    # non-string branch
    number = -1
    if isinstance(text, str):
        for token in text.split(" "):
            try:
                number = int(token)
                break
            except ValueError:
                pass
    else:
        try:
            number = int(text)
        except ValueError:
            pass
    return number
def field(name):
    """Return a string for accessing the field called 'name'.

    A dotted name refers to a field in a context; otherwise it is a
    field on the target object.
    """
    if '.' not in name:
        return "obj->" + name
    return name.replace(".", "_ctx->")
def read_file(filename, mode='r'):
    """Read a file and return its contents ('' when reading fails).

    :param filename: path of the file to read
    :param mode: open() mode; binary modes (containing 'b') are opened
        without an encoding, since open() raises ValueError when an
        encoding is passed for them
    """
    content = ''
    try:
        if 'b' in mode:
            # binary mode: must not pass encoding
            with open(filename, mode) as f:
                content = f.read()
        else:
            with open(filename, mode, encoding='utf-8') as f:
                content = f.read()
    except Exception as err:
        print('[x] failed to read: {}, err: {}'.format(filename, err))
    return content
def alpha(string):
    """Return a string containing only the ASCII letters of `string`.

    Args:
        string: String containing all kinds of characters.
    Returns:
        The string without any non-alpha characters.
    """
    letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    kept = [ch for ch in string if ch in letters]
    return ''.join(kept)
def download(path):
    """Use urllib to download a file.

    Parameters
    ----------
    path : str
        Url to download

    Returns
    -------
    path : str
        Location of downloaded file (the basename of the url; when a file
        with that name already exists it is returned without downloading).
    """
    # local imports keep the module importable without six installed
    import os
    from six.moves import urllib
    # the target filename is the last path component of the url
    fname = path.split('/')[-1]
    if os.path.exists(fname):
        return fname
    print('Downloading ' + path)
    def progress(count, block_size, total_size):
        # report every 20th block to avoid flooding the console
        if count % 20 == 0:
            print('Downloaded %02.02f/%02.02f MB' % (
                count * block_size / 1024.0 / 1024.0,
                total_size / 1024.0 / 1024.0), end='\r')
    filepath, _ = urllib.request.urlretrieve(
        path, filename=fname, reporthook=progress)
    return filepath
def find_latest_artifact(artifacts_dict: dict, name: str = "rst_files") -> int:
    """Find the id of the latest artifact that can be downloaded.

    :param artifacts_dict: Fetched github actions response
    :type artifacts_dict: dict
    :param name: Name of the searched artifact. Defaults to "rst_files".
    :type name: str
    :returns: Highest id among artifacts whose name matches
    :rtype: int
    """
    matching_ids = (entry["id"] for entry in artifacts_dict["artifacts"]
                    if entry["name"] == name)
    return max(matching_ids)
def swift_library_output_map(name, module_link_name):
    """Returns the dictionary of implicit outputs for a `swift_library`.

    This function is used to specify the `outputs` of the `swift_library`
    rule; as such, its arguments must be named exactly the same as the
    attributes to which they refer.

    Args:
        name: The name of the target being built.
        module_link_name: The module link name of the target being built
            (may be empty, in which case `name` is used).
    Returns:
        The implicit outputs dictionary for a `swift_library`.
    """
    lib_name = module_link_name or name
    return {"archive": "lib{}.a".format(lib_name)}
def _strip_prefix(package, version, strip_prefix = ""):
"""Computes the strip prefix for a downloaded archive of the provided
PyPI package and version.
Args:
package: PyPI package name.
version: version for which the archive should be downloaded.
strip_prefix: additional directory prefix to strip from the extracted
files.
"""
if strip_prefix:
return "{0}-{1}/{2}".format(package, version, strip_prefix)
return "{0}-{1}".format(package, version) |
def calc_fuel(inputs):
    """Calculate fuel for a given mass: truncate mass/3 toward zero, minus 2."""
    third = int(inputs / 3)
    return third - 2
def list_to_string_with_comma(thing):
    """Return every item of the list as a string, joined with commas."""
    # str.join is linear; the previous += loop was quadratic in list size
    return ','.join(str(item) for item in thing)
def get_minimum(vacant_groups):
    """
    Retrieve the group number with the minimum student count.

    Ties are broken by insertion order (first minimal entry wins), same
    as the previous stable-sort implementation.
    """
    # min() with a key is O(n), avoiding a full sort just to take one entry
    return min(vacant_groups, key=vacant_groups.get)
def xyz2rgb(x, y, z):
    """
    Convert from xyz color space to rgb.
    Ganked from http://en.wikipedia.org/wiki/Srgb

    Parameters
    ----------
    x, y, z all floats in the xyz color space

    Returns
    -------
    rgb triple with r, g, b between 0 and 1

    NB: If x, y, z is out of gamut, then we clip rgb to the [0, 1]^3 cube.
    """
    try:
        x, y, z = float(x), float(y), float(z)
    except ValueError:
        raise ValueError(("x (%s), y (%s), and z (%s) must all be (convertible "
                          + "to) floats") % (str(x), str(y), str(z)))
    # linear transformation into linear rgb
    linear = (
        3.2406 * x - 1.5372 * y - 0.4986 * z,
        -0.9689 * x + 1.8758 * y + 0.0415 * z,
        0.0557 * x - 0.2040 * y + 1.0570 * z,
    )
    def corrected(c):
        # clip out-of-gamut values, then apply the sRGB gamma correction
        c = min(1., max(0., c))
        if c <= 0.0031308:
            return 12.92 * c
        return 1.055 * c ** (1. / 2.4) - 0.055
    return corrected(linear[0]), corrected(linear[1]), corrected(linear[2])
def split_strings(strings, start, chr_lens):
    """
    Split `strings` into chunks based on cumulative lengths, offset by
    `start` (chunk k spans chr_lens[k-1]-start .. chr_lens[k]-start; the
    first chunk begins at index 0).
    """
    chunks = []
    begin = start
    for end in chr_lens:
        chunks.append(strings[begin - start:end - start])
        begin = end
    return chunks
def create_time_schedule_url(quarter: str, year: str, sln: str) -> str:
    """
    Build the UW Time Schedule URL for a section.

    :param quarter: quarter abbreviation, e.g. "AUT"
    :param year: four-digit year as a string
    :param sln: the section's SLN code
    :return: URL for the UW Time Schedule, e.g.
        "https://sdb.admin.uw.edu/timeschd/uwnetid/sln.asp?QTRYR=AUT+2020&SLN=13418"
    """
    return ("https://sdb.admin.uw.edu/timeschd/uwnetid/sln.asp?"
            "QTRYR=" + quarter + "+" + year + "&SLN=" + sln)
def as_url(url):
    """Prepends "file:" to ``url`` if it is likely to refer to a local file

    A url is considered non-local only when it carries one of the known
    remote/local schemes (http, https, ftp, scp, file) before a colon.
    """
    scheme, colon, _rest = url.partition(':')
    if colon and scheme in ('http', 'https', 'ftp', 'scp', 'file'):
        return url
    return 'file:' + url
def pointerize(decl: str, name: str) -> str:
    """Given a C decl and its name, modify it to be a declaration to a pointer.

    A '(' in the decl marks it as a function type, whose pointer form needs
    the star-and-name wrapped in parentheses; plain types only need a leading
    star. This doesn't work in general but does work for all our types.
    """
    pointer_form = '(*{})'.format(name) if '(' in decl else '*{}'.format(name)
    return decl.replace(name, pointer_form)
def _generate_table_urls(base_url: str, n_records: int, odata_version: str) -> list:
"""Creates a list of urls for parallel fetching.
Given a base url, this function creates a list of multiple urls, with query parameters
added to the base url, each reading "$skip={i}" where i is a multiplication of 10,000
(for v3) or 100,000 (for v4). The base url is meant to be the url for a CBS table, and
so each generated url corresponds to the next 10,000(/100,000) rows of the table.
Parameters
----------
base_url : str
The base url for the table.
n_records : int
The amount of rows(=records/observations) in the table.
odata_version : str
version of the odata for this dataset - must be either "v3" or "v4".
Returns
-------
table_urls : list of str
A list holding all urls needed to fetch full table data.
"""
# Since v3 already has a parameter ("?$format=json"), the v3 and v4 connectors are different
connector = {"v3": "&", "v4": "?"}
cbs_limit = {"v3": 10000, "v4": 100000}
trailing_zeros = {"v3": 4, "v4": 5}
# Only the main table has more then 10000(/100000 for v4) rows, the other tables use None
if n_records is not None:
# Create url list with query parameters
table_urls = [
base_url
+ f"{connector[odata_version]}$skip={str(i+1)}"
+ ("0" * trailing_zeros[odata_version])
for i in range(n_records // cbs_limit[odata_version])
]
# Add base url to list
table_urls.insert(0, base_url)
else:
table_urls = [base_url]
return table_urls |
def average_batch_errors(errors, n_samples, batch_size):
    """
    Computes average error per sample.

    Parameters
    ----------
    errors : list
        List of errors where each element is a average error
        per batch.
    n_samples : int
        Number of samples in the dataset.
    batch_size : int
        Mini-batch size, or None when the whole dataset was one batch.

    Returns
    -------
    float
        Average error per sample.
    """
    # batch_size None means a single full-dataset batch: its average error
    # is already the per-sample average.
    if batch_size is None:
        return errors[0]
    remainder = n_samples % batch_size
    if remainder == 0:
        # Every batch has the same size, so a plain weighted mean suffices.
        return batch_size * sum(errors) / n_samples
    # The final batch is smaller; weight it by its true sample count.
    weighted_total = sum(errors[:-1]) * batch_size + errors[-1] * remainder
    return weighted_total / n_samples
def _record_row_parser(buf):
"""
Parses the given `buf` as str representation of 'row' into `column`
and `value`.
Additionally, strips leading and trailing whitespace characters.
`buf` should be formatted in::
'<column> : <value>'
Example::
'name : "br1"'
:param buf: single row in str type.
:return: tuple of `column` and `value`.
"""
column, value = buf.split(':', 1)
return column.strip(), value.strip() |
def day_name(x):
    """Return the English weekday name for day number *x*.

    Parameters
    ----------
    x : int
        Day index, 0 (Sunday) through 6 (Saturday).

    Returns
    -------
    str or None
        The weekday name, or None when *x* is not in 0..6.
    """
    # NOTE: the previous docstring ("next compass point in the clockwise
    # direction") was a copy-paste error from an unrelated function.
    days = {0: "Sunday", 1: "Monday", 2: "Tuesday", 3: "Wednesday",
            4: "Thursday", 5: "Friday", 6: "Saturday"}
    # dict.get already defaults to None for missing keys.
    return days.get(x)
def has_won(board):
    """
    returns 0 if game is unwon.
    returns player number of winning player if game is won.

    `board` is a 3x3 grid where 0 marks an empty cell and any other value
    is a player number.

    Bug fix: the previous version returned board[1][0] for a win on row 2,
    reporting the wrong cell (and hence possibly the wrong player, or 0).
    """
    lines = []
    # Rows
    lines.extend(board)
    # Columns
    lines.extend([[board[r][c] for r in range(3)] for c in range(3)])
    # Diagonals
    lines.append([board[i][i] for i in range(3)])
    lines.append([board[i][2 - i] for i in range(3)])
    for line in lines:
        # A line wins only when all three cells match and are non-empty.
        if line[0] != 0 and line[0] == line[1] == line[2]:
            return line[0]
    return 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.