content stringlengths 42 6.51k |
|---|
def draw_dot(image=None, coordinates=None,
             radius=None, color=None):
    """
    Draw a filled dot (circle) on a given image canvas.

    :param image: Pillow/PIL ImageDraw-like object exposing ``ellipse``
    :param coordinates: (x, y) pair that will be the center of the dot
    :param radius: radius size of the dot
    :param color: fill/outline color of the dot
    :returns: whatever ``image.ellipse`` returns, or False when any
        argument is missing
    :raises TypeError: none
    """
    # Explicit None checks: 0 is a legitimate radius/color value, and the
    # previous truthiness test wrongly rejected it.
    if image is None or coordinates is None or radius is None or color is None:
        return False
    x, y = coordinates[0], coordinates[1]
    return image.ellipse(
        [x - radius, y - radius, x + radius, y + radius],
        fill=color, outline=color)
def get_attribute(obj: dict, path: list):
    """
    Get attribute iteratively from a nested JSON-like object.

    :param obj: object to iterate on (nested dicts/lists)
    :param path: list of keys (for dicts) or integer indexes (for lists)
    :return: the value if the path is accessible, empty string if not found
    """
    current_location = obj
    for token in path:
        if isinstance(current_location, dict) and token in current_location:
            current_location = current_location[token]
        elif (isinstance(current_location, list)
              and isinstance(token, int)
              and -len(current_location) <= token < len(current_location)):
            # The isinstance guard prevents a TypeError when a non-int token
            # is compared against len(); the lower bound prevents an
            # IndexError for far-negative indexes while still allowing
            # Python-style negative indexing.
            current_location = current_location[token]
        else:
            return ""
    return current_location
def equal_weight(x, y):
    """Dummy weighting function: every (x, y) pair counts as one vote."""
    del x, y  # arguments are intentionally unused
    return 1
def _acceptance_tolerance_band(
    upper_acceptance_limit,
    lower_acceptance_limit,
    gradient_upper_acceptance_limit,
    gradient_lower_acceptance_limit,
    delta_product_change_reversal_point,
    max_31_0,
    min_31_0,
):
    """
    Inner loop of acceptance_tolerance_band: updates the upper and lower
    acceptance limits in place, one time step at a time.

    :param upper_acceptance_limit: per-step upper limits; mutated in place
    :param lower_acceptance_limit: per-step lower limits; mutated in place
    :param gradient_upper_acceptance_limit: per-step allowed decrease of the upper limit
    :param gradient_lower_acceptance_limit: per-step allowed increase of the lower limit
    :param delta_product_change_reversal_point: per-step value; 0 marks steps
        outside the product-change..reversal-point window.
        NOTE(review): index ``i + 1`` is read inside the loop, so this
        sequence is assumed to have at least one more entry than
        ``upper_acceptance_limit`` -- TODO confirm with caller.
    :param max_31_0: per-step maximum of all setpoints in the (t-31)..(t) window
    :param min_31_0: per-step minimum of all setpoints in the (t-31)..(t) window
    :return: tuple (upper_acceptance_limit, lower_acceptance_limit,
        gradient_upper_acceptance_limit, gradient_lower_acceptance_limit)
    """
    # calculate upper acceptance limit prerequisites:
    # maximum out of all setpoints between (t-31) -> (t) and
    # the upper acceptance limit of one time step before (t-1)-upper gradient at t
    for i in range(len(upper_acceptance_limit)):
        # calculate upper acceptance limit prerequisites
        # NOTE(review): at i == 0, [i - 1] wraps around and reads the LAST
        # element -- presumably intentional seeding from the previous band,
        # but worth confirming.
        max_upper_acceptance_limit = max(
            max_31_0[i],
            (upper_acceptance_limit[i - 1] - gradient_upper_acceptance_limit[i]),
        )
        # calculate lower acceptance limit prerequisite:
        # minimum out of all setpoints between (t-31) -> (t) and
        # the lower acceptance limit of one time step before (t-1)-lower gradient at t
        min_lower_acceptance_limit = min(
            min_31_0[i],
            (lower_acceptance_limit[i - 1] + gradient_lower_acceptance_limit[i]),
        )
        # calculate upper acceptance limit (formula 3)
        # if t_productchange > t > t_reversalpoint
        upper_acceptance_limit[i] = (
            max_upper_acceptance_limit
            if delta_product_change_reversal_point[i] == 0 or delta_product_change_reversal_point[i + 1] == 0
            # else (t_productchange <= t <= t_reversalpoint): clamp to >= 0
            else max(max_upper_acceptance_limit, 0)
        )
        # calculate lower acceptance limit (formula 4), mirroring the upper
        # case but clamped to <= 0 inside the window
        lower_acceptance_limit[i] = (
            min_lower_acceptance_limit
            if delta_product_change_reversal_point[i] == 0 or delta_product_change_reversal_point[i + 1] == 0
            else min(min_lower_acceptance_limit, 0)
        )
    return (
        upper_acceptance_limit,
        lower_acceptance_limit,
        gradient_upper_acceptance_limit,
        gradient_lower_acceptance_limit,
    )
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # Report the count we failed to hit, not the total step count
            # (the original message interpolated the wrong variable).
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        # Distribute the division remainder one extra step per leading section.
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
def build_select_unnamed(table, to_select, where, join='AND'):
    """
    Build a parameterized SELECT statement.

    Parameters
    ----------
    table : str
        Table where the query will be directed.
    to_select : iterable
        The columns to select.
    where : iterable
        Column names used as equality conditions (bound as ? placeholders).
    join : str
        The joining clause of the parameters.

    Returns
    -------
    str
        Built query.
    """
    columns = ', '.join(str(col) for col in to_select)
    query = 'SELECT ' + columns + ' FROM "' + table + '"'
    if where:
        separator = ' ' + join + ' '
        conditions = separator.join('{0} = ?'.format(cond) for cond in where)
        query += ' WHERE ' + conditions
    return query
def break_long_string(string: str, max_len: int = 70, indent: int = 0) -> str:
    """Break a long string into lines of roughly ``max_len`` characters.

    Consecutive whitespace is collapsed to single spaces, each output line is
    prefixed with ``indent`` spaces, and lines are joined with newlines
    (no trailing newline).
    """
    import numpy as np

    # split() with no argument collapses runs of any whitespace -- which is
    # what the original intended but did not achieve with split(" ").
    content = string.split()
    if not content:
        # Guard: max() below would raise on an empty/whitespace-only input.
        return ""
    str_len = [len(c) + 1 for c in content]  # +1 for the joining whitespace
    cum_len = np.cumsum(str_len)
    block_idx = np.floor(cum_len / max_len).astype(int)
    n_blocks = max(block_idx) + 1
    content = np.array(content, dtype=str)
    output = ""
    for b in range(n_blocks):
        (idx,) = np.where(block_idx == b)
        output += " " * indent + " ".join(content[idx].tolist()) + "\n"
    # str.rstrip returns a new string; the original discarded the result,
    # leaving a trailing newline despite the comment saying otherwise.
    return output.rstrip("\n")
def highlight_example(text, highlighted):
    """'Highlights' ALL the highlighted parts of the word usage example
    by wrapping each span in * characters.

    Args:
        text: The text of the example.
        highlighted: Iterable of (start, end) index pairs; each pair marks
            the half-open span text[start:end] to highlight.

    Returns:
        The highlighted word usage example.
    """
    def _wrap(string, start, end):
        """Wrap string[start:end] in a pair of asterisks.

        Example: _wrap("This is a sample string", 0, 4)
        returns "*This* is a sample string".
        """
        return string[:start] + "*" + string[start:end] + "*" + string[end:]

    # Each wrapped span adds two characters, shifting all later indexes.
    offset = 0
    for start, end in highlighted:
        text = _wrap(text, start + offset, end + offset)
        offset += 2
    return text
def _rzfill(string, to_len):
"""right-pad a string with zeros to the given length"""
if len(string) > to_len:
raise ValueError("string is already longer than to_len")
return string + '0' * (to_len - len(string)) |
def alcohol_by_volume_alternative(og, fg):
    """
    Alcohol by Volume, alternative (Balling-derived) calculation.

    :param float og: Original Gravity
    :param float fg: Final Gravity
    :return: Alcohol by Volume decimal percentage
    :rtype: float

    Formula: ABV = 76.08 * (og - fg) / (1.775 - og) * fg / 0.794

    This comes from Balling's formula: the Original and Real Extract values
    are converted with the simple Plato-to-SG equation, giving Alcohol by
    Weight, which is then converted to Alcohol by Volume by the ratio of the
    Final Gravity to the density of ethanol. It reports a higher ABV for
    higher-gravity beers than the simple formula.

    Sources:
    * Ritchie Products Ltd (Zymurgy, Summer 1995, vol. 18, no. 2)
    * Michael L. Hall, "Brew by the Numbers"; Daniels, "Designing Great Beers"
    * http://www.brewersfriend.com/2011/06/16/alcohol-by-volume-calculator-updated/
    """
    # Density listed (possibly incorrectly) from Zymergy Mag
    DENSITY_ETHANOL = 0.794
    alcohol_by_weight = 76.08 * (og - fg) / (1.775 - og)
    alcohol_by_volume = alcohol_by_weight * (fg / DENSITY_ETHANOL)
    return alcohol_by_volume / 100.0
def _run(test, num_samples=None, num_iters=None, verbose=None,
measure_memory=False):
"""Helper function that constructs tuple with arguments for run method."""
return (
test, num_samples, num_iters, verbose, measure_memory) |
def string_empty(string: str) -> bool:
    """Return True when the input is None, empty, or whitespace only."""
    if string is None:
        return True
    return not string.strip()
def prefix_match(query, db):
    """Naive check: is query a prefix of any db entry, or vice versa."""
    def _either_prefix(a, b):
        # Comparing up to the shorter length covers both directions at once.
        shorter = min(len(a), len(b))
        return a[:shorter] == b[:shorter]

    return any(_either_prefix(query, entry) for entry in db)
def fib_fast(n):
    """Fast-doubling Fibonacci: return the pair (F(n), F(n+1)).

    Source: http://www.nayuki.io/res/fast-fibonacci-algorithms/fastfibonacci.py
    Further reading: http://www.nayuki.io/page/fast-fibonacci-algorithms
    """
    if n == 0:
        return (0, 1)
    else:
        # Floor division is required: in Python 3 `n / 2` yields a float,
        # so the recursion never reaches the n == 0 base case for most
        # inputs and eventually raises (or recurses forever).
        a, b = fib_fast(n // 2)
        c = a * (b * 2 - a)
        d = a * a + b * b
        if n % 2 == 0:
            return (c, d)
        else:
            return (d, c + d)
def complete_url(string):
    """Return the complete URL for a site-relative path."""
    base = "http://www.bhinneka.com"
    return base + string
def _get_boundary(vs, boundary_string):
""" Return slice representing boundary
Parameters
----------
boundary_string
Identifer for boundary. May take one of the following values:
SURFACE: [:, :, -1] only the top layer
BOTTOM: bottom_mask as set by veros
else: [:, :, :] everything
"""
if boundary_string == 'SURFACE':
return tuple([slice(None, None, None), slice(None, None, None), -1])
if boundary_string == 'BOTTOM':
return vs.bottom_mask
return tuple([slice(None, None, None)] * 3) |
def batch_statistics(batch):
    """
    Compute the total number of tasks and the labeled-task count of a batch.

    Args:
        batch: mapping with a "tasks" list; each task carries an
            "is_labeled" flag.

    Returns: (total number of tasks in batch, number of labeled tasks)
    """
    tasks = batch["tasks"]
    labeled_count = sum(1 for task in tasks if task["is_labeled"])
    return len(tasks), labeled_count
def get_player(equipment, hit_points):
    """Build a player dict from (name, attributes) equipment pairs and HP."""
    total_damage = sum(attrs['Damage'] for _, attrs in equipment)
    total_armor = sum(attrs['Armor'] for _, attrs in equipment)
    return {'Hit Points': hit_points,
            'Damage': total_damage,
            'Armor': total_armor}
def list_union(l1, l2):
    """Return a list containing the union of the elements of two lists."""
    return list(set(l1) | set(l2))
def qmap(f, q):
    """
    Apply `f` post-order to all sub-terms in query term `q`.

    Named-tuple-like nodes (anything with ``_fields``) are rebuilt from
    their transformed fields and then passed to ``f``; plain lists/tuples
    are rebuilt with the same container type without applying ``f`` to the
    container itself; everything else is handed to ``f`` directly.
    """
    if hasattr(q, '_fields'):
        mapped_fields = [qmap(f, getattr(q, name)) for name in q._fields]
        rebuilt = type(q)(*mapped_fields)
        return f(rebuilt)
    if isinstance(q, (list, tuple)):
        return type(q)(qmap(f, item) for item in q)
    return f(q)
def list_append_all_newline(list_item: list) -> list:
    """
    Append a newline character to every element of a list.

    :param list_item: A list object to append newlines to.
    :return list: A new list with a newline appended to each element.
    """
    return [f'{element}\n' for element in list_item]
def extract_pairs_from_lines(lines):
    """Extract consecutive non-empty line pairs from raw lines."""
    collected_pairs = []
    for first, second in zip(lines, lines[1:]):
        first, second = first.strip(), second.strip()
        # Only keep adjacent pairs where both sides are non-empty.
        if first and second:
            collected_pairs.append([first, second])
    return collected_pairs
def get_compression_effort(p_in: int, p_out: int, flow_rate: int) -> float:
    """Calculate the required electricity consumption from the compressor given
    an inlet and outlet pressure and a flow rate for hydrogen.

    The returned value is shaft power scaled per unit of daily mass flow
    (power_req * 24 / flow_rate), i.e. an energy figure per kg of hydrogen.
    """
    # result is shaft power [kW] and compressor size [kW]
    # flow_rate = mass flow rate (kg/day)
    # p_in = input pressure (bar)
    # p_out = output pressure (bar)
    Z_factor = 1.03198  # the hydrogen compressibility factor
    N_stages = 2  # the number of compressor stages (assumed to be 2 for this work)
    t_inlet = 310.95  # K the inlet temperature of the compressor
    y_ratio = 1.4  # the ratio of specific heats
    # NOTE(review): hydrogen's molar mass is usually quoted as ~2.016 g/mol;
    # 2.15 may be intentional (effective value) or a typo -- confirm before reuse.
    M_h2 = 2.15  # g/mol the molecular mass of hydrogen
    eff_comp = 0.75  # % compressor isentropic efficiency
    R_constant = 8.314  # J/(mol*K)
    # Multi-stage polytropic compression work: mass-flow term (kg/s) times
    # the gas/temperature term times the stage/heat-ratio factor.
    part_1 = (
        (flow_rate * (1 / (24 * 3600)))
        * ((Z_factor * t_inlet * R_constant) / (M_h2 * eff_comp))
        * ((N_stages * y_ratio / (y_ratio - 1)))
    )
    # Pressure-ratio term: assumes the total ratio is split evenly across stages.
    part_2 = ((p_out / p_in) ** ((y_ratio - 1) / (N_stages * y_ratio))) - 1
    power_req = part_1 * part_2
    return power_req * 24 / flow_rate
def sort_dict(in_struct):
    """Recursively sort a dictionary by dictionary keys. (saves WS the trouble)"""
    if isinstance(in_struct, dict):
        return {key: sort_dict(in_struct[key]) for key in sorted(in_struct)}
    if isinstance(in_struct, list):
        # List order is preserved; only nested dict keys get sorted.
        return [sort_dict(item) for item in in_struct]
    return in_struct
def get_neighbors_and_bond_types(atom_idx, list_of_bonds, atomic_symbols, bond_types):
    """ Return (neighbor symbol, bond type) pairs for the given atom. """
    if not len(list_of_bonds) == len(bond_types):
        raise ValueError(('list_of_bonds(%d) and bond_types(%d) should be of the same ' +
                          'length.')%(len(list_of_bonds), len(bond_types)))
    neighbors = []
    for bond, btype in zip(list_of_bonds, bond_types):
        # A bond touching atom_idx on either end contributes its OTHER atom.
        if bond[0] == atom_idx:
            neighbors.append((atomic_symbols[bond[1]], btype))
        elif bond[1] == atom_idx:
            neighbors.append((atomic_symbols[bond[0]], btype))
    return neighbors
def octetstr_2_string(bytes_string):
    """Convert an SNMP OCTETSTR bytes value to a lowercase hex string.

    Args:
        bytes_string: Binary value to convert

    Returns:
        result: Lowercase hex string equivalent of bytes_string
    """
    decoded = bytes_string.decode('utf-8')
    return ''.join('{:02x}'.format(ord(char)) for char in decoded)
def motion_dollar(input_line, cur, count):
    """Go to end of line and return position.

    See Also:
        `motion_base()`.
    """
    # `cur` and `count` are unused: end-of-line ignores both.
    return len(input_line), False, False
def listify(x):
    """Turn argument into a list.

    A convenience that lets strings act as shorthand for [string] in some
    arguments. Returns None for None, the value itself for a list or tuple,
    and [x] for anything else.

    :param x: value to be listified.
    """
    if x is None:
        return None
    return x if isinstance(x, (list, tuple)) else [x]
def has_colours(stream):
    """Check if the given stream is a TTY whose terminal supports colors."""
    if not hasattr(stream, "isatty"):
        return False
    if not stream.isatty():
        return False  # auto color only on TTYs
    try:
        import curses
        curses.setupterm()
        return curses.tigetnum("colors") > 2
    except Exception:
        # `except Exception` (not a bare `except`) so KeyboardInterrupt and
        # SystemExit are not swallowed; guess no-color on any curses error.
        return False
def shift_to_the_left(array, dist, pad=True, trim=True):
    """Shift a list to the left by ``dist`` positions.

    :param array: An iterable object.
    :type array: iterable object
    :param dist: how far you want to shift
    :type dist: int
    :param pad: pad with copies of array[-1] on the right.
    :type pad: boolean (default True)
    :param trim: drop the first ``dist`` items.
    :type trim: boolean (default True)

    Usage::

        >>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        >>> shift_to_the_left(array, 1, pad=True, trim=True)
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
        >>> shift_to_the_left(array, 1, pad=True, trim=False)
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]
        >>> shift_to_the_left(array, 1, pad=False, trim=True)
        [1, 2, 3, 4, 5, 6, 7, 8, 9]
        >>> shift_to_the_left(array, 1, pad=False, trim=False)
        Warning, with pad=False and trim=False, no change applied.
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    if dist < 0:
        raise ValueError("Shift distance has to greater or equal than 0.")
    head = array[dist:] if trim else list(array)
    if pad:
        return head + [array[-1]] * dist
    if not trim:
        print("Warning, with pad=False and trim=False, no change applied.")
    return head
def generate_primes(n: int):
    """
    Return the first ``n`` primes (always at least [2]) by trial division.

    Only odd candidates are tested, and each candidate is divided only by
    primes up to sqrt(candidate): a larger prime factor would imply a
    smaller cofactor already found. The previous version scanned the whole
    prime list for every candidate, making it needlessly quadratic.
    """
    res = [2]
    curr = 3
    while len(res) < n:
        is_prime = True
        for p in res:
            if p * p > curr:
                break  # no divisor up to sqrt(curr) -> curr is prime
            if curr % p == 0:
                is_prime = False
                break
        if is_prime:
            res.append(curr)
        curr += 2
    return res
def sortsplit(array, size):
    """Split ``array`` into ``size`` lists in round-robin order.

    Args:
        array : list
            Items to be distributed into separate lists.
        size : int
            Number of lists to split the array into.

    Returns:
        list of lists, the j-th containing array[j], array[j+size], ...
    """
    # Extended slicing does the round-robin selection directly.
    return [list(array[j::size]) for j in range(size)]
def bubble_sort(A, show_progress=False):
    """
    Sort list A in place with bubble sort and return it.

    Repeatedly swaps adjacent out-of-order elements, stopping early once a
    full pass makes no swaps. Optionally prints the list before each swap
    and once at the end.
    """
    n = len(A)
    for i in range(n - 1):
        made_swap = False
        for j in range(n - i - 1):
            if A[j] > A[j + 1]:
                if show_progress:
                    print(A)
                A[j], A[j + 1] = A[j + 1], A[j]
                made_swap = True
        if not made_swap:
            break
    if show_progress:
        print(A)
    return A
def find_min(list_, i):
    """
    Return the index of the smallest item in list_[i:].

    @param list list_: list to search
    @param int i: index to search from
    @rtype: int

    >>> find_min([1, 2, 3], 1)
    1
    """
    # min() keeps the first index on ties, matching a strict `<` scan.
    return min(range(i, len(list_)), key=lambda j: list_[j])
def u_to_l_ratio(data):
    """
    Return the (add-one smoothed) ratio of upper- to lower-case characters.

    :param data: text content to be analysed
    :return: round((1 + #uppercase) / (1 + #lowercase), 4)
    """
    uppers = sum(1 for char in data if char.isupper())
    lowers = sum(1 for char in data if char.islower())
    return round((1 + uppers) / (1 + lowers), 4)
def zero_one_loss(f_x, y_true):
    """
    Zero-one loss for a linear discriminant output ``f_x`` and label
    ``y_true``: 0 when their signs agree (or the product is zero), else 1.
    """
    return 0 if f_x * y_true >= 0 else 1
def dms(dec):
"""converts decimal degree coordinates to a usgs station id
:param dec: latitude or longitude value in decimal degrees
:return: usgs id value
.. note:: https://help.waterdata.usgs.gov/faq/sites/do-station-numbers-have-any-particular-meaning
"""
DD = str(int(abs(dec)))
MM = str(int((abs(dec) - int(DD)) * 60)).zfill(2)
SS = str(int(round((((abs(dec) - int(DD)) * 60) - int(MM)) * 60, 0))).zfill(2)
if SS == '60':
MM = str(int(MM) + 1)
SS = '00'
if MM == '60':
DD = str(int(DD) + 1)
MM = '00'
return DD + MM + SS |
def get_events_per_second_api(replay, mods):
    """Gets coordinates and key pressed per second for API.

    Samples at most one [x, y, keys] event per elapsed second from a
    comma-separated replay stream whose events are pipe-separated
    "delta_ms|x|y|keys" records (presumably the osu! replay format --
    TODO confirm). When "HR" is among the mods, the y coordinate is
    mirrored as 384 - y.
    """
    events = []
    time = 0
    replay_events = replay.split(",")
    for event in replay_events:
        values = event.split("|")
        try:
            # values[0] is the time delta since the previous event.
            time += float(values[0])
        except ValueError:
            # Skip malformed/non-numeric records entirely.
            continue
        # Emit one event per whole second of accumulated replay time.
        if 1000*len(events) <= time:
            new_y = float(values[2]) if "HR" not in mods else 384-float(values[2])
            events.append([float(values[1]), new_y, float(values[3])])
    return events
def set_config_paths(config, private_config, builder_path, builder_private_path):
    """
    Prefix every config entry with the actual builder location, based on our
    continuum config. Returns (new_config, new_private_config); the inputs
    are left untouched.
    """
    new_config = {key: builder_path + "/" + value
                  for key, value in config.items()}
    new_private_config = {key: builder_private_path + "/" + value
                          for key, value in private_config.items()}
    return new_config, new_private_config
def sort_process_stats_rows(process_stats, column, top, reverse=True):
    """
    Return the ``top`` process rows sorted by the given column.

    Args:
        process_stats: A list of process statistic rows (indexable).
        column: An int column index to sort by.
        top: An int count of rows to keep; falsy means keep all rows.
        reverse: A boolean to reverse or not reverse the sorted list.

    Returns:
        A list of top processes sorted by the specified column.
    """
    limit = top if top else len(process_stats)
    ordered = sorted(process_stats, key=lambda row: row[column], reverse=reverse)
    return ordered[:limit]
def findRadius(houses, heaters):
    """
    Minimum heater radius so that every house is covered (LeetCode 475).

    :type houses: List[int]
    :type heaters: List[int]
    :rtype: int
    """
    import bisect
    heaters.sort()
    distances = []
    for house in houses:
        i = bisect.bisect(heaters, house)
        if i == 0:
            # House lies before the first heater. (The original lacked the
            # elif and fell through into the general branch too, appending a
            # bogus wrapped-index distance.)
            nearest = heaters[0] - house
        elif i == len(heaters):
            # House lies after the last heater (the original had the
            # operands reversed, producing a negative distance).
            nearest = house - heaters[-1]
        else:
            nearest = min(heaters[i] - house, house - heaters[i - 1])
        distances.append(nearest)
    # The radius must reach the worst-served house, so take the maximum of
    # the per-house nearest-heater distances (the original returned min).
    return max(distances)
def split_nonsyllabic_prob(string, onsets, codas):
    """
    Guess the onset/coda split point for a phone sequence with no syllabic
    segments.

    Parameters
    ----------
    string : iterable
        the phones to search through
    onsets : mapping
        possible onsets (tuple keys) mapped to probabilities; the ``None``
        key holds the fallback probability for unseen onsets
    codas : mapping
        possible codas, same layout as ``onsets``

    Returns
    -------
    int or None
        best guess for the index where the onset ends and the coda begins,
        or None for an empty input
    """
    if len(string) == 0:
        return None

    def _score(mapping, segment):
        # Unseen segments fall back to the smoothed value stored under None.
        return mapping[segment] if segment in mapping else mapping[None]

    best_split = None
    best_prob = -10000
    for split_point in range(len(string) + 1):
        onset = tuple(string[:split_point])
        coda = tuple(string[split_point:])
        prob = _score(onsets, onset) + _score(codas, coda)
        if prob > best_prob:
            best_prob = prob
            best_split = split_point
    return best_split
def strip_www_from_domain(domain):
    """Strip a leading 'www.' from a domain name.

    Args:
        domain: string with a full domain, eg. www.google.com

    Returns:
        string: Domain without any leading www, eg: google.com
    """
    prefix = 'www.'
    return domain[len(prefix):] if domain.startswith(prefix) else domain
def get_folder(experiment_id: int):
    """Return the folder name for the experiment with the given ID."""
    # Strict type check (deliberately rejects bool, unlike isinstance).
    assert type(experiment_id) == int
    return "experiment" + str(experiment_id)
def defuzz_data(dataset, change, cmpr):
    """ Remove crap entries according to cmpr's return.

    Walks the list and deletes any middle element for which
    ``cmpr(change, previous, current, next)`` is truthy; the list is
    mutated in place and also returned. After a deletion the index is
    rewound (by 5, or back to 1 near the start) so earlier neighbours are
    re-examined against their new successors.
    """
    index = 1
    # First and last elements are never candidates: cmpr needs both neighbours.
    while index < (len(dataset) - 1):
        pre = dataset[index - 1]
        cur = dataset[index]
        post = dataset[index + 1]
        if cmpr(change, pre, cur, post):
            del dataset[index]
            # Rewind so previously-accepted entries are rechecked against
            # their new neighbours; the magic 5/6 window size is presumably
            # tuned for the caller's data -- TODO confirm.
            if index > 6:
                index = index - 5
            else:
                index = 1
        index += 1
    return dataset
def anagram_prime(s1, s2):
    """Decide whether two strings are anagrams of each other.

    O(n) time, O(k) space for k distinct characters. Character-count
    comparison replaces the original prime-product trick, which only worked
    for lowercase a-z input: any other character indexed outside the
    26-entry prime table (IndexError) or aliased onto a wrong prime.
    For lowercase input the result is identical.
    """
    from collections import Counter
    return Counter(s1) == Counter(s2)
def _death_birth_i(state_old, state_new):
"""
Parameters
----------
state_old : dict or pd.Series
Dictionary or pd.Series with the keys "s", "i", and "r".
state_new : dict or pd.Series
Same type requirements as for the `state_old` argument in this function
apply.
Returns
-------
death_birth : bool
True if the event that occurred between `state_old` and `state_new` was
a death of an infected individual and a birth of a new susceptible
individual. False otherwise.
"""
return state_new["s"] == state_old["s"] + 1 and \
state_new["i"] == state_old["i"] - 1 and \
state_new["r"] == state_old["r"] |
def text_string(value, encoding='utf-8'):
    """Coerce a value that may be bytes or str into unicode text.

    Text (unicode) is left untouched; bytes are decoded with the given
    encoding. Useful for normalizing "native strings" to a consistently
    unicode value.
    """
    return value.decode(encoding) if isinstance(value, bytes) else value
def flatten(*args):
    """Recursively flattens a list containing other lists or
    single items into a list.

    Examples::

        >>> flatten()
        []
        >>> flatten(2)
        [2]
        >>> flatten(2, 3, 4)
        [2, 3, 4]
        >>> flatten([2, 3, 4])
        [2, 3, 4]
        >>> flatten([[2, 3], [4, 5], 6, [7, 8]])
        [2, 3, 4, 5, 6, 7, 8]
    """
    if not args:
        return []
    if len(args) > 1:
        # Several positional arguments: treat them as one list of items.
        return flatten(list(args))
    item = args[0]
    if hasattr(item, "__iter__") and not isinstance(item, str):
        return [element for sub in item for element in flatten(sub)]
    return [item]
def exp_len(exp):
    """Given an expression in tuple format, return the number of string atoms.

    :param exp: a string atom or an arbitrarily nested tuple of atoms
    """
    # isinstance is the idiomatic type test here (and, unlike
    # `type(exp) == str`, also treats str subclasses as atoms instead of
    # iterating over their characters).
    if isinstance(exp, str):
        return 1
    return sum(exp_len(sub) for sub in exp)
def _merge_dicts(user, default):
"""Merge corpus config with default config, letting user values override default values."""
if isinstance(user, dict) and isinstance(default, dict):
for k, v in default.items():
if k not in user:
user[k] = v
else:
user[k] = _merge_dicts(user[k], v)
return user |
def startstrip(string: str, part: str) -> str:
    """
    Remove ``part`` from the beginning of ``string`` when it is a prefix.

    Args:
        string (str): source string.
        part (str): prefix to remove.

    Returns:
        str: the string with the leading ``part`` removed, or unchanged.
    """
    return string[len(part):] if string.startswith(part) else string
def prep_next_item_dataset(
    k,
    item_sequence,
    user_negative_items,
    candidate_sample_prob=None,
):
    """Convert a raw user-item sequence into a next-item feature dict.

    The kth item from the end becomes the next item to be predicted and
    everything before it the input sequence. Typically `k` is 1 or 2
    depending on the data split_type (user/step); with validation and
    split_type=step, `k` is 3 for training, 2 for validation and 1 for
    testing.

    Args:
        k: Integer index denoting the kth item from the end.
        item_sequence: The complete user item history.
        user_negative_items: Negative item samples used for evaluation, or
            None to omit them.
        candidate_sample_prob: The candidate sampling probability, used to
            correct the sample bias of in-batch negatives; None to omit.

    Returns:
        A dict with 'user_item_sequence' and 'next_item', plus
        'user_negative_items' and 'candidate_sampling_probability' when the
        corresponding arguments are not None.
    """
    features = {
        'user_item_sequence': item_sequence[:-k],
        'next_item': item_sequence[-k],
    }
    if user_negative_items is not None:
        features['user_negative_items'] = user_negative_items
    if candidate_sample_prob is not None:
        features['candidate_sampling_probability'] = candidate_sample_prob
    return features
def chisquare(y1, y2, yerr):
    """
    Chi-square value for a model, given data points, model points and errors.

    @params
    y1 - data points
    y2 - model points
    yerr - data point errorbars
    """
    total = 0.0
    for index in range(len(y1)):
        residual = y1[index] - y2[index]
        total += residual ** 2 / yerr[index] ** 2
    return total
def _uuid_prefix(uuids, step=4, maxlen=32):
"""Get smallest multiple of `step` len prefix that gives unique values.
"""
full = set(uuids)
for n in range(step, maxlen, step):
prefixes = {u[:n] for u in uuids}
if len(prefixes) == len(full):
return n
return maxlen |
def to_float(s):
    """
    Convert a percentage string back into a float — essentially the inverse
    of to_percentage().

    Parameters
    ----------
    s: str
        e.g. "33.33%"

    Returns
    -------
    float
        e.g. 0.3333
    """
    without_percent_sign = s[:-1]
    return float(without_percent_sign) / 100
def freq_by_date(d, time_frame, bin_size):
    """
    Bin document frequencies by publication date.

    Takes a dictionary mapping document objects (anything with a ``date``
    attribute) to relative frequencies and groups the frequencies into
    ``bin_size``-year bins covering ``time_frame``.

    :param d: dictionary {document: frequency}
    :param time_frame: tuple (int start year, int end year) for the range of
        dates to return frequencies for
    :param bin_size: int number of years represented in each bin
    :return: dictionary {bin_start_year: [frequencies for documents in bin]}
    """
    start_year, end_year = time_frame
    data = {year: [] for year in range(start_year, end_year, bin_size)}
    for document, frequency in d.items():
        date = getattr(document, 'date', None)
        if date is None:
            continue
        bin_year = ((date - start_year) // bin_size) * bin_size + start_year
        # Dates outside the requested time frame have no bin; the original
        # raised KeyError here instead of skipping them.
        if bin_year in data:
            data[bin_year].append(frequency)
    return data
def str_is_float(value):
    """Test if a value can be parsed into a float.

    :returns: True or False
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string/non-numeric inputs such as None or
        # lists, which the original let propagate as a crash.
        return False
def unique(seq, idfun=repr):
    """
    Return the unique items of a sequence, keeping first occurrences.

    Uniqueness is judged on ``idfun(item)``, so unhashable items work with
    the default ``repr`` key.
    """
    seen = {}
    result = []
    for item in seq:
        key = idfun(item)
        if key not in seen:
            seen[key] = item
            result.append(item)
    return result
def tostring(array):
    """
    Join a 1D array into a comma-separated string.
    """
    return ",".join(map(str, array))
def update_topic(args_dict, topics, topic_data):
    """
    Invert the topics mapping, keeping only keys whose associated args_dict
    entry (looked up through topic_data) is not None.

    Returns {topic value: [keys that map to it]}.
    """
    updated_topics = {}
    for key, value in topics.items():
        if args_dict[topic_data[key]] is None:
            continue
        updated_topics.setdefault(value, []).append(key)
    return updated_topics
def replace_empty_string(sample):
    """Replace empty-string cell values (in place) with 'empty_string' to
    force type guessing to resolve the column as string."""
    def _fill(cell):
        # Mutates the cell object; rows keep the same cell instances.
        if cell.value == '':
            cell.value = 'empty_string'
        return cell

    return [[_fill(cell) for cell in row] for row in sample]
def get_url(entry):
    """ Return URL from the response if it was received, otherwise the
    requested URL. """
    try:
        response = entry["response"]
        return response["url"]
    except KeyError:
        return entry["request"]["url"]
def check_auth(username, password):
    """Check whether a username/password combination is valid.

    NOTE(review): credentials are hard-coded — acceptable for a demo,
    never for production.
    """
    expected = ('guest', 'password')
    return (username, password) == expected
def gen_Location(coordinate_str):
    """Generate a Location object."""
    return {
        "@type": "Location",
        "Location": coordinate_str,
    }
def find_domain_range(record):
    """Find the domain and range @id lists of a record in a schema.

    Returns a (domain ids, range ids) tuple; either list is empty when the
    corresponding key is absent.
    """
    def _ids(value):
        # A single node arrives as a dict, multiple nodes as a list of dicts.
        if isinstance(value, dict):
            return [value["@id"]]
        if isinstance(value, list):
            return [item["@id"] for item in value]
        return []

    domain = _ids(record.get("http://schema.org/domainIncludes"))
    range_ = _ids(record.get("http://schema.org/rangeIncludes"))
    return (domain, range_)
def c2_normal_to_sub_bn(key, model_keys):
    """
    Convert a BN running-stat parameter name to its Sub-BN equivalent when
    the target model stores running stats under "bn.split_bn.".
    Args:
        key (str): source parameter name.
        model_keys (iterable[str]): parameter names of the target model.
    Returns:
        str: the (possibly converted) parameter name.
    """
    if "bn.running_" in key and key not in model_keys:
        new_key = key.replace("bn.running_", "bn.split_bn.running_")
        if new_key in model_keys:
            return new_key
    # BUGFIX: the original fell off the end (returning None) for any key
    # that did not contain "bn.running_"; always return the key instead.
    return key
def assert_extension(path, extension):
    """
    Ensure *path* ends with *extension*, appending it when missing.
    :param path: the path to check (may be None).
    :param extension: the required suffix.
    :return: the adjusted path, or None when the input is None.
    """
    if path is None:
        return None
    return path if path.endswith(extension) else path + extension
def nextInteger(user_list):
    """Validate ``user_list`` against size/value limits.

    Raises Exception when any item lies outside [-1,000,000, 1,000,000]
    or the list is longer than 100,000 entries. Returns 1 when every
    item is negative; otherwise returns None (original behaviour kept).

    BUGFIX: removed the ``sys.exit()`` calls that followed each ``raise``
    and ``return`` — they were unreachable dead code, and ``sys`` was
    never imported, so reaching them would have raised NameError anyway.
    """
    # Minimum allowed item value.
    if any(x < -1000000 for x in user_list):
        raise Exception('Items exceeds the minimum integer size of -1,000,000.')
    # Maximum allowed item value.
    if any(x > 1000000 for x in user_list):
        raise Exception('Items exceeds the maximum integer size of 1,000,000.')
    # Maximum allowed list length.
    if len(user_list) > 100000:
        raise Exception('Items exceeds the maximum allowed length of 100,000.')
    # If every item is negative, the next positive integer is 1.
    if all(x < 0 for x in user_list):
        return 1
    # NOTE(review): lists containing non-negative items fall through and
    # return None, exactly as the original did.
    return None
def strip_and_split(line):
    """
    Strip surrounding whitespace from *line*, split it on whitespace,
    and strip each resulting token.
    :param line: input string
    :return: list of stripped tokens
    """
    return [token.strip() for token in line.strip().split()]
def seconds_to_timestamp(seconds):
    """Format a duration in seconds as an HH:MM:SS.mmm timestamp string."""
    remainder = float(seconds)
    mins, secs = divmod(remainder, 60)
    hrs, mins = divmod(mins, 60)
    return f"{int(hrs):02d}:{int(mins):02d}:{secs:06.3f}"
def _p(pp, name):
"""
make prefix-appended name
:param pp: prefix
:param name: name
:return: pp_name
"""
return '%s_%s' % (pp, name) |
def get_participant_ids(slots):
    """Collect entrant ids from a set's slots.

    There should be two ids for singles and four for doubles.
    """
    return [slot['entrant']['id'] for slot in slots]
def calculate_president_bop(data, votes):
    """
    Update a presidential balance-of-power dict in place with new votes.
    Adds *votes* to 'total' and decrements 'needed_for_majority',
    clamping it at zero.
    """
    data['total'] += votes
    data['needed_for_majority'] = max(0, data['needed_for_majority'] - votes)
    return data
def double_quoted(keywords):
    """
    Wrap *keywords* in double quotes.
    Usage:
        {% double_quoted str1 str2 as str_1_2 %}
    """
    return f'"{keywords}"'
def precipitable_water(ta, ea):
    """
    Estimate precipitable water from Prata (1996) :cite:`Prata:1996`.
    :param ta: air temperature
    :param ea: vapour pressure
    """
    return (4650 * ea) / ta
def compatible(cluster_a, value_a, cluster_b, value_b):
    """
    Check compatibility of two variable clusters: the value assignments
    must agree on every variable the clusters share.
    """
    # .index() keeps the original first-occurrence semantics for duplicates.
    shared = [node for node in cluster_a if node in cluster_b]
    for node in shared:
        if value_a[cluster_a.index(node)] != value_b[cluster_b.index(node)]:
            return False
    return True
def get_attr(attrs, key):
    """Follow a dotted *key* path through nested dicts/lists.

    Numeric path segments are treated as list indices. Missing keys or
    out-of-range indices raise naturally (deliberately unguarded).
    """
    node = attrs
    for segment in key.split('.'):
        index = int(segment) if segment.isdigit() else segment
        node = node[index]
    return node
def mean_inplace(tensor_1, tensor_2):
    """
    Average two 2-D tensors element-wise, writing the result into tensor_1.
    NOTE(review): per the original author, the in-place mutation breaks
    backpropagation, so this is currently unused.
    :param tensor_1: destination tensor (mutated and returned)
    :param tensor_2: second operand (read only)
    :return: tensor_1
    """
    # Index-based access is kept so a shape mismatch still raises.
    for r in range(len(tensor_1)):
        row = tensor_1[r]
        for c in range(len(row)):
            row[c] = (row[c] + tensor_2[r][c]) / 2
    return tensor_1
def has_license(lines):
    """Report whether the first two lines form an SPDX license header."""
    if len(lines) >= 2:
        first_ok = "SPDX-FileCopyrightText:" in lines[0]
        second_ok = "SPDX-License-Identifier:" in lines[1]
        return first_ok and second_ok
    return False
def build_relabel_dict(x):
    """Relabel input ids to continuous ids starting from zero.

    The new id follows the order of the given node id list (a duplicated
    id keeps its last position, as before).

    Parameters
    ----------
    x : list
        The input ids.

    Returns
    -------
    dict
        Mapping from old id to new id.
    """
    return {old_id: new_id for new_id, old_id in enumerate(x)}
def list_to_dict(node_list: list):
    """
    Map each list element to its successor.
    Each element becomes a key whose value is the next element in the
    list; the last element maps to None.
    Args:
        node_list (list): list of nodes
    Returns:
        dict: element -> successor mapping
    """
    if not node_list:
        return {}
    successors = list(node_list[1:]) + [None]
    return dict(zip(node_list, successors))
def get_game_ids(cursor, tournament=None, patch=None):
    """
    Query the connected db for game ids matching the given tournament
    and/or patch strings.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        tournament (string, optional): tournament id (e.g. "2017/EU/Summer_Split")
        patch (string, optional): patch id to filter for
    Returns:
        list(int): matching game ids, ordered by id ([] when no filter given)
    """
    filters = []
    params = []
    if tournament:
        filters.append("tournament=?")
        params.append(tournament)
    if patch:
        filters.append("patch=?")
        params.append(patch)
    if not filters:
        # No filter at all would select every game; refuse instead.
        return []
    query = "SELECT id FROM game WHERE %s ORDER BY id" % " AND ".join(filters)
    cursor.execute(query, tuple(params))
    return [row[0] for row in cursor.fetchall()]
def checkAnchorOverlap(xa, xb, ya, yb):
    """
    Check whether regions [xa, xb] and [ya, yb] on the same chromosome
    overlap (either region containing an endpoint of the other).
    """
    x_edge_in_y = (ya <= xa <= yb) or (ya <= xb <= yb) or (ya <= xa <= xb <= yb)
    y_edge_in_x = (xa <= ya <= xb) or (xa <= yb <= xb) or (xa <= ya <= yb <= xb)
    return x_edge_in_y or y_edge_in_x
def dirname(path):
    """
    Return *path* without its last '/'-separated component, like the
    directory part of a filesystem path ('' when there is no separator).

    BUGFIX: the original returned ``path[:-1]``, which only dropped the
    final *character* rather than the final component promised by the
    docstring.
    """
    head, _, _ = path.rpartition('/')
    return head
def find_ch_interest_dict(show_channel_dict : dict, usr_pref_dict : dict):
    """Aggregate user ratings per channel.

    Pass in show_channel_dict {show: channel} and usr_pref_dict
    {show: rating}; returns {channel: total rating}. Shows without a
    channel mapping are ignored.
    """
    totals = {}
    for show, rating in usr_pref_dict.items():
        if show not in show_channel_dict:
            continue
        channel = show_channel_dict[show]
        totals[channel] = totals.get(channel, 0) + rating
    return totals
def nf_type(cwl_type):
    """Convert a CWL variable type into its Nextflow equivalent."""
    return "file" if "File" in cwl_type else "var"
def follow_abspath(json, abspath):
    """Descend into an arbitrarily nested object (lists/dicts/tuples).

    abspath is a list of keys or indices followed in order.
    **Returns:** json[abspath[0]][abspath[1]]...[abspath[-1]]
    """
    node = json
    for step in abspath:
        node = node[step]
    return node
def programme_list_heading_should_be_rendered(programmes):
    """
    Used by programme_schedule_list.pug alone.

    Decide whether a programme-listing heading should be shown. The
    schedule data is [(start_time, incontinuity, programmes)] where
    `programmes` is a list of (programme, rowspan) pairs, and empty
    schedule cells appear as (None, None). A simple `if programmes`
    therefore cannot hide empty headings; the real condition — "at least
    one non-empty programme cell" — is computed here in Python because
    Django templates cannot embed it inline.
    """
    for programme, _rowspan in programmes:
        if programme:
            return True
    return False
def split_sentence(corpus: list):
    """Split each sentence of *corpus* on single spaces.

    :param corpus: list of sentence strings
    :return: two-dimensional list of words
    """
    return [sentence.split(' ') for sentence in corpus]
def _getMeasurementType1_1(domains):
"""Determine whether measurement results are for web or mail"""
measurementType = 'web'
status = 'failed'
for testDomain in domains:
if (testDomain['status'] == 'ok'):
for category in testDomain['categories']:
if (category['category'] == 'auth'):
measurementType = 'mail'
break
return measurementType |
def fix_url(url):
    """Prepend 'http://' to URLs that lack a scheme separator."""
    return url if '://' in url else 'http://' + url
def remover_repetidos(tpl):
    """Remove duplicate elements from a tuple, keeping first occurrences.

    Parameters:
        tpl (tuple): tuple to deduplicate
    Returns:
        tuple: tuple without repeated elements, original order preserved
    """
    # Accumulate in a list (membership test via ==, like the original),
    # then convert once at the end.
    seen = []
    for element in tpl:
        if element not in seen:
            seen.append(element)
    return tuple(seen)
def delete_segment(seq, start, end):
    """Return *seq* with the slice [start, end) removed."""
    head = seq[:start]
    tail = seq[end:]
    return head + tail
def get_currency_to_uah_nbu_exchange_rate(
    nbu_exchange_rates: list,
    currency_code: str
) -> dict:
    """Fetch the UAH exchange-rate block for one foreign currency from NBU data.

    Note:
        if NBU returned several blocks for a single currency, only the
        first block is returned.
    Docs:
        https://bank.gov.ua/ua/open-data/api-dev
    Args:
        nbu_exchange_rates: list - fetched list of currency rates
        currency_code: str - upper-case ISO 4217 currency code
    Returns:
        dict - exchange-rate information for the currency, or an empty
        dict when NBU returned no data for it
    Raises:
        ValueError: when nbu_exchange_rates is empty
    """
    if not nbu_exchange_rates:
        raise ValueError('nbu_exchange_rates param is empty')
    # First matching block, or {} when the currency is absent.
    return next(
        (item for item in nbu_exchange_rates if item['cc'] == currency_code),
        {},
    )
def parse_scaling(scaling_args):
    """Translate scaling requests into a dict.

    Each item has the form 'prefix=count[,blacklisted_index,...]' and
    becomes an entry {prefix + '0': (count, frozenset(blacklist))}.
    """
    result = {}
    for spec in (scaling_args or []):
        prefix, raw = spec.split('=')
        pieces = raw.split(',')
        count = int(pieces[0])
        blacklist = frozenset(int(piece) for piece in pieces[1:] if piece)
        result[prefix + '0'] = (count, blacklist)
    return result
def get_crashes_archive_name(cycle: int) -> str:
    """Return the crashes archive name for the given cycle (zero-padded)."""
    return f'crashes-{cycle:04d}.tar.gz'
def get_screenres(fallback=(1920, 1080)):
    """
    Return the screen resolution (width, height) in pixels.

    Falls back to *fallback* (default 1920x1080) when tkinter is not
    installed or no display can be opened.
    See http://stackoverflow.com/a/3949983 for info.
    """
    try:
        import tkinter as tk
    except ImportError:
        # No tkinter available at all.
        return fallback
    try:
        root = tk.Tk()
    except tk.TclError:
        # tkinter present but no usable display.
        return fallback
    width = root.winfo_screenwidth()
    height = root.winfo_screenheight()
    root.destroy()
    return (width, height)
def get_speed(message_fields):
    """Extract the speed in km/h from a message.as_dict()['fields'] object.

    The 'speed' field is preferred; 'enhanced_speed' is the fallback.
    The 3.6 factor converts the raw value (presumably m/s — matches the
    original) to km/h. Returns 0. when no usable value is found.
    Args:
        message_fields: a message.as_dict()['fields'] object (name 'record')
    Returns:
        float: speed in km/h, or 0. if not found
    """
    # First pass: the plain 'speed' field. The try wraps both the name
    # check and the multiplication, exactly as the original did, so a
    # None value (or non-dict field) is skipped silently.
    for field in message_fields:
        try:
            if field['name'] == 'speed':
                return 3.6 * field['value']
        except TypeError:
            pass
    # Second pass: 'enhanced_speed'; only the multiplication is guarded.
    for field in message_fields:
        if field['name'] == 'enhanced_speed':
            try:
                return 3.6 * field['value']
            except TypeError:
                pass
    return 0.
def remove_anchor(url: str) -> str:
    """
    Strip the '#fragment' part from *url*.

    A '#' at position 0 — or no '#' at all — leaves the URL unchanged
    (the original's `> 0` check is deliberately preserved).
    """
    cut = url.find('#')
    if cut > 0:
        return url[:cut]
    return url
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.