content stringlengths 42 6.51k |
|---|
def dict_value_add(dict1, dict2):
    """Merge two dicts, summing values that share a key (missing keys count as 0)."""
    merged = {}
    for mapping in (dict1, dict2):
        for key, value in mapping.items():
            merged[key] = merged.get(key, 0) + value
    return merged
def invite_more_women(arr: list) -> bool:
    """
    Decide whether Arthur must invite more women to balance the party.

    Each attendee is encoded as -1 (woman) or 1 (man), so the list's sum is
    positive exactly when men outnumber women.

    :param arr: genders of the attendees (-1 = woman, 1 = man)
    :return: True when more women are needed (men outnumber women),
        False otherwise — including an empty guest list.
    """
    # sum([]) == 0, so the empty-list case naturally yields False; the
    # original's separate `not arr or arr == []` guard was redundant.
    return sum(arr) > 0
def ClockUsToTimestamp(clock_us, reference_clock_us, reference_timestamp):
    """Convert a reported clock measurement (in us) into a Unix timestamp.

    Args:
      clock_us: Measured clock [us].
      reference_clock_us: Measured clock at a reference moment [us].
      reference_timestamp: Seconds after 1/1/1970 at the reference moment.

    Returns:
      Timestamp corresponding to clock_us in seconds after 1/1/1970.
    """
    elapsed_seconds = (clock_us - reference_clock_us) / 1.0e6
    return reference_timestamp + elapsed_seconds
def chunkify(sequence, chunk_size):
    """Split *sequence* into consecutive chunks of at most *chunk_size* items."""
    chunks = []
    for start in range(0, len(sequence), chunk_size):
        chunks.append(sequence[start:start + chunk_size])
    return chunks
def get_labels(voice_list, face_list):
    """ Take intersection between VoxCeleb1 and VGGFace1,
    and reorder pair with number starting from 0.

    Mutates the kept items in place: each gains a 'label_id' key.

    :param voice_list: list of dicts, each with at least a 'name' key
    :param face_list: list of dicts, each with at least a 'name' key
    :return: (voice_list, face_list, num_labels, voice_dict, face_dict);
        x_dict format:
    { (int) label_id : {'filepath': (str) filepath, 'name': (str) celeb_name, 'label_id': (int) label_id},
    ...}
    """
    # Identities usable for pairing are those present in both datasets.
    voice_names = {item['name'] for item in voice_list}
    face_names = {item['name'] for item in face_list}
    names = voice_names & face_names # s.intersection(t) ==> s & t
    # Drop entries whose identity is missing from either list.
    voice_list = [item for item in voice_list if item['name'] in names]
    face_list = [item for item in face_list if item['name'] in names]
    # Deterministic numbering: sort names, then label 0..len(names)-1.
    names = sorted(list(names))
    label_dict = dict(zip(names, range(len(names))))
    voice_dict = {}
    face_dict = {}
    for item in voice_list:
        identity = label_dict[item['name']]
        item['label_id'] = identity  # in-place mutation of the caller's dicts
        # NOTE(review): when several items share a name, only the last one
        # survives in voice_dict — confirm this is intended.
        voice_dict[identity] = item
    for item in face_list:
        identity = label_dict[item['name']]
        item['label_id'] = identity
        face_dict[identity] = item
    return voice_list, face_list, len(names), voice_dict, face_dict
def is_blank(line):
    """
    Return True iff *line* is empty or contains only whitespace.
    """
    return not line.strip()
def subfolders_in(whole_path):
    """
    Returns all subfolders in a path, in order
    >>> subfolders_in('/')
    ['/']
    >>> subfolders_in('/this/is/a/path')
    ['/this', '/this/is', '/this/is/a', '/this/is/a/path']
    >>> subfolders_in('this/is/a/path')
    ['this', 'this/is', 'this/is/a', 'this/is/a/path']
    """
    fragments = whole_path.lstrip('/').split('/')
    # Re-attach the root slash to the first fragment for absolute paths.
    if whole_path.startswith('/'):
        fragments[0] = '/' + fragments[0]
    result = []
    current = ''
    for fragment in fragments:
        current = fragment if not result else current + '/' + fragment
        result.append(current)
    return result
def keyExtract(array, key):
    """Returns values of specific key from list of dicts.

    Args:
        array (list): List of dicts to be processed.
        key (str): Key to extract.

    Returns:
        list: Extracted values, in input order.
    """
    return [element[key] for element in array]
def get_expert_output(expert_preds, free_expert_vals):
    """
    Select the expert values to use: the free expert values (with a batch
    dimension prepended) when provided, otherwise the expert predictions.

    :param expert_preds: (B x nexperts x max_forecast_steps) or []
    :param free_expert_vals: (nexperts x max_forecast_steps) tensor or None
    :return: expert_vals. Size: (* x nexperts x max_forecast_steps)
    :meta private:
    """
    if free_expert_vals is None:
        return expert_preds
    return free_expert_vals.unsqueeze(0)
def Get_nongap_uppstream(topo, begin_TM):#{{{
    """
    Get the first non gap state upstream
    Input:
        topo      topology sequence of the protein
        begin_TM  sequence position at the beginning of the TM helix
    Output:
        state     first non-gap character at or before begin_TM, or '' if
                  everything upstream is gaps
    """
    for pos in range(begin_TM, -1, -1):
        if topo[pos] != '-':
            return topo[pos]
    return ''
def isdigit(char):
    """Return True iff *char* is a digit character."""
    return str.isdigit(char)
def get_x_bits(num: int, max_bits: int, num_bits: int, right_bits: bool = True) -> int:
    """Zero-pad *num* to *max_bits* binary digits and extract *num_bits* of them.

    :param num: non-negative integer to slice bits from
    :param max_bits: total width the binary representation is padded to
    :param num_bits: how many bits to extract
    :param right_bits: take the lowest bits when True, the highest otherwise
    :return: the extracted bits as an integer
    """
    # format(num, "b") replaces the fragile bin(num).lstrip("0b"): lstrip
    # strips any run of '0'/'b' characters from the left, not the literal
    # "0b" prefix, which breaks e.g. for negative inputs.
    bits = format(num, "b").zfill(max_bits)
    if right_bits:
        return int(bits[-num_bits:], 2)
    return int(bits[:num_bits], 2)
def convert_command(input_filename, output_filename,
                    vcodec='libx264'):
    """Build the ffmpeg argv that converts *input_filename* to H.264."""
    return [
        'ffmpeg',
        '-i', input_filename,
        '-vcodec', vcodec,
        output_filename,
        '-loglevel', 'error',
    ]
def neg_poly(p):
    """Return a new polynomial whose monomial coefficients are all negated.

    Each monomial is copied via its .copy() method, so the input is left
    untouched.
    """
    negated = []
    for monomial in p:
        clone = monomial.copy()
        clone.c = -clone.c
        negated.append(clone)
    return negated
def format_time(t):
    """Return human-readable interval of time.
    Assumes t is in units of seconds.
    """
    whole_minutes = int(t / 60)
    remainder = t % 60
    parts = []
    if whole_minutes > 0:
        plural = "" if whole_minutes == 1 else "s"
        parts.append("%d minute%s " % (whole_minutes, plural))
    parts.append("%.3f seconds" % remainder)
    return "".join(parts)
def _filter_relevant_datasets(datasets, load_columns):
"""
Filter datasets so only ones that actually load columns are left.
Parameters
----------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
Datasets to filter.
load_columns: Dict[str, Set[str]]
Columns to load.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
Filtered datasets.
"""
which = set(load_columns.keys())
return {
ktk_cube_dataset_id: ds
for ktk_cube_dataset_id, ds in datasets.items()
if ktk_cube_dataset_id in which
} |
def extract_label(predictions):
    """
    Extract the predicted label prefix from each prediction string.

    :param predictions: list of Strings / complete predicted output
    :return: list of Strings / only labels ('wrong label' when no known
        label prefixes the prediction)
    """
    known_labels = ('correct', 'incorrect', 'partially correct')
    labels = []
    for prediction in predictions:
        for label in known_labels:
            if prediction.startswith(label):
                labels.append(label)
                break
        else:
            labels.append('wrong label')
    return labels
def rot(a, n):
    """ Rotate sequence *a* left by *n* positions (a <<< n). """
    head, tail = a[:n], a[n:]
    return tail + head
def sum_of_minimums(numbers: list) -> int:
    """
    Return the sum over all rows of each row's minimum value.
    """
    total = 0
    for row in numbers:
        total += min(row)
    return total
def convertVoltage(raw_voltage):
    """ Ground is 1
        1.8 is 4095

    Scale a raw ADC reading (0..4095) to volts and format to 3 decimals.
    """
    return "{:.3f}".format((raw_voltage / 4095) * 1.8)
def null_condition_attribute(obj, attribute):
    """
    Null-safe attribute access: return ``getattr(obj, attribute)``.

    Args:
        obj: Any object. (The original docstring described a dict/key
            lookup, but the code reads an *attribute* via getattr.)
        attribute (:obj:`str`): Attribute name to read from obj.

    Returns:
        The attribute value, or None when obj is None.
    """
    if obj is None:
        return None
    return getattr(obj, attribute)
def getBoundsOverlap(bb1, bb2):
    """Return the intersection (minX, minY, maxX, maxY) of two bounding boxes."""
    x0a, y0a, x1a, y1a = bb1
    x0b, y0b, x1b, y1b = bb2
    return (max(x0a, x0b), max(y0a, y0b), min(x1a, x1b), min(y1a, y1b))
def compare_flavors(flavor_item):
    """
    Helper function for sorting flavors.
    Sorting order: Flavors with lower resources first.
    Resource importance order: GPUs, CPUs, RAM, Disk, Ephemeral.

    The GPU count is stored as a string property; it is converted to int so
    that e.g. "10" sorts after "2" (plain string comparison ordered them
    the other way around).

    :param flavor_item: flavor dictionary
    :return: tuple usable as a sort key
    """
    gpu_count = int(flavor_item.get("Properties", {}).get("Accelerator:Number", "0"))
    return (
        gpu_count,
        flavor_item.get("VCPUs"),
        flavor_item.get("RAM"),
        flavor_item.get("Disk"),
        flavor_item.get("Ephemeral"),
    )
def get_item_properties(item, columns):
    """Get properties specified in columns, with preserved order.
    Required for correct cli table generation

    :param item: dict
    :param columns: list with arbitrary keys (missing keys yield '')
    """
    return [item.get(column, '') for column in columns]
def flatten_routes(routes):
    """Flatten the grouped routes into a single list of routes.

    Arguments:
        routes {list} -- possibly multi-dimensional list of routes.

    Returns:
        list -- the flattened list.
    """
    flattened = []
    for entry in routes:
        if isinstance(entry, list):
            # recurse into nested groups
            flattened.extend(flatten_routes(entry))
        else:
            flattened.append(entry)
    return flattened
def utf8_bytes(text):
    """
    Coerce *text* to UTF-8 bytes; bytes input is returned unchanged.

    :param text: strings or bytes.
    :return: a bytes object.
    """
    if isinstance(text, bytes):
        return text
    return text.encode('utf-8')
def NPL_indicator(row):
    """
    Map a numeric NPL value onto one of five indicator labels.
    """
    if row < 37:
        return "Excellent"
    if row <= 48:
        return "Good"
    if row < 61:
        return "Fair"
    if row < 93:
        return "Poor"
    return "Hazard"
def collapse_sided_value(value):
    """Inverse of `expand_sided_value`: return the most compact
    representation of a four-sided (CSS-like) value."""
    if not isinstance(value, (tuple, list)):
        return value
    size = len(value)
    if size == 1:
        return value[0]
    if size == 2:
        return value[0] if value[0] == value[1] else tuple(value)
    if size == 3:
        # (a, b, a) collapses to (a, b)
        return collapse_sided_value(value[0:2]) if value[0] == value[2] else tuple(value)
    if size == 4:
        # (a, b, c, b) collapses to (a, b, c)
        return collapse_sided_value(value[0:3]) if value[1] == value[3] else tuple(value)
    raise ValueError('Invalid expanded four-sided value: %r' % value)
def serialize_uint256(n: int) -> bytes:
    """
    Serialize unsigned integer *n* as 32 big-endian bytes (256 bits).

    Corresponds directly to the "ser_256(p)" function in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param n: The integer to be serialized.
    :return: A byte sequence containing the serialization of ``n``.
    """
    return n.to_bytes(length=32, byteorder='big')
def brensenham_line(x, y, x2, y2):
    """Modified to draw hex sides in HexCheckImage.
    Assumes dy > dx, x>x2 and y2>y which is always the case for what it's
    being used for.

    Returns the list of (x, y) integer points from (x, y) to (x2, y2),
    endpoint included.
    """
    coords = list()
    dx = abs(x2 - x)
    dy = abs(y2 - y)
    # Bresenham decision variable; y is the major axis per the precondition.
    d = (2 * dx) - dy
    for i in range(0, dy):
        coords.append((x, y))
        # Step x leftwards while the accumulated error says the ideal line
        # has crossed into the next column (x decreases since x > x2).
        while d >= 0:
            x -= 1
            d -= (2 * dy)
        y += 1
        d += (2 * dx)
    # Always finish exactly on the requested endpoint.
    coords.append((x2, y2))
    return coords
def get_volume_of_runoff(runoff, cell_count, cell_resolution):
    """
    Calculate the volume of runoff over the entire modeled area

    Args:
        runoff (number): Q from TR55, averaged amount of runoff in inches
            per cell over a number of cells.
        cell_count (integer): The number of cells included in the area
        cell_resolution (number): The size in square meters that a cell
            represents

    Returns:
        The volume of runoff in liters over the total area of interest
    """
    INCH_TO_METER = 0.0254
    CUBIC_METERS_TO_LITERS = 1000
    # inches -> meters, aggregate over cells, then scale by cell area (m^2)
    runoff_m = runoff * INCH_TO_METER
    meter_cells = runoff_m * cell_count
    volume_cubic_meters = meter_cells * cell_resolution
    return volume_cubic_meters * CUBIC_METERS_TO_LITERS
def compare_and_get_name(a, b):
    """
    If both a & b have a name attribute and they are equal, return the
    common name; if they differ, return None. If only one of them has a
    name, return that one. Otherwise return None.

    Parameters
    ----------
    a : object
    b : object

    Returns
    -------
    name : str or None
    """
    _missing = object()  # sentinel: a real name may legitimately be None
    a_name = getattr(a, "name", _missing)
    b_name = getattr(b, "name", _missing)
    if a_name is not _missing and b_name is not _missing:
        return a_name if a_name == b_name else None
    if a_name is not _missing:
        return a_name
    if b_name is not _missing:
        return b_name
    return None
def classify(op):
    """
    Given an operation name, decide whether it is a constructor or
    an annihilator. The convention is that constructors are operations
    starting with 'c', and all other operations are annihilators.
    >>> classify("c")
    'C'
    >>> classify("c2df")
    'C'
    >>> classify("a")
    'A'
    >>> classify("h")
    'A'
    """
    # Compare with '==': 'is' tests object identity and only happened to
    # work because CPython interns one-character strings (emits a
    # SyntaxWarning on modern Python).
    if op[0] == 'c':
        return 'C'
    return 'A'
def OverrideToImplementCustomLogic(obj):
    """Users should override this in their sub-classes to implement custom logic.
    Used in Trainer and Policy to tag methods that need overriding, e.g.
    `Policy.loss()`.

    Acts as a marker decorator: it only tags the callable with an attribute
    and returns it unchanged.

    Examples:
        >>> from ray.rllib.policy.torch_policy import TorchPolicy
        >>> @overrides(TorchPolicy) # doctest: +SKIP
        ... @OverrideToImplementCustomLogic # doctest: +SKIP
        ... def loss(self, ...): # doctest: +SKIP
        ...     # implement custom loss function here ...
        ...     # ... w/o calling the corresponding `super().loss()` method.
        ...     ... # doctest: +SKIP
    """
    # Marker attribute presumably read elsewhere by the framework; the
    # "overriden" spelling is part of the attribute name and must not be
    # "corrected" here.
    obj.__is_overriden__ = False
    return obj
def stream_copy(read, write, size, chunk_size):
    """
    Copy a stream up to size bytes using the provided read and write methods,
    in chunks of chunk_size

    :param read: callable(n) -> data, e.g. a file's read method
    :param write: callable(data); its return value is ignored (see NOTE below)
    :param size: total number of bytes to copy at most
    :param chunk_size: maximum number of bytes requested per read call
    :return: number of bytes actually written
    :note: its much like stream_copy utility, but operates just using methods"""
    dbw = 0                                             # num data bytes written
    # WRITE ALL DATA UP TO SIZE
    while True:
        # never request more than the bytes still missing
        cs = min(chunk_size, size-dbw)
        # NOTE: not all write methods return the amount of written bytes, like
        # mmap.write. Its bad, but we just deal with it ... perhaps its not
        # even less efficient
        # data_len = write(read(cs))
        # dbw += data_len
        data = read(cs)
        data_len = len(data)
        dbw += data_len
        write(data)
        # stop on a short read (stream exhausted) or once size is reached
        if data_len < cs or dbw == size:
            break
        # END check for stream end
    # END duplicate data
    return dbw
def identity(size):
    """
    @brief Return the size x size identity matrix as nested lists.
    """
    assert size > 0, "A size should be > 0"
    return [[1 if row == col else 0 for col in range(size)] for row in range(size)]
def Number_Pad(Number):
    """Format Dollars amounts to strings & Pad Right 10 Spaces"""
    # single format spec: thousands separator + right-align in width 10
    return "{:>10,}".format(Number)
def humantime(timedelta):
    """Converts time durations to human readable time"""
    remaining = int(timedelta)
    years, remaining = divmod(remaining, 3600 * 24 * 365)
    days, remaining = divmod(remaining, 3600 * 24)
    hours, remaining = divmod(remaining, 3600)
    minutes, seconds = divmod(remaining, 60)
    # report from the largest non-zero unit downwards
    if years > 0:
        return "%dy, %dd, %dh, %dm, %ds" % (years, days, hours, minutes,
                                            seconds)
    if days > 0:
        return "%dd, %dh, %dm, %ds" % (days, hours, minutes, seconds)
    if hours > 0:
        return "%dh, %dm, %ds" % (hours, minutes, seconds)
    if minutes > 0:
        return "%dm, %ds" % (minutes, seconds)
    return "%ds" % seconds
def expand_places_from_index(placename_list, idx):
    """Collect all entries adjacent to *idx* whose name matches entry *idx*.

    Searches up and down from the index, gathering places with the same
    name (element 0 of each entry).

    :param placename_list: list of (name, ...) tuples, grouped by name
    :param idx: index of the seed entry, or -1 for "not found"
    :return: list of matching entries (seed first), or [] when idx == -1
    """
    if idx == -1:
        return []
    # Find and save the initial value
    word = placename_list[idx][0]
    matched_places = [placename_list[idx]]
    # Walk towards the start. Bounds are checked BEFORE indexing: the
    # original read placename_list[-1] (silent wrap-around) and never
    # examined index 0.
    i = idx - 1
    while i >= 0 and placename_list[i][0] == word:
        matched_places.append(placename_list[i])
        i -= 1
    # Walk towards the end. The original could raise IndexError by reading
    # placename_list[len(placename_list)].
    i = idx + 1
    length = len(placename_list)
    while i < length and placename_list[i][0] == word:
        matched_places.append(placename_list[i])
        i += 1
    return matched_places
def get_app_instances_ids(instances):
    """
    Accepts a dictionary of id: AppInstance and returns a set of its keys.
    """
    return {instance_id for instance_id in instances}
def parse_numbers(numbers, dtype=float, sep=','):
    """Return list of numbers parsed from a *sep*-separated string."""
    if not numbers:
        return []
    tokens = numbers.split(sep)
    try:
        return [dtype(token) for token in tokens]
    except Exception as exc:
        raise ValueError(f"not a '{sep}' separated list of numbers") from exc
def to_string(todos):
    """Convert a list of todos to a newline-joined string.

    :param list todos: List of :class:`todotxtio.Todo` objects
    :rtype: str
    """
    return '\n'.join(map(str, todos))
def cut_lines(info_str, start, end):
    """Cut a number of lines from the start and the end.

    Args:
        info_str (str): command output from arcconf
        start (int): number of lines dropped from the start
        end (int): number of lines dropped from the end

    Returns:
        str: info_str without the cut lines
    """
    lines = info_str.split('\n')
    # Compute the stop index explicitly: the original's `end * -1` slice
    # produced [start:0] for end == 0 and discarded everything.
    stop = max(len(lines) - end, 0)
    return '\n'.join(lines[start:stop])
def convertPressureToPascals(mpressure):
    """
    Convert pressure given in kg/cm2 to Pascals
    """
    KGCM2_TO_PASCAL = 98066.5
    return mpressure * KGCM2_TO_PASCAL
def compute_xi_t(return_t, risk_free_rate, sigma_t):
    """
    Compute innovation xi at time t as a function of return t, rf rate, and sigma t:
    xi_t = return_t - risk_free_rate + sigma_t / 2

    NOTE(review): innovations of this form are usually written with the
    variance term, (sigma_t ** 2) / 2; here sigma_t enters linearly —
    confirm whether sigma_t already denotes a variance.
    """
    return return_t - risk_free_rate + 1/2 * sigma_t
def sum_dicts_values(dict1, dict2):
    """Sum the integer values of two dictionaries key by key.

    Keys come from dict1; when dict2 is missing a key (or its value cannot
    be converted to int) only dict1's value is used.

    :param dict1: primary dictionary (defines the result's keys)
    :param dict2: secondary dictionary whose values are added when present
    :return: dict mapping each key of dict1 to the summed int value
    """
    result = {}  # renamed from `dict`, which shadowed the builtin
    for key in dict1:
        try:
            result[key] = int(dict1[key]) + int(dict2[key])
        except (KeyError, TypeError, ValueError):
            # missing or non-numeric value in dict2: fall back to dict1 alone
            # (narrowed from the original bare `except`, which hid real bugs)
            result[key] = int(dict1[key])
    return result
def merge(L1, L2):
    """(list, list) -> list
    Merge sorted lists L1 and L2 into a new list and return that new list.
    >>> merge([1, 3, 4, 6],[1, 2, 5, 7])
    [1, 1, 2, 3, 4, 5, 6, 7]
    """
    merged = []
    a = b = 0
    len1, len2 = len(L1), len(L2)
    while a < len1 and b < len2:
        # ties favor L1 to keep the merge stable
        if L1[a] <= L2[b]:
            merged.append(L1[a])
            a += 1
        else:
            merged.append(L2[b])
            b += 1
    # at most one of these is non-empty
    merged += L1[a:]
    merged += L2[b:]
    return merged
def str_2_list(del_str, fld_del):
    """Function: str_2_list

    Description: Converts a delimited string into a list of fields.

    Arguments:
        (input) del_str -> Delimited string.
        (input) fld_del -> Field delimiter.
        (output) List of values from the string.
    """
    return str.split(del_str, fld_del)
def jobparams_postfiltering(value, exclusions=None):
    """
    Perform post-filtering of raw job parameters.

    Each placeholder key from the pre-filtering exclusion dictionary is
    substituted back with its original value.

    :param value: job parameters (string).
    :param exclusions: exclusion dictionary from the pre-filtering function
        (dictionary or None). The old mutable default ``exclusions={}`` is
        replaced with the None sentinel idiom; behavior is unchanged.
    :return: updated job parameters (string).
    """
    if exclusions is None:
        exclusions = {}
    for placeholder, original in exclusions.items():
        value = value.replace(placeholder, original)
    return value
def normalize(name):
    """
    Normalizes text from a Wikipedia title/segment by capitalizing the
    first letter (lowercasing the rest), replacing underscores with
    spaces, and stripping leading/trailing whitespace.

    NOTE(review): the original description claimed internal runs of spaces
    are collapsed to one space, but the code does not do that — confirm
    whether collapsing is required.

    :Parameters:
        name : string
            Namespace or title portion of a Wikipedia page name.

    :Return:
        string Normalized text
    """
    return name.capitalize().replace("_", " ").strip()
def scale_factor(redshift):
    """
    Calculates the scale factor, a, at a given redshift.

    a = (1 + z)**-1

    Parameters
    ----------
    redshift: array-like
        The redshift values.

    Returns
    -------
    a: array-like
        The scale factor at the given redshift.

    Examples
    --------
    >>> scale_factor(1)
    0.5
    """
    return (1 + redshift) ** -1.0
def trans_rot_affine(matrix, u_vec, v_vec):
    """
    Apply a 2x3 affine (rotation + translation) matrix to point (u, v).

    Args:
        matrix (matrix): 2x3 affine transformation matrix
        u_vec (float): x coordinate
        v_vec (float): y coordinate

    Returns:
        tuple[int, int]: transformed (x, y), truncated to ints
    """
    row_x, row_y = matrix[0], matrix[1]
    x_out = u_vec * row_x[0] + v_vec * row_x[1] + row_x[2]
    y_out = u_vec * row_y[0] + v_vec * row_y[1] + row_y[2]
    return int(x_out), int(y_out)
def get_rundir_name(d):
    """
    Helper method to construct the result sub-directory name based on the
    experiment parameters.

    :param d: dictionary of experiment parameters (flat, dot-separated keys)
    :return: sub-directory name string,
        "<env>/lam<lambda>s<seed>_<hydra-timestamp>"
    """
    env_str = str(d['env.kwargs.env_name'])
    # NOTE(review): lr_fl/lr_str are computed but never used in the returned
    # name — confirm whether the learning rate was meant to appear in it.
    lr_fl = float(d['algo.kwargs.optim_kwargs.lr'])
    lr_str = str(lr_fl - int(lr_fl))[2:7]
    if d['algo'] == 'lsf_dqn':
        lamb_fl = float(d['algo.kwargs.sf_lambda'])
        if lamb_fl < 1.0:
            # two digits of the fractional part, e.g. 0.95 -> "95"
            lamb_str = str(lamb_fl - int(lamb_fl))[2:4]
        else:
            lamb_str = '10'
    else:
        lamb_str = 'None'
    seed_int = int(d['training.seed'])
    seed_str = str(seed_int)
    # literal hydra interpolation string, resolved later by hydra itself
    hydra_time_str = '${now:%Y%m%d%H%M%S}'
    return f'{env_str}/lam{lamb_str}s{seed_str}_{hydra_time_str}'
def extractPBestPos(particleList):
    """
    Returns the pBestPos of all particles in particleList as a list.

    Parameters:
        particleList (list): A list of Particle objects.

    Returns:
        list: pBestPos of each Particle, in input order.
    """
    positions = []
    for particle in particleList:
        positions.append(particle.pBestPos)
    return positions
def to_camel_case(snake_case):
    """Convert a snake_case string into a camelCase one.

    Parameters
    -----------
    snake_case : str
        Snake-cased string (e.g., "snake_cased") to be converted to
        camel-case (e.g., "snakeCased")
    """
    pieces = []
    upper_next = False
    for ch in snake_case:
        if ch == '_':
            # the underscore itself is dropped; it only flags the next char
            upper_next = True
        else:
            pieces.append(ch.upper() if upper_next else ch)
            upper_next = False
    return ''.join(pieces)
def _compute_colocation_summary_from_dict(name, colocation_dict, prefix=""):
    """Return a summary of an op's colocation stack.

    Args:
      name: The op name.
      colocation_dict: The op._colocation_dict, mapping colocation op names
        to location objects exposing .filename and .lineno.
      prefix: An optional string prefix used before each line of the multi-
        line string returned by this function.

    Returns:
      A multi-line string similar to:
          Node-device colocations active during op creation:
            with tf.compat.v1.colocate_with(test_node_1): <test_1.py:27>
            with tf.compat.v1.colocate_with(test_node_2): <test_2.py:38>
      The first line will have no padding to its left by default. Subsequent
      lines will have two spaces of left-padding. Use the prefix argument
      to increase indentation.
    """
    # Empty dict: report that nothing was active rather than an empty list.
    if not colocation_dict:
        message = "No node-device colocations were active during op '%s' creation."
        message %= name
        return prefix + message
    str_list = []
    str_list.append("%sNode-device colocations active during op '%s' creation:" %
                    (prefix, name))
    # One "with tf.colocate_with(...)" line per entry, tagged with the
    # source location that created the colocation.
    for coloc_name, location in colocation_dict.items():
        location_summary = "<{file}:{line}>".format(
            file=location.filename, line=location.lineno)
        subs = {
            "prefix": prefix,
            "indent": " ",
            "name": coloc_name,
            "loc": location_summary,
        }
        str_list.append(
            "{prefix}{indent}with tf.colocate_with({name}): {loc}".format(**subs))
    return "\n".join(str_list)
def image_round(image):
    """
    :param image: a grayscale image represented as a list of list of floats
    :return: corresponding image as a list of lists of integers: each float
        is rounded, and any value whose absolute value reaches 255 is
        replaced by 255

    NOTE(review): despite the original wording ("taking their absolute
    values"), negative values below the threshold keep their sign — e.g.
    -10.4 -> -10, not 10. Confirm which behavior is intended.

    >>> from resources.image import file2image, color2gray, image2display
    >>> image = color2gray(file2image("../../resources/images/Dali.png"))
    >>> dictdict = forward2d(image)
    >>> dictdict_suppressed = suppress2d(dictdict, 1024)
    >>> image2display(image_round(backward2d(dictdict_suppressed)))
    """
    # round each pixel; clamp to 255 when |rounded value| >= 255
    threshold = 255
    return [[int(round(e)) if abs(int(round(e))) < threshold else threshold for e in image[n]] for n in range(len(image))]
def transformDiffCost(criterion, frRow, exRow):
    """Return the absolute difference between the two rows' images under
    the criterion's 'transform' dict, normalized by the largest transform
    value."""
    transform = criterion['transform']
    label = criterion['QLabel']
    diff = transform[frRow[label]] - transform[exRow[label]]
    return abs(diff) / max(transform.values())
def calculate_mean(instance_count: int, items: list) -> float:
    """
    Calculate the mean of a class's items.

    :param instance_count: Number of instances in the class
    :param items: items that belong to the class (data grouping)
    :return: the mean of the class's items
    """
    total = sum(items)
    return total / instance_count
def make_feature_collection(data):
    """Wrap *data* (a list of features) in a GeoJSON-style FeatureCollection dict."""
    collection = {"type": "FeatureCollection"}
    collection["features"] = data
    return collection
def GetProblemIoSetByName(problem, io_set_name):
    """Get a problem's I/O set given the set's name.

    (The original summary line — "Get a problem's index given its key and a
    problem list" — described a different function.)

    Args:
      problem: Problem whose I/O set must be retrieved; a mapping with
        'io_set_name_to_index' and 'io_sets' entries.
      io_set_name: String with the name of the I/O set to retrieve.

    Returns:
      The problem I/O set with the specified name, or None if no I/O set with that
      name is found.
    """
    io_set_index = problem['io_set_name_to_index'].get(io_set_name)
    if io_set_index is None:
        return None
    return problem['io_sets'][io_set_index]
def to_discord_description_safe(text: str) -> str:
    """Truncate *text* to the 1024-character limit discord accepts as a
    channel description."""
    limit = 1024
    return text[:limit]
def format_instances(instances, features):
    """
    Convert a list of instances into a (header, datarows) pair.

    `header` is just `features`, e.g. ['username', 'email'].
    `datarows` is a list of lists, one sublist per instance, holding
    getattr(instance, feature) for each feature in order.

    Returns header and datarows, formatted for input in create_csv_response.
    """
    rows = []
    for instance in instances:
        rows.append([getattr(instance, feature) for feature in features])
    return features, rows
def lin_comb(clist, vlist):
    """
    Compute a linear combination.

    :param clist: X len list of scalars
    :param vlist: X len list of vectors all of domain D
    :return: D domain vector: the sum of clist[i] * vlist[i]
    """
    total = 0
    for scalar, vector in zip(clist, vlist):
        total = total + scalar * vector
    return total
def amigos(x, y):
    """
    amigos(x: int, y: int) -> bool

    True when x and y are amicable numbers: the sum of the proper divisors
    of each equals the other.

    Parameters
    ----------
    x : int
        Un numero entero.
    y : int
        Un numero entero.

    Returns
    -------
    output : bool
        True if they are amicable, False otherwise.

    Examples
    --------
    >>> amigos(220,284)
    True
    >>> amigos(6,5)
    False
    """
    def _proper_divisor_sum(num):
        # The original tested divisibility with `num / i == int(num / i)`,
        # which breaks for large ints due to float rounding; `%` is exact.
        return sum(i for i in range(1, num) if num % i == 0)

    return _proper_divisor_sum(x) == y and _proper_divisor_sum(y) == x
def icon(name=None, class_name="icon", title=None, wrapped=False):
    """
    Abstracts away the actual icon implementation.

    Usage:
        {% load wagtailadmin_tags %}
        ...
        {% icon name="cogs" class_name="icon--red" title="Settings" %}

    :param name: the icon name/id, required (string)
    :param class_name: default 'icon' (string)
    :param title: accessible label intended for screen readers (string)
    :return: Rendered template snippet (string)
    """
    if name:
        return {"name": name, "class_name": class_name, "title": title, "wrapped": wrapped}
    raise ValueError("You must supply an icon name")
def floor(x) -> int:
    """
    Return the floor of x as an Integral.

    :param x: the number
    :return: the largest integer <= x.

    >>> import math
    >>> all(floor(n) == math.floor(n) for n
    ...     in (1, -1, 0, -0, 1.1, -1.1, 1.0, -1.0, 1_000_000_000))
    True
    """
    truncated = int(x)  # int() truncates toward zero
    return truncated if truncated <= x else truncated - 1
def checksum2(data):
    """ Calculate Checksum 2

    Bitwise-invert the data and mask to the low byte with bit 0 cleared,
    as required for the herkulex data packet.

    Args:
        data (int): the data of which checksum is to be calculated

    Returns:
        int: The calculated checksum 2
    """
    inverted = ~data
    return inverted & 0xFE
def make_paired_cycle_list(cycle_list):
    """Pair consecutive items of *cycle_list* into (even-index, odd-index)
    tuples; a trailing unpaired item is dropped.

    cycle_list is the list of actions that need to be paired up."""
    evens = cycle_list[::2]
    odds = cycle_list[1::2]
    return [pair for pair in zip(evens, odds)]
def common_prefix(strings):
    """ Find the longest string that is a prefix of all the strings. """
    if not strings:
        return ''
    prefix = strings[0]
    for candidate in strings:
        # shrink the prefix one character at a time until it matches
        while not candidate.startswith(prefix):
            prefix = prefix[:-1]
        if not prefix:
            return ''
    return prefix
def get_smallwords(text, min_length=1, max_length=5):
    """
    Computes the smallwords of a given text

    :rtype : list
    :param text: The text provided
    :param min_length: The minimum length of the smallwords
    :param max_length: The maximum length of the smallwords
    :return: The list of all the smallwords
    """
    selected = []
    for word in text.split(' '):
        if min_length <= len(word) <= max_length:
            selected.append(word)
    return selected
def strip_rule(line):
    """
    Sanitize a rule string before writing it to the output hosts file.

    Some sources put comments around their rules; for accuracy we keep only
    the first two whitespace-separated fields (address and hostname).

    Parameters
    ----------
    line : str
        The rule provided for sanitation.

    Returns
    -------
    sanitized_line : str
        The sanitized rule, or "" when fewer than two fields are present.
    """
    fields = line.split()
    if len(fields) >= 2:
        return " ".join(fields[:2])
    return ""
def get_reverted_rses_id_name_map(rses_id_name_map):
    """Revert k:v to v:k"""
    inverted = {}
    for rse_id, rse_name in rses_id_name_map.items():
        inverted[rse_name] = rse_id
    return inverted
def leap_year(year: int) -> bool:
    """Report if a year is leap.

    :param year: int - year.
    :return: bool
    """
    if year % 400 == 0:
        return True
    return year % 4 == 0 and year % 100 != 0
def factors_to_dictionary(factors):
    """Count how often each factor occurs.

    Args:
        factors (list): List of factors

    Returns:
        dict: Dictionary mapping factor -> occurrence count
    """
    counts = {}
    for factor in factors:
        counts[factor] = counts.get(factor, 0) + 1
    return counts
def to_response(objects, not_found_msg):
    """
    Convert namedtuple objects to dict form.

    If the specified sequence of objects is non-empty, return their dict
    versions (namedtuples must be converted to dictionaries to be
    serialized to JSON). Otherwise return the message with a 404 status.
    """
    if not objects:
        return (not_found_msg, 404)
    return [record._asdict() for record in objects]
def dump_data(ws,headings,data):
    """ Iterate over the data and write it out row by row.

    :param ws: worksheet-like object exposing write(row, col, value);
        mutated in place (presumably an xlwt/xlsxwriter sheet — confirm)
    :param headings: column titles, written to row 0
    :param data: iterable of rows; row i is written to worksheet row i + 1
    :return: the same worksheet object
    """
    # header row
    for i, colVal in enumerate(headings):
        ws.write(0,i,colVal)
    # data rows, offset by one to leave the header in place
    for i, row in enumerate(data):
        for j, colVal in enumerate(row):
            ws.write(i+1,j,colVal)
    return ws
def timestamp_str_to_seconds(timestamp):
    """Converts a timestamp string in "HH:MM:SS.XXX" format to seconds.

    Also accepts "MM:SS" or plain "SS"; at most the last three components
    are considered.

    Args:
        timestamp: a string in "HH:MM:SS.XXX" format

    Returns:
        the number of seconds
    """
    components = timestamp.split(":")[-3:]
    total = 0.0
    for part in components:
        # Horner scheme: each step promotes the running total by one unit
        total = total * 60 + float(part)
    return total
def key_has_dot_or_dollar(d):
    """Recursively determine whether any key of *d* (or of a nested dict
    value) contains a dot or starts with a dollar sign.

    Returns True when such a key exists, False otherwise. (The original
    implicitly returned None on the no-match path; False is equivalent in
    boolean context and honest about the declared contract.)
    """
    for k, v in d.items():
        if ("." in k or k.startswith("$")) or (
            isinstance(v, dict) and key_has_dot_or_dollar(v)
        ):
            return True
    return False
def makeHeader(seqs):
    """
    Make a header for the BAM file given
    a dictionary of sequence names and their lengths.
    """
    # one {'LN': length, 'SN': name} entry per reference sequence
    sq_entries = [{'LN': length, 'SN': name} for name, length in seqs.items()]
    return {'HD': {'VN': '1.0'}, 'SQ': sq_entries}
def twod_to_oned(size, *coordinates):
    """Convert (x >= 0, y >= 0) coordinates to flat int indices.

    :param size: (width, ...) of the grid containing the coordinates
    :param coordinates: [(x0, y0), (x1, y1), ...]
    :return: a single int when one coordinate was given, else a tuple
    """
    width = size[0]
    flattened = tuple(col + row * width for col, row in coordinates)
    if len(flattened) == 1:
        return flattened[0]
    return flattened
def get_title(this_title):
    """ Wrap *this_title* in an HTML <title> element. """
    return '<title>' + this_title + '</title>'
def valid_mapping(mention_start, mention_end, group_indices):
    """Determine if the mention can be mapped under merging rules.

    :param mention_start: start index of the mention (inclusive)
    :param mention_end: end index of the mention (inclusive)
    :param group_indices: iterable of (group_start, group_end) spans
    :return: True when the mention exactly matches some group or touches no
        group at all; False when it is nested inside a group or partially
        overlaps one.
    """
    for group_start, group_end in group_indices:
        if mention_start == group_start and mention_end == group_end: # Exact match
            return True
        elif group_start <= mention_start <= group_end and group_start <= mention_end <= group_end: # Partial or full nested
            return False
        elif mention_start < group_start <= mention_end < group_end: # Partial overlap, left
            return False
        elif group_start < mention_start <= group_end < mention_end: # Partial overlap, right
            return False
    # no group constrained the mention
    return True
def tuple_to_string(date_tuple):
    """
    Create a yyyy-mm(-dd) string from a (yyyy, m) tuple (optionally with a
    day as third element).
    """
    padded = [str(date_tuple[0]).zfill(4), str(date_tuple[1]).zfill(2)]
    if len(date_tuple) > 2:
        padded.append(str(date_tuple[2]).zfill(2))
    return '-'.join(padded)
def sequence(find, numbers):
    """This function checks to see if an object is in a sequence
    >>> sequence(1, [1,2,3,4])
    1
    >>> sequence("i", "Hello world")
    'Nothing'
    >>> sequence(4, (2,4,6))
    4
    """
    for element in numbers:
        if element == find:
            return element
    return "Nothing"
def find_amazon_id(link):
    """Find amazon item id from a passed link
    ONLY WORKS FOR BOOKS RIGHT NOW
    (raises IndexError when the link contains no '/dp/' segment)

    sample book url:
    http://www.amazon.com/Carbon-isotope-fractionation-trophic-transfer/dp/B000RR3CXS%3FSubscriptionId%3D...
    """
    after_dp = link.split('/dp/')[1]
    # the id ends at the URL-encoded '?' ("%3F"), if any
    return after_dp.split('%3F')[0]
def current_green_or_left(before, after, current=None):
    """
    Check that the green / left-green light transition is valid.

    :param before: must be None, "yellow" or "blink"
    :param after: must be None or "blink"
    :param current: default None; only used for the printed trace,
        never for the validity check itself
    :return: False if either constraint is violated, otherwise True
    """
    print(f'{before}--{current}--{after}')
    # Both neighbours must come from their allowed sets.
    before_ok = before in (None, "yellow", "blink")
    after_ok = after in (None, "blink")
    return before_ok and after_ok
def get_values(line):
    """
    Return the VALUES portion of a MySQL-dump INSERT statement
    (everything after the first '` VALUES ' marker; '' if absent).
    """
    _, _, values_part = line.partition('` VALUES ')
    return values_part
def rectContains(rect, pt):
    """
    Test whether a point lies strictly inside a rectangle.
    @Param rect rectangle as (x, y, width, height)
    @Param pt point as (x, y)
    @Return boolean (True only for strict interior; edges excluded)
    @source: https://stackoverflow.com/questions/33065834/how-to-detect-if-a-point-is-contained-within-a-bounding-rect-opecv-python
    """
    left, top = rect[0], rect[1]
    right = rect[0] + rect[2]
    bottom = rect[1] + rect[3]
    px, py = pt[0], pt[1]
    return left < px < right and top < py < bottom
def remove_toffoli_from_line(local_qasm_line, qubit_1, qubit_2, target_qubit):
    """
    Strip a specific Toffoli gate call from one line of qasm.
    Args:
        local_qasm_line: The line of qasm
        qubit_1: The first control qubit of the Toffoli gate
        qubit_2: The second control qubit
        target_qubit: The target qubit
    Returns: The same line of qasm with that Toffoli call removed
    """
    gate = "Toffoli q[{}],q[{}],q[{}]".format(qubit_1, qubit_2, target_qubit)
    # Try each surrounding context in order: parallel bar on the right,
    # parallel bar on the left, sole gate inside parallel brackets,
    # and finally a bare (non-parallelized) occurrence.
    for pattern in (gate + " | ",
                    " | " + gate,
                    "{" + gate + "}",
                    gate):
        local_qasm_line = local_qasm_line.replace(pattern, "")
    return local_qasm_line
def fibonacci(n):
    """Return the nth Fibonacci number (fib(0)=0, fib(1)=1).

    Iterative O(n) implementation; the previous naive double recursion
    was exponential in n. For n < 2 (including negatives) n itself is
    returned, matching the original base case.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def check_true(string):
    """Check whether an English string expresses truth (case-insensitive).

    Returns True only for a recognised affirmative; anything else,
    including unknown words, defaults to False.
    """
    affirmatives = ('true', 'yes', 'y', '1', 'yep', 'yeah')
    return string.lower() in affirmatives
def isNumber(arg) -> bool:
    """Return True if the value can be converted to a floating point."""
    # Was int(arg) with a bare except: "3.5" wrongly returned False and
    # every exception (even KeyboardInterrupt) was swallowed. float()
    # matches the documented contract; only conversion errors are caught.
    try:
        float(arg)
        return True
    except (ValueError, TypeError):
        return False
def bget(jdb, s):
    """Better get: return jdb.get(s), or None if the lookup fails.

    :param jdb: a dict-like object (may be None or lack .get)
    :param s: lookup key (may be unhashable)
    :return: the mapped value, or None on a missing key or failed lookup
    """
    # Narrowed from a bare except: AttributeError covers objects without
    # .get (e.g. jdb is None); TypeError covers unhashable keys.
    try:
        return jdb.get(s)
    except (AttributeError, TypeError):
        return None
def convert_to_seconds(duration):
    """
    Converts a ISO 8601 unicode duration str to seconds.
    :param duration: The ISO 8601 unicode duration str
    :return: int seconds (0 if a unit letter appears without digits)
    """
    text = duration.replace('PT', '').upper()
    unit_factor = {'H': 3600, 'M': 60, 'S': 1}
    total = 0
    digits = ''
    for ch in text:
        if ch.isnumeric():
            digits += ch
        if ch in unit_factor:
            # A unit letter with no (convertible) digits before it is
            # treated as a malformed duration.
            try:
                total += int(digits) * unit_factor[ch]
            except ValueError:
                return 0
            digits = ''
    return total
def MyGrep(hash_list, index_name, iname):
    """
    Filter a list of header->value dicts down to one index name.

    hash_list: (list<subdict>)
        subdict: (dict) header -> value
    index_name: (str) key in subdict that maps to index names
    iname: (str) the index name we want.
    Returns:
        list<dict> containing only the dicts whose index name matches.
    """
    return [entry for entry in hash_list if entry[index_name] == iname]
def sizeof_fmt(num):
    """
    Render a byte count as a human-readable size string.
    :param num: size in bytes
    :return: e.g. '512.0 B', '2.0 KB'; falls through to 'TB' (no space)
    """
    for unit in ('B', 'KB', 'MB', 'GB'):
        if num < 1024.0:
            return f"{num:3.1f} {unit}"
        num /= 1024.0
    # Anything >= 1024 GB is reported in terabytes.
    return f"{num:3.1f}TB"
def check_guess(user_number, generated_number, count):
    """Compare the user's guess with the generated number.

    Returns a high/low hint, or a success message including the guess
    count once the numbers match.
    """
    if user_number > generated_number:
        return "The number you guessed was too high.\n"
    if user_number < generated_number:
        return "The number you guessed was too low.\n"
    return f"Great Job! The Correct number is {generated_number}. You have made {count} guesses.\n"
def coordinates_list_to_BED(scaffold_name: str, coordinates: list) -> str:
    """
    Build BED-format text from a list of coordinates.

    :param scaffold_name: name placed in the first BED column
    :param coordinates: list like [[start, stop], [start, stop]]
    :return: one tab-separated, newline-terminated line per pair
    """
    # str.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(
        scaffold_name + '\t' + str(pair[0]) + '\t' + str(pair[1]) + '\n'
        for pair in coordinates
    )
def str_to_bool(val):
    """
    Interpret a string (or bool-like) value as boolean True.

    Strings are compared case-insensitively against "true", "on" and
    "yes"; the literal True also passes (and, via == equality, so does 1).
    """
    normalized = val.lower() if isinstance(val, str) else val
    return normalized in ("true", "on", "yes", True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.