content stringlengths 42 6.51k |
|---|
def serialize_grades(grades):
    """Return an XML string representation of the given grades.

    :param grades: mapping of student id -> grade object (read via ``.grade``)
    :return: XML of the form ``<scores><score>...</score></scores>``
    """
    pieces = ['<scores>\n']
    for student_id, grade in grades.items():
        pieces.append("\t<score>\n\t\t<studentId>" + str(student_id) + "</studentId>\n")
        pieces.append("\t\t<value>" + str(grade.grade) + "</value>\n\t</score>\n")
    pieces.append('</scores>')
    return ''.join(pieces)
def exception_handler(main_graph_hover, selection):
    """Resolve a college name from map-hover data or the dropdown selection.

    Parameters
    ----------
    main_graph_hover : dict or None
        Hover payload as returned by hovering on the map.
    selection : str or None
        Value as returned by the filtering dropdown widget.

    Returns
    -------
    str
        A college name ('Nordwin College' when nothing usable is given).
    """
    if main_graph_hover is None:
        # fall back to a fixed hover payload pointing at Nordwin College
        main_graph_hover = {
            'points': [{'curveNumber': 0, 'pointNumber': 40, 'pointIndex': 40,
                        'lon': 5.801026, 'lat': 53.198069,
                        'text': 'Nordwin College'}]}
    if selection is not None:
        return str(selection)
    try:
        names = [point['text'] for point in main_graph_hover['points']]
        return names[0]
    except KeyError:
        return 'Nordwin College'
def get_configured_lease_duration(node_config):
    """
    Just kidding. Lease duration is hard-coded.
    :return int: The number of seconds after which a newly acquired lease will
        be valid.
    """
    # See lots of places in Tahoe-LAFS, eg src/allmydata/storage/server.py
    days = 31
    return days * 24 * 60 * 60
def shift(l, n):
    """Rotate list ``l`` left by ``n`` positions, in place, and return it.

    Negative or oversized ``n`` is handled via modular arithmetic.
    An empty list is returned unchanged (the original raised
    ZeroDivisionError on ``n % len(l)``).
    """
    if not l:
        return l
    n = n % len(l)
    head = l[:n]
    del l[:n]
    l.extend(head)
    return l
def _get_partial_paths(paths, include_full=False):
"""Split flag paths into groups."""
partials = set()
for path in paths:
parts = path.split(".")
limit = len(parts)
if not include_full:
limit -= 1
for i in range(limit):
partials.add(".".join(parts[0 : i + 1]))
return partials |
def ismacro(o):
    """Return True if the given object is a macro (has a ``macroName``)."""
    try:
        o.macroName
    except AttributeError:
        return False
    return True
def get_num_gophers(blades, remainders, M):
    """Find the number of gophers given blade counts and remainders.

    Returns the smallest i in [1, M] satisfying i % b == r for every
    (b, r) pair, or None if no such i exists.
    """
    for candidate in range(1, M + 1):
        if all(candidate % b == r for b, r in zip(blades, remainders)):
            return candidate
    return None
def dedupe_matching(matching):
    """
    Remove duplicate node pairs from the output of
    networkx.algorithms.max_weight_matching, ignoring pair order.

    Args:
        matching (dict): "from" node -> "to" node mapping.
    Returns:
        list[2tuples]: deduped node pairs, each sorted internally.
    """
    unique_pairs = {tuple(sorted((src, dst))) for src, dst in matching.items()}
    return list(unique_pairs)
def month_to_quarter(month_num):
    """Convert the month, provided as an int, to a yearly quarter, also an int.

    Args:
        month_num (int): The month number to convert (1-12)
    Returns:
        int: The quarter this month is in
    Raises:
        ValueError: if month_num cannot be coerced to an int in 1..12.
    Examples:
        >>> month_to_quarter(1)
        1
        >>> month_to_quarter(9)
        3
    """
    # Narrow exception handling: the original raised a bare Exception and
    # caught everything with a bare except, hiding unrelated failures.
    try:
        month_num = int(month_num)
    except (TypeError, ValueError):
        raise ValueError("month_num must be an int between 1 and 12, not {!r}"
                         .format(month_num))
    if not 1 <= month_num <= 12:
        raise ValueError("month_num must be an int between 1 and 12, not {!r}"
                         .format(month_num))
    return (month_num - 1) // 3 + 1
def fact_while(n):
    """Return the factorial of ``n`` computed with a while loop.

    Values of ``n`` below 2 yield 1.
    """
    product = 1
    while n > 1:
        product *= n
        n -= 1
    return product
def to_similarity(distance, length):
    """Calculate a similarity measure from an edit distance.

    Parameters
    ----------
    distance : int
        The edit distance between two strings.
    length : int
        The length of the longer of the two strings the edit distance
        is from. Must be non-zero.

    Returns
    -------
    float
        A similarity value from 0 to 1.0 computed as
        ``1 - (distance / length)``, or -1 if distance is negative.
        (The previous docstring stated the inverted formula
        ``1 - (length / distance)``, which the code never computed.)
    """
    return -1 if distance < 0 else 1.0 - distance / length
def invert_dictionary(in_dict):
    """
    Invert a main_bag -> [(count, inner_bag), ...] mapping into
    bag_name -> set of bags that contain it.
    """
    inverted = {}
    for outer_bag, contents in in_dict.items():
        inverted.setdefault(outer_bag, set())
        for _count, inner_bag in contents:
            inverted.setdefault(inner_bag, set()).add(outer_bag)
    return inverted
def minor_allele_frequency(locus, collection):
    """Return the minor allele frequency (MAF) at a locus.

    Assumes just 2 alleles per locus.
    input: locus [integer]; collection of sequences (iterable of
    indexables: strings, lists, tuples...)
    returns: frequency of the minor allele
    """
    alleles = [seq[locus] for seq in collection]
    # We don't know which allele is minor, so take the smaller frequency.
    freq_of_first = alleles.count(alleles[0]) / len(alleles)
    return min(freq_of_first, 1 - freq_of_first)
def list_comparator(o_list, t_list):
    """
    Determine whether two lists contain the same elements (order ignored).

    :param o_list: first list
    :param t_list: second list
    :return: True when both sort to the same sequence; False when they
        differ or cannot be sorted/compared.
    """
    try:
        same = sorted(o_list) == sorted(t_list)
    except Exception:
        return False
    return True if same else False
def _count_cross_inversions(P, Q):
"""
Counts the inversions across two sorted arrays.
And combine the two arrays into one sorted array
For all 1<= i<=len(P) and for all 1 <= j <= len(Q),
if P[i] > Q[j], then (i, j) is a cross inversion
Parameters
----------
P: array-like, sorted in non-decreasing order
Q: array-like, sorted in non-decreasing order
Returns
------
R: array-like, a sorted array of the elements of `P` and `Q`
num_inversion: int, the number of inversions across `P` and `Q`
Examples
--------
>>> _count_cross_inversions([1, 2, 3], [0, 2, 5])
([0, 1, 2, 2, 3, 5], 4)
>>> _count_cross_inversions([1, 2, 3], [3, 4, 5])
([1, 2, 3, 3, 4, 5], 0)
"""
R = []
i = j = num_inversion = 0
while i < len(P) and j < len(Q):
if P[i] > Q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(P) - i
R.append(Q[j])
j += 1
else:
R.append(P[i])
i += 1
if i < len(P):
R.extend(P[i:])
else:
R.extend(Q[j:])
return R, num_inversion |
def vocab(name):
    """
    Given a property such as 'rel', get its fully-qualified URL in our
    JSON-LD vocabulary.
    """
    base = "http://api.conceptnet.io/ld/conceptnet5.7/context.ld.json#"
    return base + name
def frequency_to_band(freq):
    """
    Return the IEEE letter band code for a frequency in GHz.

    Frequencies below 1 GHz return None; above 115 GHz return "D".
    BUG FIX: 12-18 GHz previously returned "Ka"; the IEEE designation
    for that range is "Ku" (K-under), with "K" at 18-26.5 and "Ka"
    (K-above) at 26.5-40 GHz.
    """
    if freq < 1:
        return None
    # (upper bound exclusive, band code), scanned in ascending order
    thresholds = [
        (2, "L"), (4, "S"), (8, "C"), (12, "X"), (18, "Ku"),
        (26.5, "K"), (40, "Ka"), (50, "Q"), (75, "V"), (115, "W"),
    ]
    for upper, band in thresholds:
        if freq < upper:
            return band
    return "D"
def get_XA_mapping(tags, max_mm=None):
    """
    Return the additional-mapping positions encoded in the 'XA' SAM tag.

    Arguments:
    - `tags`: iterable of (tag_name, tag_value) pairs
    - `max_mm`: if given, keep only entries whose last field
      (mismatch count) is <= max_mm
    """
    alt_hits = []
    for tpair in tags:
        if tpair[0] != 'XA':
            continue
        # entries are ';'-separated with a trailing ';' producing an
        # empty final piece, hence the [:-1]
        for entry in tpair[1].split(';')[:-1]:
            fields = entry.split(',')
            if max_mm is None or int(fields[-1]) <= max_mm:
                alt_hits.append(fields)
    return alt_hits
def get_automatic_parallelization_options(max_num_machines=1, max_wallclock_seconds=1800):  # pylint: disable=invalid-name
    """Return an instance of the automatic parallelization options dictionary.

    :param max_num_machines: number of nodes, default=1
    :param max_wallclock_seconds: maximum number of wallclock seconds, default=1800
    """
    # target half of the wallclock budget
    target_time_seconds = 0.5 * max_wallclock_seconds
    return dict(
        max_num_machines=max_num_machines,
        target_time_seconds=target_time_seconds,
        max_wallclock_seconds=max_wallclock_seconds,
    )
def valid_youtube_video(url):
    """
    Filter out channels, user accounts and playlists.

    :param url: the YouTube URL to check.
    :return: True if it's a video; False if the URL contains a banned
        keyword ('user', 'channel' or 'playlist').
    """
    return not any(keyword in url for keyword in ('user', 'channel', 'playlist'))
def EnumValueCrossRefLabel(enum_value_name):
    """Build the cross-reference label for an enum value."""
    prefix = 'envoy_api_enum_value_'
    return '%s%s' % (prefix, enum_value_name)
def find_segments(node_list: list) -> str:
    """
    Find segment ranges of node IDs on each rack.

    Builds a string describing runs of consecutive IDs, e.g.
    [1, 2, 3, 7] -> "1-3,7-7". Returns "" for an empty list.
    """
    node_list.sort()  # NOTE: sorts the caller's list in place
    list_len = len(node_list)
    list_range = ""
    if node_list:
        # the output always starts with the smallest ID
        list_range = str(node_list[0])
    for i in range(list_len - 1):
        # a gap closes the current run and opens the next one
        if node_list[i+1] != node_list[i] + 1:
            list_range = list_range + "-" + str(node_list[i]) + "," + str(node_list[i+1])
        # reaching the last element closes the final run
        # NOTE(review): this compares by value, so a duplicated final ID
        # earlier in the list would append the closing range twice --
        # presumably IDs are unique; confirm with callers.
        if node_list[i+1] == node_list[-1]:
            list_range = list_range + "-" + str(node_list[-1])
    return list_range
def roundAllFloats(lista, l):
    """Round to 3 decimals.

    For each row in ``lista``, copy the first ``l + 1`` entries untouched
    and append rounded copies of the remaining entries. Returns a new
    outer list (inner prefixes are shared slices).
    """
    nlista = []
    for ii in lista:
        tt = ii[:l + 1]  # leading columns kept as-is
        for jj in ii[l + 1:]:
            if jj > 100.0:
                # NOTE(review): values above 100 are first coarsened to the
                # nearest 10 before the 3-decimal rounding -- presumably
                # intentional for large magnitudes; confirm with callers.
                jj = round(jj, -1)
            nn = round(jj, 3)
            tt.append(nn)
        nlista.append(tt)
    return nlista
def findBest(dictionary, comparator):
    """Return a two-element list [best_key, best_value].

    The current best is replaced by a candidate whenever
    ``comparator(current_best_value, candidate_value)`` is not exactly
    ``True``; the choice therefore depends on the comparator function.
    """
    keys = list(dictionary)
    best_pos = 0
    best_value = dictionary[keys[0]]
    for pos in range(1, len(keys)):
        candidate = dictionary[keys[pos]]
        if comparator(best_value, candidate) != True:
            best_value = candidate
            best_pos = pos
    return [keys[best_pos], best_value]
def pid_from_context_or_data(value, context, **kwargs):
    """Get PID value from the marshmallow context, falling back to *value*."""
    pid = (context or {}).get('pid')
    return value if pid is None else pid.pid_value
def _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):
"""Reconstruct solution velocity vectors."""
V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1
V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2
V_t1 = gamma * sigma * (y + ll * x) / r1
V_t2 = gamma * sigma * (y + ll * x) / r2
return [V_r1, V_r2, V_t1, V_t2] |
def ordered_unique(ls):
    """Return unique elements of ``ls``, ordered by first appearance."""
    seen = set()
    unique = []
    for item in ls:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def generate_time_series_exponential_growth_data(n, x0=1.565, b=1.1194):
    """
    Generate exponential-growth SARS active case simulation data.

    :param n: number of points
    :param x0: initial value of infected people
    :param b: growth rate
    :return: list of {'x': step, 'y': rounded count} points
    """
    series = []
    for step in range(n):
        value = x0 * b ** step  # b**0 == 1, so step 0 yields x0
        series.append({'x': step, 'y': int(value + 0.5)})
    return series
def set_stitch_num(stitch_num: int) -> str:
    """Set the stitch number. Part of the supported Knitout extensions."""
    return f'x-stitch-number {stitch_num}'
def box_inside_box(inner, outer):
    """Return True if the inner bbox is inside or equal to the outer bbox.

    Parameters:
        inner (list): inner bounding box [x0, y0, x1, y1]
        outer (list): outer bounding box [x0, y0, x1, y1]
    Returns:
        bool: whether inner is inside outer
    """
    min_corner_ok = outer[0] <= inner[0] and outer[1] <= inner[1]
    max_corner_ok = inner[2] <= outer[2] and inner[3] <= outer[3]
    return min_corner_ok and max_corner_ok
def reverse_dictionary(title_to_freebase):
    """
    Create a redirect -> title dictionary from each entry's
    'alternate_titles' list.
    """
    redirect_to_title = {}
    for title, info in title_to_freebase.items():
        for redirect in info['alternate_titles']:
            redirect_to_title[redirect] = title
    return redirect_to_title
def get_hyperion_unique_id(server_id: str, instance: int, name: str) -> str:
    """Get a unique_id for a Hyperion instance."""
    return "_".join((server_id, str(instance), name))
def _CK_VERSION_to_tuple(data):
"""Convert CK_VERSION to tuple."""
return (data['major'], data['minor']) |
def recursive_decode(integers, bits=16):
    """Turns a list of integers into a new list of integers where the values in
    the first are merged if it looks like a higher order integer split over two
    integers.
    (Code here adapted from the official python-mmtf package.)
    :param list integers: the integers to decode.
    :param int bits: the bit width of the encoded integers (default 16).
    :rtype: ``list``"""
    new = []
    power = 2 ** (bits - 1)
    # sentinel values: the most positive / most negative `bits`-wide value
    cutoff = [power - 1, 0 - power]
    index = 0
    while index < len(integers):
        value = 0
        # a run of sentinel values accumulates into one larger integer...
        while integers[index] in cutoff:
            value += integers[index]
            index += 1
            # ...terminated early by an explicit 0
            if integers[index] == 0: break
        # the first non-sentinel value closes the run
        # NOTE(review): input ending mid-run would raise IndexError here --
        # presumably well-formed mmtf streams never do; confirm upstream.
        value += integers[index]
        index += 1
        new.append(value)
    return new
def atlas2aparc(atlas_name, hemi=None):
    """Find the freesurfer aparc annot filename for an atlas key.

    Valid keys: desikan, destrieux, dkt.
    If `hemi` is specified, a valid filename is returned; otherwise a
    '%s' format string is returned.
    """
    templates = {
        'desikan': '%s.aparc.annot',
        'destrieux': '%s.aparc.a2009s.annot',
        'dkt': '%s.aparc.DKTatlas40.annot',
    }
    if atlas_name not in templates:
        raise ValueError('Unknown atlas: %s' % atlas_name)
    template = templates[atlas_name]
    return template % hemi if hemi else template
def inject(*components):
    """Injects web components.

    >>> inject(
    >>>     '<snap>a</snap>',
    >>>     '<snap>b</snap>'
    >>> )
    >>> <snap>a</snap><snap>b</snap>

    Args:
        components (WebComponents): The web components to inject.
    Returns:
        str: The string with injected web components (falsy components
        are skipped).
    """
    return ''.join(str(component) for component in components if component)
def lazy_begin0(*bodys):
    """Racket-like begin0: run bodys in sequence, return the first return value.

    Lazy; each body must be a thunk (0-argument function), to delay its
    evaluation until begin0() runs.

        g = lambda x: lazy_begin0(lambda: 23*x,
                                  lambda: print("hi"))
        print(g(1))  # 23
    """
    if not bodys:
        return None
    first, *rest = bodys
    result = first()
    for thunk in rest:
        thunk()  # evaluated for side effects only
    return result
def wrap_stringlist(strlist, width=75):
    """
    Wraps the text lines of a list to width characters.
    >>> wrap_stringlist(['REM foo bar baz foo bar baz blah blub', 'REM 2foo bar baz foo bar baz blah blub'], 36)
    ['REM foo bar baz foo bar baz blah\\nREM blub\\n', 'REM 2foo bar baz foo bar baz blah\\nREM blub\\n']
    """
    import textwrap
    return [
        '\n'.join(textwrap.wrap(line, width, subsequent_indent='REM ')) + '\n'
        for line in strlist
    ]
def uniq(d):
    """Returns the unique value of a dictionary, else an empty dictionary.

    NOTE: due to the 'temp hack' early return below, the first value is
    returned as soon as it is seen; the warning branch is currently
    unreachable. Preserved as-is pending the ITM 3 port differences.
    """
    result = {}
    for k, v in d.items():
        if result == {} or result == v:
            result = v
            return result  # temp hack - ITM 3 ports have slight differences.
        else:
            # BUG FIX: the second line was missing the f-prefix, so the
            # placeholders were printed literally.
            print("WARNING: uniq sees different values.\n" +
                  f" val1={result}\n val2={v}")
            return {}
    return result
def dest(current_command):
    """Return the dest mnemonic of the current C-command.

    The dest part is everything before '='; commands without '='
    have no dest, so return "".
    """
    head, sep, _rest = current_command.partition("=")
    return head if sep else ""
def time_delta(t_delta: float) -> str:
    """
    Convert a timestamp difference into a human readable timestring.

    Args:
        t_delta (float): Difference between two time.time() timestamps.
    Returns:
        Human readable timestring.
    """
    hrs = round(t_delta // 3600)
    mins = round(t_delta // 60 % 60)
    secs = round(t_delta % 60)
    millis = round(t_delta % 1 * 1000)
    return (f"{hrs} hours, {mins} minutes, "
            f"{secs} seconds, {millis} milliseconds")
def longestCommonPrefix(strs):
    """Return the longest common prefix of a list of strings.

    :type strs: List[str]
    :rtype: str ("" for an empty list or no common prefix)

    BUG FIX: the previous implementation scanned mismatches from the
    right and stopped at the first matching character, so e.g.
    ['abcd', 'axcd'] incorrectly returned 'abcd'; it also raised
    IndexError for lists with fewer than two strings and printed debug
    output.
    """
    if not strs:
        return ""
    prefix = strs[0]
    for s in strs[1:]:
        # shrink the candidate prefix until it matches this string
        while not s.startswith(prefix):
            prefix = prefix[:-1]
            if not prefix:
                return ""
    return prefix
def multiseq_flops(V, D):
    """
    Given the number of coarse clusters and the dimension of the data,
    compute the number of flops required to rank each coarse vocabulary
    against the query.
    """
    total_coarse_vocab = 2 * V
    dims_per_coarse_split = D / 2
    flops_per_squared_distance = 2
    return total_coarse_vocab * dims_per_coarse_split * flops_per_squared_distance
def compare_range(a, b):
    """Test whether ``a`` falls inside the range string ``b``.

    ``b`` is either a plain value or "low-high" with open ends allowed;
    bounds are coerced to the type of ``a`` before comparing.

    >>> compare_range(1, "-10")
    True
    >>> compare_range(1, "10-")
    False
    >>> compare_range(20, "-10")
    False
    >>> compare_range(1, "10-20")
    False
    >>> compare_range(1.0, "0-1.0")
    True
    >>> compare_range(100, "-")
    True
    >>> compare_range("b", "a-z")
    True
    >>> compare_range("b", "b")
    True
    >>> compare_range("b", "a")
    False
    """
    cast = type(a)
    if "-" not in b:
        return a == cast(b)
    low, high = b.split("-", 1)
    if low not in (None, "") and cast(low) > a:
        return False
    if high not in (None, "") and cast(high) < a:
        return False
    return True
def fixDelex(filename, data, data2, idx, idx_acts):
    """Given system dialogue acts fix automatic delexicalization.

    Looks up the dialogue act for turn ``idx_acts`` of dialogue
    ``filename`` in ``data2`` and, based on the act's domain key
    (Attraction/Hotel/Restaurant), rewrites mislabelled domain words in
    the turn text ``data['log'][idx]['text']``. Mutates and returns
    ``data``.
    """
    try:
        turn = data2[filename.strip('.json')][str(idx_acts)]
    except:  # NOTE(review): bare except -- any lookup failure skips the fix
        return data
    # acts are a {act_name: act} dict unless the turn is a plain string
    if not isinstance(turn, str):
        for k, act in turn.items():
            # if a delexicalized slot of this domain appears with another
            # domain's word in the text, swap the domain word
            if 'Attraction' in k:
                if 'restaurant_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("restaurant", "attraction")
                if 'hotel_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("hotel", "attraction")
            if 'Hotel' in k:
                if 'attraction_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("attraction", "hotel")
                if 'restaurant_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("restaurant", "hotel")
            if 'Restaurant' in k:
                if 'attraction_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("attraction", "restaurant")
                if 'hotel_' in data['log'][idx]['text']:
                    data['log'][idx]['text'] = data['log'][idx]['text'].replace("hotel", "restaurant")
    return data
def get_discipline(discipline):
    """
    helper: guess the discipline from the competition name / description.

    Falls back to 'Lead' when no known keyword matches.
    """
    label = discipline.lower()
    if 'boulder' in label:
        return 'Bouldern'
    if 'lead' in label:
        return 'Lead'
    if 'speed' in label:
        return 'Speed'
    if 'combined' in label:
        return 'Combined'
    print("unknown categorie")
    return 'Lead'
def GetComment(plist, comments):
    """Get the comment for a given property list, keyed by its Label.

    Returns None when the plist has no Label or no comment is registered.
    """
    if "Label" not in plist:
        return None
    label = plist["Label"]
    return comments[label] if label in comments else None
def _get_provider_type(row):
"""
groups provider by type
"""
if row['provider_category']:
if 'npr' in row['provider_category'].lower():
return 'NPR'
elif 'getty' in row['provider_category'].lower():
return 'Getty'
elif 'istock' in row['provider_category'].lower():
return 'iStock'
elif 'corbis' in row['provider_category'].lower():
return 'Corbis'
elif 'AP' in row['provider_category'] or 'landov' in row['provider_category'].lower():
return 'Wire'
elif row['provider_category'] == 'Courtesy':
return 'Courtesy'
else:
return 'Other'
else:
return None |
def _prepare(data: dict):
""" Remove fields `_id` and `name` and return data. """
return {k: v for k, v in data.items() if k not in ('_id', 'name')} |
def m_to_inches(value):
    """Converts distance in meters to inches.

    Args:
        value: floating point distance in meters, or None
    Returns:
        Distance in inches (1 m = 39.37 in), or None if value is None.
    """
    if value is None:
        return None
    # BUG FIX: the original divided by 39.37, which converts inches to
    # meters, the opposite of what the name and docstring state.
    return value * 39.37
def validate_allocation_strategy(allocation_strategy):
    """Validate allocation strategy.

    :param allocation_strategy: Allocation strategy for ComputeResource
    :return: The provided value if valid
    :raises ValueError: for unknown strategies
    Property: ComputeResources.AllocationStrategy
    """
    valid_strategies = {
        "BEST_FIT",
        "BEST_FIT_PROGRESSIVE",
        "SPOT_CAPACITY_OPTIMIZED",
    }
    if allocation_strategy not in valid_strategies:
        raise ValueError("{} is not a valid strategy".format(allocation_strategy))
    return allocation_strategy
def _get_qualified_name(obj):
"""Return the Fully Qualified Name from an instance or class."""
module = obj.__module__
if hasattr(obj, '__name__'):
obj_name = obj.__name__
else:
obj_name = obj.__class__.__name__
return module + '.' + obj_name |
def get_first_non_blacklisted(blacklist):
    """Return the smallest positive integer not in ``blacklist``."""
    candidate = 1
    while candidate in blacklist:
        candidate += 1
    return candidate
def init_parameters(parameter):
    """Auxiliary function to fill defaults into the parameter dictionary.

    Parameters
    ----------
    parameter: dict
        See initActivations for further information.

    Returns
    -------
    parameter: dict
        The same dict with 'decay' and 'onsetOffsetTol' defaulted.
    """
    parameter.setdefault('decay', 0.75)
    parameter.setdefault('onsetOffsetTol', 0.025)
    return parameter
def calculate_pos_deviation_square(error, sl):
    """
    Squared deviation between an error value and a significance level,
    counted only when the deviation is positive.

    Parameters
    ----------
    error : error value
    sl : significance level

    Returns
    -------
    (error - sl)**2 when error > sl, else 0
    """
    return (error - sl) ** 2 if error > sl else 0
def normalize_vendor(vendor):
    """Return a canonical name for a type of database."""
    if not vendor:
        return 'db'  # should this ever happen?
    if 'sqlite' in vendor:
        return 'sqlite'
    if 'postgres' in vendor or vendor == 'psycopg2':
        return "postgres"
    return vendor
def clamp(value, min_value, max_value):
    """Return value constrained to be within defined limits.

    Parameters
    ----------
    value : int or float
        Original value
    min_value : int or float
        Allowed minimum value
    max_value : int or float
        Allowed maximum value

    Returns
    -------
    int or float
        Value constrained to be within defined limits
    """
    capped = min(value, max_value)   # enforce upper bound first
    return max(min_value, capped)    # then the lower bound
def isValidWit(text):
    """
    Return True if the wit.ai response relates to the weather.

    Arguments:
    text -- wit.ai outcome dict for user input (transcribed speech)
    """
    for outcome in text.get(u'outcomes', []):
        if outcome.get(u'intent', u'') == u'need_umbrella':
            return True
    return False
def convex_hull(points):
    """Compute the convex hull of a set of 2D points.

    Input: an iterable sequence of (x, y) pairs representing the points.
    Output: a list of hull vertices in counter-clockwise order, starting
    from the vertex with the lexicographically smallest coordinates.
    Implements Andrew's monotone chain algorithm, O(n log n).
    """
    # Sort lexicographically and drop duplicates (also handles the case
    # of a single repeated point).
    pts = sorted(set(points))
    if len(pts) <= 1:
        return pts

    def turn(o, a, b):
        # z-component of the cross product of OA and OB:
        # positive for a counter-clockwise turn, negative for clockwise,
        # zero when collinear.
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    def half_hull(ordered):
        # build one monotone chain, discarding non-CCW turns
        chain = []
        for p in ordered:
            while len(chain) >= 2 and turn(chain[-2], chain[-1], p) <= 0:
                chain.pop()
            chain.append(p)
        return chain

    lower = half_hull(pts)
    upper = half_hull(reversed(pts))
    # last point of each chain repeats the first point of the other
    return lower[:-1] + upper[:-1]
def validate_item_name(name, prefix, suffix):
    """Verify that ``name`` starts with ``prefix`` and ends with ``suffix``.

    Returns ``True`` or ``False``.
    BUG FIX: the previous slice test ``name[-len(suffix):]`` broke for an
    empty suffix, since ``name[-0:]`` is the whole string.
    """
    return name.startswith(prefix) and name.endswith(suffix)
def calculate_shape_keeping_aspect_ratio(height: int, width: int, min_size: int, max_size: int):
    """
    Scale spatial sizes keeping aspect ratio to satisfy the limits.

    Mirrors the output-shape calculation of the TensorFlow Object
    Detection API pre-processor with a keep-aspect-ratio resizer.

    :param height: input height.
    :param width: input width.
    :param min_size: size limit applied to the smaller side.
    :param max_size: size limit applied to the larger side.
    :return: tuple of scaled (height, width).
    """
    # the smaller of the two candidate scales keeps both limits satisfied
    scale = min(min_size / min(height, width), max_size / max(height, width))
    return int(round(height * scale)), int(round(width * scale))
def new_velocity(pos, vel):
    """Apply pairwise gravity and return the updated velocities.

    For every pair of moons, each axis velocity moves one step toward
    the other moon. ``vel`` is mutated in place and returned.
    """
    for idx, moon in enumerate(pos):
        for other in pos:
            for axis in range(3):
                # +1 when the other moon is ahead, -1 when behind, 0 when equal
                vel[idx][axis] += (moon[axis] < other[axis]) - (moon[axis] > other[axis])
    return vel
def create_title_abstract_col(data):
    """Add a combined title+abstract field to each api-response record."""
    for record in data:
        combined = record['patent_title'] + '. ' + record['patent_abstract']
        record['patent_title_abstract'] = str(combined)
    return data
def preprocess_text(sentence):
    """Handle some weird edge cases in parsing, like 'i' needing to be
    capitalized to be correctly identified as a pronoun. Also expands
    i'm/im and strips '@'/'#' characters."""
    normalized = []
    for token in sentence.split(' '):
        token = token.lower()
        if token == 'i':
            token = 'I'
        elif token in ("i'm", "im"):
            token = "I am"
        token = token.replace("@", "").replace("#", "")
        normalized.append(token)
    return ' '.join(normalized)
def is_segment(other):
    """Return true if this is a Segment.

    Helper so callers can test for a segment without importing the class;
    returns the object's ``is_segment`` attribute, or False if absent.
    """
    try:
        return other.is_segment
    except AttributeError:
        return False
def _color_to_tuple(value):
"""Converts a color from a 24-bit integer to a tuple.
:param value: RGB LED desired value - can be a RGB tuple or a 24-bit integer.
"""
if isinstance(value, tuple):
return value
if isinstance(value, int):
if value >> 24:
raise ValueError("Only bits 0->23 valid for integer input")
r = value >> 16
g = (value >> 8) & 0xFF
b = value & 0xFF
return [r, g, b]
raise ValueError("Color must be a tuple or 24-bit integer value.") |
def is_sha256(str):
    """Return True if str is a 64-character hex string (a SHA-256 digest).

    BUG FIX: the original never checked the length, so any parseable hex
    string of any length passed; it also used a bare except.
    (The parameter name shadows the builtin ``str``; kept for
    backward-compatible keyword calls.)
    """
    try:
        if len(str) != 64:
            return False
        int(str, 16)
        return True
    except (TypeError, ValueError):
        return False
def parse_resource_type(resource_type):
    """Splits a resource type into its components.

    :exc:`ValueError` is raised if the resource type is invalid.

    >>> parse_resource_type('AWS::ECS::Instance')
    ['AWS', 'ECS', 'Instance']
    >>> parse_resource_type('AWS::ECS')
    Traceback (most recent call last):
    ...
    ValueError: Resource type 'AWS::ECS' is invalid
    >>> parse_resource_type('AWS__ECS__Instance')
    Traceback (most recent call last):
    ...
    ValueError: Resource type 'AWS__ECS__Instance' is invalid
    """
    parts = resource_type.split("::")
    # a valid resource type always has exactly three components
    if len(parts) != 3:
        raise ValueError("Resource type '{}' is invalid".format(resource_type))
    return parts
def get_url_param(params, var, default=None, type_=None, *args, **kwargs):
    """
    Wrapper for getting a variable from a url parameter.

    URL params can be registered as lists: a single-element list is
    unwrapped to its value, a multi-element list is returned as-is.

    :param params: mapping of parameter name -> value (or list of values)
    :param var: parameter name to look up
    :param default: value used when the parameter is missing
    :param type_: optional callable used to coerce the result (best-effort)
    """
    def perform_get_url_param(params, var, *args, **kwargs):
        param = params.get(var, None)
        if param is None:
            return None
        if isinstance(param, list):
            if len(param) > 1:
                return param
            if not param:
                # BUG FIX: an empty list previously raised IndexError
                return None
            param = param[0]
        return param

    result = perform_get_url_param(params, var, *args, **kwargs)
    # check for default values
    if default is not None and result is None:
        result = default
    # specified a return type; coercion failures are deliberately ignored
    if type_ is not None:
        try:
            result = type_(result)
        except Exception:
            pass
    return result
def double_factorial(input_num):
    """
    Calculate the double factorial of the specified number.

    >>> double_factorial(5)
    15
    >>> double_factorial(8)
    384
    >>> double_factorial(3)
    3
    >>> double_factorial(0)
    1

    Raises ValueError for negative input.
    """
    if input_num < 0:
        raise ValueError("double_factorial() not defined for negative values")
    result = 1
    factor = input_num
    while factor > 1:
        result *= factor
        factor -= 2
    return result
def ddm_ddd(a, sep=" "):
    """Convert a degree, decimal-minute string to decimal degrees.

    : a - degree, decimal minute string, e.g. "-45 30.0"
    : sep - usually a space, but check
    : Usage - ddm_ddd(!SourceField!, sep=" ") with the python parser;
    : sourcefield is the input string field, destination field is type
    : double.
    """
    import math
    d, m = [float(part) for part in a.split(sep)]
    # BUG FIX: the old sign test `[-1, 1][d > 0]` treated 0 degrees as
    # negative; copysign handles 0.0 correctly and preserves "-0 mm.m".
    sign = math.copysign(1.0, d)
    return sign * (abs(d) + m / 60.)
def merge_sequences(list1, list2, **kwargs):
    """
    Return a new list of model objects merging ``list1`` and ``list2``,
    keeping the original ``list1`` order and discarding duplicates
    (identified via each item's ``to_tuple(**kwargs)``).
    """
    seen = set()
    merged = []
    for item in (list1 or []) + (list2 or []):
        signature = item.to_tuple(**kwargs)
        if signature not in seen:
            seen.add(signature)
            merged.append(item)
    return merged
def get_degree_of(links: list, node_i: int) -> int:
    """
    Compute the out-degree of node ``node_i``.

    :param links: list of (src, dst) tuples
    :param node_i: index of the node
    :return: number of links whose source is node_i
    """
    return sum(1 for src, _dst in links if src == node_i)
def edges_between_AB(dictP, A, B):
    """Return edges (i, j) with exactly one endpoint in A and the other in B."""
    selected = []
    for edge in dictP:
        a_to_b = edge[0] in A and edge[1] in B
        b_to_a = edge[0] in B and edge[1] in A
        if a_to_b or b_to_a:
            selected.append(edge)
    return selected
def prepare_data(result):
    """Convert an analytics API result into ``{'data': [row dicts]}``.

    Column names lose their 'ga:' prefix.
    BUG FIX: the original used ``str.strip("ga:")``, which strips the
    *character set* {g, a, :} from both ends and mangles names such as
    'ga:age' -> 'e'.
    """
    header = []
    for col in result['columnHeaders']:
        name = col['name']
        if name.startswith('ga:'):
            name = name[len('ga:'):]
        header.append(name)
    data = [dict(zip(header, row)) for row in result.get('rows')]
    return {'data': data}
def update_query(project, dataset, tablename, objects, condition):
    """
    Build an UPDATE query string for BigQuery.

    :param project: project id
    :param dataset: dataset name
    :param tablename: table name (must be a str)
    :param objects: dict of {column name: value}; str values are quoted
    :param condition: optional WHERE clause body (str) or None
    :return: the assembled query string
    :raises ValueError: on invalid argument types

    NOTE(security): values are interpolated directly into the SQL text;
    callers must not pass untrusted input (SQL injection risk). Consider
    parameterized queries instead.
    """
    if not isinstance(tablename, str):
        raise ValueError("Tablename should be a String")
    if not (condition is None or isinstance(condition, str)):
        raise ValueError("Condition can only be either None or String")
    if not isinstance(objects, dict):
        raise ValueError("Object argument must be Dictionary in format {column name : Value}")
    final_tablename = "`" + project + "." + dataset + "." + tablename + "`"
    # assemble "col = value" assignments; string values get quoted
    assignments = []
    for column, value in objects.items():
        if isinstance(value, str):
            assignments.append(str(column) + " = '" + str(value) + "'")
        else:
            assignments.append(str(column) + " = " + str(value))
    cols = ",".join(assignments)
    if condition is None:
        return "Update " + final_tablename + " set " + cols
    return "Update " + final_tablename + " set " + cols + " where " + condition
def check_output(command_args):
    """Wrapper for subprocess.check_output that logs the output of a
    failed command before re-raising the error."""
    import logging
    import subprocess
    try:
        raw = subprocess.check_output(command_args, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        logging.warning("Failed call with Output: %s" % err.output)
        raise err
    return raw[:-1]  # drop the trailing newline
def spreadingRate(Q, beta):
    """
    Calculate the spreading rate on the planet.

    Inputs:
        Q - pore space heat flow relative to modern Earth [dimensionless]
        beta - scaling parameter [dimensionless]
    Returns:
        r_sr - the spreading rate relative to the modern Earth [dimensionless]
    """
    return Q ** beta
def get_vector(a, b):
    """Given two 3D points, return the vector from ``a`` to ``b``."""
    return tuple(b[axis] - a[axis] for axis in range(3))
def clamp(lower: int, value: int, upper: int) -> int:
    """Clamp a value between (inclusive) lower and upper."""
    bounded_below = max(value, lower)
    return min(bounded_below, upper)
def counting_sort(elements):
    """
    Sort the integer sequence in place with the counting sort algorithm.

    :param elements: an integer sequence supporting __getitem__/__len__
    :return: the same sequence, sorted in increasing order
    """
    if len(elements) <= 1:
        return elements
    for element in elements:
        assert isinstance(element, int)
    lowest, highest = min(elements), max(elements)
    # one bucket per possible value in [lowest, highest]
    counts = [0] * (highest - lowest + 1)
    for element in elements:
        counts[element - lowest] += 1
    # write the values back in order
    write_pos = 0
    for offset, count in enumerate(counts):
        for _ in range(count):
            elements[write_pos] = offset + lowest
            write_pos += 1
    return elements
def get_chrom_name_from_bin_fn(bin_fn):
    """
    Extract the chromosome name from a bin filename.

    Assuming bin_fn looks like: genome_chr2.211_binary.txt.gz,
    the returned chrom name will be: chr2.211
    """
    parts = bin_fn.split("_")
    return parts[1]
def count_routes_graph(graph, source_node, dest_node):
    """
    classic tree-like graph traversal
    """
    # Base case: already at the destination, or exactly one step before it.
    if dest_node == source_node or dest_node - source_node == 1:
        return 1
    # Otherwise the route count is the sum over all children's route counts.
    return sum(
        count_routes_graph(graph, child, dest_node)
        for child in graph[source_node]
    )
def mergelistmax(lst1, lst2):
    """Return the element-wise maximum of two lists.

    The result has the same length as ``lst1``; extra elements in ``lst2``
    are ignored.  If ``lst2`` is shorter than ``lst1`` the lists are
    incompatible: a message is printed and ``None`` is returned (preserving
    the existing best-effort behaviour).

    :param lst1: first list; its length determines the result length
    :param lst2: second list; must be at least as long as ``lst1``
    :return: list of per-index maxima, or ``None`` on incompatible lengths
    """
    try:
        return [max(lst1[i], lst2[i]) for i in range(len(lst1))]
    except IndexError:
        # Narrowed from a bare `except:`: only a too-short lst2 is treated
        # as "incompatible"; any other error now propagates to the caller.
        print('incompatible lists')
def foldConditions(condition) :
    """Fold conditions to convert the condition of a priority element to the
    formatted string of the priority file matching the condition
    """
    if 'comparison' in condition :
        # Simple condition: "<type>.<name> <comparison> <value>"
        return "{}.{} {} {}".format(
            condition['type'],
            condition['name'],
            condition['comparison'],
            str(condition['value']),
        )
    # Complex condition: fold each sub-condition recursively and join them
    # with the logic operator, wrapped in parentheses.
    separator = ' ' + condition['logic'] + ' '
    folded = [foldConditions(sub) for sub in condition['list']]
    return '(' + separator.join(folded) + ')'
def filename_split (p):
    """ Split filename
    >>> print("('%s', '%s', '%s', '%s')"%filename_split("/toto/titi/tutu.tata"))
    ('', '/toto/titi', 'tutu', '.tata')
    """
    from os.path import split as splitPath, splitdrive, splitext
    head, tail = splitPath(p)
    disk, dir_ = splitdrive(head)
    # A genuine drive spec looks like "C:"; anything shorter (or without a
    # colon in second position) means there is no drive component.
    if len(disk) < 2 or disk[1] != ":":
        disk, dir_ = "", head
    name, ext = splitext(tail)
    return disk, dir_, name, ext
def url_path_join(*pieces):
    """Join components of url into a relative url
    Use to prevent double slash when joining subpath. This will leave the
    initial and final / in place
    """
    leading = pieces[0].startswith("/")
    trailing = pieces[-1].endswith("/")
    # Strip surrounding slashes from every piece, skip empty ones, then join
    # so consecutive slashes never survive.
    core = "/".join(piece.strip("/") for piece in pieces if piece.strip("/"))
    if leading:
        core = "/" + core
    if trailing:
        core = core + "/"
    # Joining only-slash pieces can yield "//": collapse it to a single "/".
    return "/" if core == "//" else core
def get_dim(min_mz, max_mz, bin_size):
    """
    Compute the number of bins over the given mass range for the given bin
    size.
    Args:
        min_mz: The minimum mass in the mass range (inclusive).
        max_mz: The maximum mass in the mass range (inclusive).
        bin_size: The bin size (in Da).
    Returns:
        A tuple containing (i) the number of bins over the given mass range
        for the given bin size, (ii) the highest multiple of bin size lower
        than the minimum mass, (iii) the lowest multiple of the bin size
        greater than the maximum mass. These two final values are the true
        boundaries of the mass range (inclusive min, exclusive max).
    """
    min_mz = float(min_mz)
    max_mz = float(max_mz)
    # Snap both boundaries outward to multiples of the bin size.
    start_dim = min_mz - min_mz % bin_size
    end_dim = max_mz + bin_size - max_mz % bin_size
    n_bins = round((end_dim - start_dim) / bin_size)
    return n_bins, start_dim, end_dim
def checksum (message):
    """Compute a 16-bit complemented checksum of ASCII text.

    Characters are paired into little-endian 16-bit words which are summed;
    the sum is then complemented and masked to 16 bits.  An odd-length
    message is padded with a zero byte (the standard internet-checksum
    convention) instead of raising IndexError as the previous version did.

    :param message: the text to checksum
    :return: the 16-bit checksum as an int
    """
    s = 0
    # loop taking 2 characters at a time
    for i in range(0, len(message), 2):
        w = ord(message[i])
        if i + 1 < len(message):
            # Second byte of the word fills the high bits.
            w += ord(message[i + 1]) << 8
        s = s + w
    # complement and mask to a 16-bit value
    s = ~s & 0xffff
    return s
def _convert_csv_numbers_to_list(value):
"""Converts a string containing CSV numbers to a list."""
if not value:
return []
return [float(x) for x in value.split(',')] |
def matmultiplication(A, B):
    """
    Returns the product of the matrix A * B
    :param A: The first matrix - ORDER MATTERS!
    :param B: The second matrix
    :return: The product of the two matrices, or the string
        "Multiplication not possible" when the dimensions are incompatible
    """
    # A's column count must equal B's row count for the product to exist.
    if len(A[0]) != len(B):
        return "Multiplication not possible"
    columns = list(zip(*B))
    product = []
    for row in A:
        product.append(
            [sum(x * y for x, y in zip(row, col)) for col in columns]
        )
    return product
def policy_v3_1(probability=0.7, magnitude=5):
    """Randomly select two transformations from {color} transformations,
    and then randomly select two transformations from {shape} transformations.

    Args:
        probability: probability attached to every transformation.
        magnitude: magnitude attached to every transformation.

    Returns:
        dict mapping slot index -> list of one-element sub-policies, where
        slots 0 and 1 draw from the color transformations and slots 2 and 3
        draw from the shape transformations.
    """
    # The op name lists were previously duplicated literally for each slot;
    # define them once and build each slot's sub-policies from them.
    color_ops = [
        'Mixup', 'Gaussian_noise', 'Saturation', 'Contrast', 'Brightness',
        'Sharpness', 'Color_casting', 'Equalize_YUV', 'Posterize',
        'AutoContrast',  # 'SolarizeAdd' intentionally excluded (was commented out)
        'Solarize', 'Equalize', 'Vignetting',
    ]
    shape_ops = [
        'Rotate', 'Flip', 'Cutout', 'Shear_x', 'Shear_y',
        'Scale', 'Scale_xy_diff', 'Lens_distortion',
    ]

    def as_sub_policies(names):
        # Fresh lists on every call so slots never share mutable state.
        return [[(name, probability, magnitude)] for name in names]

    policy = {
        # color augment
        0: as_sub_policies(color_ops),
        1: as_sub_policies(color_ops),
        # shape augment
        2: as_sub_policies(shape_ops),
        3: as_sub_policies(shape_ops),
    }
    return policy
def get_search_query(list2, t):
    """Build the itc.ua search URL for a date range and page number.

    __Attributes__
        list2: list containing the start date and the end date.
        t: page number; values greater than 1 add a "/page/<t>/" segment.

    __Returns__
        search_query: string containing the search URL.  (The previous
        docstring wrongly described the return value as a list and the
        page condition as "if 1".)
    """
    after = str(list2[0])
    before = str(list2[1])
    if t > 1:
        search_query = ("https://itc.ua/page/" + str(t)
                        + "/?s&after=" + after
                        + "&before=" + before
                        )
    else:
        search_query = ("https://itc.ua/?s&after=" + after
                        + "&before=" + before
                        )
    return search_query
def calculate_signature_score(signature):
    """
    Calculate the score of each signature as the sum of its weights,
    skipping entries whose weight slot is the "*" marker.

    Input:
        signature: iterable of signature dicts.  Each dict maps a key
        string to a collection of records; each record is indexable and
        record[1] holds either a numeric weight or a marker string
        ("*" = skip this record, "!" = use record[2] as the weight).
        NOTE(review): the original docstring showed values like
        ['val1', 40] (a flat list), but the val[1] indexing below only
        works when each value is a sequence of records such as
        [('val1', 40)] -- confirm the actual shape against the callers.
    Output:
        A list with one summed score per signature.
    Example (record-sequence shape assumed):
        [{'k1': [('val1', 40), ('val2', 90)]},   # 40 + 90
         {'k2': [('val3', '*', 20), ('val4', 80)]},  # 80
         {'k3': [('val5', 40)]}]                 # 40
        returns [130, 80, 40].
    """
    score_arr = []
    for sig in signature:
        score = 0
        for key, value in sig.items():
            for val in value:
                # Plain numeric weights are added directly; "*" records are
                # ignored; "!" records contribute val[2] instead of val[1].
                if (val[1] != "!") and (val[1] != "*"):
                    score += val[1]
                elif val[1] == "!":
                    score += val[2]
        score_arr.append(score)
    return score_arr
def _remove_duplicates(values):
""" Removes duplicate values, useful in getting a concise list. (Requires re)
"""
output = []
seen = set()
for value in values:
# If value has not been encountered yet,
# add it to both list and set.
if value not in seen:
output.append(value)
seen.add(value)
return output |
def generate_user_input_table(ui):
    """Generate user input table in a format that allows for copy-paste from
    LaTeX pdf to python editor"""
    # Every key except the parameter id participates, in sorted order.
    keys = sorted(k for k in ui if k != 'parameter_id')
    table_user_input = []
    for key in keys:
        value = ui[key]
        # String values are wrapped in quotes so the pasted text forms a
        # valid Python literal; everything else is stringified as-is.
        rendered = "'" + str(value) + "'" if isinstance(value, str) else str(value)
        table_user_input.append((key, rendered))
    return table_user_input
def convert_to_uni(text):
    """Convert bytes to text.

    :param text: a ``str`` (returned unchanged) or ``bytes`` (decoded as
        UTF-8, ignoring undecodable bytes)
    :return: the text as ``str``
    :raises TypeError: if ``text`` is neither ``str`` nor ``bytes``
    """
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode('utf-8', 'ignore')
    # TypeError is the idiomatic error for a bad argument type, and as a
    # subclass of Exception it keeps `except Exception` callers working.
    raise TypeError("The type %s does not convert!" % type(text))
def has_file_allowed_extension(filename, extensions):
    """ check if a file has an allowed extension.
    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions allowed (lowercase)
    Returns:
        bool: True if the file ends with one of the given extensions
    """
    # endswith accepts a tuple, so one call covers every allowed extension;
    # lower-casing the name makes the comparison case-insensitive.
    lowered = filename.lower()
    return lowered.endswith(extensions)
def count_target_words(tokens):
    """
    Count the total number of target tokens in a dataset.
    Parameters
    ----------
    tokens : dict
        Mapping containing a "target" entry: a list of sentences, each a
        list of tokens. Usually returned from ``data.loader.load_data``
    Returns
    -------
    count : int
        Total number of tokens across all target sentences
    """
    # Generator form avoids materialising an intermediate list of lengths.
    return sum(len(sentence) for sentence in tokens["target"])
def fibonacci_numbers_until_n_digits(n):
    """Return the list of Fibonacci numbers, stopping once the last one has
    at least n digits.

    :type n: The minimum amount of digits for the last Fibonacci number
    """
    previous, current = 0, 1
    result = []
    # Keep extending while the current number is still too short.
    while len(str(current)) < n:
        result.append(current)
        previous, current = current, previous + current
    # Include the first number that reaches n digits.
    result.append(current)
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.