content stringlengths 42 6.51k |
|---|
def parse_time(timestring):
    """Convert 'YYYY-MM-DDTHH:MM:SS.SS-HH:MM' to seconds since midnight.

    The date, any UTC offset (and optional DST flag) and fractional
    seconds are discarded; only the HH:MM:SS part after 'T' is used.
    """
    clock = timestring.split('T')[1]           # drop the date part
    clock = clock.split('+')[0].split('-')[0]  # drop any UTC offset
    h, m, s = clock.split(':')
    return int(h) * 3600 + int(m) * 60 + int(s.split('.')[0])
def format_cas(cas) -> str:
    """Format a CAS registry value as a 'NNNN-NN-N'-style string.

    LCIA method sheets often store CAS numbers as plain numbers; this
    normalizes them, and maps None and the 'x'/'-' placeholders to ''.
    """
    if cas is None:
        return ""
    if cas in ("x", "-"):
        return ""
    if not isinstance(cas, (int, float)):
        return str(cas)
    digits = str(int(cas))
    if len(digits) <= 4:
        return digits
    # insert the dashes: body, two-digit middle, final check digit
    return digits[:-3] + "-" + digits[-3:-1] + "-" + digits[-1]
def in_left_halfspace2(a, b, p):
    """Return True if point p lies to the left of (or on) the line a->b,
    where "left" means standing at a and facing towards b."""
    cross = (b[0] - a[0]) * (p[1] - a[1]) - (b[1] - a[1]) * (p[0] - a[0])
    return cross <= 0
def unlerp(a, b, t):
    """Inverse linear interpolation: the fraction of the way t lies
    between a and b (requires a <= t <= b and a != b)."""
    assert a <= t <= b and a != b
    fraction = (t - a) / (b - a)
    return fraction
def view_event(user, event):
    """
    Check whether a user may view a specified event.
    :param User user:
    :param Event event:
    :return: bool (None when event is None)
    """
    if event is None:
        return None
    may_see_hidden = user.has_perm("booking.view_hidden_events")
    return may_see_hidden or event.visible is True
def minargmin(sequence):
    """Return (min_value, first_index_of_min) for the input sequence;
    (None, None) when the sequence is empty."""
    best_value, best_index = None, None
    for index, value in enumerate(sequence):
        if best_value is None or value < best_value:
            best_value, best_index = value, index
    return (best_value, best_index)
def subtractProductAndSum(n):
    """Return the product of the digits of n minus the sum of its digits.

    :type n: int
    :rtype: int
    """
    digits = [int(ch) for ch in str(n)]
    product = 1
    for d in digits:
        product *= d
    return product - sum(digits)
def decode_seat_string(seat):
    """
    Decode a 10-character boarding-pass seat string into its seat ID.

    The string is binary with B/R meaning 1 and F/L meaning 0.
    Test data:
      BFFFBBFRRR: row 70, column 7, seat ID 567.
      FFFBBBFRRR: row 14, column 7, seat ID 119.
      BBFFBBFRLL: row 102, column 4, seat ID 820.
    """
    # single-pass character substitution instead of chained replaces
    binary = seat.translate(str.maketrans("BFRL", "1010"))
    return int(binary, 2)
def get_period_overlap(request_start, request_end, avail_start, avail_end):
    """Find the overlap between the requested range and the available period.

    Parameters
    ----------
    request_start: datetime-like
        Start of the period of requested data.
    request_end: datetime-like
        End of the period of requested data.
    avail_start: datetime-like
        Start of the available data.
    avail_end: datetime-like
        End of available data.

    Returns
    -------
    start, end: list of datetime or None
        Start and end of the overlapping period, or None if no overlap.
    """
    # no overlap when one range ends before the other begins
    if request_start >= avail_end or request_end <= avail_start:
        return None
    return [max(request_start, avail_start), min(request_end, avail_end)]
def dedupe(items):
    """Remove all duplicates from |items|, keeping first-seen order."""
    # dict keys are unique and insertion-ordered
    return list(dict.fromkeys(items))
def moving_average(ts, window):
    """Return the moving average of time series `ts`.

    `ts` is ordered oldest (index 0) to most recent (index -1); at each
    position the average covers at most `window` trailing points (fewer
    near the start).
    """
    averages = []
    for i in range(len(ts)):
        start = max(0, i - window + 1)
        chunk = ts[start:i + 1]
        averages.append(sum(chunk) / len(chunk))
    return averages
def jaccard_distance(label1, label2):
    """Jaccard distance between two sets: 1 - |A ∩ B| / |A ∪ B|.

    Returns 0.0 for two empty sets (they are identical) instead of
    raising ZeroDivisionError as the previous version did.

    :param label1: first set
    :param label2: second set
    :return: distance in [0.0, 1.0]
    """
    union_len = len(label1 | label2)
    if union_len == 0:
        return 0.0
    intersection_len = len(label1 & label2)
    return (union_len - intersection_len) / float(union_len)
def LimiterG3forHYU(dU1, dU2):
    """Harten-Yee Upwind TVD limiter (Equation 6-134, CFD Vol. 1, Hoffmann).

    Feeds the modified flux limiter function of Equation 6-131.
    """
    eps = 1.e-7  # smoothing constant; use between 1.e-7 and 1.e-5
    # Equation 6-134
    numerator = dU2*(dU1*dU1 + eps) + dU1*(dU2*dU2 + eps)
    denominator = dU1*dU1 + dU2*dU2 + 2.0*eps
    return numerator/denominator
def to_gigabytes(number):
    """Convert a number from KiB to GiB (divide by 1024**2).

    Used mainly for the gauge; everything else uses the dynamic
    `unit` function.
    """
    kib_per_gib = 1024 ** 2
    return number / kib_per_gib
def process_param(param, offset):
    """Process one parameter string from `get_function_parameter_names`.

    Nested constructs and their outer delimiters in `param` have already
    been blanked out.  Returns [] for defaulted or empty params, else
    [name, adjusted_offset].
    """
    # Rope regards args with default values as assignments, so skip them.
    if "=" in param:
        return []
    # Strip off any type annotation (variable name comes first in MyPy).
    colon = param.find(":")
    if colon >= 0:
        param = param[:colon]
    # Advance the offset past leading whitespace, then trim the name.
    leading = len(param) - len(param.lstrip())
    offset += leading
    param = param.strip()
    return [param, offset] if param else []
def clamp(n, smallest, largest):
    """Clamp `n` into the inclusive range [smallest, largest].

    Args:
        n (int): integer to inspect
        smallest (int): lower bound of the allowed range
        largest (int): upper bound of the allowed range
    Returns:
        int: the "clamped" input
    """
    if n < smallest:
        return smallest
    if n > largest:
        return largest
    return n
def format_date(month, year):
    """Build the 'year/month' key string."""
    return f"{year}/{month}"
def _find_model_type(file_name):
"""Finds model type from the model filename."""
possible_keys = ('ecmwf', 'gdas')
for key in possible_keys:
if key in file_name:
return key
return '' |
def _get_name_from_query(query):
"""'select * from foo where ...' => foo
"""
query_list = query.lower().split()
idx = query_list.index('from')
return query_list[idx + 1] |
def compare_int(entry, num):
    """Return True if the integer matches the line entry, False otherwise."""
    return int(entry) == num
def _clean_hosts(host_list):
"""
Clean host strings to ensure no trailing whitespace, etc.
"""
return [host.strip() for host in host_list] |
def factorial(n):
    """
    Return n! for a non-negative integer n.

    Iterative: O(n) time, O(1) space.  (The previous recursive version
    used O(n) stack and could hit the recursion limit for large n; its
    assert message also wrongly said "positive" while accepting 0.)

    Raises AssertionError if n is negative.
    """
    assert n >= 0, f"Input value {n} is not a non-negative integer."
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def pack_vlq(n):
    """Convert a non-negative integer to a VLQ (variable-length quantity).

    Big-endian, 7 payload bits per byte; the high bit is set on every
    byte except the last.  (The previous version shadowed the builtin
    `round` with its loop counter; fixed here.)

    Returns a bytearray.
    """
    value = int(n)
    if value == 0:
        return bytearray([0x00])
    result = bytearray()
    is_last_byte = True  # the first byte produced is the final (lowest) one
    while value > 0:
        byte = value & 0x7F
        if not is_last_byte:
            byte |= 0x80  # continuation bit on all but the final byte
        result.insert(0, byte)
        value >>= 7
        is_last_byte = False
    return result
def _flatten_nested_dict(d, sep):
""" returns a list of pairs of the form (flattened-key, value) """
result = []
for k, v in d.items():
if not isinstance(k, str):
result.append((k, v))
elif isinstance(v, dict) and all([isinstance(_k, str) for _k in v.keys()]):
v = _flatten_nested_dict(v, sep)
for _k, _v in v:
key = k + sep + _k
result.append((key, _v))
else:
result.append((k, v))
return result |
def convert_output_value(value):
    """Convert the output value to something that FTrack understands."""
    if value is None:
        return 'none'
    if isinstance(value, (float, int)):
        return value
    # everything else becomes a quoted string with inner quotes escaped
    escaped = str(value).replace('"', r'\"')
    return '"' + escaped + '"'
def parse_endpoint(endpoint):
    """
    Parse endpoint into a ``(blueprint, endpoint)`` tuple.
    blueprint is :const:`None` when the endpoint has no dot-prefix.

    (Previously the dotted branch returned a list while the other
    returned a tuple; both now return tuples.)
    """
    if '.' in endpoint:
        blueprint, _, name = endpoint.partition('.')
        return blueprint, name
    return None, endpoint
def _check_degree_progress(req, courses):
"""
Checks whether the correct number of courses have been completed by the
end of semester number 'by_semester' (1-8)
"""
by_semester = req["completed_by_semester"]
num_courses = 0
if by_semester == None or by_semester > len(courses):
by_semester = len(courses)
for i in range(by_semester):
num_courses += len(courses[i])
return num_courses |
def calc_t_s(p_in, p_atm, p_s, v, L):
    """
    Calculate the time at which pressure reaches the saturation
    pressure p_s, given transit length L and velocity v.
    """
    pressure_fraction = (p_in - p_s)/(p_in - p_atm)
    return pressure_fraction*L/v
def weighted_score(sim, size, ovl):
    """
    Combine the three similarity measures into a single 0-100 score.

    The score is the unweighted mean ``(sim + size + ovl) / 3`` scaled
    to 0-100.  NOTE(review): the old docstring advertised a 2x weight on
    `sim`, but the implementation has always used equal weights; the
    docstring is corrected here and the dead commented-out rescaling
    removed, with behavior unchanged.

    :param sim: sequence similarity
    :type sim: float
    :param size: size similarity
    :type size: float
    :param ovl: reciprocal overlap
    :type ovl: float
    :return: the score
    :rtype: float
    """
    return (sim + size + ovl) / 3.0 * 100
def convert_metric(metric):
    """
    Map assorted metric aliases onto the fixed names used in scripts.

    Args:
        metric (str): input metric
    Returns:
        str: canonical metric name (unchanged when not an alias)
    """
    aliases = {
        "prc_auc": "pr_auc",
        "prc-auc": "pr_auc",
        "auc": "roc_auc",
        "roc-auc": "roc_auc",
    }
    return aliases.get(metric, metric)
def split_parent(name):
    """
    Split a hostgroup name into (leaf name, parent path):
    >>> split_parent("a/b/c")
    ('c', 'a/b')

    Names without a '/' get a None parent.
    """
    if '/' not in name:
        return name, None
    parent, _, leaf = name.rpartition('/')
    return leaf, parent
def comp_exact_match(refs, hyps):
    """
    @param refs (list[list[str]]): list of reference sentences
    @param hyps (list[list[str]]): list of predicted sentences (hypotheses)
    @return em (float): average exact match value; 0.0 for empty input
        (the previous version raised ZeroDivisionError on empty lists)
    """
    assert len(refs) == len(hyps)
    if not refs:
        return 0.0
    matches = sum(1 for ref, hyp in zip(refs, hyps) if ref == hyp)
    return matches / len(refs)
def levenshtein(s, t):
    """
    Compute the Levenshtein edit distance between two strings.

    Iterative dynamic programming: O(len(s)*len(t)) time and O(len(t))
    space.  (The previous naive triple recursion was exponential in the
    input length and unusable beyond very short strings.)

    :param s: first string of text
    :param t: second string of text
    :return: a distance measure, not normalized
    """
    if s == "":
        return len(t)
    if t == "":
        return len(s)
    prev_row = list(range(len(t) + 1))
    for i, s_char in enumerate(s, start=1):
        row = [i]
        for j, t_char in enumerate(t, start=1):
            cost = 0 if s_char == t_char else 1
            row.append(min(prev_row[j] + 1,          # deletion
                           row[j - 1] + 1,           # insertion
                           prev_row[j - 1] + cost))  # substitution
        prev_row = row
    return prev_row[-1]
def remove_path_segments(segments, remove):
    """
    Remove the path segments of <remove> from the end of the path
    segments <segments>.

    Examples:
      # ('/a/b/c', 'b/c') -> '/a/'
      remove_path_segments(['','a','b','c'], ['b','c']) == ['','a','']
      # ('/a/b/c', '/b/c') -> '/a'
      remove_path_segments(['','a','b','c'], ['','b','c']) == ['','a']

    Returns: the list of remaining path segments; when nothing matched,
    an equal copy of <segments>.  Unlike the previous version, the
    caller's input lists are never mutated.
    """
    # [''] means '/', which is canonically represented as ['', ''].
    segs = ['', ''] if segments == [''] else list(segments)
    rem = ['', ''] if remove == [''] else list(remove)

    if rem == segs:
        return []
    if len(rem) > len(segs):
        return segs

    tail = list(rem)
    # an absolute <remove> matches from its first real segment
    if len(rem) > 1 and rem[0] == '':
        tail.pop(0)
    if tail and tail == segs[-len(tail):]:
        remaining = segs[:len(segs) - len(tail)]
        # a relative <remove> leaves a trailing slash behind
        if rem[0] != '' and remaining:
            remaining.append('')
        return remaining
    return segs
def process_comment(comment, colors):
    """
    Scan a text comment for any mention of the given colors (matched as
    whole, space-delimited words).  Returns a list of metadata dicts, one
    per color found; empty list when no color appears.

    (Refactored: the two identical dict literals are merged, and the
    body is lower-cased once instead of once per color.)
    """
    body_lower = comment.body.lower()
    color_info = []
    for color in colors:
        if ' ' + color + ' ' in body_lower:
            color_info.append({
                'comment_id': comment.id,
                'created_utc': comment.created_utc,
                # historical quirk: 'purple' is recorded as 'violet'
                'color': 'violet' if color == 'purple' else color,
                'subreddit_display_name': comment.subreddit.display_name,
                'body': comment.body,
                'score': comment.score,
            })
    return color_info
def parse_star_count(stars_str):
    """Parse strings like '40.3k' and return the number of stars as an int."""
    text = stars_str.strip()
    if text[-1] == 'k':
        return int(float(text[:-1]) * 1000)
    return int(text)
def percent(d):
    """Format a decimal (ideally between 0 and 1) as a whole-number
    percent string.

    Note the '%' sign itself is not appended: 0.25 -> "25".

    Args:
        d (float): decimal number to convert to percentage.
    Returns:
        A human-readable formatted percentage string.
    """
    return f"{d * 100:.0f}"
def unroll_extras(requirement):
    """Unroll extra requirements
    >>> unroll_extras('zope.foo')
    ['zope.foo']
    >>> unroll_extras('zope.foo[docs]')
    ['zope.foo', 'zope.foo[docs]']
    >>> unroll_extras('zope.foo[docs,tests]')
    ['zope.foo', 'zope.foo[docs]', 'zope.foo[tests]']
    >>> unroll_extras('zope.foo [ docs , tests ] ')
    ['zope.foo', 'zope.foo[docs]', 'zope.foo[tests]']
    """
    base, _, bracket_rest = requirement.partition('[')
    base = base.strip()
    if bracket_rest:
        extra_names = bracket_rest.partition(']')[0].strip().split(',')
    else:
        extra_names = []
    return [base] + [base + '[' + name.strip() + ']' for name in extra_names]
def _shape_is_broadcastable(shape_1, shape_2):
"""Check if two :mod:`numpy.array' shapes are broadcastable."""
return all((m == n) or (m == 1) or (n == 1)
for (m, n) in zip(shape_1[::-1], shape_2[::-1])) |
def check_shape(actual, expected) -> bool:
    """Verify that the actual shape tuple matches the expected description.

    Each expected element describes one dimension: an int (exact size),
    a str (any size), or a (str, int) pair whose int, when present, must
    match exactly.

    Args:
        actual (tuple[int]): actual shape
        expected (tuple[union[int, str, tuple[int, str]]]): expected shape
    Returns:
        bool: True if the two shapes match, false otherwise
    """
    if len(actual) != len(expected):
        return False
    for act, exp in zip(actual, expected):
        if isinstance(exp, int) and act != exp:
            return False
        if (isinstance(exp, (tuple, list)) and isinstance(exp[1], int)
                and act != exp[1]):
            return False
    return True
def undo_sentence_piece(seq):
    """Strip SentencePiece's U+2581 word markers so BLEU is comparable."""
    marker = "\u2581"
    return seq.replace(marker, "")
def filter_function_ortho(x):
    """Keep words whose orthography contains none of a small punctuation
    set and is at least 3 characters long."""
    ortho = x['orthography']
    forbidden = set(ortho) & {' ', "'", '.', '/', ',', '-'}
    return (not forbidden) and len(ortho) >= 3
def sieve_of_eratosthenes(n):
    """
    Return all primes <= n via the Sieve of Eratosthenes.

    Returns an empty list for n < 2; the previous version raised
    IndexError for n < 1.
    """
    if n < 2:
        return []
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            # strike out every multiple of the prime i, starting at i*i
            for j in range(i * i, n + 1, i):
                is_prime[j] = False
    return [i for i in range(2, n + 1) if is_prime[i]]
def check_sec_pass(fir_password, sec_password, error_message=''):
    """
    Check n4: password confirmation.

    Returns an error string when the two passwords differ, otherwise the
    supplied `error_message` (empty by default).  Empty strings need no
    special casing: the first-password check rejects them, and an empty
    second entry can never match a non-empty first one.
    """
    if fir_password == sec_password:
        return error_message
    return "Your passwords don't match"
def name_standard(name):
    """Return the standard form of a word: first letter upper-cased,
    the rest lower-cased.

    :param name: the name that should be standardized
    :return: the standard form of the word; an empty string is returned
        unchanged (the previous version raised IndexError on "")
    """
    if not name:
        return name
    return name[0].upper() + name[1:].lower()
def mb_to_bytes(size):
    """Convert a size in MB to the corresponding number of bytes.

    Arguments:
        size (int/float): size in megabytes
    Returns:
        (int/float): size in bytes
    """
    bytes_per_mb = 1024 * 1024
    return size * bytes_per_mb
def get_num_groups(filters):
    """
    Helper to select the number of groups for GroupNorm.

    Uses 16 (as in the paper) when there are enough channels, otherwise
    filters // 4 so groups stay reasonably sized; never returns less
    than 1, since GroupNorm requires a positive group count (the
    previous version returned 0 for filters < 4).
    """
    return max(1, min(filters // 4, 16))
def _find_continuous_segment(numbers):
"""Find the longest continuous segment in a list of numbers.
For example:
input:
1, 2, 3, 4, 5, 6
22,70,23,24,25,26
output:
number_list_sorted:
1, 3, 4, 5, 6, 2
22,23,24,25,26,70
segments:
0, 1, 5, 6
which means there are 3 segment with start and end indices
on the number_list_sorted to be: (0, 1), (1, 5), (5, 6)
Args:
numbers: List of pairs of number
Returns:
segment: a list containing the indices of the segment start point
and end point.
number_list: sorted by the first element version of the input.
"""
segments = [0]
number_list_sorted = sorted(numbers, key=lambda elem: elem[0])
for i in range(len(number_list_sorted) - 1):
# continuous segment is a segment which the number in pair is 1 unit
# more than the previous pair
if (number_list_sorted[i + 1][0] - number_list_sorted[i][0] != 1
or number_list_sorted[i + 1][1] - number_list_sorted[i][1] != 1):
segments.append(i + 1)
segments.append(len(number_list_sorted))
return segments, number_list_sorted |
def cleanStr(text):
    """
    Normalize whitespace and delimiters in a string: newlines become
    spaces, the delimiters '(', ')', ',', '[' and ']' get a canonical
    surrounding space, the result is wrapped in '(...)' and runs of
    whitespace are collapsed to single spaces.

    (The parameter was renamed: it previously shadowed the builtin
    `str`.)

    :param text: string to clean
    :return: the cleaned string
    """
    text = text.replace("\n", ' ')
    # rewrite each bare delimiter into its canonically-spaced form
    for spaced in ('( ', ' )', ', ', '[ ', ' ]'):
        bare = spaced.replace(' ', '')
        text = text.replace(bare, spaced)
    text = ' (' + text + ') '
    return ' '.join(text.split())
def getName(e):
    """Returns the name of a class or class instance"""
    name = getattr(e, "__name__", None)
    if name is None:
        # instances have no __name__; fall back to their class's name
        return e.__class__.__name__
    return name
def get_account_id_from_arn(trail_arn):
    """Gets the account ID portion (the fifth colon-separated field) of an ARN."""
    arn_fields = trail_arn.split(':')
    return arn_fields[4]
def build_dictionary(words):
    """
    Assign a unique integer index to each word.

    (The old docstring described four return values that were never
    produced; only the word->index dict is returned.  Also generalized:
    any iterable of words now works — previously a dict was required
    despite the docstring promising list support.)

    :param words: iterable of word/token strings (a dict works too; its
        keys are used in iteration order)
    :return: dict mapping word -> index
    """
    print("Create dictionary of statement indices ...")
    return {word: index for index, word in enumerate(words)}
def find_public(space):
    """
    Determine all public names in a namespace dict.

    Honors ``__all__`` when present; otherwise returns every key that
    does not start with an underscore.

    :Parameters:
      `space` : ``dict``
        Name space to inspect
    :Return: List of public names
    :Rtype: ``list``
    """
    try:
        return list(space['__all__'])
    except KeyError:
        return [name for name in space if not name.startswith('_')]
def pow2(x: float) -> float:
    """Return x squared."""
    return x ** 2
def _one_weight(n_one, n_zero, a, alpha):
"""Get the weight of the ones."""
weight = a * (n_one / n_zero)**(-alpha)
return weight |
def count_words(texts):
    """
    Count the whitespace-separated words in the given texts.

    @param texts - a single string or a list of strings
    @return total word count
    """
    # isinstance (rather than `type(...) is list`) also accepts list subclasses
    if isinstance(texts, list):
        return sum(len(t.split()) for t in texts)
    return len(texts.split())
def gcd(a, b=None):
    """Return the greatest common divisor using Euclid's Algorithm.

    a - either an integer or a list of integers
    b - an integer when 'a' is one; None when 'a' is a list
    """
    if b is not None:
        # plain Euclid on a pair of integers
        while b:
            a, b = b, a % b
        return a
    # list form: fold the pairwise gcd across the elements
    if not a:
        return 1
    result = a[0]
    for value in a[1:]:
        result = gcd(result, value)
    return result
def stairs(n: int) -> int:
    """Number of ways to climb n stairs with steps of 1 or 2 (Fibonacci).

    >>> stairs(1)
    1
    >>> stairs(2)
    2
    >>> stairs(3)
    3
    >>> stairs(4)
    5
    """
    assert n >= 1
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def complex_impedance(z, XR):
    """
    Return the complex impedance built from the magnitude z and the
    X/R ratio.
    """
    magnitude = float(abs(z))
    ratio = float(abs(XR))
    real = (magnitude**2/(1+ratio**2))**0.5
    try:
        imag = (magnitude**2/(1+1/ratio**2))**0.5
    except ZeroDivisionError:
        # a zero X/R ratio means a purely resistive impedance
        imag = 0.0
    return complex(real, imag)
def adda_hyperparams(lr_target=1e-5, lr_discriminator=1e-4, wd=5e-5, scheduler=False):
    """
    Build the hyperparameter dictionary for the ADDA algorithm.

    The defaults are the best values found through a hyperparameter
    search.

    Arguments:
    ----------
    lr_target: float
        Learning rate for the target encoder.
    lr_discriminator: float
        Learning rate for the discriminator.
    wd: float
        Weight decay for the optimizer.
    scheduler: bool
        Use a OneCycleLR learning rate scheduler when True.

    Returns:
    --------
    hyperparams: dict
        Dictionary that can be passed to ADDA's `hyperparams` argument.
    """
    return {
        'learning_rate_target': lr_target,
        'learning_rate_discriminator': lr_discriminator,
        'weight_decay': wd,
        'cyclic_scheduler': scheduler,
    }
def full_overlap(aIntervalA, aIntervalB):
    """Return True if interval A falls completely within interval B,
    otherwise False.

    Both arguments are 3-column intervals (id, start, end); containment
    requires identical first columns.  Always returns a bool — the
    previous version fell through to an implicit None on several paths.

    Raises Exception when either argument is not a 3-column interval.
    """
    if not len(aIntervalA) == len(aIntervalB) == 3:
        raise Exception("Regions could not be overlapped")
    return (aIntervalA[0] == aIntervalB[0]
            and aIntervalA[1] >= aIntervalB[1]
            and aIntervalA[2] <= aIntervalB[2])
def build_markdown_header(title, date, author, categories, tags, slug, status=None,
                          attachments=None):
    """Build a markdown metadata header from the given fields; falsy
    fields (other than title) are omitted."""
    lines = ['Title: %s' % title]
    if date:
        lines.append('Date: %s' % date)
    if author:
        lines.append('Author: %s' % author)
    if categories:
        lines.append('Category: %s' % ', '.join(categories))
    if tags:
        lines.append('Tags: %s' % ', '.join(tags))
    if slug:
        lines.append('Slug: %s' % slug)
    if status:
        lines.append('Status: %s' % status)
    if attachments:
        lines.append('Attachments: %s' % ', '.join(attachments))
    return '\n'.join(lines) + '\n\n'
def whitelist_keys(data, keys):
    """
    Keep only the specified keys in each dictionary of a list.

    :param data: A list of dictionaries.
    :type data: list
    :param keys: a list of keys to keep in each dictionary.
    :type keys: list
    :return: The whitelisted list.
    :rtype list
    """
    # no whitelisting when no fields are given or any item is not a dict
    if not keys or any(not isinstance(item, dict) for item in data):
        return data
    return [{k: item[k] for k in keys if k in item} for item in data]
def other_regional_data_file_data(other_base_regional_data_file_data, data_file):
    """Return data file creation data for the other region.

    The base data is merged on top, so any 'file' key it carries wins
    over the `data_file` argument (matching dict-unpacking order).
    """
    payload = {"file": data_file}
    payload.update(other_base_regional_data_file_data)
    return payload
def linfdist(a, b):
    r"""Return the :math:`L_\infty` distance between the two seqs.

    This is the *maximum* of ``abs(i-j)`` over paired elements — the old
    docstring incorrectly said "sum" (which would be the L1 distance).
    The docstring is also now a raw string, so ``\i`` is no longer an
    invalid escape sequence.
    """
    return max(abs(i - j) for i, j in zip(a, b))
def handle_recon_rule_params(inbound: dict) -> dict:
    """Handle the payload formatting for a single rule object."""
    recognized_keys = ("filter", "id", "name", "permissions", "priority", "topic")
    outbound = {}
    for key in recognized_keys:
        value = inbound.get(key)
        if value:  # falsy values are dropped, matching the original per-key checks
            outbound[key] = value
    return outbound
def knapsack(val, wt, wt_cap):
    """Fractional knapsack via the greedy max-value-per-weight rule.

    Time complexity: O(n*logn), where n is the number of items.
    Space complexity: O(n).
    """
    # Sort by value density descending; ties fall back to value then
    # weight, exactly as tuple comparison dictates.
    items = sorted(((v / w, v, w) for v, w in zip(val, wt)), reverse=True)
    total_value = 0
    used_capacity = 0
    for density, value, weight in items:
        if used_capacity + weight <= wt_cap:
            used_capacity += weight
            total_value += value
        else:
            # take only the fraction of this item that still fits, then stop
            total_value += density * (wt_cap - used_capacity)
            break
    return total_value
def compute_block_key(path, index, length=2):
    """
    Compute a block key from a file path and a block index.

    Args:
        path(str): Path to the file the block belongs to
        index(int): index of the block
        length(int, optional): zero-fill width of the index part (Defaults to 2)
    Returns:
        str: Block key of the form '<path>-<zero-filled index>'
    """
    suffix = str(index).zfill(length)
    return path + "-" + suffix
def get_valid_options(processors):
    """
    Return the unique valid options gathered from a list of processors,
    preserving first-seen order.

    Processors without a `valid_options` attribute are skipped.  Unlike
    the previous version, duplicates appearing *within* a single
    processor's valid_options are also deduplicated (the old membership
    test ran before the batch extend, letting them through).
    """
    collected = []
    for processor in processors:
        for option in getattr(processor, 'valid_options', ()):
            if option not in collected:
                collected.append(option)
    return collected
def _CalcLodSizes(size, bricksize):
"""
Compute the size of the file in bricks, for all LOD levels
and all 3 dimensions. Indirectly also compute the number
of LOD levels, since by definition the last LOD level
is the one that has size (1, 1, 1) i.e. the entire cube
is decimated enought to fit inside a single brick.
"""
if min(bricksize) < 1: return [(0, 0, 0)]
size = ((size[0] + bricksize[0] - 1) // bricksize[0],
(size[1] + bricksize[1] - 1) // bricksize[1],
(size[2] + bricksize[2] - 1) // bricksize[2])
result = [ size ]
while max(result[-1]) > 1:
size = ((size[0]+1)//2, (size[1]+1)//2, (size[2]+1)//2)
result.append(size)
#print("## CalcLodSize", result)
return tuple(result) |
def clean_chars(value, cleanchars):
    """Remove each character in `cleanchars` from `value`.

    :param value: String to be cleaned
    :param cleanchars: Characters to remove from value
    :return value: Cleaned string
    """
    cleaned = value
    for unwanted in cleanchars:
        cleaned = cleaned.replace(unwanted, '')
    return cleaned
def decode_predictions(threshed, pred):
    """
    Convert a thresholded prediction and its raw score into a label string.

    threshed: thresholded class; 0 -> "Malignant", 1 -> "Non-Malignant".
        Any other value falls through and implicitly returns None.
    pred: raw model score; judging by the two confidence formulas it is
        presumably the probability of the non-malignant class — TODO confirm.

    NOTE(review): the malignant branch rounds to 1 decimal place while the
    non-malignant branch rounds to a whole number — possibly an oversight,
    but the output strings are behavior-sensitive, so confirm before
    changing either format.
    """
    if int(threshed) == 0:
        return f"Malignant; Confidence: {round((1-float(pred))*100, 1)}%"
    elif int(threshed) == 1:
        return f"Non-Malignant; Confidence: {round(float(pred)*100)}%"
def normalize_variable_name(s):
    """Turn a free-form name into a 'normal' variable name by replacing
    spaces with underscores."""
    return s.replace(' ', '_')
def linklist(title, href_fmt, args):
    """Generate an unordered HTML list of links.

    title: heading text for the <h2> line.
    href_fmt: str.format template for the href; its placeholders are
        filled positionally from each entry of `args`.
    args: iterable of lists; each entry's elements fill `href_fmt`, and
        the following positional slot — the entry's last element when the
        placeholder counts line up — becomes the link text.
    """
    # Bake the <li> wrapper around href_fmt, leaving one '{}' open for
    # the link text.
    href_fmt = '<li><a href="{}">{}</a>'.format(href_fmt, '{}')
    # Each (sorted) entry is formatted with its own elements plus a
    # repeat of its last element appended for the link-text slot.
    entries = [href_fmt.format(*(e + [e[-1]])) for e in sorted(args)]
    fmt = '<h2>{} ({} found)</h2><ul>{}</ul>'
    return fmt.format(title, len(entries), ''.join(entries))
def differencing_by_DD(aflx, bflx):
    """Unweighted diamond-difference scheme: extrapolate the opposite
    cell-edge flux from the cell-averaged flux `aflx` and the known
    cell-edge flux `bflx`."""
    doubled_average = 2 * aflx
    return doubled_average - bflx
def dsn_to_url(engine, dsn):
    """
    Convert a libpq-style DSN into a SQLAlchemy-style database URL.

    pytest_postgresql exposes the connection only in DSN form, e.g.
    'dbname=tests user=postgres host=127.0.0.1 port=41663'
    """
    params = dict(field.split('=') for field in dsn.split())
    url_template = '{engine}://{user}@{host}:{port}/{dbname}'
    return url_template.format(engine=engine, **params)
def filter_seq2seq_output(string_pred, eos_id=-1):
    """Truncate the prediction at the first eos occurrence (exclusive).

    Arguments
    ---------
    string_pred : list
        The output strings/ints predicted by the seq2seq system.
    eos_id : int, string
        The id of the eos.

    Returns
    ------
    list
        The output predicted by the seq2seq model, up to the first eos.

    Example
    -------
    >>> string_pred = ['a','b','c','d','eos','e']
    >>> string_out = filter_seq2seq_output(string_pred, eos_id='eos')
    >>> string_out
    ['a', 'b', 'c', 'd']
    """
    if not isinstance(string_pred, list):
        raise ValueError("The input must be a list.")
    try:
        eos_index = string_pred.index(eos_id)
    except ValueError:
        # no eos present: keep the whole prediction
        eos_index = len(string_pred)
    return string_pred[:eos_index]
def coerce_row_to_dict(schema, row):
    """
    >>> from datashape import dshape
    >>> schema = dshape('{x: int, y: int}')
    >>> coerce_row_to_dict(schema, (1, 2))  # doctest: +SKIP
    {'x': 1, 'y': 2}

    Idempotent: a dict row is passed through unchanged.
    >>> coerce_row_to_dict(schema, {'x': 1, 'y': 2})  # doctest: +SKIP
    {'x': 1, 'y': 2}
    """
    if isinstance(row, dict):
        return row
    field_names = schema[0].names
    return {name: value for name, value in zip(field_names, row)}
def find_next_multi_line_comment_start(lines, line_index):
    """Return the index of the next line that opens a multiline /* comment
    not closed on the same line; len(lines) when there is none."""
    for index in range(line_index, len(lines)):
        stripped = lines[index].strip()
        # only count markers whose comment continues past this line
        if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
            return index
    return len(lines)
def monthly_soil_heat_flux(t_month_prev, t_month_next):
    """
    Estimate monthly soil heat flux (Gmonth) from the mean air temperature
    of the previous and next month, assuming a grass crop.

    Based on equation 43 in Allen et al (1998).  If the air temperature of
    the next month is not known use ``monthly_soil_heat_flux2()`` instead.
    The resulting heat flux can be converted to equivalent evaporation
    [mm day-1] using ``energy2evap()``.

    :param t_month_prev: Mean air temperature of the previous month
        [deg Celsius]
    :param t_month_next: Mean air temperature of the next month
        [deg Celsius]
    :return: Monthly soil heat flux (Gmonth) [MJ m-2 day-1]
    :rtype: float
    """
    temperature_delta = t_month_next - t_month_prev
    return 0.07 * temperature_delta
def str_to_type(arg):
    """Convert a string to int, float, None, or leave it as a str.

    Arguments:
        arg: (string)
    Returns:
        int when the string is a valid integer literal, float for other
        numeric strings (including 'nan'/'inf', which crashed the
        previous version), None for the literal 'None', and str otherwise.
    """
    try:
        float(arg)
    except ValueError:
        if arg == 'None':
            return None
        return str(arg)
    # numeric: prefer int when the literal parses as one
    try:
        return int(arg)
    except ValueError:
        # covers decimals ('1.5'), exponents ('1e5') and specials ('nan')
        return float(arg)
def split_path(path, compare_string):
    """
    Returns type: (boolean, string).
    Searches for the last occurrence of `compare_string` in `path`.
    If found, returns (True, the part of `path` from that occurrence
    onward); e.g. path='6.0.12.1020/sdk/platform' and
    compare_string='/sdk/' gives (True, '/sdk/platform').
    If not found, returns (False, path).
    """
    index = path.rfind(compare_string)
    if index == -1:
        return (False, path)
    # Slice instead of str.replace: replace() removed *every* occurrence
    # of the prefix substring, corrupting paths with repeating prefixes
    # (e.g. path='abab', compare_string='ab' previously yielded '').
    return (True, path[index:])
def hr_sub(match):
    """re.sub callback for a matched horizontal rule: always emits a
    blank-line-padded <hr/> regardless of the match contents."""
    return '\n\n<hr/>\n\n'
def make_entry(bus_index, phase_index, P, Q, V_real, V_imag):
    """Make a state estimation entry to be sent to the GA.

    Parameters
    ----------
    bus_index : int
        Index of the bus that was sensed.
    phase_index : {1, 2, 3}
        Index of the phase shift used: `1` is 0 degrees, `2` is +120
        degrees, and `3` is -120 degrees.
    P : float
        Real power (P, in W).
    Q : float
        Reactive power (Q, in Var).
    V_real : float
        Real part of the voltage.
    V_imag : float
        Imaginary part of the voltage.

    Returns
    -------
    entry : dict
        Entry to be sent to the GA.
    """
    assert phase_index in {1, 2, 3}
    entry = dict(bus_index=bus_index, phase_index=phase_index, P=P, Q=Q)
    entry['v_bus_real'] = V_real
    entry['v_bus_imag'] = V_imag
    return entry
def human_bytes(n):
    """
    Render a byte count `n` in a more human-readable form (B, KB or MB).
    """
    if n < 1024:
        return '%i B' % n
    # ceil(n / 1024), kept as a float exactly as before ('%i' truncates)
    kib = (n - 1) / 1024 + 1
    if kib < 1024:
        return '%i KB' % kib
    return '%.2f MB' % (float(n) / (2 ** 20))
def basin_finder(loc):
    """
    Finds basin to load data from.
    Input
        loc: list of coordinates [lat, lon] or string refering to an area.
             NOTE(review): only the string form is actually implemented;
             a coordinate list just prints 'Not a string' and implicitly
             returns None.  An unknown area name raises KeyError.
    Output
        basin , string: name of the basin.
    """
    # every currently known area name maps to the Indus basin
    basin_dic = {'indus': 'indus', 'uib': 'indus', 'sutlej': 'indus',
                 'beas': 'indus', 'beas_sutlej': 'indus', 'khyber': 'indus',
                 'ngari': 'indus', 'gilgit': 'indus'}
    if type(loc) is str:
        basin = basin_dic[loc]
        return basin
    if type(loc) is not str:  # fix to search with coords
        print('Not a string')
def clamp(value, mn, mx):
    """Clamp value into the inclusive range [mn, mx]."""
    assert mn <= mx, (mn, mx)
    if value < mn:
        return mn
    if value > mx:
        return mx
    return value
def input_parser(inputdata):
    """Parse the raw puzzle input into point-pair tuples.

    Parameters
    ----------
    inputdata : list
        A list of strings, each being a line of the raw input file of
        the form 'x1,y1 -> x2,y2'.

    Returns
    ----------
    res : list
        One tuple per input line, each holding two (x, y) integer
        coordinate tuples.  (The old docstring wrongly said the raw
        input lines were returned.)
    max_x : int
        Max x coordinate value from all points.
    max_y : int
        Max y coordinate value from all points.
    """
    res = []
    max_x = 0
    max_y = 0
    for line in inputdata:
        pointpair = ()
        for strpoint in line.split('->'):
            strpoint = strpoint.strip()
            point = ()
            for indexcoord, strcoord in enumerate(strpoint.split(',')):
                valuecoord = int(strcoord)
                point += (valuecoord,)
                # running maxima: coordinate index 0 is x, the rest are y
                if(indexcoord==0 and max_x<valuecoord):
                    max_x = valuecoord
                elif(0<indexcoord and max_y<valuecoord):
                    max_y = valuecoord
            pointpair += (point,)
        res.append(pointpair)
    # return a list of points-pair (x1,y1) and (x2,y2)
    # each point is a pair x,y coordinates
    return res, max_x, max_y
def _encode_edge_attributes(order, par):
""" encode bond attributes as an integer (or "color")
scheme:
bond order <=> tens place
parity <=> ones place (None->0, False->1, True->2)
"""
id2 = order
id1 = 0 if par is None else 1 + int(par)
color = id2 * 10 + id1 * 1
return color |
def pf_contact(phi):
    """Phase-field contact function: (2 + 3*phi - phi**3) / 4."""
    # Multiplying by 0.25 is bit-exact equivalent to dividing by 4.
    return 0.25 * (2. + 3.*phi - phi**3)
def is_increasing(l):
    """Return True iff each element of ``l`` is strictly smaller than the next.

    Empty and single-element lists are trivially increasing.
    """
    return all(earlier < later for earlier, later in zip(l, l[1:]))
def find_triple(s):
    """Return a*b*c for a Pythagorean triple with a + b + c == s.

    Searches for positive integers a, b, c with a**2 + b**2 == c**2 and
    a + b + c == s (Project Euler problem 9 for s == 1000).

    The original body ignored ``s`` and hard-coded a perimeter of 1000;
    this version actually uses the parameter (backward-compatible:
    find_triple(1000) still returns 31875000).

    Parameters
    ----------
    s : int
        Required perimeter a + b + c.

    Returns
    -------
    int or None
        The product a*b*c of the first triple found, or None when no
        triple with that perimeter exists.
    """
    for c in range(1, s - 1):
        for b in range(1, s - c):
            a = s - b - c          # enforce the perimeter constraint
            if a <= 0:
                break              # further b only make a smaller
            if a * a + b * b == c * c:
                return a * b * c
    return None
def is_descriptor_schema(data):
    """Return ``True`` if passed object is DescriptorSchema and ``False`` otherwise.

    Duck-typed: only the class *name* is compared, so no import of the
    actual DescriptorSchema class is needed here.
    """
    cls_name = type(data).__name__
    return cls_name == 'DescriptorSchema'
def heaviside(x):
    """
    Heaviside step function: map x to a binary value in {0, 1}.

    Uses the right-continuous convention H(0) = 1. (The original
    implementation had no branch for x == 0 and silently returned None
    there, contradicting its own "binary step" contract.)

    Parameters
    ----------
    x : float
        Argument, to be corrected using the step function.

    Returns
    -------
    int
        0 if x < 0, else 1.
    """
    return 0 if x < 0 else 1
def parsed_bowtie_args(bowtie2_args):
    """ Parses Bowtie2 args and returns relevant information about reporting.

        bowtie2_args: string of Bowtie 2 arguments to parse. Parsed arguments
            include -k and -a.

        Return value: tuple (-1 iff all alignments are to be reported; else the
                                number of alignments to be reported,
                             --seed parameter,
                             --non-deterministic parameter)
    """
    import argparse
    parser = argparse.ArgumentParser()
    # Default is to report the primary alignment only, treated as '-k 1'.
    # Bowtie2 itself does not guarantee the best alignment under -k 1, but
    # Rail does here since it has all alignments to work with.
    parser.add_argument('-k', type=int, required=False, default=1)
    parser.add_argument('-a', action='store_const', const=True,
                        default=False)
    parser.add_argument('--seed', type=int, required=False, default=0)
    parser.add_argument('--non-deterministic', action='store_const',
                        const=True, default=False)
    tokens = ('' if bowtie2_args is None else bowtie2_args).split(' ')
    parsed = parser.parse_known_args(tokens)[0]
    if '-a' in tokens and '-k' in tokens:
        # Both flags present: whichever appears later takes precedence.
        report_all = tokens.index('-a') > tokens.index('-k')
    else:
        report_all = parsed.a
    count = -1 if report_all else parsed.k
    return count, parsed.seed, parsed.non_deterministic
def ugly_number(n: int) -> int:
    """
    Return the n-th ugly number (positive integers whose only prime
    factors are 2, 3 and 5), with ugly_number(1) == 1.

    For n <= 1 the seed value 1 is returned.

    >>> ugly_number(100)
    1536
    >>> ugly_number(0)
    1
    >>> ugly_number(20)
    36
    """
    sequence = [1]
    factors = (2, 3, 5)
    # indices[k] points at the sequence entry whose product with
    # factors[k] is the next candidate for that factor.
    indices = [0, 0, 0]
    for _ in range(1, n):
        candidates = [sequence[indices[k]] * factors[k] for k in range(3)]
        smallest = min(candidates)
        sequence.append(smallest)
        # Advance every pointer that produced the chosen value so
        # duplicates (e.g. 6 = 2*3 = 3*2) are emitted only once.
        for k in range(3):
            if candidates[k] == smallest:
                indices[k] += 1
    return sequence[-1]
def get_getmodel_cmd(number_of_samples, gul_threshold, use_random_number_file, coverage_output, item_output, **kwargs):
    """
    Gets the getmodel ktools command

    :param number_of_samples: The number of samples to run
    :type number_of_samples: int
    :param gul_threshold: The GUL threshold to use
    :type gul_threshold: float
    :param use_random_number_file: flag to use the random number file
    :type use_random_number_file: bool
    :param coverage_output: The coverage output
    :type coverage_output: str
    :param item_output: The item output
    :type item_output: str
    :return: The generated getmodel command
    """
    pieces = [f'getmodel | gulcalc -S{number_of_samples} -L{gul_threshold}']
    if use_random_number_file:
        pieces.append('-r')
    # Outputs are appended only when explicitly provided (non-empty string).
    if coverage_output != '':
        pieces.append(f'-c {coverage_output}')
    if item_output != '':
        pieces.append(f'-i {item_output}')
    return ' '.join(pieces)
def to_id_pandoc(input_: str) -> str:
    """
    Derive a heading identifier the way pandoc's default algorithm does:

    * Remove all formatting, links, etc.
    * Remove all footnotes.
    * Remove all non-alphanumeric characters, except underscores,
      hyphens, and periods.
    * Replace all spaces and newlines with hyphens.
    * Convert all alphabetic characters to lowercase.
    * Remove everything up to the first letter (identifiers may not
      begin with a number or punctuation mark).
    * If nothing is left after this, use the identifier ``section``.
    """
    extra_allowed = '_-.'
    text = input_.lower()
    # Drop the prefix up to (but not including) the first letter.
    start = 0
    while start < len(text) and not text[start].isalpha():
        start += 1
    pieces = []
    pending_hyphen = False  # whitespace seen since the last kept char
    for ch in text[start:]:
        keep = ch in extra_allowed or ch.isalpha() or ch.isdigit()
        if keep:
            if pending_hyphen:
                pieces.append('-')
                pending_hyphen = False
            pieces.append(ch)
        elif ch.isspace():
            pending_hyphen = True
        # any other character is silently discarded
    return ''.join(pieces) or 'section'
def _get_overview_content(account, database_name, doc_count, elapsed_time):
"""
Generate overview content
"""
line = '=' * 80
result = [
"",
line,
"Overview",
line,
"",
"- Cloudant Account: {0}".format(account),
"- Cloudant Database: {0}".format(database_name),
"- Total Documents: {0}".format(doc_count),
"- Elapsed Time: {0}".format(elapsed_time),
""
]
return "\n".join(result) |
def div(n):
    """Divide 10 by ``n`` and return the (float) quotient.

    Raises ZeroDivisionError when ``n`` is 0.
    """
    return 10 / n
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.