content stringlengths 42 6.51k |
|---|
def get_circular(
        locus_text: str) -> bool:
    """Report whether the LOCUS header marks a circular sequence.

    Args:
        locus_text:
            The LOCUS (i.e. header section) of a genbank file.

    Returns:
        True when the first header line contains the word ``circular``
        (surrounded by spaces), False otherwise.
    """
    first_line = locus_text[:locus_text.find('\n')]
    return ' circular ' in first_line
def transform_color(color1, color2, skipR=1, skipG=1, skipB=1):
"""
transform_color(color1, color2, skipR=1, skipG=1, skipB=1)
This function takes 2 color1 and color2 RGB color arguments, and then returns a
list of colors in-between the color1 and color2
eg- tj.transform_color([0,0,0],[10,10,20]) returns a list:-
[[0, 0, 0], [1, 1, 1], [2, 2, 2] ... [9, 9, 9], [10, 10, 10], [10, 10, 11] ... [10, 10, 20]]
This function is very useful for creating color fade or color transition effects in pygame.
There are 3 optional arguments, which are skip arguments set to 1 by default.
"""
L = []
if (color1[0] < color2[0]):
i = list(range(color1[0],
color2[0] + 1,
skipR))
else:
i = list(range(color2[0], color1[0] + 1, skipR))[::-1]
if i == []:
i = [color1[0]]
if (color1[1] < color2[1]):
j = list(range(color1[1],
color2[1] + 1,
skipG))
else:
j = list(range(color2[1], color1[1] + 1, skipG))[::-1]
if j == []:
j = [color1[1]]
if (color1[2] < color2[2]):
k = list(range(color1[2],
color2[2] + 1,
skipB))
else:
k = list(range(color2[2], color1[2] + 1, skipB))[::-1]
if k == []:
k = [color1[2]]
x = max(len(i), len(j), len(k))
for m in range(len(i), x):
i += [i[-1]]
for m in range(len(j), x):
j += [j[-1]]
for m in range(len(k), x):
k += [k[-1]]
for m in range(x):
l = [i[m], j[m], k[m]]
L += [l]
return L |
def parse_style(style_string: str) -> dict:
    """Parse a CSS-like ``key:value;key:value`` string into a dict.

    Values made up solely of digits are converted to int; everything else
    is kept as a string. Only the first ``:`` of each declaration separates
    key from value, so values may themselves contain colons (e.g. URLs) —
    the original truncated such values at the second colon.

    Args:
        style_string (str): declarations separated by ``;``.

    Returns:
        dict: property name -> int or str value.
    """
    parsed = {}
    for declaration in style_string.split(';'):
        key, value = declaration.split(':', 1)
        parsed[key] = int(value) if value.isdigit() else value
    return parsed
def expand_locations(ctx, values, targets = []):
    """Expands the `$(location)` placeholders in each of the given values.

    Args:
        ctx: The rule context.
        values: A list of strings, which may contain `$(location)` placeholders.
        targets: A list of additional targets (other than the calling rule's
            `deps`) that should be searched for substitutable labels.

    Returns:
        A list of strings with any `$(location)` placeholders filled in.
    """
    expanded = []
    for value in values:
        expanded.append(ctx.expand_location(value, targets))
    return expanded
def not_(x):
    """Support `not x` syntax as a callable."""
    return not x
def get_chromosome_number(chrom: str) -> int:
    """
    Get chromosome number (index) of the supplied chromosome:
    '1' -> 1, chr1 -> 1, returns -1 when not available, chrM -> -1
    """
    try:
        return int(chrom.replace('chr', ''))
    except ValueError:
        # Non-numeric names (e.g. 'chrM', 'chrX') carry no index; the bare
        # `except Exception` with an unused variable was narrowed to the
        # error int() actually raises for them.
        return -1
def _get_time_split(timing_line):
"""Extracts timing information from a boost-formatted timestamp, i.e.:
<timestamp> GeometricCalibration.cpp:<line_number> Aggregate timing: <wall>s wall, <user>s user + <system>s system = <cpu>s CPU (<pct>%)
Args:
timing_line (str): Line of the above format.
Returns:
dict[str, float]: Map with keys "cpu" and "wall" for the respective readings.
"""
cpu_time_half = timing_line.split("CPU")[0].strip()
cpu_time = float(cpu_time_half.split(" ")[-1].replace("s", ""))
wall_time_half = timing_line.split("wall")[0].strip()
wall_time = float(wall_time_half.split(" ")[-1].replace("s", ""))
return {"cpu": cpu_time, "wall": wall_time} |
def groupBy(keyFn, row_data):
    """Group rows in row_data by the result of keyFn.

    Arguments:
        keyFn -- A function that returns the cell data of the column to group
            by. Needs to be tailored to the structure of row_data.
        row_data -- A table organized as a list of row data structures.

    Returns:
        { 'key_id1': [ row1, row2, row3, ...],
          'key_id2': [ row4, row5, ... ],
          ...}
    """
    grouped = {}
    for row in row_data:
        # setdefault creates the sub-table on first sight of a key.
        grouped.setdefault(keyFn(row), []).append(row)
    return grouped
def get_prob(date):
    """Build the probability CSV file name for the given date."""
    return str(date) + 'prob.csv'
def connect_(pairs, n=1):
    """Merge adjacent clusters whose gap is at most n.

    :param pairs: Clusters of iterateables e.g., [(1,5),(7,10)]
    :param n: maximum distance between two clusters for them to be joined
    """
    if not pairs:
        return []
    merged = []
    cur_start, cur_end = pairs[0]
    for nxt_start, nxt_end in pairs[1:]:
        if nxt_start - cur_end <= n:
            # Close enough: extend the current cluster.
            cur_end = nxt_end
        else:
            merged.append((cur_start, cur_end))
            cur_start, cur_end = nxt_start, nxt_end
    merged.append((cur_start, cur_end))
    return merged
def _hashable_bytes(data):
"""
Coerce strings to hashable bytes.
"""
if isinstance(data, bytes):
return data
elif isinstance(data, str):
return data.encode('ascii') # Fail on anything non-ASCII.
else:
raise TypeError(data) |
def parse_directions(input_directions):
    """Parse input_directions for a single tile from str to a list of str.

    Moves starting with 's' or 'n' are two characters (se, sw, ne, nw);
    'e' and 'w' are single characters.
    """
    directions = []
    pos = 0
    while pos != len(input_directions):
        if input_directions[pos] in ('s', 'n'):
            directions.append(input_directions[pos:pos + 2])
            pos += 2
        else:
            directions.append(input_directions[pos])
            pos += 1
    return directions
def header_from_line(line):
    """
    Parse a tsv header.

    :param line: the header text
    :return: (column_names list, column_indices dict)
    """
    if not line:
        return [], {}
    names = line.rstrip("\n").split("\t")
    indices = {name: index for index, name in enumerate(names)}
    return names, indices
def sortable_label(label, separator='-'):
    """Create a sortable tuple out of a label.

    Converts a dashed label into a tuple based on the following rules:
    - Numeric segments are zero-padded to four digits.
    - Alphabetic segments that are already all-uppercase stay as-is.
    - Other alphabetic segments are lowercased entirely.
    - Anything else is returned as-is.
    Intended to be used like `sorted(sections, key=Section.sortable_label)`.
    """
    def _normalize(segment):
        if segment.isdigit():
            return segment.zfill(4)
        if segment.isalpha() and not segment.isupper():
            return segment.lower()
        return segment

    return tuple(_normalize(seg) for seg in label.split(separator))
def wrap_text(text, font, width):
    # ColdrickSotK
    # https://github.com/ColdrickSotK/yamlui/blob/master/yamlui/util.py#L82-L143
    """Wrap text to fit inside a given width when rendered.

    :param text: The text to be wrapped.
    :param font: The font the text will be rendered in (pygame-style object;
        must provide ``size(str) -> (width, height)``).
    :param width: The width to wrap to; ``None`` or 0 disables wrapping."""
    text_lines = text.replace('\t', '    ').split('\n')
    if width is None or width == 0:
        return text_lines
    wrapped_lines = []
    for line in text_lines:
        # Trailing sentinel space guarantees index(' ', ...) finds an end.
        line = line.rstrip() + ' '
        if line == ' ':
            # Preserve blank lines (paragraph breaks).
            wrapped_lines.append(line)
            continue
        # Get the leftmost space ignoring leading whitespace
        start = len(line) - len(line.lstrip())
        start = line.index(' ', start)
        while start + 1 < len(line):
            # Get the next potential splitting point
            next = line.index(' ', start + 1)
            if font.size(line[:next])[0] <= width:
                # Still fits: extend the candidate line to this space.
                start = next
            else:
                # Overflow: flush up to the last fitting space, continue with
                # the remainder of the line.
                wrapped_lines.append(line[:start])
                line = line[start + 1:]
                start = line.index(' ')
        # Drop the sentinel space added above.
        line = line[:-1]
        if line:
            wrapped_lines.append(line)
    return wrapped_lines
def is_pangram(s):
    """Return True if str `s` is a pangram, False otherwise.

    Note: the original doctests had mismatched quotes and used the
    "jumped" sentence (which lacks an 's') as the True example.

    >>> is_pangram("The quick brown fox jumps over the lazy dog.")
    True
    >>> is_pangram("The slow brown fox jumped over the lazy dog.")
    False
    """
    # A pangram contains all 26 ASCII letters at least once.
    return len(set([c for c in s.lower() if 'a' <= c <= 'z'])) == 26
def reverse_linked_list_recursive(head):
    """Exercise 1.3.30: recursively reverse a singly linked list.

    Returns the new head node, or None for an empty list.
    """
    if head is None:
        return None
    if head.next is None:
        # Single node: already reversed.
        return head
    rest = head.next
    new_head = reverse_linked_list_recursive(rest)
    # `rest` is now the tail of the reversed remainder; append `head`.
    rest.next = head
    head.next = None
    return new_head
def round_nearest(value, multiple_of):
    """Round `value` to the nearest multiple of `multiple_of`.

    based on: https://stackoverflow.com/a/28425782/574981
    """
    quotient = round(value / multiple_of)
    return quotient * multiple_of
def zero2minimum(single_data):
    """Replace a zero value with a tiny epsilon (0.0000001).

    :param single_data: data point
    :return: the original value, or 0.0000001 if it was zero
    """
    if single_data == 0:
        return 0.0000001
    return single_data
def trimText(message):
    """
    Clean a string by keeping only ASCII letters (A-Z, a-z).

    :param message: The string to trim
    :return: the filtered string
    """
    # Explicit range checks (not isalpha()) so non-ASCII letters are dropped.
    return "".join(ch for ch in message
                   if 'A' <= ch <= 'Z' or 'a' <= ch <= 'z')
def _get_gl_version(_lib):
"""Helper to get the GL version string"""
try:
return _lib.glGetString(7938).decode('utf-8')
except Exception:
return 'unknown' |
def program_counter(c):
    """Return the expression string for the program counter.

    The argument is unused; kept for interface compatibility with callers.
    """
    return "c.pc"
def divide_dict(a_dict, divide_func):
    """Divide a dict like object into two parts.

    - a_dict: dict like object
    - divide_func: predicate (key, value) -> bool

    Returns (matching, non_matching) dicts.

    Example:
        divide({'a': 1, 'b': 2}, lambda k, v: v > 1) -> {'b': 2}, {'a': 1}
    """
    matching = {}
    rest = {}
    for key, value in a_dict.items():
        if divide_func(key, value):
            matching[key] = value
        else:
            rest[key] = value
    return matching, rest
def is_good_candidate(input_string, guess_word):
    """
    input_string: string, the user input to be spellchecked
    guess_word: string, candidate word from the wordlist

    returns: bool, True if the guess is at least 5 characters long and its
    first and last characters match those of the input
    """
    if len(guess_word) < 5:
        # Short-circuit first so empty input_string never gets indexed here.
        return False
    return (input_string[0] == guess_word[0]
            and input_string[-1] == guess_word[-1])
def check_for_repeats(string):
    """
    Return a tuple of booleans: (any char occurs exactly twice,
    any char occurs exactly three times).
    """
    counts = {}
    for ch in string:
        counts[ch] = counts.get(ch, 0) + 1
    values = counts.values()
    return (2 in values, 3 in values)
def normalize(value: float, min_value: float, max_value: float) -> float:
    """Linearly map `value` from [min_value, max_value] into [0..1].

    Returns 0 for a degenerate (zero-width) range; raises ValueError when
    the value lies outside the range.
    """
    if min_value == max_value:
        return 0
    if value < min_value or value > max_value:
        raise ValueError(f"Value {value} is not in range [{min_value}; {max_value}]")
    span = max_value - min_value
    return (value - min_value) / span
def get_adjacents(row, col, max_row, max_col):
    """
    Return the coordinates of the in-bounds adjacent spaces, in
    up / left / down / right order.
    """
    neighbors = []
    if row - 1 >= 0:
        neighbors.append((row - 1, col))
    if col - 1 >= 0:
        neighbors.append((row, col - 1))
    if row + 1 < max_row:
        neighbors.append((row + 1, col))
    if col + 1 < max_col:
        neighbors.append((row, col + 1))
    return neighbors
def get_disabled_vhost_path(version, domain):
    """
    Get the path for a disabled PHP vhost file regardless of whether or not
    it exists.

    Args:
        version - The PHP version used in the file path
        domain - The domain used in the file path
    """
    return ''.join(['/opt/php-', version, '/etc/php-fpm.d/', domain,
                    '.conf.disabled'])
def _add_algorithm_defaults(algorithm):
"""Central location specifying defaults for algorithm inputs.
Converts allowed multiple inputs into lists if specified as a single item.
"""
defaults = {"archive": [],
"min_allele_fraction": 10.0,
"tools_off": []}
convert_to_list = set(["archive", "tools_off"])
for k, v in defaults.items():
if k not in algorithm:
algorithm[k] = v
for k, v in algorithm.items():
if k in convert_to_list:
if not isinstance(v, (list, tuple)):
algorithm[k] = [v]
return algorithm |
def compute_cycle_length_with_denom(denom: int) -> int:
    """
    Compute the decimal cycle length for unit fraction 1/denom (where denom>1),
    e.g. 1/6 has decimal representation 0.1(6) so cycle length is 1, etc.
    Returns 0 when the decimal expansion terminates (no repeating cycle).
    """
    digit_pos = 0
    # Remaining fraction after subtracting away portions of earlier decimal digits
    frac_numer = 1
    # Maps each seen remainder numerator -> digit position where it appeared;
    # a repeat means the digits between the two positions cycle forever.
    frac_numer_history = dict()
    while True:
        digit_pos += 1
        # For this digit position, compute the common denominator and numerator
        # for the remaining fraction. E.g. if we started with denom = 7, then:
        # digit_pos=1: frac = 1/7 => 10/70 - 1*7/70 = 3/70 [0.1]
        # digit_pos=2: frac = 3/70 => 30/700 - 4*7/700 = 2/700 [0.14]
        # digit_pos=3: frac = 2/700 => 20/7000 - 2*7/7000 = 6/7000 [0.142]
        # It's clear that we can ignore the denominator (it's known from digit_pos):
        # digit_pos=4: frac = 6/d => 60/10d - 8*7/10d = 4/10d [0.1428]
        # digit_pos=5: frac = 4/d => 40/10d - 5*7/10d = 5/10d [0.14285]
        # digit_pos=6: frac = 5/d => 50/10d - 7*7/10d = 1/10d [0.142857]
        # digit_pos=7: frac = 1/d => we're back to digit_pos=1! Seq length is 7-1 = 6.
        # Another example for 1/6:
        # digit_pos=1: frac = 1/d => 10/10d - 1*6/10d = 4/10d [0.1]
        # digit_pos=2: frac = 4/d => 40/10d - 6*6/10d = 4/10d [0.16]
        # digit_pos=3: frac = 4/d => we're back to digit_pos=2! Seq length is 3-2 = 1.
        minuend = 10 * frac_numer
        subtrahend = denom
        # This is one step of long division: digit is the next decimal digit,
        # difference is the new remainder.
        digit = minuend // subtrahend
        difference = minuend - digit * subtrahend
        # Has it terminated?
        if difference == 0:
            return 0
        # Have we found a repeating sequence?
        if frac_numer in frac_numer_history:
            return digit_pos - frac_numer_history[frac_numer]
        # Add this digit to the "seen before" dict
        frac_numer_history[frac_numer] = digit_pos
        # Update remaining fraction numerator
        frac_numer = difference
def get_contiguous_pairs(sequence: list, pair_size: int) -> list:
    """Return every contiguous window of length `pair_size` from `sequence`.

    DOCTEST
    >>> get_contiguous_pairs(sequence=[1, 2, 3, 4], pair_size=2)
    [[1, 2], [2, 3], [3, 4]]
    >>> get_contiguous_pairs(sequence=[1, 2, 3, 4], pair_size=3)
    [[1, 2, 3], [2, 3, 4]]
    """
    windows = []
    for start in range(len(sequence) - pair_size + 1):
        windows.append(sequence[start:start + pair_size])
    return windows
def coulomb_force(q1, q2, r):
    """
    Calculate the electrostatic force between two point charges using
    Coulomb's law, F = k * q_1 * q_2 / r ** 2.

    :param q1: Scalar: Charge given in Coulombs
    :param q2: Scalar: Charge given in Coulombs
    :param r: Scalar: Distance between two charges given in meters
    :return: Scalar: Force between the two charges, given as Newtons
    """
    COULOMB_CONSTANT = 8.987e9  # N * m ** 2 / C ** 2
    return COULOMB_CONSTANT * (q1 * q2) / r ** 2
def format_tuple(tup, join_char="."):
    """Format a tuple of version numbers for printing.

    Example:
        (4, 2, 0) turns into 4.2.0

    Args:
        tup (tuple): Version as tuple.
        join_char (char): Character by which numbers are joined (default: ".")

    Returns:
        str: Joined version number.
    """
    # join already returns str; the original wrapped it in a redundant str().
    return join_char.join(map(str, tup))
def rk4(x, v, t, h, deriv):
    """
    Core RK4 algorithm for calculating one step ahead.
    This version is limited to 2nd order eq's of the type
    dx^2/dt^2 + p(t,x,v)*dx/dt + q(t,x,v)*x = r(t,x,v)

    Input:
    - x: x (t)
    - v: v (t)
    - t: Initial time (t_n)
    - h: Stepsize (t_{n+1} = t_n + h)
    - deriv: Pointer to a function that calculates
             and returns the derivatives of x and v as (dxdt, dvdt)

    Output:
    (x,v): Tuple containing the calculated
           values of x and v at time t+h
    """
    # Stage 1: slope at the start of the interval.
    (dxdt,dvdt) = deriv(t,x,v)
    kv1 = h*dvdt
    kx1 = h*dxdt
    # Stage 2: slope at the midpoint, using stage-1 estimates.
    (dxdt,dvdt) = deriv(t+h/2,x+kx1/2,v+kv1/2)
    kv2 = h*dvdt
    kx2 = h*dxdt
    # Stage 3: slope at the midpoint again, using stage-2 estimates.
    (dxdt,dvdt) = deriv(t+h/2,x+kx2/2,v+kv2/2)
    kv3 = h*dvdt
    kx3 = h*dxdt
    # Stage 4: slope at the end of the interval, using stage-3 estimates.
    (dxdt,dvdt) = deriv(t+h,x+kx3,v+kv3)
    kv4 = h*dvdt
    kx4 = h*dxdt
    # Weighted average of the four slopes (classic 1-2-2-1 RK4 weights).
    x = x + (kx1 + 2*(kx2+kx3) + kx4)/6
    v = v + (kv1 + 2*(kv2+kv3) + kv4)/6
    return(x,v)
def c3_merge(bases):
    """ Merge together the list of base classes into the mro that will be
    created using the C3 linearisation algorithm.

    Raises TypeError when no consistent linearisation exists.
    """
    # Protect against empty base class lists (although this should never happens
    # because everyone derives from object *right*?)
    if not bases:
        return []
    mro = []
    # The input to c3 is the linearisation of each base class and the list of
    # bases itself
    to_merge = [b.mro() for b in bases] + [bases]
    # Non-empty lists evaluate to True, so the while loop here goes until all
    # lists are exhausted, which is the endpoint of the c3 algorithm
    while to_merge:
        # First, we have to find the first 'good' head.
        # A good head is the head of a list that does not appear in the tails of
        # any of the other lists
        try:
            head = next(l[0] for l in to_merge if not any(l[0] in l2[1:] for l2 in to_merge) )
        except StopIteration:
            # No good head exists: the hierarchy cannot be linearised.
            raise TypeError(
                "Failed to calculate MRO - cannot order classes {0}".format(
                    ", ".join([l[0].__name__ for l in to_merge])))
        # append it to the mro and remove it from the heads of any list
        mro.append(head)
        to_merge = [l for l in (l2[1:] if l2[0] == head else l2 for l2 in to_merge) if l]
    return mro
def fetchPID( pid ):
    """Read in the HST visit status page for the given program ID.

    Returns a BeautifulSoup object wrapping the fetched HTML page, or exits
    the process with install instructions when bs4/requests are missing.
    """
    import sys
    try:
        from bs4 import BeautifulSoup as bs
    except ImportError :
        print("Error: hstMonitor requires BeautifulSoup4.")
        print("   http://www.crummy.com/software/BeautifulSoup")
        print("Install it via pip (or, if you prefer, easy_install)")
        print("   pip install beautifulsoup4")
        sys.exit()
    try:
        import requests
    except ImportError :
        # Bug fix: the original message read "requires ." with the package
        # name missing.
        print("Error: hstMonitor requires Requests.")
        print("   http://docs.python-requests.org/en/latest")
        print("Install it via pip (or, if you prefer, easy_install)")
        print("   pip install requests")
        sys.exit()
    r = requests.get("http://www.stsci.edu/cgi-bin/get-visit-status?id=%i&markupFormat=html&observatory=HST"%pid)
    data = r.text
    # NOTE(review): bs(data) uses the default parser, which can vary between
    # environments — consider passing an explicit parser like 'html.parser'.
    soup = bs(data)
    return( soup )
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:
        proc_data: (dictionary) raw structured data to process

    Returns:
        List of dictionaries, each representing a row in the csv file:
        [
          {
            csv file converted to a Dictionary
            https://docs.python.org/3/library/csv.html
          }
        ]
    """
    # Identity pass-through: no further processing is required.
    return proc_data
def _stringify_na_values(na_values):
    """Return a set containing every stringified and numeric alias of the
    given NA sentinel values.

    For each value x this collects str(x) and x itself, plus — when x parses
    as a whole number — its int form and the "N.0" spellings, so that e.g.
    999, "999", 999.0 and "999.0" all match the same sentinel.
    """
    result = []
    for x in na_values:
        result.append(str(x))
        result.append(x)
        try:
            v = float(x)
            # we are like 999 here
            if v == int(v):
                v = int(v)
                result.append(f"{v}.0")
                result.append(str(v))
            # pandas\io\parsers.py:3522: error: Argument 1 to "append" of
            # "list" has incompatible type "float"; expected "str" [arg-type]
            result.append(v)  # type: ignore[arg-type]
        except (TypeError, ValueError, OverflowError):
            pass
        try:
            # pandas\io\parsers.py:3526: error: Argument 1 to "append" of
            # "list" has incompatible type "int"; expected "str" [arg-type]
            result.append(int(x))  # type: ignore[arg-type]
        except (TypeError, ValueError, OverflowError):
            pass
    # Duplicates collapse here; mixed str/int/float membership is intended.
    return set(result)
def rivers_with_station(stations):
    """Return the alphabetically sorted names of all rivers that have at
    least one of the given stations."""
    # Set comprehension deduplicates rivers shared by several stations.
    return sorted({station.river for station in stations})
def distance_to_buckets(d):
    """Convert a distance to a 10-dimensional one-hot bucket vector.

    Buckets: 0,1,2,3,4 individually, then [5,8), [8,16), [16,32), [32,64),
    and 64+. Asserts that exactly one bucket was hit.
    """
    ohe = [0] * 10
    if d in (0, 1, 2, 3, 4):
        ohe[int(d)] = 1
    elif 5 <= d < 8:
        ohe[5] = 1
    elif 8 <= d < 16:
        ohe[6] = 1
    elif 16 <= d < 32:
        ohe[7] = 1
    elif 32 <= d < 64:
        ohe[8] = 1
    elif d >= 64:
        ohe[9] = 1
    assert sum(ohe) == 1, (d, ohe)
    return ohe
def extract_keys(inp_dict, keys):
    """
    Return a dictionary restricted to the selected keys.

    :param inp_dict: dictionary
    :param keys: list of keys, or a single key
    :return: dictionary
    """
    if isinstance(keys, list):
        selected = keys
    else:
        selected = [keys]
    return {key: inp_dict[key] for key in selected}
def get_mips_per_model(models_and_mips, cmip6_model):
    """
    Return the mips registered for the given model.

    :param models_and_mips: mapping of model name -> mips
    :param cmip6_model: model name to look up
    :return: mips
    :raises Exception: when the model is not present in the mapping
    """
    if cmip6_model in models_and_mips:
        return models_and_mips[cmip6_model]
    raise Exception('Model not in mips')
def pair_gcd(a, b):
    """Return the greatest common divisor using Euclid's algorithm."""
    if b == 0:
        return a
    return pair_gcd(b, a % b)
def get_task_name(request, default):
    """Use 'shadow' in request for the task name if applicable.

    request.shadow may be missing, None or an empty string; in all those
    cases the default is used instead.
    """
    shadow = getattr(request, 'shadow', None)
    if shadow:
        return shadow
    return default
def split_network_line(line):
    """Parse a line of /proc/virtual/<xid>/cacct for network usage.

    The cacct file has a header followed by usage counts per protocol:
        Type    recv #/bytes    send #/bytes    fail #/bytes
        INET:   32/9909         43/253077       0/0

    Args:
        line: str, a line of text from the cacct file.

    Returns:
        4-tuple of int:
        ('recv' syscalls, received octets, 'send' syscalls, sent octets).
    """
    fields = line.strip().split()
    # fields[0] is the protocol label; recv and send counts follow.
    recv_calls, rx_octets = fields[1].split('/')
    send_calls, tx_octets = fields[2].split('/')
    return (int(recv_calls), int(rx_octets), int(send_calls), int(tx_octets))
def str_to_positive_int(text):
    """Convert a string representation back to PositiveInt.

    Blank/whitespace-only input maps to 0; negative numbers raise
    ValueError.
    """
    if not text.strip():
        return 0
    value = int(text)
    if value < 0:
        raise ValueError("Value should be positive.")
    return value
def significant_magnitude(num):
    """Return False when *num* lies inside the insignificance band
    (-0.08, 0.08), True otherwise.

    NOTE(review): the original docstring claimed a return of -1/0/1 and a
    0.07 threshold; the code actually returns a bool and uses 0.08 —
    confirm which was intended before relying on the threshold value.
    """
    if -0.08 < num < 0.08:
        return False
    return True
def get_public_STATE_transitions(self, field="state"):
    """
    Returns the transitions which are meant to be seen by the customer.
    The admin on the other hand should be able to see everything.

    :param str field: the name of the :class:`~django_states.fields.StateField`
    :return: list of completed, public transitions; empty list when no
        log model is attached to the field.
    """
    # "_<field>_log_model" is set by django_states when transition logging
    # is enabled; without it there is nothing to report.
    if getattr(self, "_%s_log_model" % field, None):
        # "get_<field>_transitions" is a generated accessor on the model.
        transitions = getattr(self, "get_%s_transitions" % field)
        return [t for t in transitions() if t.is_public and t.completed]
    else:
        return []
def join_deps(deps, commasep=True):
    """
    Take the result from explode_dep_versions and generate a dependency
    string, e.g. "foo (>= 1.0), bar".
    """
    rendered = []
    for name, versions in deps.items():
        if not versions:
            rendered.append(name)
        elif isinstance(versions, list):
            for version in versions:
                rendered.append(name + " (" + version + ")")
        else:
            rendered.append(name + " (" + versions + ")")
    separator = ", " if commasep else " "
    return separator.join(rendered)
def is_overlapped(end1, start2):
    """Returns True if segments are overlapping.

    Arguments
    ---------
    end1 : float
        End time of the first segment.
    start2 : float
        Start time of the second segment.

    Returns
    -------
    overlapped : bool
        True if segments overlapped else False.

    Example
    -------
    >>> from speechbrain.processing import diarization as diar
    >>> diar.is_overlapped(5.5, 3.4)
    True
    >>> diar.is_overlapped(5.5, 6.4)
    False
    """
    return not (start2 > end1)
def search4vowels(phrase: str) -> set:
    """Return the set of vowels found in 'phrase'."""
    return {ch for ch in phrase if ch in 'aeiou'}
def excel_column_name(n):
    """Number to Excel-style column name, e.g., 1 = A, 26 = Z, 27 = AA, 703 = AAA."""
    letters = []
    while n > 0:
        # Bijective base-26: shift by one so 26 maps to 'Z', not 'A0'.
        n, remainder = divmod(n - 1, 26)
        letters.append(chr(ord('A') + remainder))
    return ''.join(reversed(letters))
def remove_index(a, index):
    """Return *a* as a list with the element at *index* removed."""
    items = list(a)
    del items[index]
    return items
def _CompareLocaleLists(list_a, list_expected, list_name):
"""Compare two lists of locale names. Print errors if they differ.
Args:
list_a: First list of locales.
list_expected: Second list of locales, as expected.
list_name: Name of list printed in error messages.
Returns:
On success, return False. On error, print error messages and return True.
"""
errors = []
missing_locales = sorted(set(list_a) - set(list_expected))
if missing_locales:
errors.append('Missing locales: %s' % missing_locales)
extra_locales = sorted(set(list_expected) - set(list_a))
if extra_locales:
errors.append('Unexpected locales: %s' % extra_locales)
if errors:
print('Errors in %s definition:' % list_name)
for error in errors:
print(' %s\n' % error)
return True
return False |
def hex_to_rgb(hex_color):
    """
    Convert a 6 digit hex number to RGB.

    @param: hex_color - A 6 digit string with values in the range [a-fA-F0-9].
    @return: a tuple containing 3 integers.
    """
    if not isinstance(hex_color, str):
        raise TypeError("'hex_color' must be of type 'str'.")
    if len(hex_color) != 6:
        raise ValueError("'hex_color' must 6 characters in length (excluding '#') e.g. FF1919.")
    # Parse each 2-character channel (R, G, B) as a base-16 integer.
    return tuple(int(hex_color[i:i + 2], base=16) for i in (0, 2, 4))
def factorize(n):
    """Return the list of prime factors (with multiplicity) of positive
    integer n; [] for n == 1."""
    factors = []
    candidate = 2
    while True:
        quotient, remainder = divmod(n, candidate)
        if remainder:
            candidate += 1
            if candidate > n:
                # No divisor found up to n: nothing left to factor.
                return factors
        else:
            factors.append(candidate)
            if quotient < candidate:
                # Remaining cofactor must be 1 (all smaller primes removed).
                return factors
            n = quotient
def date_span_intersection(date_span_1, date_span_2):
    """Return a (start, end) tuple representing the overlap between
    `date_span_1` and `date_span_2`, or None when they do not overlap.
    """
    start = max(date_span_1[0], date_span_2[0])
    end = min(date_span_1[1], date_span_2[1])
    if start > end:
        # Spans are disjoint.
        return None
    return (start, end)
def lr_schedule(epoch: int, lr: float):
    """
    Scheduler function for keras callback.

    :param epoch: Epoch number.
    :param lr: Initial learning rate.
    :return: Updated LR (initial rate divided by a step-dependent factor).
    """
    if epoch < 40:
        return lr
    # (epoch upper bound, divisor) table, checked in order.
    for limit, divisor in ((60, 3), (70, 5), (80, 7),
                           (90, 9), (100, 11), (110, 13)):
        if epoch < limit:
            return lr / divisor
    return lr / 100
def average(a, b):
    """
    :param a: int, sum of total value
    :param b: int, the amount of temp information
    :return: a / b
    """
    return a / b
def validate_target_port(value):
    """Return an error fragment if the target port exceeds 65535, else ""."""
    if not value:
        return ""
    if value["Port"] > 65535:
        return "have value less than or equal to 65535"
    return ""
def s2n(s):
    """
    s2n(string) -> int

    Interpret *s* as a big-endian byte string and return its integer value
    (0 for the empty string). Accepts either bytes or str; str characters
    are taken as single bytes (latin-1), matching the Python 2 semantics.

    Bug fix: the original used ``s.encode('hex')``, a Python 2-only codec
    that raises LookupError on Python 3.
    """
    if isinstance(s, str):
        s = s.encode('latin-1')
    return int.from_bytes(s, 'big') if s else 0
def getScriptName(programName):
    """
    Split a suspected file name and return the uppercased prefix without
    the file extension.

    Args:
        programName: String file name candidate.

    Returns: uppercased prefix of the file name.
    """
    return programName.split('.')[0].upper()
def dict_access(d, key):
    """Access a nested key in a dict using . notation
    (key.subkey1.subkey2...subkeyn).

    Returns None if the key path does not exist in the object.
    """
    current = d
    for subkey in key.split("."):
        try:
            current = current[subkey]
        except (KeyError, TypeError):
            # KeyError: subkey missing. TypeError: an intermediate value is
            # not subscriptable by string (e.g. int or list) — previously
            # this leaked out and crashed despite the documented None
            # contract.
            return None
    return current
def findXDeltaFromDirection(direction):
    """Return delta X for jumping, given a numeric direction value.

    Directions 2 and 4 move +2; 6 and 8 move -2; anything else raises
    ValueError.
    """
    if direction in (2, 4):
        return 2
    if direction in (6, 8):
        return -2
    raise ValueError("Unexpected direction value of: {0}".format(direction))
def single_line(value):
    """Join the given string into a single trimmed line."""
    stripped = (line.strip() for line in value.splitlines())
    return " ".join(part for part in stripped if part)
def decompose_code(code):
    """
    Decompose a MARC "code" into (tag, ind1, ind2, subcode).

    The code is space-padded to 6 characters; blank indicators become "_"
    and a blank subcode becomes None.
    """
    padded = "%-6s" % code
    ind1 = padded[3] if padded[3] != " " else "_"
    ind2 = padded[4] if padded[4] != " " else "_"
    subcode = padded[5] if padded[5] != " " else None
    return (padded[0:3], ind1, ind2, subcode)
def get_sh_resource_type(iot_finding):
    """Return ASFF Resource type based on an IoT Device Defender finding."""
    resource_type = iot_finding['nonCompliantResource']['resourceType']
    if resource_type == "IAM_ROLE":
        return "AwsIamRole"
    return "Other"
def camel_to_snake(word: str, depublicize: bool = False):
    """Convert camel case to snake case.

    When depublicize is True, the leading underscore produced by an initial
    capital is kept (e.g. "Foo" -> "_foo").
    """
    pieces = []
    for ch in word:
        if ch.isupper():
            pieces.append("_" + ch.lower())
        else:
            pieces.append(ch)
    converted = "".join(pieces)
    if not depublicize:
        converted = converted.lstrip("_")
    return converted
def rankine2kelvin(K):
    """
    Convert a temperature from degrees Rankine to Kelvin.

    :param K: temperature in degrees Rankine (parameter name kept for
        backward compatibility with keyword callers)
    :return: temperature in Kelvin
    """
    return 5.0 / 9.0 * K
def getBucketName(year, month, day, radar):
    """ Get the name of a specific bucket where radar data is stored.

    Args:
        year: Year as an integer.
        month: Month as an integer.
        day: Day as an integer.
        radar: The 4 letter name of the radar, a string.

    Returns:
        The bucket name as a string (e.g. YYYY/MM/DD/KLMN/).
    """
    try:
        # Zero-pad numeric date components: 2020/5/1 -> "2020/05/01/KLMN/".
        return "%04d/%02d/%02d/%s/" % (year, month, day, radar)
    except (TypeError, ValueError):
        # Non-integer inputs (e.g. strings) are joined verbatim. Narrowed
        # from a bare except to the errors %d formatting can raise.
        return "%s/%s/%s/%s/" % (year, month, day, radar)
def calc_results_progress(
    number_of_users: int,
    number_of_users_required: int,
    cum_number_of_users: int,
    number_of_tasks: int,
    number_of_results: int,
) -> int:
    """Number of results that count toward project progress.

    Results only contribute while the required number of users per task has
    not yet been reached; anything beyond that requirement is ignored.
    """
    previous_users = cum_number_of_users - number_of_users
    if cum_number_of_users <= number_of_users_required:
        # Still at or below the requirement: every result counts.
        return number_of_results
    if previous_users < number_of_users_required:
        # The requirement was crossed during this batch: only the portion
        # up to the requirement counts.
        return (number_of_users_required - previous_users) * number_of_tasks
    # Requirement was already met before this batch: nothing counts.
    return 0
def cube(number):
    """ Return True if number is a perfect cube.

    Handles negative numbers as well: the original computed
    ``number ** (1./3)`` directly, which yields a complex value for
    negative input on Python 3 and crashed with a TypeError in round().
    """
    magnitude = abs(number)
    root = round(magnitude ** (1. / 3))
    return root ** 3 == magnitude
def get_next_run_id(root, scheme_entry):
    """Cycle through a root object to find the next unused run_id string
    (run_1, run_2, ...) for a new measurement.

    A KeyError anywhere along the lookup path means the id is free.
    """
    i = 1
    while True:
        run_id = 'run_' + str(i)
        try:
            root['Circular Weighings'][scheme_entry]['measurement_' + run_id]
        except KeyError:
            return run_id
        i += 1
def cmd_if(ctx, cond, true_block, false_block=None):
    """
    Return true_block when cond is truthy; otherwise false_block, or []
    when false_block is falsy/absent.
    """
    if cond:
        return true_block
    return false_block if false_block else []
def CheckForExistence(requested_data, available_data):
    """Determine whether the requested data exists.

    Args:
        requested_data: The specific data being requested.
        available_data: The full set of available data, used to distinguish
            "this item is missing" from "no data exists at all".

    Returns:
        1 if the requested data exists.
        -1 if the requested data does not exist.
        0 if no data exists at all.
    """
    if requested_data is not None:
        return 1
    return -1 if available_data else 0
def raise_to_list(input):
    """
    Raise a value to a list (if applicable), preserving None as None.

    Parameters
    ----------
    input : object
        The object to raise to a list.

    Returns
    -------
    list or None
        The object as a list, or None.
    """
    if input is None:
        return None
    return input if isinstance(input, list) else [input]
def list_union(start_list, then_include) -> list:
    """
    Append every item of then_include that is not already in start_list,
    mutating start_list in place.

    then_include may be a single string or a list; any other type is
    ignored. Returns start_list (the original annotated return type of
    None was incorrect — the list has always been returned).
    """
    if isinstance(then_include, str):
        items = [then_include]
    elif isinstance(then_include, list):
        items = then_include
    else:
        items = []
    for item in items:
        if item not in start_list:
            start_list.append(item)
    return start_list
def compress_data(data):
    """Remove all keys from ``data`` that refer to themselves::

        >>> data = {'a': 'a', 'b': 'c'}
        >>> compress_data(data)
        {'b': 'c'}
    """
    return {k: v for k, v in data.items() if k != v}
def byte_to_signed_int(x):
    """
    Interpret a byte value (0..255) as a signed two's-complement integer.

    :param x: Byte
    """
    # The original computed -((x-1) ^ 0xff), which equals x - 256 for any
    # x with the sign bit set.
    if x & 0x80:
        return x - 0x100
    return x
def format72(value):
    """
    Adapt a string to fit on 72-column lines.

    @param value (string) The string to adapt
    @return (string) The string split into newline-joined 71-char chunks
    """
    # Pad with spaces so slicing never runs short, then chop into 71-char
    # pieces; the final rstrip drops padding and the trailing newline.
    padded = value + 71 * ' '
    chunk_count = len(value) // 71 + 1
    chunks = [padded[i * 71:(i + 1) * 71] for i in range(chunk_count)]
    return '\n'.join(chunk + '' for chunk in chunks).rstrip() if chunks else ''
def replace_variables_in_text(function_with_text, variables):
    """Perform variable replacement in a function's docstring text."""
    template = function_with_text.__doc__
    return template.format(**variables)
def cleanup_salary(row):
    """Parse the ``Salary`` field of *row* into a float.

    Strips the leading ``$`` sign, thousands separators, and surrounding
    whitespace — the latter two previously made ``float()`` raise on
    values such as ``"$1,200"``.

    :param row: mapping with a string ``"Salary"`` entry, e.g. ``"$1,200.50"``
    :return: the salary as a float
    :raises ValueError: if the cleaned value is not a valid number
    """
    salary = row["Salary"].replace("$", "").replace(",", "").strip()
    return float(salary)
def to_reduced_row_echelon_form(matrix):
    """
    This method computes the reduced row echelon form (a.k.a. row canonical
    form) of a matrix. The code is taken from
    https://rosettacode.org/wiki/Reduced_row_echelon_form#Python and edited with
    minor changes.

    Uses Gauss-Jordan elimination: for each row it locates a pivot column,
    swaps a usable row into place, normalises the pivot row, and eliminates
    the pivot column from every other row.

    :param matrix matrix: matrix to be reduced (list of row lists).
    :return matrix: the reduced matrix.
    :rtype: matrix

    :Example:

    >>> import pygem.affine as at
    >>> matrix = [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]
    >>> rref_matrix = at.to_reduced_row_echelon_form(matrix)

    .. note::
        `matrix` will change after calling this function.
    """
    lead = 0  # index of the current pivot column
    row_count = len(matrix)
    column_count = len(matrix[0])
    for r in range(row_count):
        if lead >= column_count:
            # No columns left to pivot on; reduction is complete.
            return matrix
        i = r
        # Search down the pivot column for a nonzero entry.
        while matrix[i][lead] == 0:
            i += 1
            if i == row_count:
                # Whole column at/below r is zero: advance to the next column.
                i = r
                lead += 1
                if column_count == lead:
                    return matrix
        # Move the found pivot row into position r.
        matrix[i], matrix[r] = matrix[r], matrix[i]
        lv = matrix[r][lead]
        # Scale the pivot row so the pivot entry becomes 1.
        matrix[r] = [mrx / float(lv) for mrx in matrix[r]]
        for i in range(row_count):
            if i != r:
                # Subtract the right multiple of the pivot row to zero out
                # this row's entry in the pivot column.
                lv = matrix[i][lead]
                matrix[i] = [
                    iv - lv * rv for rv, iv in zip(matrix[r], matrix[i])
                ]
        lead += 1
    return matrix
def indent(text, numtabs=1, spacespertab=4, tab=None):
    """
    Indent every line of a multiline string.

    By default the indentation is made of spaces (``numtabs`` repetitions
    of ``spacespertab`` spaces). To use tab (or any other) characters,
    specify the tab string explicitly, e.g.::

        indent(text, tab='\t')

    Note that in this case ``spacespertab`` is ignored.

    Examples
    --------
    >>> multiline = '''def f(x):
    ...     return x*x'''
    >>> print(indent(multiline))
        def f(x):
            return x*x
    >>> print(indent(multiline, numtabs=2))
            def f(x):
                return x*x
    >>> print(indent(multiline, tab='####'))
    ####def f(x):
    ####    return x*x
    """
    unit = ' ' * spacespertab if tab is None else tab
    prefix = unit * numtabs
    return prefix + text.replace('\n', '\n' + prefix)
def remove_item(thislist, item):
    """
    Remove the first occurrence of ``item`` from ``thislist``, silently
    doing nothing when it is absent.

    @param thislist : list of items
    @param item : item which may or may not be in the list
    @return: reduced list (same list object, mutated in place)
    """
    if item in thislist:
        thislist.remove(item)
    return thislist
def readgenefile(genefile):
    """Read a comma-separated gene list file.

    Each line holds the gene name first, followed by optional linker fields.

    @param genefile : path of the gene list file
    @return: tuple ``(success, genes, linkers)``; on any read failure
        ``success`` is False and both lists are empty.
    """
    genes = []
    linkers = []
    success = False
    try:
        with open(str(genefile), 'r') as handle:
            for raw_line in handle:
                fields = raw_line.rstrip('\n').split(',')
                genes.append(fields[0])
                linkers.append(fields[1:])
        success = True
    except IOError:
        print("Gene list file not found. Try again.")
        genes = []
        linkers = []
    return success, genes, linkers
def is_2numbers(astring):
    """
    (str) -> Boolean

    Return True when ``astring`` contains at least two digit characters,
    otherwise False.

    >>> is_2numbers('CIS122')
    True
    >>> is_2numbers('Ducks')
    False
    >>> is_2numbers('ABC-1')
    False
    """
    return sum(1 for char in astring if char.isdigit()) >= 2
def left_remove(text, to_remove):
    """ Removes a part of a string, if it starts with it.
    Similar to str.lstrip, see note below on right_remove
    """
    # Slice off the prefix instead of using str.replace; the prefix check
    # guarantees the removed occurrence is the leading one.
    if not text.startswith(to_remove):
        return text
    return text[len(to_remove):]
def get_engagement_rate_max(selectpicker_id: str) -> int:
    """
    Max-Delegate-target of get_engagement_rates_min_max (dac-pattern)

    :param selectpicker_id: picker key ("1".."16")
    :return: int -> max-value of engagement-rate (99 for unknown keys)
    """
    # Ids "1".."10" map to their own numeric value; higher ids step up in
    # coarser increments until the 99 ceiling.
    lookup = {str(n): n for n in range(1, 11)}
    lookup.update({"11": 15, "12": 20, "13": 30, "14": 40, "15": 50, "16": 99})
    return lookup.get(selectpicker_id, 99)
def format_author_ed(citation_elements):
    """Standardise to (ed.) and (eds.)
    e.g. Remove extra space in (ed. )
    """
    replacements = (('(ed. )', '(ed.)'), ('(eds. )', '(eds.)'))
    for element in citation_elements:
        if element['type'] == 'AUTH':
            for old, new in replacements:
                element['auth_txt'] = element['auth_txt'].replace(old, new)
    return citation_elements
def make_dict_from_tree(element_tree):
    """Traverse the given XML element tree to convert it into a dictionary.

    Leaf elements map tag -> text; repeated sibling tags collapse into a
    list of values.

    :param element_tree: An XML element tree
    :type element_tree: xml.etree.ElementTree.Element
    :rtype: dict
    """
    def internal_iter(tree, accum):
        """Recursively iterate through the elements of the tree accumulating
        a dictionary result.

        :param tree: The XML element tree
        :type tree: xml.etree.ElementTree.Element
        :param accum: Dictionary into which data is accumulated
        :type accum: dict
        :rtype: dict
        """
        if tree is None:
            return accum

        # Bug fix: Element.getchildren() was removed in Python 3.9.
        # len(tree) counts child elements, and iterating the element
        # yields them directly.
        if len(tree):
            accum[tree.tag] = {}
            for each in tree:
                result = internal_iter(each, {})
                if each.tag in accum[tree.tag]:
                    # Repeated sibling tag: promote to a list and append.
                    if not isinstance(accum[tree.tag][each.tag], list):
                        accum[tree.tag][each.tag] = [
                            accum[tree.tag][each.tag]
                        ]
                    accum[tree.tag][each.tag].append(result[each.tag])
                else:
                    accum[tree.tag].update(result)
        else:
            accum[tree.tag] = tree.text

        return accum

    return internal_iter(element_tree, {})
def str2int(val):
    """
    Convert a decimal or hex string into an int.

    Hex strings are recognised by a ``0x`` or ``0X`` prefix (the uppercase
    form previously raised ``ValueError``); everything else is parsed as
    decimal. An int passed in is returned unchanged.

    :param val: int, or string such as ``"42"`` or ``"0x2a"``
    :return: the integer value
    :raises ValueError: if the string is not a valid number
    """
    if isinstance(val, int):
        return val
    if val[:2].lower() == '0x':
        return int(val, 16)
    return int(val, 10)
def assign_rank_group(atom_num, assign, rank, rank_num):
    """
    |

    **Description:** Assign ``rank_num`` to every atom in the same group as
    ``atom_num``.

    **Input:**
    - atom_num: index of the reference atom whose group is ranked
    - assign: group label per atom (clusters based on ligand connectivity)
    - rank: per-atom rank list, updated in place
    - rank_num: rank value written for every atom in the matching group

    **Output**:
    - rank: the (mutated) rank list
    """
    target_group = assign[atom_num]
    for idx, group in enumerate(assign):
        if group == target_group:
            rank[idx] = rank_num
    return rank
def sanitise(dic):
    """Adds underscores to keys in dictionary"""
    return {f"_{key}": value for key, value in dic.items()}
def mutual_information_calc(response_entropy, conditional_entropy):
    """
    Calculate Mutual information.

    :param response_entropy: response entropy
    :type response_entropy: float
    :param conditional_entropy: conditional entropy
    :type conditional_entropy: float
    :return: mutual information as float, or the string ``"None"`` when the
        subtraction fails (e.g. a ``None`` operand)
    """
    try:
        mutual_information = response_entropy - conditional_entropy
    except Exception:
        return "None"
    return mutual_information
def is_full_connected(v: int, r: int) -> bool:
    """
    Check whether a graph is fully connected, i.e. has an edge between
    every pair of nodes.

    :param v: number of nodes graph has
    :param r: number of edges graph has
    :return: True if all nodes are connected, otherwise False

    :example:
    >>> is_full_connected(0, 1)
    False
    >>> is_full_connected(4, 6)
    True
    >>> is_full_connected(4, 3)
    False
    """
    # A complete graph on v nodes has exactly v*(v-1)/2 edges.
    return r == v * (v - 1) // 2
def _create_tree_from_edges(edges):
"""
Examples
--------
>>> from pprint import pprint
>>> pprint(_create_tree_from_edges([[1,2],[0,1],[2,3],[8,9],[0,3]]))
{0: [1, 3], 1: [2, 0], 2: [1, 3], 3: [2, 0], 8: [9], 9: [8]}
Parameters
----------
edges : list of pairs
"""
tree = {}
for v1, v2 in edges:
tree.setdefault(v1, []).append(v2)
tree.setdefault(v2, []).append(v1)
return tree |
def _is_spec_data(spec, spectype):
"""
Checks to see if the spec is data only
Args:
spec: to check
spectype: if any available
Returns:
true if only data, false if it is a spec
"""
if spec == 'nested' or spectype == 'nested':
return False
# if it is not a dictionary, then it is definitely not a spec
if not isinstance(spec, dict):
return True
for core_field in ['type', 'data', 'config', 'ref', 'refs', 'fields']:
if core_field in spec:
return False
# if empty, then may be using abbreviated notation i.e. field:type?param=value...
if len(spec) == 0:
return False
# didn't find any core fields, and spec is not empty, so this must be data
return True |
def _prepare_toc(toc):
"""Prepare the TOC for processing."""
# Un-nest the TOC so it's a flat list
new_toc = []
for chapter in toc:
sections = chapter.get('sections', [])
new_toc.append(chapter)
for section in sections:
subsections = section.get('subsections', [])
new_toc.append(section)
new_toc.extend(subsections)
# Omit items that don't have URLs (like dividers) or have an external link
return [
item for item in new_toc
if 'url' in item and not item.get('external', False)
] |
def smart_rename(dic):
    """
    assign MW in MDa or KDa depending on the string
    """
    for key in dic:
        raw = dic[key]
        # More than six digits before the decimal point -> report in MDa.
        integer_digits = len(raw.split(".")[0])
        if integer_digits > 6:
            dic[key] = "{} MDa".format(round(float(raw) / 1000000, 2))
        else:
            dic[key] = "{} KDa".format(round(float(raw) / 1000, 2))
    return dic
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.