content
stringlengths 42
6.51k
|
|---|
def rms(x):
    """Return the root mean square of array *x* (replacement for the
    removed matplotlib.mlab.rms_flat helper).
    """
    import numpy as np
    squared = x * x
    return np.sqrt(np.mean(squared))
|
def matrix_to_edge_list(matrix, n):
    """Build an edge list from the upper triangle (diagonal excluded) of an
    adjacency matrix. Intended for undirected graphs.

    Parameters
    ----------
    matrix : list of list
        Square adjacency matrix of an undirected graph, shape (n, n).
    n : int
        Number of rows/columns of the matrix.

    Returns
    -------
    list of dict
        One dict per edge: {'nodes': (i, j), 'similarity': value}, with the
        similarity clamped into [0, 1].
    """
    return [
        {
            'nodes': (row, col),
            # clamp the similarity into the [0, 1] interval
            'similarity': max(0, min(1, matrix[row][col])),
        }
        for row in range(n)
        for col in range(row + 1, n)
    ]
|
def get_stage_suffix(stage):
    """Map a packaging stage to its filename suffix.

    The 'final' stage gets no suffix; any other stage (e.g. a checkpoint)
    is appended as ".<stage>".

    :param stage: stage name string
    :return: '' for 'final', otherwise '.<stage>'
    """
    return '' if stage == 'final' else ".%s" % stage
|
def chebyshev(dx, dy):
    """Chebyshev (maximum-coordinate) distance for the given deltas."""
    if dx > dy:
        return dx
    return dy
|
def posFromPVT(pvt):
    """Return the position of a PVT, or None when pvt is None.

    Inputs:
    - PVT: an opscore.RO.PVT.PVT position, velocity, time object.
    Convenience wrapper so callers need not None-check first.
    """
    return None if pvt is None else pvt.getPos()
|
def avg_for_the_day(data):
    """Return the average value for each day in a list.

    Accepts a list of lists, each containing a datetime in position 0
    and a numerical (int or float) value in position 1.
    Returns a sorted list of (date, float) tuples.

    BUGFIX: the previous implementation averaged incrementally by halving
    (avg = (avg + new) / 2), which over-weights later entries and is wrong
    for three or more samples per day; it also round()ed to an int although
    the docstring promised a float. This version computes the true mean.
    """
    totals = {}
    counts = {}
    for entry in data:
        day = entry[0].date()
        totals[day] = totals.get(day, 0.0) + float(entry[1])
        counts[day] = counts.get(day, 0) + 1
    return sorted((day, totals[day] / counts[day]) for day in totals)
|
def make_range(probability, factor, current_value):
    """Return the new sample-space interval for a histogram entry.

    Param: probability (float): lower bound already consumed in the sample space
           factor (float): fraction of the source text for a token
           current_value (int): appearances of a type of word in the text
    Return: 2-tuple (low_end, high_end) where
            high_end = low_end + factor * current_value
    """
    low_end = probability
    return (low_end, low_end + (factor * current_value))
|
def _parse_text(text, dtype):
"""Parse the text of an element or an attribute."""
if dtype == str:
return text
elif dtype == float:
ret = [float(word) for word in text.split(' ')]
if len(ret) == 1:
ret = ret[0]
return ret
elif dtype == bool:
return bool(text)
else:
raise ValueError('Unrecognized type {} in Element{}'.format)
|
def encode_output_str(value):
    """Encode a text value as a UTF-8 byte string."""
    return value.encode('utf-8')
|
def filterdict(origdict, keyset):
    """Return the subset of origdict restricted to the keys in keyset.

    The resulting keys are taken from keyset (not origdict), which makes
    this work with case-insensitive dict classes: membership is only ever
    tested against origdict.
    """
    return {key: origdict[key] for key in keyset if key in origdict}
|
def last_index_not_in_set(seq, items):
    """Return the index of the last element of seq NOT in items, or None.

    DOCFIX: the old docstring said "last occurrence of any of items",
    which is the opposite of what the code (and the name) does.

    NOTE: We could do this slightly more efficiently by iterating over seq
    in reverse order, but then it wouldn't work on generators that can't
    be reversed.
    """
    found = None
    for i, s in enumerate(seq):
        if s not in items:
            found = i
    return found
|
def load_words_file(filename):
    """Load an ignore or only file as a list of words.

    BUGFIX: previously the file branch returned the raw file contents as a
    single string, while the empty-filename branch (and the docstring)
    promised a list. Now the contents are split on whitespace.

    :param filename: path to the word file, or a falsy value
    :return: list of words ([] when filename is falsy)
    """
    if not filename:
        return []
    with open(filename, "r") as handle:
        return handle.read().split()
|
def _use_constant_crc_init(sym):
"""
Return True if the inintial value is constant.
"""
return sym['crc_init_value'] is not None
|
def pack_str(string: str) -> bytes:
    """Pack a string into a bytes object, one byte per character.

    Arguments:
        string: The string to pack (code points must fit in one byte).
    Returns:
        The bytes object representing the string in network order (big endian).
    """
    buffer = bytearray()
    for character in string:
        buffer.append(ord(character))
    return bytes(buffer)
|
def sum_str_to_list_izq(s, lst):
    """Prepend string *s* to a string, or to every element of a list (in place).

    :param s: string prefix
    :param lst: a list of strings (mutated in place) or a single string
    :return: the prefixed string, or the same list with each element prefixed
    :raises Exception: when lst is neither a str nor a list
    """
    assert isinstance(s, str)
    if isinstance(lst, str):
        return s + lst
    if isinstance(lst, list):
        for idx in range(len(lst)):
            lst[idx] = s + lst[idx]
        return lst
    raise Exception('Tipo l incorrecto')
|
def s2b(s):
    """Convert a string to its binary representation (8 bits per char,
    zero-padded; wider code points keep their full bit width)."""
    return "".join(format(ord(ch), '08b') for ch in s)
|
def _is_breakline(statement):
"""Returns True if statement is a breakline, False otherwise."""
return len(statement) == 1 and statement[0].strip() == ''
|
def get_lines(blocks):
    """Flatten text blocks into a list of lines, each a list of words.

    :param list[TextBlock] blocks: List of text blocks.
    :return: List of lines (one list of words per line)
    :rtype: list[list[TextWord]]
    """
    return [
        [word for word in line]
        for block in blocks
        for paragraph in block
        for line in paragraph
    ]
|
def bbox_to_geojson(bounds):
    """Convert a bounding box into a GeoJSON Polygon feature.

    Args:
        bounds (list): coordinates [left, bottom, right, top].
    Returns:
        dict: A GeoJSON feature whose polygon ring is closed
        (first vertex repeated at the end).
    """
    left, bottom, right, top = bounds[0], bounds[1], bounds[2], bounds[3]
    ring = [
        [left, top],
        [left, bottom],
        [right, bottom],
        [right, top],
        [left, top],
    ]
    return {
        "geometry": {
            "type": "Polygon",
            "coordinates": [ring],
        },
        "type": "Feature",
    }
|
def above_threshold(student_scores, threshold):
    """Return the scores that are at or above the threshold.

    :param student_scores: list of integer scores
    :param threshold: integer cutoff (inclusive)
    :return: list of scores >= threshold, in original order
    """
    return [score for score in student_scores if score >= threshold]
|
def get_latest_snapshot_identifier(snapshot_list, engine):
    """Return the identifier of the newest 'available' snapshot.

    Aurora engines use the cluster-snapshot identifier key; all other
    engines use the instance-snapshot identifier key. Returns '' when
    no snapshot is available.
    """
    newest_time = None
    newest_id = ''
    for snap in snapshot_list:
        if snap['Status'] != 'available':
            continue
        created = snap['SnapshotCreateTime']
        if newest_time is None or created > newest_time:
            newest_time = created
            if 'aurora' in engine:
                newest_id = snap['DBClusterSnapshotIdentifier']
            else:
                newest_id = snap['DBSnapshotIdentifier']
    return newest_id
|
def get_valid_coastal_variant(split, possible_orders):
    """Find a coastal variation of the `split` order present in `possible_orders`.

    Args:
    - split: a list of order string components,
      e.g. ["F", "AEG", "S", "F", "BUL", "-", "GRE"]
    - possible_orders: a list of order strings,
      e.g. ["F AEG S F BUL/SC - GRE", "F AEG H", "F AEG - GRE", ...]

    Tries coast suffixes ("", "/NC", "/EC", "/SC", "/WC") on the loc,
    from_loc and to_loc positions until a candidate joins to a string found
    in possible_orders.

    Returns the matching split order, or None if none is found.
    """
    for pos in (1, 4, 6):  # loc, from_loc, to_loc
        if len(split) <= pos:
            continue
        base = split[pos].split("/")[0]
        for suffix in ("", "/NC", "/EC", "/SC", "/WC"):
            candidate = split[:pos] + [base + suffix] + split[pos + 1:]
            if " ".join(candidate) in possible_orders:
                return candidate
    return None
|
def make_hashable_params(params):
    """Convert a params dict into a hashable tuple of (key, value) pairs.

    dict values become nested tuples of their items; list/set values become
    tuples. BUGFIX: dict-valued entries previously dropped their key
    (the bare item-tuple was appended instead of a (key, items) pair),
    inconsistent with every other branch.

    Args:
        params (dict): keyword parameters to make hashable.
    Returns:
        tuple: hashable tuple of (key, value) pairs.
    Raises:
        TypeError: if any converted value is still unhashable.
    """
    tuple_params = []
    for key, value in params.items():
        if isinstance(value, dict):
            tuple_params.append((key, tuple(value.items())))
        elif isinstance(value, (list, set)):
            # NOTE(review): set iteration order is arbitrary, so equal sets
            # may hash differently across runs -- confirm with callers.
            tuple_params.append((key, tuple(value)))
        else:
            tuple_params.append((key, value))
    tuple_params = tuple(tuple_params)
    try:
        hash(tuple_params)
    except TypeError:
        raise TypeError('The values of keywords given to this class must be hashable.')
    return tuple_params
|
def generate(random, pid, autogen_tools, n):
    """Generate an instance of the problem.

    Returns the per-instance manifest: files to copy for this instance plus
    the problem description update. All arguments are currently unused
    because the instance is static.
    """
    description = "<p>Having tried all of the algorithms that we can think of, we set our best engineer on creating a custom algorithm</p><p>They came up with a <a href=/problem-static/crypto/custom/algorithm.txt>custom algorithm</a> based on doing a <a href=/problem-static/crypto/custom/description.txt>number of substitutions and permutations</a> on the input</p><p>Can you implement the algorithm in order to encrypt the password 'abcdefghi'</p>"
    manifest = {
        "resource_files": {
            "public": [],
        },
        "static_files": {},
        "problem_updates": {
            "description": description,
        },
    }
    return manifest
|
def pairwise(iterable):
    """Group an iterable into non-overlapping pairs.

    s -> (s0,s1), (s2,s3), (s4, s5), ...  A trailing odd element is dropped.

    >>> liste = ['C1', 'C2', 'C2', 'C3', 'C4', 'C5', 'C5', 'C6']
    >>> pairwise(liste)
    [('C1', 'C2'), ('C2', 'C3'), ('C4', 'C5'), ('C5', 'C6')]
    """
    it = iter(iterable)
    return list(zip(it, it))
|
def in_decl_set(decls, c):
    """Return True when cursor *c* equals any cursor in *decls*."""
    return any(c == decl for decl in decls)
|
def spring1s(ep, ed):
    """Compute the element force in a spring element (spring1e).

    Parameters:
        ep  -- k, spring stiffness or analog quantity
        ed  -- [u1, u2] element (nodal) displacements
    Returns:
        es  -- element force [N]
    """
    stiffness = ep
    return stiffness * (ed[1] - ed[0])
|
def parse_molecule_gcmc(dlstr):
    """Parse a Grand Canonical MC molecule line (id + chemical potential
    or partial pressure).

    :param dlstr: whitespace-separated "<id> <molpot>" string
    :return: {"id": str, "molpot": float}
    :raises ValueError: for any malformed input.
        BUGFIX: float() failures (ValueError) and None input
        (AttributeError) previously escaped with their raw messages
        instead of the intended diagnostic.
    """
    try:
        tok = dlstr.split()
        molecule = {"id": tok[0], "molpot": float(tok[1])}
    except (AttributeError, IndexError, TypeError, ValueError):
        raise ValueError("Unrecognised GCMC Molecule: {!r}".format(dlstr))
    return molecule
|
def idf(term, corpus):
    """Compute the inverse document frequency of *term* in *corpus*.

    IDF is log(total documents / documents containing the term). When no
    document contains the term, 1 is used as the denominator to avoid
    dividing by zero (as documented in the original).

    BUGFIX: the previous zero-hit fallback read
    ``log(len(corpus)/1 + documents_with_term)`` — operator precedence
    makes that ``len(corpus)/1 + n`` rather than ``len(corpus)/(1+n)``.
    It was numerically harmless only because the branch implied n == 0;
    this rewrite makes the intent explicit.

    Parameters:
        term: a string containing the search term
        corpus: a list of lists; the outer list is the corpus, the inner
            lists are tokenized document texts
    Return Value: a float representing the idf value
    """
    from math import log
    documents_with_term = sum(1 for document in corpus if term in document)
    if documents_with_term == 0:
        documents_with_term = 1
    return log(len(corpus) / documents_with_term)
|
def getpath(data, path):
    """Return the value at a dot-separated getitem path.

    `data` can be a nested combination of dicts and arrays. Each path part
    is interpreted as an integer index when it parses as one.
    """
    node = data
    for part in path.split('.'):
        try:
            part = int(part)
        except ValueError:
            pass  # keep the string key as-is
        node = node[part]
    return node
|
def calculate_cigar_operations_lady(current_readlength, insertions, deletions, substitutions):
    """
    Given a read length, and three lists of positions
    with insertions, deletions, substitutions, respectively,
    calculate the cigar string for one read.
    Return list of pairs of (cigar operation codes, count) for pysam.
    Updated because current_readlength is not equal to generated read length any more.

    Runs a small state machine over positions 0..current_readlength-1,
    extending the current run (`last_op`, `count`) or flushing it when the
    operation changes. Insertions may repeat the same position to encode a
    multi-base insertion (handled by the inner while loop).
    NOTE(review): assumes insertions/deletions/substitutions are each sorted
    ascending -- confirm with the caller.
    """
    MATCH, DELETION, INSERTION, SUBST = (7, 2, 1, 8) # PySam CIGAR Operation Codes
    cigar = []
    # current run: `count` consecutive positions of operation `last_op`
    count = 0
    last_op = MATCH
    # read cursors into the three sorted position lists
    point_ins = 0
    point_del = 0
    point_sub = 0
    for i in range(current_readlength):
        if point_ins < len(insertions) and i == insertions[point_ins]:
            # multiple insertions get the same index
            cigar.append((last_op, count))
            count = 1
            last_op = INSERTION
            point_ins += 1
            # consume further repeats of the same position (multi-base insertion)
            while point_ins < len(insertions) and i == insertions[point_ins]:
                count += 1
                point_ins += 1
        # note: an insertion at position i does NOT preclude a deletion /
        # substitution / match at the same position below
        if point_del < len(deletions) and i == deletions[point_del]:
            # del
            point_del += 1
            if last_op == DELETION:
                count += 1
            else:
                # operation changed: flush the previous run, start a new one
                cigar.append((last_op, count))
                count = 1
                last_op = DELETION
        elif point_sub < len(substitutions) and i == substitutions[point_sub]:
            point_sub += 1
            if last_op == SUBST:
                count += 1
            else:
                cigar.append((last_op, count))
                count = 1
                last_op = SUBST
        else:
            if last_op == MATCH:
                count += 1
            else:
                cigar.append((last_op, count))
                count = 1
                last_op = MATCH
    # flush the final run
    cigar.append((last_op, count))
    # drop the artificial leading (MATCH, 0) entry produced by the initial state
    if cigar[0][1] == 0:
        cigar = cigar[1:]
    return cigar
|
def get_previous_blank_line_no(lines, idx):
    """Walk backwards from idx and return the index of the first blank line."""
    while lines[idx].strip():
        idx -= 1
    return idx
|
def overrides(method):
    """Decorator marking that the decorated method overrides one in a superclass.

    Currently a no-op for py3 compatibility: the method is returned
    unchanged, so runtime cost is negligible. The original (disabled)
    implementation walked the base classes of the defining frame to assert
    a matching method exists and to inherit its docstring; see
    fwc's algorithm: http://stackoverflow.com/a/14631397/308189
    and http://stackoverflow.com/a/8313042/308189.

    How to use:
        from overrides import overrides
        class SubClass(SuperClass):
            @overrides
            def method(self):
                return 1

    :return: the method, unchanged
    """
    return method
|
def _clean_metrics(metrics, output_format="float"):
"""
Reformat metrics dictionary
"""
new_dict = {}
for k, v in metrics.items():
if isinstance(v, dict):
v = v["string"]
if isinstance(v, str):
if v.endswith("%"):
v = v[:-1]
if output_format == "float":
v = float(v)
new_dict[k] = v
return new_dict
|
def get_local_type(xmltype):
    """Simplify an XML type name for nice printing (e.g. XMLInteger -> int).

    Unknown type names are returned unchanged.
    """
    simplified = {
        "XMLBoolean": 'bool',
        "XMLDecimal": 'decimal',
        "XMLInteger": 'int',
        "XMLDouble": 'float',
        "XMLString": 'str',
        "XMLDate": 'date',
        "XMLDateTime": 'datetime',
    }
    return simplified.get(xmltype, xmltype)
|
def raw_string(txt):
    """
    Python automatically converts escape characters (i.e. \\n), which causes
    problems when inputing latex strings since they are full of backslashes.
    This function returns a raw string representation of text
    Parameters
    ----------
    txt : string
        string that possibly contains escape characters
    Returns
    -------
    new_text : string
        same as 'text' but without any escape characters
    """
    # Map each recognized escape character (a single control character such
    # as BEL, backspace, newline...) back to its two-character raw spelling.
    # NOTE(review): '\c' is NOT a recognized Python escape, so that key is
    # actually the two-character string backslash+c — it can never match a
    # single character in the loop below and is effectively dead.
    # Also note '\'' and '\"' are just ' and ", so plain quotes get a
    # backslash prepended — presumably intended for LaTeX; confirm.
    escape_dict={'\a':r'\a',
                 '\b':r'\b',
                 '\c':r'\c',
                 '\f':r'\f',
                 '\n':r'\n',
                 '\r':r'\r',
                 '\t':r'\t',
                 '\v':r'\v',
                 '\'':r'\'',
                 '\"':r'\"'}
    #I used to have '\1' thru '\9' in the list above, but for some strange
    #reason '\a' and '\7' both get mapped to '\x07'. Thus, when someone inputs
    #'\alpha', it gets mapped to '\x07lpha'. If '\7' is listed in the escape
    #dictionary after '\a', then '\x07lpha' gets translated to '\\7lpha', which
    #LaTeX cannot understand. As far as I know, LaTeX never starts a command
    #with a number, so I just got rid of all the numbers in the escape
    #dictionary.
    # Rebuild the string character by character, expanding mapped escapes
    # and copying everything else through unchanged.
    new_txt=''
    for char in txt:
        try: new_txt+=escape_dict[char]
        except KeyError: new_txt+=char
    return new_txt
|
def distance(a, b):
    """Euclidean distance between 2-D points a and b."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return (dx ** 2 + dy ** 2) ** 0.5
|
def calculate_temperature_rise_input_power_weight(
    power_input: float,
    weight: float,
) -> float:
    """Calculate the temperature rise based on input power and xfmr weight.

    .. attention:: input power must be calculated by the calling function from
        voltage and current as it is not an attribute of an inductive device.

    :param power_input: the input power in W.
    :param weight: the weight of the xfmr in lbf.
    :return: the calculated temperature rise in C.
    :rtype: float
    :raise: ZeroDivisionError if passed a weight=0.0.
    """
    ratio = power_input / weight**0.6766
    return 2.1 * ratio
|
def write_new_frag_file(barcode_cluster, list_parts_quality, new_fragment_file):
    """Write one fragment record for a barcode cluster.

    :param barcode_cluster: barcode identifier written after "BX::"
    :param list_parts_quality: pair [allele_parts, quality_string]
    :param new_fragment_file: open writable file-like object
    :return: 1 (always; a record is only written when the quality string
        has more than one character)
    """
    parts = list_parts_quality[0]
    quality = list_parts_quality[1]
    # Single-allele clusters are skipped (could be merged into a previous part).
    if len(quality) > 1:
        new_fragment_file.write("{} BX::{}".format(len(parts), barcode_cluster))
        for part in parts:
            new_fragment_file.write(" {} {}".format(part[0], part[1]))
        new_fragment_file.write(" {}\n".format(quality))
    return 1
|
def generate_negative_proposition(proposition):
    """Negate a proposition, e.g. 1:red -> ~(1:red)."""
    return "~(" + proposition + ")"
|
def simple_solution_sum(solutions, *args, **kwargs):
    """Sum the scores of the given solutions, ignoring None entries and
    treating a missing/None score as 0. Extra arguments are ignored."""
    return sum(sol.score or 0 for sol in solutions if sol is not None)
|
def copy_grid(grid):
    """Return a shallow row-wise copy of the grid (rows are new lists)."""
    return [list(row) for row in grid]
|
def filter_geom(geom, _type):
    """Return all elements of geom that are instances of _type."""
    return [element for element in geom if isinstance(element, _type)]
|
def from_pt(value, units, dpi=96):
    """Convert a length in points into the given units.

    Arguments
    ---------
    value : float
        length in points
    units : str
        target unit ("pt", "px", "in"/"inches", "cm", "mm")
    dpi : float / int
        dots per inch (conversion between inches and px)

    Return
    ------
    length in the requested units

    The conversion cascades: metric units are rewritten via inches, inches
    via pixels, and pixels via the pt->px factor (4/3 at 96 dpi).
    """
    if units == "pt":
        return value
    if units == "cm":
        value = value * 2.54
        units = "in"
    if units == "mm":
        value = value * 25.4
        units = 'in'
    if units in ("in", "inches"):
        value = value / dpi
        units = "px"
    if units == "px":
        value = value * 4/3
    return value
|
def compactRange(values):
    """Build the range string that lists all values in the given list in a compacted form.

    *values* is a list of integers (may contain duplicate values and does not
    have to be sorted; the input list is left unmodified). The return value is
    a string that lists all values (sorted) in a compacted form. The returned
    range string can be passed into a :class:`Range` object to create the
    expanded integer sequence again.

    BUGFIX: the collapse step previously emitted "begin end" and
    "begin end step" (space-separated), contradicting both the doctest
    examples below and the Range syntax; it now emits "begin-end" and
    "begin-endxstep".

    Examples:
    >>> compactRange([1,2,3,4,5,6])
    '1-6'
    >>> compactRange([2,4,6,8])
    '2-8x2'
    >>> compactRange([1,2,3,12,11,10])
    '1-3,10-12'
    """
    if len(values) == 0:
        return ""
    # Sort a copy so the caller's list is not mutated (was an in-place sort).
    values = sorted(values)
    # The range list contains lists [start, end, step].
    v = values[0]
    rangeList = [[v, v, None]]
    # Build the range list
    for v in values[1:]:
        r = rangeList[-1]
        begin, end, step = r
        if v != end:  # equal values are duplicates and are skipped
            if begin == end:
                # second value of a range fixes its step
                step = v - begin
                r[2] = step
            if end + step == v:
                r[1] = v
            else:
                rangeList.append([v, v, None])
    # Go through all individual ranges and check if ranges that only contain
    # two values can be changed so that the end value is put into the
    # subsequent range (e.g. 1-100x99,101 -> 1,100-101)
    for i in range(len(rangeList) - 1):
        begin, end, step = rangeList[i]
        # Is this a range containing 2 values? Then check if its second
        # value can advantageously be moved into the subsequent range.
        if begin != end and (end - begin) // step == 1:
            begin2, end2, step2 = rangeList[i + 1]
            if begin2 == end2:
                # The next range holds a single value: only move when the
                # new step is smaller than the old step in the first range.
                step2 = begin2 - end
                if step2 < step:
                    rangeList[i + 1][0] = end
                    rangeList[i + 1][2] = step2
                    rangeList[i][1] = begin
            elif begin2 - step2 == end:
                # The next range has several values and its step lines up
                # with our end value, so absorb it there.
                rangeList[i + 1][0] = end
                rangeList[i][1] = begin
    # Collapse the range list into strings (such as "1-99,110,200-220x2").
    rs = []
    for begin, end, step in rangeList:
        if begin == end:
            rs.append(str(begin))
        elif step == 1:
            # Step of 1 is implicit in the "begin-end" form.
            rs.append("%s-%s" % (begin, end))
        elif (end - begin) // step == 1:
            # Two values with a step != 1: list them individually.
            rs.append("%s,%s" % (begin, end))
        else:
            # Full sub-range, including step.
            rs.append("%s-%sx%s" % (begin, end, step))
    return ",".join(rs)
|
def factors(number):
    """Return a mapping of each factor of *number* to its cofactor.

    :param number: the int to factorize
    :return: dict mapping factor -> number // factor
    :raises TypeError: if number is not an int

    BUGFIX: the cofactor was computed as ``int(number / num)``, which goes
    through float division and silently loses precision for very large
    integers; floor division keeps exact integer arithmetic.
    """
    if not isinstance(number, int):
        raise TypeError('number must be an int')
    result = {}
    for candidate in range(1, number + 1):
        if number % candidate == 0:  # no remainder: candidate divides number
            result[candidate] = number // candidate
    return result
|
def copy_letter(letter: str, source: str, target: str) -> str:
    """Copy *letter* into *target* at every position where *source* holds
    that letter (case-insensitively).

    Arguments:
        letter - the letter to copy (written as given, case preserved)
        source - string scanned for occurrences of the letter
        target - string receiving the letter at the matching positions
    Returns:
        The target string with the letter copied in from source.
    """
    wanted = letter.lower()
    updated = target
    for pos, ch in enumerate(source):
        if ch.lower() == wanted:
            updated = updated[:pos] + letter + updated[pos + 1:]
    return updated
|
def dedup(seq):
    """Remove duplicates from a sequence, but:
    1. don't change element order
    2. keep the LAST occurrence of each element instead of the first

    Example:
        dedup([1, 2, 1, 3, 4, 1, 2, 6, 2]) -> [3, 4, 1, 6, 2]

    Membership uses ==, so elements need not be hashable.
    """
    kept = []
    # Walk backwards keeping first-seen (i.e. last-in-original) elements,
    # then restore the original relative order.
    for element in reversed(seq):
        if element not in kept:
            kept.append(element)
    kept.reverse()
    return kept
|
def findSmallestInt(arr):
    """Return the smallest element of *arr*.

    :param arr: non-empty list of comparable values
    :return: the minimum element
    :raises ValueError: if arr is empty (was IndexError when sorting)

    Uses the builtin min() — O(n) instead of sorting the whole list.
    """
    return min(arr)
|
def _is_audio_link(link):
"""Checks if a given link is an audio file"""
if "type" in link and link["type"][:5] == "audio":
return True
if link["href"].endswith(".mp3"):
return True
return False
|
def num_to_varint(a):
    """Encode an integer as a Bitcoin-style CompactSize varint.

    Based on project: https://github.com/chaeplin/dashmnb

    Values below 253 are a single byte; larger values are a one-byte prefix
    (253/254/255) followed by a 2-, 4- or 8-byte little-endian payload.
    """
    value = int(a)
    if value < 253:
        return value.to_bytes(1, byteorder='big')
    if value < 65536:
        prefix, width = 253, 2
    elif value < 4294967296:
        prefix, width = 254, 4
    else:
        prefix, width = 255, 8
    return prefix.to_bytes(1, byteorder='big') + value.to_bytes(width, byteorder='little')
|
def dict_to_sequence(d):
    """Return d.items() when d is mapping-like, otherwise d unchanged
    (used to feed internal sequence-dictionary updates)."""
    return d.items() if hasattr(d, "items") else d
|
def safe_string(value):
    """Consistently convert a value to str (bytes are UTF-8 decoded).

    :param value: any value
    :return: str
    """
    return value.decode() if isinstance(value, bytes) else str(value)
|
def domain_to_aol_attr_convert(quad_attr):
    """Convert a domain-level attribute name (a valid Python name) to the
    AOL-level syntax: non-"is_" names get a "has_" prefix, and underscores
    become hyphens."""
    name = quad_attr if quad_attr.startswith('is_') else f'has_{quad_attr}'
    return name.replace('_', '-')
|
def six_digit(password):
    """Check whether the password number has exactly 6 digits.

    Parameters
    ----------
    password : int
        password number
    Returns
    -------
    bool
        True if password is a six-digit number, False otherwise
    """
    return 99999 < password < 1000000
|
def csv(arg):
    """Split a comma-separated input argument into a list of stripped
    strings."""
    return list(map(str.strip, arg.split(',')))
|
def format_value(value):
    """Normalize a value into a string for the (transformer) encoder.

    Heuristic kept minimal on purpose: any number-aware logic must happen
    before this point; here the value is stringified and an empty string is
    replaced by the word 'empty', which the model handles better.
    """
    text = str(value)
    if text == "":
        return 'empty'
    return text
|
def parse_player_id(data: dict) -> int:
    """Extract the player ID from the data payload as an int."""
    pid = data["pid"]
    return int(pid)
|
def transform_pagerduty_results(results):
    """Reduce the PagerDuty on-call API results to the fields we care about
    (name, email, escalation level, start, end)."""
    return [
        {
            'name': entry['user']['name'],
            'email': entry['user']['email'],
            'level': entry['escalation_level'],
            'start': entry['start'],
            'end': entry['end'],
        }
        for entry in results['oncalls']
    ]
|
def format_number_latex(number: float, sig_figures: int = 3) -> str:
    """Format a number for LaTeX, rounded to significant figures.

    Exponential results are rendered as ``[number] \\times 10^{[exponent]}``.

    Parameters
    ----------
    number :
        Number to format.
    sig_figures:
        Number of significant figures. Optional. Default 3.
    """
    text = format(number, f'#.{sig_figures}g')
    if 'e' not in text:
        return text
    mantissa, _, exponent = text.partition('e')
    return f'{mantissa} \\times 10^{{{int(exponent)}}}'
|
def _calc_c(H, r_eq):
"""
Calculates the b coefficient used to calculate the distance from the satellite
to a point P
Parameters
----------
H : int
GOES-16 projection perspective point height, in meters
r_eq : int
GOES-16 semi-major axis of projection, in meters
Returns
-------
float
"""
return (H**2 - r_eq**2)
|
def format_repo_info(vcs_name, vcs_path, vcs_type, integration_status):
    """Build the repository-information dict.

    :param str vcs_name: name of the repository
    :param str vcs_path: absolute path to the repository
    :param str vcs_type: type of the underlying vcs
    :param str integration_status: status of perun integration into the underlying vcs
    :return: dict with the repository information
    """
    info = {
        'name': vcs_name,
        'path': vcs_path,
        'vcs_type': vcs_type,
        'integration': integration_status,
    }
    return info
|
def _read_dict(instream, until=None):
"""Read key-value pairs."""
result = {}
for line in instream:
if not line:
continue
if ' ' not in line:
break
keyword, values = line[:-1].split(' ', 1)
result[keyword] = values.strip()
if keyword == until:
break
return result
|
def median(values):
    """Calculate the median of the values.

    :param values: iterable of comparable numeric values
    :return: the middle value (odd count) or the mean of the two middle
        values (even count)
    """
    ordered = sorted(values)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2
|
def return_network_layers(connectivity):
    """Return the number of layers for a network described by a
    connectivity string such as "BLT": 13 when it contains "D"
    (the (7-1)*2+1 deep variant), otherwise 7."""
    return 13 if "D" in connectivity else 7
|
def maximum_mutation_frequency(H, F, D):
    """Calculate the maximum mutation frequency.

    INPUT
    -----
    [INT] [H]
        The number of haplotypes.
    [FLOAT LIST] [F]
        A list of (relative) frequencies.
    [2D ARRAY] [D]
        A distance matrix of haplotypes pair-wise genetic distances
        (fraction of nt differences).

    RETURN
    ------
    [FLOAT]
        The maximum mutation frequency: sum over i of F[i] * D[0][i].
    """
    return sum(F[i] * D[0][i] for i in range(H))
|
def fahrenheit_to_celsius(temp):
    """Convert degrees Fahrenheit to degrees Celsius.

    :param float temp: The temperature in Fahrenheit
    :return: The temperature in Celsius
    :rtype: float
    """
    offset = temp - 32.0
    return offset / 1.8
|
def pid_filename(component: str) -> str:
    """Return the canonical pid filename for the given component name."""
    return ".lta-" + component + "-pid"
|
def summary_data(name_list, weight_list):
    """Format fruit names and weights into one HTML-ish summary string
    (for writing into a pdf file).

    Args:
        name_list (list): list of fruit names.
        weight_list (list): list of the weight of each fruit (lbs),
            indexed in parallel with name_list.
    Returns:
        str: one "name/weight" segment per fruit, concatenated.
    """
    segments = []
    for idx, fruit in enumerate(name_list):
        segments.append('name: {} <br /> weight: {} <br /><br />'.format(fruit, weight_list[idx]))
    return "".join(segments)
|
def is_section(line: str, pattern: str) -> bool:
    """Return True when *pattern* occurs anywhere in *line*."""
    return pattern in line
|
def lorenz(t, x, sigma=10, beta=2.66667, rho=28):
    """Right-hand side of the Lorenz system at state x (t is unused,
    kept for ODE-solver signatures).

    parameters:
        sigma : 10
        beta  : 2.66667
        rho   : 28
    """
    x0, x1, x2 = x[0], x[1], x[2]
    dx0 = sigma * (x1 - x0)
    dx1 = x0 * (rho - x2) - x1
    dx2 = x0 * x1 - beta * x2
    return [dx0, dx1, dx2]
|
def urljoin(*args: str) -> str:
    """Join the arguments with forward slashes to form a URL.

    Trailing slashes are stripped from every part before joining.

    :param args: strings (or stringables) to join
    :return: the joined URL string
    """
    parts = [str(part).rstrip('/') for part in args]
    return "/".join(parts)
|
def _handle_special_yaml_cases(v):
"""Handle values that pass integer, boolean or list values.
"""
if ";" in v:
v = v.split(";")
else:
try:
v = int(v)
except ValueError:
if v.lower() == "true":
v = True
elif v.lower() == "false":
v = False
return v
|
def InterferenceDict(data_list):
    """Create a nested {lat: {lng: data}} dict from a list of
    (lat, lng, interference) triples. A single triple (not in a list) is
    also accepted."""
    if not isinstance(data_list, list):
        data_list = [data_list]
    table = {}
    for lat, lon, payload in data_list:
        table.setdefault(lat, {})[lon] = payload
    return table
|
def generate_challenge(user: str, passwd: str, device_token: str, sendviasms: bool) -> str:
    """Generate a 2FA challenge ID. Can be rolled into other packages later.

    For now this returns a mock ID; the computed challenge type ("sms" or
    "email") is kept for when the real 2FA service is wired in.
    """
    challenge_type = "sms" if sendviasms else "email"  # TODO: use with real 2FA service
    # Placeholder mock challenge ID, to be replaced by the 2FA service.
    return 'mock_challangeid1'
|
def _concat_fasls(ctx, inputs, output):
    """Concatenates several FASLs into a combined FASL.

    NOTE: this is Bazel Starlark rule code (ctx is a Starlark rule context),
    not plain Python.

    Args:
      ctx: Rule context
      inputs: List of files to concatenate.
      output: File output for the concatenated contents.

    Returns None when there is nothing to concatenate, the single input
    unchanged (no copy action) when there is exactly one, and *output*
    after registering a shell action otherwise.
    """
    if not inputs:
        return None
    elif len(inputs) == 1:
        # Single input: pass it through without scheduling a cat action.
        return inputs[0]
    else:
        # $1 is the output path, ${@:2} are the input paths (see args below).
        cat_command = "cat ${@:2} > $1"
        cat_args = ctx.actions.args()
        cat_args.add(output)
        cat_args.add_all(inputs)
        ctx.actions.run_shell(
            inputs = inputs,
            outputs = [output],
            # NOTE(review): "%{output}" looks like it relies on Bazel's
            # progress-message substitution -- confirm the key is supported.
            progress_message = "Combining %{output}",
            mnemonic = "LispConcatFASLs",
            command = cat_command,
            arguments = [cat_args],
        )
        return output
|
def zone_current_temperature(zone):
    """Return the current temperature stored in the zone mapping
    (key "currenttemperature")."""
    return zone["currenttemperature"]
|
def convertfrom_hex_notation(bytes_string: bytes):
    """Return the hexadecimal representation of a bytes object.

    Turns raw bytes such as b'\\x00\\x01\\x02' into their two-digit-hex
    spelling b'000102' (the "hexadecimal representation of binary data",
    as the binascii module terms it). The input object is not modified.

    Examples:
        >>> convertfrom_hex_notation(b"\\x00\\x0c")
        b'000c'

    References:
        https://stackoverflow.com/questions/20556139/how-do-i-parse-a-captured-packet-in-python

    Args:
        bytes_string (bytes): bytes object to render as hex digits.
    Returns:
        bytes: the hex-digit representation.
    """
    import binascii
    # binascii.hexlify is an alias of binascii.b2a_hex.
    return binascii.hexlify(bytes_string)
|
def undo_replace_underscores(ranger_outputs):
    """
    opens all the ranger output files, replaces the XX0XX in each tipname
    with an underscore like it was originally
    args: list of file names
    output: modifies the contents of those files to have correct underscore placement.

    Returns the (unchanged) input list of file names. Paths are resolved
    relative to the hard-coded "ranger_outputs/" directory, so this must be
    run from the directory containing it.
    """
    for file in ranger_outputs:
        # read the whole file, substitute the placeholder back to '_'
        with open("ranger_outputs/"+file) as old:
            text = old.read()
        newtext = text.replace("XX0XX", "_")
        # rewrite the same file in place with the restored names
        with open("ranger_outputs/"+file,"w") as new:
            new.write(newtext)
    return ranger_outputs
|
def make_segment(segment, discontinuity=False):
    """Create a playlist response for a segment.

    :param segment: segment identifier interpolated into the segment URL
    :param discontinuity: when True, prepend an ``#EXT-X-DISCONTINUITY`` tag
    :return: newline-joined HLS playlist lines for this segment
    """
    response = []
    if discontinuity:
        response.append("#EXT-X-DISCONTINUITY")
    # FIX: removed the stray trailing comma that turned this statement into a
    # discarded one-element tuple ("response.extend(...),") — harmless at
    # runtime but misleading and flagged by linters.
    response.extend(["#EXTINF:10.0000,", f"./segment/{segment}.m4s"])
    return "\n".join(response)
|
def build_folder_names(result, folder_name=None):
    """Build a flat list of folder paths from a hierarchical dictionary.

    :param result: dict with optional ``"name"`` and ``"children"`` keys
    :param folder_name: path prefix accumulated from ancestor nodes
    :return: list of slash-separated folder paths, parent before children
    """
    prefix = folder_name or ""
    current = "{}/{}".format(prefix, result.get("name", "")).replace("//", "/")
    folders = [current]
    # Recurse depth-first into each child, carrying the current path down.
    for child in result.get("children") or []:
        folders.extend(build_folder_names(child, current))
    return folders
|
def search_greater_ten(a, b):
    """
    Operator function that searches for greater-than-10 values within its inputs.
    Inputs
        a, b: integers or booleans
    Outputs
        True if either input is equal to True or > 10, and False otherwise
    """
    # BUG FIX: the old same-type branch tested ``(a or b) > 10``, which only
    # inspects the first *truthy* operand — e.g. (5, 20) wrongly returned
    # False even though 20 > 10.  Test each operand independently instead.
    # ``== True`` (not ``is True``) is intentional: it preserves the original
    # behavior where the integer 1 also counts as True.
    return a == True or b == True or a > 10 or b > 10  # noqa: E712
|
def removeBadBands(spectrum, wavelengths, bbl):
    """
    Remove bands that are marked as bad in bbl list.
    Parameters
    ----------
    spectrum : list of int
        Spectrum as a list.
    wavelengths : list of int
        List of measured wavelength bands
    bbl : list of str/int/bool
        List of bbl values that say which wavelengths are measured in
        good quality (1) and which are not (0)
    Returns
    -------
    newwavelengths : list of int
        List of "good" wavelength bands
    newspectrum : list of int
        Spectrum of all "good" bands as a list
    """
    good_wavelengths = []
    good_spectrum = []
    # Walk the three parallel lists together, keeping only entries whose
    # bad-band flag coerces to 1.
    for wavelength, reflectance, flag in zip(wavelengths, spectrum, bbl):
        if int(flag) != 1:
            continue
        good_wavelengths.append(wavelength)
        good_spectrum.append(reflectance)
    return good_wavelengths, good_spectrum
|
def len_after_key(field, operation):
    """Return the length of the value stored in *field* under the key named
    by ``operation['key']``, or 0 when that key is absent from *field*."""
    lookup = operation['key']
    return len(field[lookup]) if lookup in field else 0
|
def human_readable(bytes, units=[' bytes','kB','MB','GB','TB', 'PB', 'EB']):
    """Return a human readable string representation of a byte count.

    Repeatedly divides by 1024 (integer shift, so the value is truncated)
    while stepping up through *units*, e.g. 2048 -> '2kB'.
    """
    value = bytes
    remaining = list(units)
    while value >= 1024:
        value >>= 10
        remaining = remaining[1:]
    return str(value) + remaining[0]
|
def concentrations_std(concentrations, standards):
    """ Returns only species with known concentrations, i.e. standards.
    This is a subset of the data generated from concentrations.  It
    provides a simple way get the standards from the dataset.
    Args
    ----
    concentrations : pandas.DataFrame
        Compiled is a dataframe containing identified species and an
        associated area with unknown concentrations. It can be generated
        from match_area
    standards : pandas.DataFrame
        Standards is a dataframe containing all species for which there is
        calibration standard. The first column should be 'library_id' and
        each subsequent column should contain the file name for a stanards
        vial. The value of each row for file should be the concentration
        in Molar for that species in that vial.
    Returns
    -------
    pandas.DataFrame
        A dataframe is returned which contains only data for standards/
    """
    # Both inputs are required; bail out early when either is missing.
    if concentrations is None or standards is None:
        print('Not enough info for `concentrations_std`.')
        return None
    # Every column of `standards` after 'library_id' names a standards vial.
    vial_columns = [column for column in standards.keys()][1:]
    indexed = concentrations.reset_index()
    keep = indexed['key'].isin(vial_columns)
    return indexed[keep].set_index('key')
|
def is_intable(an_object):
    """Return True if the object can be converted to an int, False otherwise."""
    try:
        int(an_object)
    except Exception:
        # Any failure mode (ValueError, TypeError, ...) means "not intable";
        # the specific reason is irrelevant here.
        return False
    return True
|
def is_iterable(value):
    # noinspection PyUnresolvedReferences
    """
    Returns ``True`` if *value* is an iterable container (e.g. ``list`` or ``tuple`` but not a **generator**).
    Note that :func:`is_iterable` will return ``False`` for string-like objects as well, even though they are iterable.
    The same applies to **generators** and **sets**:
    >>> my_list = [1, 2, 3]
    >>> is_iterable(my_list)
    True
    >>> is_iterable(set(my_list))
    False
    >>> is_iterable(v for v in my_list)
    False
    >>> is_iterable(str(my_list))
    False
    :param value: The value to check.
    :rtype: bool
    """
    # BUG FIX: str (and bytes) expose both __iter__ and __getitem__, so the
    # attribute probe alone wrongly returned True for strings, contradicting
    # the documented doctest above.  Exclude string-like types explicitly.
    if isinstance(value, (str, bytes)):
        return False
    # Sets and generators have __iter__ but no __getitem__, so they fail here.
    return hasattr(value, '__iter__') and hasattr(value, '__getitem__')
|
def get_list_from_ranges_str(ranges_str):
    """Convert the range in string format to ranges list
    And yield the merged ranges in order. The argument must be a
    string having comma separated vlan and vlan-ranges.
    get_list_from_ranges_str("4,6,10-13,25-27,100-103")
    [4, 6, 10, 11, 12, 13, 25, 26, 27, 100, 101, 102, 103]
    """
    expanded = []
    for item in ranges_str.split(','):
        if '-' in item:
            # "a-b" expands to the inclusive integer range a..b.
            start, end = item.split('-')
            expanded.extend(range(int(start), int(end) + 1))
        else:
            expanded.append(int(item))
    return expanded
|
def pth(path):
    """Return *path* prefixed with the ratings icon root directory."""
    root = 'img/icons/ratings/'
    return root + path
|
def by_length(arr):
    """
    Given an array of integers, if the number is an integer between 1 and 9 inclusive,
    replace it by its corresponding name from "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine",
    otherwise remove it, then sort the array and return a reverse of sorted array.
    For example:
    arr = [2, 1, 1, 4, 5, 8, 2, 3]
    -> sort arr -> [1, 1, 2, 2, 3, 4, 5, 8]
    -> reverse arr -> [8, 5, 4, 3, 2, 2, 1, 1]
    return ["Eight", "Five", "Four", "Three", "Two", "Two", "One", "One"]
    If the array is empty, return an empty array:
    arr = []
    return []
    If the array has any strange number ignore it:
    arr = [1, -1 , 55]
    -> sort arr -> [-1, 1, 55]
    -> reverse arr -> [55, 1, -1]
    return = ['One']
    """
    names = {
        1: "One",
        2: "Two",
        3: "Three",
        4: "Four",
        5: "Five",
        6: "Six",
        7: "Seven",
        8: "Eight",
        9: "Nine",
    }
    # BUG FIX: the original built `new_arr` (the mapped names) but then
    # returned `sorted_arr` (the raw integers), contradicting the documented
    # examples above.  Return the names; numbers outside 1..9 are dropped.
    return [names[n] for n in sorted(arr, reverse=True) if n in names]
|
def _is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
|
def _cleanUpAllPullRequests(results, all_pull_requests):
"""
Helper function for _getAllPullRequests(). Clean up pull requests and strip out cursor info.
GIVEN:
results (dict) -- initial query response with metadata
all_pull_requests (list) -- nested list of pull_requests with comments and metadata
RETURN:
results (dict) -- cleaned up results
"""
results["data"]["repository"]["pullRequests"]["edges"] = all_pull_requests
results["data"]["repository"]["pullRequests"].pop("pageInfo")
return results
|
def default(value, default):
    """
    Return `default` if `value` is :data:`None`, otherwise return `value`.
    """
    return default if value is None else value
|
def text_to_bits(text: str, encoding="utf-8", errors="surrogatepass") -> str:
    """
    Thanks jfs - convert-binary-to-ascii-and-vice-versa
    >>> text_to_bits('Jive Turkey')
    '010010100110100101110110011001010010000001010100011101010111\
0010011010110110010101111001'
    >>> text_to_bits('j1>3_t|_|rK3Y')
    '011010100011000100111110001100110101111101110100011111000101\
11110111110001110010010010110011001101011001'
    """
    # Encode to bytes, view as one big integer, then render in binary.
    raw = text.encode(encoding, errors)
    bits = format(int.from_bytes(raw, "big"), "b")
    # Left-pad with zeros up to the next whole-byte boundary.
    padded_length = 8 * ((len(bits) + 7) // 8)
    return bits.zfill(padded_length)
|
def istag(arg, symbol='-'):
    """Return true if the argument starts with a dash ('-') and is not a number
    Parameters
    ----------
    arg : str
    Returns
    -------
    bool
    """
    # Guard clauses: must begin with the tag symbol and have at least one
    # character after it.
    if not arg.startswith(symbol):
        return False
    if len(arg) <= 1:
        return False
    # A symbol followed by an ASCII digit is a negative number, not a tag.
    return arg[1] not in '0123456789'
|
def validate_country_codes(parser, arg):
    """Check that the supplied 2 country code is correct."""
    country_codes = ["us", "de"]
    normalized = arg.strip().lower()
    if normalized in country_codes:
        return normalized
    # Unknown code: delegate to the parser's error reporting (argparse-style
    # parser.error aborts parsing).
    parser.error("Invalid country code. Available codes are: %s" %
                 ", ".join(country_codes))
|
def post_report_users_update(reporter_check, reported_check):
    """
    Prepares message to send to guild owner in event of save failure.
    Helper method for generating user/guild member profiles upon a report
    """
    # Pair each save-success flag with the failure line it should emit.
    failures = [
        (reporter_check[0], "reporting user info failed to save!\n"),
        (reporter_check[1], "reporting user member info failed to save!\n"),
        (reported_check[0], "reported user info failed to save!\n"),
        (reported_check[1], "reported user member info failed to save!\n"),
    ]
    msg = "".join(line for ok, line in failures if not ok)
    return "**whistlebot update!**\n" + msg if msg else ""
|
def list_sra_accessions(reads):
    """
    Return a list SRA accessions.
    """
    # No reads mapping means no accessions; otherwise the accessions are the
    # mapping's keys (returned as a dict view, as before).
    if reads is None:
        return []
    return reads.keys()
|
def educations(education_block):
    """
    :param bs4.Tag education_block: education block
    :return: list of dicts with ``year`` (int), ``name`` and ``organization``
        (str or None) for each education entry; empty list when the block
        is missing
    """
    results = []
    if education_block is None:
        return results
    # Drill down to the row container that holds the individual entries.
    rows = education_block \
        .find("div", {"class": "resume-block-item-gap"}) \
        .find("div", {"class": "bloko-columns-row"})
    for entry in rows.findAll("div", {"class": "resume-block-item-gap"}):
        year_text = entry.find(
            "div",
            {"class": "bloko-column bloko-column_xs-4 bloko-column_s-2 bloko-column_m-2 bloko-column_l-2"},
        ).getText()
        entry_name = entry.find("div", {"data-qa": "resume-block-education-name"}).getText()
        # The organization sub-block is optional on the page.
        organization_tag = entry.find("div", {"data-qa": "resume-block-education-organization"})
        organization = organization_tag.getText() if organization_tag is not None else None
        results.append({
            "year": int(year_text),
            "name": entry_name,
            "organization": organization,
        })
    return results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.