content stringlengths 42 6.51k |
|---|
def reaumur_to_fahrenheit(reaumur: float, ndigits: int = 2) -> float:
    """
    Convert a temperature from degrees Reaumur to degrees Fahrenheit,
    rounded to `ndigits` decimal places (default 2).
    Reference:- http://www.csgnetwork.com/temp2conv.html
    >>> reaumur_to_fahrenheit(0)
    32.0
    >>> reaumur_to_fahrenheit(20.0)
    77.0
    >>> reaumur_to_fahrenheit(40)
    122.0
    >>> reaumur_to_fahrenheit("reaumur")
    Traceback (most recent call last):
    ...
    ValueError: could not convert string to float: 'reaumur'
    """
    # F = R * 9/4 + 32; 9/4 == 2.25
    fahrenheit = float(reaumur) * 2.25 + 32
    return round(fahrenheit, ndigits)
def bytes_to_int_signed(b):
    """Interpret `b` as a big-endian two's-complement integer."""
    return int.from_bytes(b, byteorder='big', signed=True)
def make_PEM_filename(cert_id: str) -> str:
    """Build the ``<cert_id>.pem`` filename for a PEM certificate."""
    return '{}.pem'.format(cert_id)
def is_safe(value: str) -> bool:
    """Return True if `value` looks like a short fractional number that is safe for eval():
    at most 10 characters, all drawn from digits, '.', '/' and space."""
    allowed = set("0123456789./ ")
    return len(value) <= 10 and set(value) <= allowed
def pt_agent_country(country):
    """Clean a country string: strip whitespace and map 'unknown'
    (any letter case) to the empty string."""
    cleaned = country.strip()
    return '' if cleaned.lower() == 'unknown' else cleaned
def cut_off_str(obj, max_len):
    """
    Create a string representation of `obj` (via repr) no longer than
    `max_len` characters.

    If repr(obj) already fits within max_len it is returned unchanged;
    otherwise it is cut to max_len - 3 characters and '...' is appended,
    so the result is exactly max_len characters long.

    Bug fix: the original truncated whenever len(s) > max_len - 3, which
    mangled strings of length max_len-2 .. max_len that already fit.
    """
    s = repr(obj)
    if len(s) > max_len:
        s = s[:max_len - 3] + '...'
    return s
def diamond_coord_test(x, y, z):  # dist2 = 3
    """Test for coordinate in diamond grid: all three coordinates share the
    same parity (all even or all odd) and the half-coordinate sum is even."""
    parity_sum = x % 2 + y % 2 + z % 2
    cell_parity = (x // 2 + y // 2 + z // 2) % 2
    return parity_sum in (0, 3) and cell_parity == 0
def GetBoundaries(n, line):
    """
    Parse and extract a boundary of n+1 elements from a line of text.

    Parameters
    ----------
    n: int
        Number of elements (n+1 boundary values are expected)
    line: string
        line containing whitespace-separated boundary data

    Returns
    -------
    List of n+1 floats representing the boundary.

    Raises
    ------
    RuntimeError
        if the line does not contain exactly n+1 values.
    """
    # split on spaces and drop empty tokens from repeated spaces
    tokens = [tok for tok in line.split(" ") if tok]
    if len(tokens) != n + 1:
        raise RuntimeError("GetBoundaries: Wrong number of boundaries")
    # NOTE: the old `if boundaries.count == 0: return None` compared the
    # bound method itself to 0 and was always False (dead code) — removed.
    return [float(tok) for tok in tokens]
def to_psychopy_coord(normx, normy):
    """Map normalized [0, 1] screen coordinates to psychopy's [-1, 1] frame
    (x grows rightwards, y is flipped so it grows upwards)."""
    # identical arithmetic to the original, kept for bit-exact results
    px = normx * 2 - 1
    py = 2 - normy * 2 - 1
    return px, py
def is_contained_in(frst, scnd):
    """
    Is the first region contained in the second?

    :param frst: tuple whose first 3 columns are chromosome, start, end
    :param scnd: tuple whose first 3 columns are chromosome, start, end
    :return: True if `frst` lies entirely within `scnd`, else False
    """
    same_chromosome = frst[0] == scnd[0]
    return same_chromosome and frst[1] >= scnd[1] and frst[2] <= scnd[2]
def find_key(d: dict, key: str, default=None):
    """Depth-first search for the first occurrence of `key` in `d`,
    descending into nested dicts. Returns `default` when not found.

    Bug fix: the original signature was ``default: None`` — a type
    annotation, not a default value — which made the argument mandatory.
    It now genuinely defaults to None; existing 3-argument calls still work.
    """
    if key in d:
        return d[key]
    for value in d.values():
        if isinstance(value, dict):
            found = find_key(value, key, default)
            if found is not None:
                return found
    return default
def combination(a: int, b: int) -> int:
    """
    Binomial coefficient C(a, b) ("choose b from a"). Requires a >= b >= 0.

    Bug fix: the original returned ``int(numerator / dominator)`` — float
    division, which silently loses precision once the operands exceed
    2**53. Integer floor division is exact for all sizes.
    """
    # use symmetry to minimise the number of multiplications
    b = min(b, a - b)
    numerator = 1
    denominator = 1
    for i in range(b):
        numerator *= a - i
        denominator *= b - i
    return numerator // denominator
def pdh_signal(
    ff,
    power,
    gamma,
    finesse,
    FSR,
    fpole
):
    """Laser frequency Pound-Drever-Hall signal response to a cavity in W/Hz.

    DC gain 2*power*gamma*finesse/FSR, rolled off by a single pole at fpole.
    """
    dc_gain = 2 * power * gamma * finesse / FSR
    return dc_gain / (1 + 1j * ff / fpole)
def _us_to_s(time_us):
"""Convert [us] into (float) [s]
"""
return (float(time_us) / 1e6) |
def bytes_to_english(num_bytes):
    """Convert an integer byte count to a human readable unit name.

    Units are binary (a kilobyte is 2**10, not 10**3). A value is only
    expressed in a unit once it exceeds ten (10) of that unit; e.g.
    2,097,152 stays in kilobytes, while 11,000,000 becomes
    "10.490 megabytes".

    Args:
        num_bytes (int): Number to be formatted.
    Returns:
        string: value with 3 digits after the decimal plus the unit name.
    """
    units = (
        (2 ** 10, "kilobytes"), (2 ** 20, "megabytes"), (2 ** 30, "gigabytes"),
        (2 ** 40, "terabytes"), (2 ** 50, "petabytes"), (2 ** 60, "exabytes"),
        (2 ** 70, "zetabytes"), (2 ** 80, "yottabytes"),
    )
    pretty = str(num_bytes) + " bytes"
    for size, label in units:
        # keep upgrading the unit while the value is more than ten of it
        if num_bytes > 10 * size:
            pretty = "{:.3f}".format(num_bytes / size) + " " + label
    return pretty
def validate_int_or_None(s):
    """Return None for None or the string 'None'; otherwise validate `s`
    as an int, raising a descriptive ValueError on failure."""
    if s is None or s == 'None':
        return None
    try:
        return int(s)
    except ValueError:
        raise ValueError('Could not convert "%s" to int' % s)
def slice_page(path):
    """
    Function removes proper amount of characters from the end of a given path,
    so that it can later be appended in an altered form
    :param path: string (url path with filter and page parameters)
    :return: string (sliced path)
    """
    # Start the search 7 characters from the end: "&page=N" — the shortest
    # possible trailing page parameter — is 7 characters long.
    index = len(path) - 7
    if path.count('&page=') >= 1:
        # Walk `index` backwards until a "&page=" occurrence is found at or
        # after it; the loop terminates because the parameter is known to
        # be present somewhere in `path`.
        while True:
            tmp = path.find('&page=', index)
            if tmp != -1:
                break
            else:
                index -= 1
        # Truncate at `index` (the first search start that yields a hit).
        # path[:-(len(path) - index)] is equivalent to path[:index].
        return path[:-(len(path) - index)]
    else:
        # no page parameter present — nothing to strip
        return path
def dict2list(data, order=None):
    """
    Convert a dictionary to a list of keys and a list of values.

    Parameters
    ----------
    data : dict
        dictionary with name, value pairs
    order : list, optional
        keys to place first, in this order; remaining keys follow in
        dict insertion order

    Returns
    -------
    (names, values) : tuple of two lists

    Examples
    --------
    >>> dict2list({'time':[0,43200,86400],'u':[1000,5000,2000]})
    (['time', 'u'], [[0, 43200, 86400], [1000, 5000, 2000]])
    """
    names = []
    # keys from `order` first (idiom fix: `is not None`, not `!= None`)
    if order is not None:
        names.extend(order)
    # then any remaining keys, preserving dict insertion order
    for key in data:
        if key not in names:
            names.append(key)
    values = [data[key] for key in names]
    return names, values
def narrow_to_non_space(text, start, end):
    """
    Shrink the half-open selection [start, end) of `text` so that it starts
    and ends on non-space characters.
    @type text: str
    @type start: int
    @type end: int
    @return: (start, end) tuple
    """
    # advance start past any leading whitespace
    while start < end and text[start].isspace():
        start += 1
    # retreat end past trailing whitespace; end stays exclusive
    while end > start:
        end -= 1
        if not text[end].isspace():
            end += 1
            break
    return start, end
def format_time(time_str):
    """
    Properly format a run-time string for the sbatch file.
    Examples:
        15     -> 15:00:00
        2:30:5 -> 02:30:05
        :30    -> 00:30:00
        ::30   -> 00:00:30
    """
    parts = str(time_str).split(':')
    # pad out missing minute/second fields, then zero-fill each to 2 digits
    while len(parts) < 3:
        parts.append('00')
    return ':'.join(part.zfill(2) for part in parts[:3])
def beta_mode(alpha, beta):
    """Calculate the mode of a beta distribution.
    https://en.wikipedia.org/wiki/Beta_distribution

    When the distribution is bimodal (`alpha`, `beta` < 1), this function
    returns `nan`.

    :param alpha: first parameter of the beta distribution
    :type alpha: float
    :param beta: second parameter of the beta distribution
    :type beta: float
    :return: mode of the beta distribution
    :rtype: float
    """
    pi = float('nan')
    # We ignore the following two cases:
    # If alpha=1 and beta=1, mode=any value in (0, 1)
    # If alpha<1 and beta<1, mode=0, 1 (bimodal)
    if alpha > 1 and beta > 1:
        pi = (alpha - 1) / (alpha + beta - 2)
    elif alpha > 1 and beta <= 1:
        pi = 1.0  # was int 1 — docstring promises a float
    elif alpha <= 1 and beta > 1:
        pi = 0.0  # was int 0 — docstring promises a float
    return pi
def utf8ify(s):
    """Create a representation of the string that print() is willing to use
    by round-tripping through UTF-8, replacing anything unencodable."""
    encoded = s.encode("utf8", "replace")
    return encoded.decode("utf8")
def padTime(timestring):
    """
    Return a 12-digit time string, filling any missing month, day, hour or
    minute fields with defaults from the template "000001010000".
    """
    DEFAULTS = "000001010000"
    if len(timestring) >= 12:
        return timestring
    return timestring + DEFAULTS[len(timestring):]
def get_entry_dictionary(resource, vos, cpus, walltime, memory):
    """Utility function that converts some variables into an xml pilot dictionary.

    :param resource: site/resource name (also used as GLIDEIN_ResourceName when truthy)
    :param vos: iterable of supported VO names
    :param cpus: CPU count or None to omit
    :param walltime: walltime in minutes or None to omit
    :param memory: memory in MB or None to omit
    :return: the entry dict

    Idiom fix: `is not None` replaces the `!= None` comparisons.
    """
    edict = {}  # Entry dict
    edict["gridtype"] = "condor"
    edict["attrs"] = {}
    edict["attrs"]["GLIDEIN_Site"] = {"value": resource}
    if resource:
        edict["attrs"]["GLIDEIN_ResourceName"] = {"value": resource}
    if len(vos) > 0:
        edict["attrs"]["GLIDEIN_Supported_VOs"] = {"value": ",".join(vos)}
    edict["submit_attrs"] = {}
    if cpus is not None:
        edict["attrs"]["GLIDEIN_CPUS"] = {"value": cpus}
        edict["submit_attrs"]["+xcount"] = cpus
    if walltime is not None:
        # glidein walltime: minutes -> seconds, minus a 30-minute safety margin
        glide_walltime = walltime * 60 - 1800
        edict["attrs"]["GLIDEIN_Max_Walltime"] = {"value": glide_walltime}
        edict["submit_attrs"]["+maxWallTime"] = walltime
    if memory is not None:
        edict["attrs"]["GLIDEIN_MaxMemMBs"] = {"value": memory}
        edict["submit_attrs"]["+maxMemory"] = memory
    return edict
def _parent(i):
"""
Returns the parent node of the given node.
"""
return (i - 1) // 2 |
def python_type_name(type_info):
    """Given a type instance parsed from ast, return the matching python
    type name; smart pointers are unwrapped to their pointee type."""
    if type_info is None:
        return "None"
    # unwrap unique_ptr/shared_ptr by recursing into the template argument
    if "unique_ptr" in type_info.name or "shared_ptr" in type_info.name:
        return python_type_name(type_info.templated_types[0])
    translations = {
        "void": "None",
        "std::string": "str",
    }
    return translations.get(type_info.name, type_info.name)
def reverse(un_list):
    """Return a NEW list with the elements of `un_list` in reverse order.

    The input list is not modified. Replaces the original manual
    index-walking loop with the idiomatic `list(reversed(...))`,
    which also returns a fresh list for empty input.
    """
    return list(reversed(un_list))
def get_dust_attn_curve_d1(wave, d1=1.0):
    """ Calculate birth cloud dust attenuation curve.

    Parameters
    ----------
    wave: Float or 1-D Array
        Wavelengths (Angstroms) at which attenuation curve should be evaluated
    d1: Float
        Birth cloud dust optical depth

    Returns
    -------
    Birth dust attenuation at the given wavelength(s); inverse power law
    from Charlot+Fall 00, normalised at 5500 A.
    """
    normalized_wave = wave / 5500.0
    return d1 * normalized_wave ** (-1)
def hash_dict(d):
    """Construct a hash of the dict `d`.

    Float values are quantized to 8 significant-ish digits (multiplied by
    10**8 and rounded to an integer) before hashing, so values that differ
    only by floating-point noise hash identically. Imperfect, but works
    well in practice.
    """
    def _quantize(value):
        if type(value) == float:
            return round(value * 10 ** 8)
        return value

    return hash(frozenset((key, _quantize(value)) for key, value in d.items()))
def get_track_id_from_json(item):
    """ Try to extract a video id from the various response shapes,
    probing a list of known key paths in priority order. """
    paths = ('contentDetails/videoId',
             'snippet/resourceId/videoId',
             'id/videoId',
             'id')
    for path in paths:
        node = item
        for segment in path.split('/'):
            # only descend while the current node is a non-empty dict
            if node and isinstance(node, dict):
                node = node.get(segment)
        if node:
            return node
    return ''
def bytesto(bytes, to, bsize=1024):
    """convert bytes to kilobytes/megabytes/etc. by repeated division.

    sample code:
        print('mb= ' + str(bytesto(314575262000000, 'm')))
    sample output:
        mb= 300002347.946
    """
    # unit letter -> how many times to divide by bsize
    exponents = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}
    result = float(bytes)
    for _ in range(exponents[to]):
        result = result / bsize
    return result
def get_amr_line(infile):
    """ Read an entry from the input file. AMRs are separated by blank lines. """
    cur_comments = []
    cur_amr = []
    has_content = False
    for line in infile:
        # A "(" at column 0 while AMR text is already buffered signals the
        # start of a fresh AMR: discard the previous partial buffer.
        if line[0] == "(" and len(cur_amr) != 0:
            cur_amr = []
        if line.strip() == "":
            if not has_content:
                # skip leading blank lines before any AMR content is seen
                continue
            else:
                # a blank line after content terminates this entry
                break
        elif line.strip().startswith("#"):
            # comment lines are collected separately, not joined into the AMR
            cur_comments.append(line.strip())
        else:
            has_content = True
            cur_amr.append(line.strip())
    # (joined AMR text, list of stripped comment lines)
    return ("".join(cur_amr), cur_comments)
def create_gitbom_doc_text(infile_hashes, db):
    """
    Create the gitBOM doc text contents.

    :param infile_hashes: dict mapping input file -> its hash
    :param db: gitBOM DB with {file-hash => its gitBOM hash} mapping
    :return: one sorted "blob <hash> [bom <gitbom-hash>]" line per file,
             newline-terminated; empty string when there are no files
    """
    if not infile_hashes:
        return ''
    entries = []
    for file_hash in infile_hashes.values():
        entry = "blob " + file_hash
        if file_hash in db:
            entry = entry + " bom " + db[file_hash]
        entries.append(entry)
    return '\n'.join(sorted(entries)) + '\n'
def guess_number(f_guess, f_turns_left):
    """
    Parse the player's guess and consume one turn.

    Params:
        f_guess: str
        f_turns_left: int
    Returns:
        (int, int) for a valid guess in 1..9,
        (str rules message, int) otherwise — turns always decrease by one.
    """
    remaining = f_turns_left - 1
    try:
        value = int(f_guess)
    except ValueError:
        value = None
    if value is not None and 1 <= value <= 9:
        return value, remaining
    return '\nRULES: Please enter a number between 1 and 9.', remaining
def generate_winner_list(winners):
    """ Join the `.name` of every winner into one comma-separated string. """
    names = [winner.name for winner in winners]
    return ", ".join(names)
def find_aruba(aps, ap):
    """
    Locate `ap` within `aps` by matching serial numbers.

    :param aps: list of AP dicts we are searching through
    :param ap: the AP dict (as found on the controller) to look for
    :return: index of the match in `aps`, or -1 when absent
    """
    target = ap["serNum"]
    return next(
        (i for i, candidate in enumerate(aps) if candidate["serNum"] == target),
        -1,
    )
def qbinomial(n, k, q=2):
    """
    Gaussian (q-)binomial coefficient [n choose k]_q, computed as
    prod(q^n - q^j) / prod(q^k - q^j) over j in 0..k-1.
    """
    numerator = 1
    denominator = 1
    for j in range(k):
        numerator *= q ** n - q ** j
        denominator *= q ** k - q ** j
    # floor(floor(x/a)/b) == floor(x/(a*b)) for positive ints, so a single
    # division is equivalent to the original step-by-step divisions.
    return numerator // denominator
def DebugStructToDict(structure):
    """Converts a structure as printed by the debugger into a dictionary.

    The structure should have the following format:
        field1 : value1
        field2 : value2
        ...

    Bug fix: the original `if not entry.find(':')` only skipped entries
    whose FIRST character was ':' (find() == 0), while entries containing
    no ':' at all (find() == -1) fell through and were mis-parsed. Lines
    without a separator are now skipped properly.

    Args:
        structure: iterable of "field : value" strings.
    Returns:
        A dict mapping stripped field names to stripped values.
    """
    ret = dict()
    for entry in structure:
        if ':' not in entry:
            continue
        key, _, value = entry.partition(':')
        ret[key.strip()] = value.strip()
    return ret
def _get_message(status, backend, hrsp_5xx_ratio, warning_ratio, critical_ratio, interval):
"""Return a message conveying the check results.
:return: Informational message about the check
:rtype: :py:class:`str`
"""
return ("""{backend} traffic has HTTP 5xx ratio of {hrsp_5xx_ratio:.4f} in the past {interval} seconds."""
"""Thresholds: warning: {warning_ratio}, critical: {critical_ratio}""").format(
backend=backend,
hrsp_5xx_ratio=hrsp_5xx_ratio,
interval=interval,
warning_ratio=warning_ratio,
critical_ratio=critical_ratio, ) |
def correct_other_vs_misc(rna_type, _):
    """
    Prefer 'other' over 'misc_RNA' as it is more specific. The collapse
    happens only when 'misc_RNA' and 'other' are the only two current
    rna_types; anything else passes through unchanged.
    """
    if rna_type == {"other", "misc_RNA"}:
        return {"other"}
    return rna_type
def second_half(dayinput):
    """
    Second-half solver: compute the Knot Hash (Advent of Code 2017, day 10)
    of `dayinput` and return it as a 32-character hex digest.

    Fix: the original left three debug ``print()`` calls in place, spamming
    stdout with the length list, the sparse knot and the dense hash; they
    have been removed. The hash computation itself is unchanged.
    """
    SUFFIX = [17, 31, 73, 47, 23]  # standard Knot Hash length suffix
    lengths = [ord(ch) for ch in dayinput] + SUFFIX
    knot = list(range(256))
    current = skip = 0
    # 64 rounds of the pinch-and-twist over the circular list
    for _ in range(64):
        for length in lengths:
            # reverse the circular sub-list of `length` elements at `current`
            indices = [(current + offset) % len(knot) for offset in range(length)]
            values = [knot[i] for i in indices]
            for i, v in zip(indices, reversed(values)):
                knot[i] = v
            current = (current + length + skip) % len(knot)
            skip += 1
    # condense: XOR each block of 16 sparse values into one dense byte
    dense_hash = []
    for start in range(0, 256, 16):
        block_xor = 0
        for v in knot[start:start + 16]:
            block_xor ^= v
        dense_hash.append(block_xor)
    # hex digest, two zero-padded digits per dense byte
    return ''.join('{:02x}'.format(v) for v in dense_hash)
def tuple_4d(x, y, z, w):
    """Returns a 4D tuple with x, y, z and w coordinates.

    Fix: the previous implementation returned a *list*, contradicting both
    the function name and this docstring. Callers that index or unpack the
    result are unaffected.
    """
    return (x, y, z, w)
def scale_formula(k, m=5, s_min=0.2, s_max=0.9):
    """ SSD-style scale formula: linear interpolation between s_min and
    s_max across the feature-map levels.

    Args:
        k: K-th feature map level (1-based).
        m: Number of feature map levels.
        s_min: Scale factor of the lowest layer.
        s_max: Scale factor of the highest layer.
    Returns: Scale value for the k-th feature map.
    """
    step = (s_max - s_min) / (m - 1)
    return s_min + step * (k - 1)
def _start_stop_block(size, proc_grid_size, proc_grid_rank):
"""Return `start` and `stop` for a regularly distributed block dim."""
nelements = size // proc_grid_size
if size % proc_grid_size != 0:
nelements += 1
start = proc_grid_rank * nelements
if start > size:
start = size
stop = start + nelements
if stop > size:
stop = size
return start, stop |
def get_space_packet_header(
    packet_id: int, packet_sequence_control: int, data_length: int
) -> bytearray:
    """Retrieve raw space packet header from the three required values.

    Each field contributes two big-endian bytes (its low 16 bits)."""
    header = bytearray()
    for field in (packet_id, packet_sequence_control, data_length):
        header += (field & 0xFFFF).to_bytes(2, byteorder="big")
    return header
def truecase(word, case_counter):
    """
    Truecase `word`: pick the casing variant (Title / lower / UPPER) with a
    strictly highest count in `case_counter`; on ties or when no variant has
    ever been seen, return `word` unchanged.

    :param word: token to recase
    :param case_counter: mapping from seen token -> count
    :return: the recased word
    """
    lcount = case_counter.get(word.lower(), 0)
    ucount = case_counter.get(word.upper(), 0)
    tcount = case_counter.get(word.title(), 0)
    best = max(tcount, lcount, ucount)
    if best == 0:
        return word  # no evidence either way
    if [tcount, lcount, ucount].count(best) > 1:
        return word  # tie: ambiguous evidence, leave as-is
    if best == tcount:
        return word.title()
    if best == lcount:
        return word.lower()
    return word.upper()
def _package_exists(module_name: str):
"""Check if a package exists"""
mod = __import__(module_name)
return mod is not None |
def hexify(number):
    """
    Convert a non-negative integer to an even-length upper-case hex string,
    e.g. 12 -> '0C'.
    """
    if number < 0:
        raise ValueError('Invalid number to hexify - must be positive')
    digits = format(int(number), 'X')
    # zero-pad so the representation always has an even number of digits
    return digits if len(digits) % 2 == 0 else '0' + digits
def mergelistadd(lst1, lst2):
    """Return the element-wise sum of two lists, indexed over `lst1`.

    Prints 'incompatible lists' and returns None when `lst2` is shorter
    than `lst1` or the elements cannot be added.

    Fix: the bare ``except:`` (which swallowed *every* exception, including
    KeyboardInterrupt) now catches only the failures this operation can
    legitimately produce.
    """
    try:
        return [lst1[i] + lst2[i] for i in range(len(lst1))]
    except (IndexError, TypeError):
        print('incompatible lists')
def remove_private_prefix(attr_name: str, prefix: str) -> str:
    """Return the specified attribute name without the specified prefix.

    Fix: the original sliced ``attr_name[len(prefix):]`` unconditionally,
    chopping characters off names that do not actually start with the
    prefix. The name is now returned unchanged in that case.
    """
    if attr_name.startswith(prefix):
        return attr_name[len(prefix):]
    return attr_name
def sgn(x):
    """a simple sign function (note: sgn(0) == 1 by design)"""
    return -1 if x < 0 else 1
def _qt_list(secondary_dict_ptr, secondary_key_list_ptr, cols, key):
    """
    This sub-function is called by view_utils.qt to add keys to the secondary_key_list and
    is NOT meant to be called directly.

    When cols[key] is truthy, ensure a bucket for that value exists in the
    key-list mapping and descend one level into BOTH mappings; otherwise the
    current-level "pointers" are returned unchanged.
    """
    if cols[key]:
        if cols[key] not in secondary_key_list_ptr:
            # first time this column value is seen: create its nested bucket
            secondary_key_list_ptr[cols[key]] = {}
        # NOTE(review): assumes secondary_dict_ptr already contains
        # cols[key] — a missing key would raise KeyError here; verify
        # against the caller in view_utils.qt.
        return secondary_dict_ptr[cols[key]], secondary_key_list_ptr[cols[key]]
    else:
        return secondary_dict_ptr, secondary_key_list_ptr
def parseOneDigit(n):
    """Given a single digit 1-9, return its name in a word
    (with a trailing space); anything else yields an empty string."""
    names = ("One ", "Two ", "Three ", "Four ", "Five ",
             "Six ", "Seven ", "Eight ", "Nine ")
    for value, name in enumerate(names, start=1):
        if n == value:
            return name
    return ""
def div(x, y):
    """
    Compute integer division x // y (x >= 0, y > 0) by shift-and-subtract
    long division.

    Bug fix: both comparisons in the original used strict ``<`` where
    ``<=`` is required, so exact multiples were mishandled — e.g.
    div(4, 2) returned 1 and div(6, 3) returned 1.
    """
    # find the largest shifted divisor y << i that does not exceed x
    i = 0
    s = y
    while s <= x:
        s <<= 1
        i += 1
    s >>= 1
    i -= 1
    # subtract shifted divisors from the remainder, accumulating quotient bits
    d = 0
    rem = x
    while i >= 0:
        if s <= rem:
            rem -= s
            d += 1 << i
        i -= 1
        s >>= 1
    return d
def det(a: list) -> int:
    """
    Determinant of a 2x2 matrix via the shortcut (a*d) - (b*c).

    :param a: The matrix A as nested lists.
    :return: The determinant.
    """
    ad = a[0][0] * a[1][1]
    bc = a[0][1] * a[1][0]
    return ad - bc
def sequence_identity(a, b, gaps='y'):
    """Compute the sequence identity between two sequences.
    The definition of sequence_identity is ambyguous as it depends on how gaps are treated,
    here defined by the *gaps* argument. For details and examples, see
    `this page <https://pyaln.readthedocs.io/en/latest/tutorial.html#sequence-identity>`_
    Parameters
    ----------
    a : str
        first sequence, with gaps encoded as "-"
    b : str
        second sequence, with gaps encoded as "-"
    gaps : str
        defines how to take into account gaps when comparing sequences pairwise. Possible values:
        - 'y' : gaps are considered and considered mismatches. Positions that are gaps in both sequences are ignored.
        - 'n' : gaps are not considered. Positions that are gaps in either sequences compared are ignored.
        - 't' : terminal gaps are trimmed. Terminal gap positions in either sequences are ignored, others are considered as in 'y'.
        - 'a' : gaps are considered as any other character; even gap-to-gap matches are scored as identities.
    Returns
    -------
    float
        sequence identity between the two sequences
    Examples
    --------
    >>> sequence_identity('ATGCA',
    ...                   'ATGCC')
    0.8
    >>> sequence_identity('--ATC-GGG-',
    ...                   'AAATCGGGGC',
    ...                   gaps='y')
    0.6
    Note
    ----
    To compute sequence identity efficiently among many sequences, use :func:`~pyaln.Alignment.score_similarity` instead.
    See also
    --------
    pyaln.Alignment.score_similarity, weighted_sequence_identity
    """
    if len(a)!=len(b):
        raise IndexError('sequence_identity ERROR sequences do not have the same length')
    # Build the list of alignment positions to exclude, per gap mode.
    if gaps=='y':
        # exclude only positions gapped in BOTH sequences
        pos_to_remove=[i for i in range(len(a)) if a[i]=='-' and b[i]=='-' ]
    elif gaps=='n':
        # exclude positions gapped in EITHER sequence
        pos_to_remove=[i for i in range(len(a)) if a[i]=='-' or b[i]=='-' ]
    elif gaps=='t':
        # like 'y', plus terminal gap runs of either sequence
        pos_to_remove=[i for i in range(len(a)) if a[i]=='-' and b[i]=='-' ]
        for s in [a,b]:
            # leading terminal gaps
            for i,c in enumerate(s):
                if c=='-':
                    pos_to_remove.append(i)
                else:
                    break
            # trailing terminal gaps
            for i, c in reversed(list(enumerate(s))):
                if c=='-':
                    pos_to_remove.append(i)
                else:
                    break
    elif gaps=='a':
        # every column counts; gap-to-gap matches score as identities
        count_identical=sum([int(ca == b[i]) for i,ca in enumerate(a)])
        return count_identical/len(a) if len(a) else 0.0
    else:
        raise Exception('sequence_identity ERROR gaps argument must be one of {a, y, n, t}')
    exclude_pos=set(pos_to_remove)
    # identical, non-gap characters over the retained positions
    count_identical=sum([int(ca == b[i] and ca!='-' ) for i,ca in enumerate(a) if not i in exclude_pos])
    denominator= len(a) - len(exclude_pos)
    return count_identical/denominator if denominator else 0.0
def linear_full_overlap(dep_t, dep_h):
    """Checks whether both the head and dependent of the triplets match
    (substring containment of dep_h's fields inside dep_t's)."""
    head_matches = dep_h[0] in dep_t[0]
    dependent_matches = dep_h[2] in dep_t[2]
    return head_matches and dependent_matches
def min_square_area(x: int, y: int):
    """
    Area of the smallest square able to hold two x-by-y rectangles placed
    side by side along their shorter dimension: the side is the larger of
    twice the short side and the long side.
    """
    side = max(2 * min(x, y), max(x, y))
    return side * side
def weighted_mean(x, w):
    """
    Weighted average of values `x` with weights `w`
    (equal-length vectors expected).
    """
    numerator = sum(value * weight for value, weight in zip(x, w))
    return numerator / sum(w)
def get_unique(items):
    """
    Unique items in order of first appearance. Works for unhashable items
    too (linear membership tests — O(n^2), acceptable for small inputs).
    """
    seen = []
    for candidate in items:
        if candidate not in seen:
            seen.append(candidate)
    return seen
def CSVWriter(iterable, outLoc, header="", ):
    """
    Writes a nested iterable to a CSV-like file: one row per member, every
    item followed by a comma (original output format preserved).

    :param iterable: List of list
    :param outLoc: file location. Where to place it.
    :param header: optional header line, written verbatim
    :return: 1 on success, 0 when there was nothing to write

    Fix: the file is now opened with a ``with`` block, so the handle is
    closed (and data flushed) even if a write fails; the original leaked
    the open file object.
    """
    if not iterable:
        print("nothing to write")
        return 0
    with open(outLoc, 'w') as out:
        if header:
            out.write(header + '\n')
        # Only works if iterable is a nested list
        for member in iterable:
            for item in member:
                out.write(str(item) + ',')
            out.write('\n')
    print("write to " + outLoc + " successful.")
    return 1
def contar_letras(cadena: str, letras: str):
    """Count how many characters of the string equal the given letter.

    Arguments:
    cadena (str) -- string to scan
    letras (str) -- the (single) letter to count
    """
    return sum(1 for caracter in cadena if caracter == letras)
def simple_two_params(one, two):
    """Expected simple_two_params __doc__"""
    # build the expected-result message (docstring text kept verbatim:
    # it appears to be asserted by an external test)
    message = "simple_two_params - Expected result: %s, %s" % (one, two)
    return message
def _get_date_or_none(panda_date_or_none):
""" Projection Null value is a string NULL so if this date value is a string,
make it none. Otherwise convert to the python datetime. Example
of this being null is when there is no bed shortfall, the shortfall dates is none """
if isinstance(panda_date_or_none, str):
return None
return panda_date_or_none.to_pydatetime() |
def Swap(x, **unused_kwargs):
    """Swap the first two element on the stack (list or tuple),
    preserving the input container type."""
    if isinstance(x, list):
        return [x[1], x[0], *x[2:]]
    assert isinstance(x, tuple)
    return (x[1], x[0], *x[2:])
def maxTabCount(edgeLen, width, minDistance):
    """
    Given a length of edge, tab width and their minimal distance, return the
    maximal number of tabs that fit.
    """
    if edgeLen < width:
        return 0
    # one tab always fits; each further tab costs (minDistance + width)
    count = 1 + (edgeLen - minDistance) // (minDistance + width)
    return max(0, int(count))
def get_extrapolated_flux(flux_ref, freq_ref, spectral_index):
    """
    Flux density at 843 MHz extrapolated from a measurement at another
    frequency using an assumed power-law spectral index.

    input:
    ------
    flux_ref: float
        Reference flux density, usually S400 or S1400 [mJy].
    freq_ref: float
        Reference frequency, usually 400 or 1400 MHz.
    spectral_index: float
        Assumed spectral index of the power law.

    output:
    -------
    S843: float
        Extrapolated flux density at 843 MHz [mJy]
    """
    frequency_scaling = (843.0 / freq_ref) ** (spectral_index)
    return flux_ref * frequency_scaling
def canonicalize_job_spec(job_spec):
    """Return a copy of job_spec with default values filled in
    (env/env_nohash/script defaults; imports normalised and sorted by id).
    Also performs a tiny bit of validation: an empty-string ref is rejected.
    """
    def _canonical_import(entry):
        entry = dict(entry)
        entry.setdefault('in_env', True)
        if entry.setdefault('ref', None) == '':
            raise ValueError('Empty ref should be None, not ""')
        entry['before'] = sorted(entry.get('before', []))
        return entry

    result = dict(job_spec)
    normalised = [_canonical_import(entry) for entry in result.get('import', ())]
    result['import'] = sorted(normalised, key=lambda entry: entry['id'])
    for key, default in (("env", {}), ("env_nohash", {}), ("script", [])):
        result.setdefault(key, default)
    return result
def max_value(knapsack_max_weight, items):
    """
    0/1 knapsack: the maximum total value achievable within the weight limit.
    `items` must expose `.weight` and `.value` attributes.
    """
    best = [0] * (knapsack_max_weight + 1)
    for item in items:
        # iterate capacities downward so each item is used at most once
        for capacity in range(knapsack_max_weight, item.weight - 1, -1):
            candidate = best[capacity - item.weight] + item.value
            if candidate > best[capacity]:
                best[capacity] = candidate
    return best[-1]
def decifrador(lista, senha=0):
"""
:param lista: recebe a lista com os elementos separados
:param senha: numeros de trocas de letras
:return: a lista com as palavras traduzidas
"""
varTemp = list ()
stringTemp = ''
alfabeto = ['a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
for palavra in lista:
for caracter in palavra:
if caracter in alfabeto:
mensagemIndex = alfabeto.index (caracter)
stringTemp += alfabeto[(mensagemIndex + senha) % len (alfabeto)]
else:
stringTemp += caracter
varTemp.append (stringTemp)
stringTemp = ''
return varTemp |
def get_formatted_rule(rule=None):
    """Helper to format the rule into a user friendly format.

    :param dict rule: A dict containing one rule of the firewall
    :returns: a formatted "label: value" string, one field per line, that
              can be pushed into the editor
    """
    rule = rule or {}
    # (editor label, rule value with its default)
    fields = (
        ('action', rule.get('action', 'permit')),
        ('protocol', rule.get('protocol', 'tcp')),
        ('source_ip_address', rule.get('sourceIpAddress', 'any')),
        ('source_ip_subnet_mask', rule.get('sourceIpSubnetMask', '255.255.255.255')),
        ('destination_ip_address', rule.get('destinationIpAddress', 'any')),
        ('destination_ip_subnet_mask', rule.get('destinationIpSubnetMask', '255.255.255.255')),
        ('destination_port_range_start', rule.get('destinationPortRangeStart', 1)),
        ('destination_port_range_end', rule.get('destinationPortRangeEnd', 1)),
        ('version', rule.get('version', 4)),
    )
    return ''.join('%s: %s\n' % (label, value) for label, value in fields)
def calculate_mass(
    mono_mz,
    charge
):
    """
    Calculate the neutral precursor mass from the mono m/z and the charge
    (strips one proton mass per charge).
    """
    M_PROTON = 1.00727646687
    precursor_mass = mono_mz * abs(charge) - charge * M_PROTON
    return precursor_mass
def next_collatz_number(int_value, k=3, c=1):
    """
    Calculate the next (generalised) Collatz number for a given int value.

    :param int_value: The value to step from; must be a natural number > 0.
    :param k: The factor by which odd numbers are multiplied (default 3).
    :param c: The summand added to odd numbers (default 1).
    :return: The next Collatz number as int.
    :raises ValueError: if int_value is not a natural number > 0.

    Fix: input validation now raises ValueError instead of using ``assert``
    statements, which are silently stripped when Python runs with -O.
    """
    mod_result = int_value % 2
    # a non-integer value yields a fractional remainder
    if mod_result not in (0, 1):
        raise ValueError("Not a natural number")
    if int_value <= 0:
        raise ValueError("Value > 0 expected")
    if mod_result == 1:
        # odd number
        next_number = int_value * k + c
    else:
        # even number; integer division handles arbitrarily big numbers
        next_number = int_value // 2
    return int(next_number)
def remove_blanks(d):
    """
    Strip entries whose value is '' or None from dict `d`, IN PLACE.

    Returns the same dict object for convenience.

    Idiom fix: None is compared with ``is None`` instead of ``== None``.
    Keys are collected first because deleting while iterating a dict
    raises RuntimeError.
    """
    empty_keys = [key for key in d if d[key] == '' or d[key] is None]
    for key in empty_keys:
        del d[key]
    return d
def truncate_response_data(response_data, block_size=4):
    """
    Truncates pagination links.
    We don't want to show a link for every page if there are lots of pages.
    This replaces page links which are less useful with an ``...`` ellipsis.
    :param response_data:
        Data supposed to be passed to :class:`rest_framework.response.Response`.
    :type response_data: dict
    :param block_size:
        How many page links should be kept at each end of the truncated portion.
    :type block_size: int
    :returns: The response data with updated ``'pages'`` key
    :rtype: dict
    """
    pages = response_data["pages"]
    # nothing to truncate when all pages fit inside one block
    if len(pages) <= block_size:
        return response_data
    current_page_num = response_data["current_page"]
    current_page_index = response_data["current_page"] - 1  # 0-based
    first_page = pages[0]
    last_page = pages[-1]
    # centre the kept block roughly around the current page ...
    block_pivot = block_size // 2
    start_of_current_block = abs(current_page_num - block_pivot)
    # ... but never start past the point where a full block still fits,
    # and never past the current page itself
    start_of_last_block = last_page["page_number"] - block_size
    block_start_index = min(start_of_current_block, start_of_last_block, current_page_index)
    truncated_pages = pages[block_start_index:][:block_size]
    first_of_truncated_pages_num = truncated_pages[0]["page_number"]
    last_of_truncated_pages_num = truncated_pages[-1]["page_number"]
    # left edge: ellipsis for a real gap, page 2 itself when the "gap" is
    # exactly one page, then the first page link
    if first_of_truncated_pages_num > 3:
        truncated_pages = [{"page_number": "..."}] + truncated_pages
    if first_of_truncated_pages_num == 3:
        truncated_pages = [pages[1]] + truncated_pages
    if first_of_truncated_pages_num > 1:
        truncated_pages = [first_page] + truncated_pages
    # right edge: mirror of the left-edge logic
    if last_of_truncated_pages_num < last_page["page_number"] - 2:
        truncated_pages.append({"page_number": "..."})
    if last_of_truncated_pages_num == last_page["page_number"] - 2:
        truncated_pages.append(pages[-2])
    if last_of_truncated_pages_num < last_page["page_number"]:
        truncated_pages.append(last_page)
    response_data["pages"] = truncated_pages
    return response_data
def site_url(request, registry, settings):
    """Expose website URL from ``tm.site_url`` config variable to templates.

    .. note ::
        You should not use this variable in web page templates. It is meant
        for cases needing templating without a running web server.
        The correct way to get the home URL of your website is:
        .. code-block:: html+jinja
            <a href="{{ request.route_url('home') }}">Home</a>
    """
    configured_url = settings["tm.site_url"]
    return configured_url
def parse_by_category(category, data):
    """
    Filter database dump rows by category (the second column of each row).

    :param category: accepts string
    :param data: multi-dimensional iterable of rows
    :return: LIST of the rows (e.g. tuples) whose second element matches
    """
    return [entry for entry in data if entry[1] == category]
def binary_search(a, key, index=0, iteration=0):
    """
    Recursively binary-search sorted list `a` for `key`.

    :param a: a sorted list
    :param key: value to find
    :param index: offset of `a` within the original list (internal)
    :param iteration: recursion depth counter (internal, unused)
    :return: index of `key` in the original list, or -1 when absent

    Fix: an empty input list previously raised IndexError (``a[m]`` with
    ``m == 0``); it now returns -1.
    """
    if not a:
        return -1
    if len(a) == 1 and a[0] != key:
        return -1
    m = len(a) // 2  # m for middle of the array
    if a[m] == key:
        return index + m
    if a[m] > key:
        return binary_search(a[:m], key, index, iteration + 1)
    return binary_search(a[m:], key, index + m, iteration + 1)
def messpf_inp_str(globkey_str, spc_str):
    """ Combine the MESS global-keyword and species strings into one
    newline-terminated MESSPF input string. """
    return '{}\n{}\n'.format(globkey_str, spc_str)
def convert_to_tq_format(topic, question):
    """Build the uniform "T?.Q?" identifier (the ".A?" part is omitted)."""
    return "T{}.Q{}".format(topic, question)
def class_counts(y, n):
    """
    Count occurrences of each class label 0..n-1 in `y`.

    Perf: a single Counter pass (O(len(y) + n)) replaces the original
    nested scan, which re-walked `y` once per class (O(len(y) * n)).

    >>> class_counts([2, 1, 1, 0, 1, 2], 3)
    [1, 3, 2]
    """
    from collections import Counter
    frequency = Counter(y)
    return [frequency[label] for label in range(n)]
def translate_confidence_level(level):
    """Translate a confidence level name into its flag string.

    ``None`` or 'LOW' map to '-i', 'MEDIUM' to '-ii', 'HIGH' to '-iii';
    any other value raises ValueError.
    """
    if level in (None, 'LOW'):
        return '-i'
    if level == 'MEDIUM':
        return '-ii'
    if level == 'HIGH':
        return '-iii'
    raise ValueError(f'{level} is not a valid confidence level')
def get_gene_name(line):
    """
    Input: A line read in from a txt or csv file from some proteomic data
    that may contain a 'GN=' tag before the gene name
    Output: The gene name pulled out of the line — the text between 'GN='
    and the next space (or the end of the line) — or '' when the line has
    no 'GN=' tag.
    """
    start = line.find("GN=")
    if start == -1:
        # Bug fix: find() returns -1 when the tag is missing; the original
        # then scanned from an arbitrary offset and returned garbage.
        return ""
    end = line.find(" ", start + 3)
    if end == -1:
        # Bug fix: a gene name that is the last token made the original
        # walk past the end of the string and raise IndexError.
        end = len(line)
    return line[start + 3:end]
def two_adjacent_digits_same(number: int) -> bool:
    """Return True when the decimal representation of *number* contains two
    equal adjacent digits (like 22 in 122345)."""
    digits = str(number)
    return any(left == right for left, right in zip(digits, digits[1:]))
def integer_to_binary_str(n):
    """
    Returns a string representing the conversion into binary of the integer entered as a parameter.
    :param: *(int)*
    :rctype: *str*
    :UC: n >= 0
    :Examples:
    >>> integer_to_binary_str(0)
    '0'
    >>> integer_to_binary_str(8)
    '1000'
    >>> integer_to_binary_str(-8)
    Traceback (most recent call last):
    AssertionError: Entrez un entier positif!
    """
    # The AssertionError (with this exact message) is part of the documented
    # contract above, so the assert is kept as-is.
    assert(n >= 0), "Entrez un entier positif!"
    # bin() already handles every case, including n == 0 ('0b0' -> '0'),
    # replacing the original manual bit-extraction loop.
    return bin(n)[2:]
def summarize_metrics(all_metrics):
    """
    Return the subset of the metrics dictionary restricted to the summary
    keys: f1, f0.5, recall, precision, num_samples and num_metrics.
    Keys absent from *all_metrics* are simply omitted.

    :param all_metrics: full metrics dictionary
    :return: filtered dictionary
    """
    wanted = ('f1', 'recall', 'precision', 'f0.5', 'num_samples', 'num_metrics')
    return {name: all_metrics[name] for name in wanted if name in all_metrics}
def parse_accept_header(accept):
    """
    Parse the Accept header *accept*, returning a list with 3-tuples of
    [(str(media_type), dict(params), float(q_value)),] ordered by q values.
    If the accept header includes vendor-specific types like::
        application/vnd.yourcompany.yourproduct-v1.1+json
    It will actually convert the vendor and version into parameters and
    convert the content type into `application/json` so appropriate content
    negotiation decisions can be made.
    Default `q` for values that are not specified is 1.0.
    Malformed pieces (a media type without '/', a parameter without '=',
    an unparseable q value) are tolerated instead of raising ValueError.
    # Based on https://gist.github.com/samuraisam/2714195
    # Also, based on a snipped found in this project:
    # https://github.com/martinblech/mimerender
    """
    result = []
    if not accept:
        return result
    for media_range in accept.split(","):
        parts = media_range.split(";")
        media_type = parts.pop(0).strip()
        media_params = []
        # Convert vendor-specific content types into something useful (see
        # docstring). partition() tolerates a missing '/' where the original
        # two-way split() unpacking raised ValueError.
        typ, _, subtyp = media_type.partition('/')
        # check for a + in the sub-type
        if '+' in subtyp:
            # if it exists, determine if the subtype is a vendor-specific type
            vnd, _, extra = subtyp.partition('+')
            if vnd.startswith('vnd'):
                # if it ends in something like "-v1.1", parse the version out
                # (the original wrapped the append in a dead try/except
                # ValueError — list.append never raises it; removed)
                if '-v' in vnd:
                    vnd, _, rest = vnd.rpartition('-v')
                    if rest:
                        # add the version as a media param
                        media_params.append(('version', rest))
                # add the vendor code as a media param
                media_params.append(('vendor', vnd))
                # and re-write media_type to something like application/json
                # so it can be used usefully when looking up emitters
                media_type = '{}/{}'.format(typ, extra)
        q = 1.0
        for part in parts:
            key, eq, value = part.lstrip().partition("=")
            if not eq:
                # Parameter without '=': skip it (the original raised
                # ValueError while unpacking the split here).
                continue
            key = key.strip()
            value = value.strip()
            if key == "q":
                try:
                    q = float(value)
                except ValueError:
                    # Unparseable q value: keep the current default.
                    continue
                if q > 1.0:
                    # Not sure what to do here. Can't find spec
                    # about how to handle q > 1.0. Since invalid
                    # I choose to make it lowest in priority.
                    q = 0.0001
            else:
                media_params.append((key, value))
        result.append((media_type, dict(media_params), q))
    result.sort(key=lambda x: x[2], reverse=True)
    return result
def rectangles_intersect(rect1, rect2):
    """Return True if two rectangles intersect.

    Each rectangle is a (min_corner, max_corner) pair of (x, y) points;
    touching edges count as intersecting.
    """
    (lo1, hi1), (lo2, hi2) = rect1, rect2
    return all(hi1[axis] >= lo2[axis] and hi2[axis] >= lo1[axis]
               for axis in (0, 1))
def _get_sstable_proto_dict(*input_values):
    """Returns table key -> serialized proto map.
    This function exists because the create_parse_tf_example_fn operates on
    dequeued batches which could be 1-tuples or 2-tuples or dictionaries.
    Args:
      *input_values: A (string tensor,) tuple if mapping from a RecordIODataset
        or TFRecordDataset, or a (key, string tensor) tuple if mapping from a
        SSTableDataset, or (Dict[dataset_key, values],) if mapping from multiple
        datasets.
    Returns:
      dict_extracted: dictionary mapping each sstable (or '' for singular) to
        the batch of string tensors for the corresponding serialized protos.
    """
    extracted = {}
    first = input_values[0]
    if isinstance(first, dict):
        # Mapping from multiple datasets: each value may itself be an
        # SSTable (key, value) pair — keep only the value.
        for table_key, proto in first.items():
            if isinstance(proto, tuple):
                _unused_key, extracted[table_key] = proto
            else:
                extracted[table_key] = proto
    elif len(input_values) == 2:
        # SSTable (key, serialized proto) pair: drop the key.
        _unused_key, extracted[''] = input_values
    else:
        # Singular serialized-proto tensor.
        (extracted[''],) = input_values
    return extracted
def get_color_from_score(score):
    """Return an HSL color string for *score*: red below 20, light red
    below 50, light teal below 80, dark teal from 80 up."""
    bands = (
        (20, "hsl(360, 67%, 44%)"),
        (50, "hsl(360, 71%, 66%)"),
        (80, "hsl(185, 57%, 50%)"),
    )
    for upper_bound, color in bands:
        if score < upper_bound:
            return color
    return "hsl(184, 77%, 34%)"
def get_thresholds(points=100, power=3) -> list:
    """Return *points* thresholds in (0, 1): the fractions 1/(points+1)
    through points/(points+1), each raised to *power*."""
    thresholds = []
    denominator = points + 1
    for k in range(1, points + 1):
        thresholds.append((k / denominator) ** power)
    return thresholds
def center_x(cell_lower_left_x, cell_width, word_length):
    """ Center text along the x-axis.
    :param cell_lower_left_x: Lower left x-coordinate
    :param cell_width: Width of cell in which text appears
    :param word_length: Length of plotted word
    :return: Centered x-position
    """
    # Same evaluation order as x + w/2 - l/2 to keep float results identical.
    cell_center = cell_lower_left_x + (cell_width / 2.0)
    return cell_center - (word_length / 2.0)
def remove_prefix(text, prefix):
    """Remove a leading *prefix* from string *text* when present;
    otherwise return *text* unchanged."""
    if not text.startswith(prefix):
        return text
    return text[len(prefix):]
def compare_snpchecks(sangerdict, ngsdict):
    """Compare values from 2 dicts with overlapping keys.

    For each locus in *sangerdict*: non-genotype values ('WT'/'HET'/'HOM'
    excluded) pass through unchanged; matching NGS calls give 'ok',
    mismatches give 'ERROR', and loci missing from *ngsdict* give 'NoNGS'.
    Return the resulting dict.
    """
    result = {}
    for locus, call in sangerdict.items():
        if call not in ('WT', 'HET', 'HOM'):
            result[locus] = call
        elif locus not in ngsdict:
            result[locus] = 'NoNGS'
        elif ngsdict[locus] == call:
            result[locus] = 'ok'
        else:
            result[locus] = 'ERROR'
    return result
def gf_mul_const(f, a, p):
    """Returns f * a where f in GF(p)[x] and a in GF(p).

    Multiplication by zero collapses the polynomial to the empty list.
    """
    if a:
        return [(coeff * a) % p for coeff in f]
    return []
def color565(r, g, b):
    """Return RGB565 color value.
    Args:
        r (int): Red value.
        g (int): Green value.
        b (int): Blue value.
    """
    red_bits = (r & 0xf8) << 8    # top 5 bits of red -> bits 15-11
    green_bits = (g & 0xfc) << 3  # top 6 bits of green -> bits 10-5
    blue_bits = b >> 3            # top 5 bits of blue -> bits 4-0
    return red_bits | green_bits | blue_bits
def format_str_strip(form_data, key):
    """Return form_data[key] with surrounding whitespace stripped, or ''
    when *key* is not present in *form_data*.
    """
    return form_data[key].strip() if key in form_data else ''
def sim_max(terms1, terms2, sem_sim):
    """Similarity score between two term sets based on the maximum
    pairwise value of *sem_sim*, rounded to 3 decimals.

    Pairs for which *sem_sim* returns None are ignored; when no pair
    yields a score, max() raises ValueError.
    """
    pairwise = (sem_sim(left, right) for left in terms1 for right in terms2)
    scores = [value for value in pairwise if value is not None]
    return round(max(scores), 3)
def RGB2YCbCr(RGB):
    """
    Convert an (R, G, B) tuple to (Y, Cb, Cr) using integer arithmetic.

    This is a fast version that sometimes differs by 1 from an exact
    floating-point conversion: the coefficients are pre-scaled by 256 and
    the '+ 128' term rounds before the '>> 8' shift. It can be easily
    cythonized.

    NOTE(review): the coefficients and the +16/+128 offsets look like the
    common BT.601 8-bit "studio-range" conversion — confirm the intended
    standard and value ranges against the callers.
    """
    R, G, B = RGB
    # Each channel: (sum of scaled coefficients + 128 for rounding) >> 8,
    # then the output offset (+16 luma, +128 chroma).
    Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16
    Cb = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128
    Cr = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128
    return Y, Cb, Cr
def return_converted_dict(key, *value):
    """Create a dict pairing each element of *key* with the positional
    argument at the same position in *value*.

    NOTE
    ----
    If you pass a plain str as *key*, its individual characters become the
    dictionary keys, which is probably not what you expect.

    Parameters
    ----------
    key : sequence
        keys for the resulting dictionary
    value : list tuple
        values, taken positionally

    Returns
    -------
    dictionary : dict
        if the lengths of key and value differ, the extra items on the
        longer side are dropped
    """
    return dict(zip(key, value))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.