def get_string(array):
"""return the string for the array"""
return "".join([str(x) for x in array])
|
def _bits_to_int(bits):
"""Converts bits to int."""
result = 0
for bit in bits:
result = (result << 1) | bit
return result
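
# Illustrative usage (added sketch, not part of the original snippet): the list
# is read most-significant bit first, so [1, 0, 1, 1] is 0b1011.
assert _bits_to_int([1, 0, 1, 1]) == 11
assert _bits_to_int([1, 0, 0, 0, 0, 0, 0, 0]) == 128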
|
def mode_mods_to_int(mode: str) -> int:
"""Converts mode_mods (str) to mode_mods (int)."""
# NOTE: This is a temporary function to convert the leaderboard mode to an int.
# It will be removed when the site is fully converted to use the new
# stats table.
for mode_num, mode_str in enumerate((
'vn_std', 'vn_taiko', 'vn_catch', 'vn_mania',
'rx_std', 'rx_taiko', 'rx_catch',
'ap_std'
)):
if mode == mode_str:
return mode_num
    return 0
|
def incrementString(text="AAAA"):
"""Returns the text incremented by one letter
The text must be alphabetic or a ValueError is raised.
>>> incrementString("A")
'B'
>>> incrementString("Z")
'AA'
>>> incrementString("AM")
'AN'
>>> incrementString("AZ")
'BA'
>>> incrementString("BA")
'BB'
>>> incrementString("BZ")
'CA'
>>> incrementString("ZZA")
'ZZB'
>>> incrementString("ZZZ")
'AAAA'
>>> incrementString("AAAA")
'AAAB'
>>> incrementString("AAAZ")
'AABA'
>>> incrementString("ABC2")
Traceback (most recent call last):
ValueError: text must be purely alphabetic
"""
if not text.isalpha():
raise ValueError("text must be purely alphabetic")
OrdA = ord("A")
OrdZ = ord("Z")
changed = False
values = [ord(c) for c in reversed(text.upper())]
for i in range(len(values)):
if values[i] < OrdZ:
values[i] += 1
changed = True
break
elif values[i] == OrdZ:
values[i] = OrdA
if not changed:
values = [OrdA] + values
return "".join([chr(v) for v in reversed(values)])
|
def bytes_label(size):
    """Returns a human-readable file size with unit label."""
    try:
        size = float(size)
    except (TypeError, ValueError):
        # not a number; assume it is already a formatted label
        return size
    suffix = 'B'
    suffixes = ['PB', 'TB', 'GB', 'MB', 'KB']
    while size >= 1024 and suffixes:
        size = size / 1024.0
        suffix = suffixes.pop()
    return '%.1f %s' % (size, suffix)
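
# Illustrative usage (added sketch, not part of the original snippet):
assert bytes_label("1536") == '1.5 KB'
assert bytes_label("2097152") == '2.0 MB'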
|
def removeengineeringpids(pids):
    """Remove propcodes that are associated with engineering and calibration proposals"""
    new_pids = []
    for pid in pids:
        if 'ENG_' not in pid and 'CAL_' not in pid:
            new_pids.append(pid)
    return new_pids
|
def plural_if(zstring, zcondition):
"""
Returns zstring pluralized (adds an 's' to the end) if zcondition is True or
if zcondition is not equal to 1.
Example usage could be ``plural_if("cow", len(cow_list))``.
"""
# If they gave us a boolean value, just use that, otherwise, assume the
# value is some integral type.
if type(zcondition) is bool:
plural = zcondition
else:
plural = zcondition != 1
return zstring + ("s" if plural else "")
|
def union(span, other):
"""Union of two spans."""
if span is None or other is None: return None
return min(span[0], other[0]), max(span[1], other[1])
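
# Illustrative usage (added sketch, not part of the original snippet): spans are
# (start, end) tuples, and None propagates.
assert union((1, 5), (3, 8)) == (1, 8)
assert union(None, (3, 8)) is None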
|
def as_cli_arg(property: str) -> str:
"""We'd like to match command line arguments to their
corresponding python variables, but sadly python doesn't
allow variable/field names with hyphens. As such,
we convert the underscores to hyphens when using
command line args.
Parameters
----------
property : Variable or field name with underscores.
Returns
-------
str
String with underscores replaced by dashes.
"""
return property.replace("_", "-")
|
def checkListsEqualSize(list1, list2):
    """
    Return True if list1 and list2 have the same length, False otherwise.
    """
    return len(list1) == len(list2)
|
def sum_multiples(start, end, divisor):
"""
>>> sum_multiples(1, 12, 4)
24
>>> sum_multiples(1, 12, 3)
30
"""
result = 0
counter = start
while counter <= end:
if counter % divisor == 0:
result += counter
counter += 1
return result
|
def handle_omeka(omeka_data):
"""
General handling normally done every time after we grab Omeka data
via the API with get_all.
"""
for obj in omeka_data:
# Handle the weird 'element_texts' format by flattening as best we can
if 'element_texts' in obj:
for element_text in obj['element_texts']:
key = element_text['element']['name']
value = element_text['text']
try:
obj[key] += '\n' + value
except KeyError:
obj[key] = value
# Flatten out other misc dicts
for obj_key, obj_val in obj.copy().items():
if obj[obj_key] and type(obj_val) is dict:
for key, value in obj_val.items():
obj[obj_key + '_' + key] = obj[obj_key][key]
if type(obj_val) is dict or not obj_val:
del obj[obj_key]
return omeka_data
|
def fixed_mul(a, b):
    """Multiply fixed values.
    Each operand is shifted right by 8 bits before multiplying, so the result
    approximates (a * b) >> 16 while keeping the intermediate product narrow
    (at the cost of the low fractional bits).
    """
    return (a >> 8) * (b >> 8)
|
def filter_list(iterable, include_values):
"""
Create new list that contains only values
specified in the ``include_values`` attribute.
Parameters
----------
iterable : list
List that needs to be filtered.
include_values : list, tuple
        List of values that need to be included in the
        filtered list. Values that are not in this list will be
        excluded from the list specified by the ``iterable`` attribute.
Returns
-------
list
Filtered list.
"""
filtered_list = []
for value in iterable:
if value in include_values:
filtered_list.append(value)
return filtered_list
|
def count_leading_values(lst, char):
"""count the number of char at the beginnig of the string/bytearray lst"""
n = 0
l = len(lst)
while n < l and lst[n] == char:
n += 1
return n
|
def format_subarticle(article):
"""
Format the list of subarticles.
:param article: string containing the list of articles
:type article: str
:return: list of subarticles
:rtype: [str]
"""
articles = article.split(';')
articles = [a for sublist in articles for a in sublist.split('+')]
res = list(set(articles))
return res
|
def merge_index_boundaries(indices):
"""
Auxiliary function to merge the boundaries of adjacent spans into continuous ones.
"""
merged_indices = []
j = 0
while j < len(indices):
curr_start, curr_end = indices[j]
continue_merge = j < len(indices) - 1 and curr_end == indices[j + 1][0]
while continue_merge:
curr_end = indices[j + 1][1]
j += 1
continue_merge = j < len(indices) - 1 and curr_end == indices[j + 1][0]
j += 1
merged_indices.append((curr_start, curr_end))
return merged_indices
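
# Illustrative usage (added sketch, not part of the original snippet): spans that
# share a boundary are merged, disjoint spans are kept as-is.
assert merge_index_boundaries([(0, 3), (3, 7), (9, 12)]) == [(0, 7), (9, 12)]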
|
def _dict_keys_get(d, keys):
"""Recursively get values from d using `__getitem__`
"""
d = d
for k in keys:
d = d[k]
return d
|
def quote(string):
""" Surround a string with double quotes """
return f'"{string}"'
|
def str2intlist(s, repeats_if_single=None):
"""Parse a config's "1,2,3"-style string into a list of ints.
Args:
s: The string to be parsed, or possibly already an int.
repeats_if_single: If s is already an int or is a single element list,
repeat it this many times to create the list.
Returns:
A list of integers based on `s`.
"""
if isinstance(s, int):
result = [s]
else:
result = [int(i.strip()) if i != "None" else None
for i in s.split(",")]
if repeats_if_single is not None and len(result) == 1:
result *= repeats_if_single
return result
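
# Illustrative usage (added sketch, not part of the original snippet):
assert str2intlist("1, 2,3") == [1, 2, 3]
assert str2intlist(4, repeats_if_single=3) == [4, 4, 4]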
|
def RKOCstr2code(ocstr):
"""
Converts output of runge_kutta_order_conditions() to
numpy-executable code.
"""
factors=ocstr.split(',')
occode='np.dot(b,'
for factor in factors[0:len(factors)-1]:
occode=occode+'np.dot('+factor+','
occode=occode+factors[len(factors)-1]
occode=occode.replace(']',',:]')
occode=occode+')'*len(factors)
return occode
|
def convert_weights_to_dict(weights_list):
""" The weights are passed in as a list and we need to convert them to a dict """
db_formatted_weights = {}
for weight_details in weights_list:
weight_name = weight_details.name
weight_value = weight_details.weight
db_formatted_weights[weight_name] = weight_value
return db_formatted_weights
|
def n_dgt_Fibo(n):
"""
    This function returns the index of the first Fibonacci number that has n digits.
"""
u = 1
v = 1
j = 2
while len(str(v)) < n:
u, v = v, u+v
j += 1
return j
|
def db2lin(value):
"""Convert logarithimic units to linear
>>> round(db2lin(10.0), 2)
10.0
>>> round(db2lin(20.0), 2)
100.0
>>> round(db2lin(1.0), 2)
1.26
>>> round(db2lin(0.0), 2)
1.0
>>> round(db2lin(-10.0), 2)
0.1
"""
return 10**(value / 10)
|
def epoch_time(start_time, end_time):
"""
Function to calculate total time taken in an epoch
"""
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
|
def psmid_to_scan(psmid):
"""
    Extract the scan number from a Percolator PSMId.
Expects the following formatted PSMId:
`run` _ `SII` _ `MSGFPlus spectrum index` _ `PSM rank` _ `scan number` _ `MSGFPlus-assigned charge` _ `rank`
See https://github.com/percolator/percolator/issues/147
"""
psmid = psmid.split('_')
scan = int(psmid[-3])
return scan
|
def __gen_array(page_id, array_data):
"""Generate an array of elements."""
content = []
for item in array_data['items']:
genfun = globals()['__gen_' + item['type']]
content.append(genfun(page_id, item))
if array_data['enclosedInView']:
if array_data['orientation'] == 'vertical':
container_style = 'flexible'
else:
container_style = '[flexible, row]'
return '<View style={{{}}}>\n{}\n</View>'.format(
container_style,
'\n'.join(content))
return '\n'.join(content)
|
def naming_convention(dic, convert):
"""
Convert a nested dictionary from one convention to another.
Args:
dic (dict): dictionary (nested or not) to be converted.
convert (func): function that takes the string in one convention and
returns it in the other one.
Returns:
Dictionary with the new keys.
"""
new = {}
for key, value in dic.items():
converted_value = value
if isinstance(value, dict):
converted_value = naming_convention(value, convert)
elif isinstance(value, list):
converted_value = []
for each_value in value:
if isinstance(each_value, dict):
converted_value.append(naming_convention(each_value,
convert))
else:
converted_value.append(each_value)
new[convert(key)] = converted_value
return new
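
# Illustrative usage (added sketch, not part of the original snippet): the
# `to_camel` converter below is a hypothetical example of the `convert` argument.
def to_camel(key):
    head, *rest = key.split("_")
    return head + "".join(word.capitalize() for word in rest)

assert naming_convention({"first_name": "Ada", "home_address": {"zip_code": "123"}},
                         to_camel) == {"firstName": "Ada", "homeAddress": {"zipCode": "123"}}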
|
def colval(letter: str):
""" chess columns are 1-indexed, subtract one to operate on 'board' """
return { "a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "h": 7 }[letter]
|
def is_string(value):
"""
Tests the value to determine whether it is a string.
:param any value:
    :return: True if the value is a string (an instance of the str class)
>>> is_string( 'Hello' )
True
>>> is_string( ['Hello'] )
False
"""
return isinstance(value, str)
|
def change_angle_origin(angles, max_positive_angle):
"""
:param angles: a list of angles
:param max_positive_angle: the maximum positive angle, angles greater than this will be shifted to negative angles
:return: list of the same angles, but none exceed the max
:rtype: list
"""
if len(angles) == 1:
if angles[0] > max_positive_angle:
return [angles[0] - 360]
else:
return angles
new_angles = []
for angle in angles:
if angle > max_positive_angle:
new_angles.append(angle - 360)
elif angle == max_positive_angle:
if angle == angles[0] and angles[1] > max_positive_angle:
new_angles.append(angle - 360)
elif angle == angles[-1] and angles[-2] > max_positive_angle:
new_angles.append(angle - 360)
else:
new_angles.append(angle)
else:
new_angles.append(angle)
return new_angles
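
# Illustrative usage (added sketch, not part of the original snippet): with a
# maximum positive angle of 180, angles above it wrap to the negative range.
assert change_angle_origin([350, 10, 180], 180) == [-10, 10, 180]
assert change_angle_origin([270], 180) == [-90]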
|
def splitit(l, n):
"""Utility function to evenly split a list. Handles edge cases.
There is probably a 1-liner list comprehension to do this, but it would be super gnarly.
@param l list to split (not a generator)
@param n number of chunks to split the list into
@return list of n lists
"""
    # if list is shorter than n, split into a list of lists, each with one entry
if len(l) < n:
n = len(l)
s = len(l) // (n) # each chunk will either be of size s or size s+1
m = len(l) % (
s * n
) # remainder after n chunks of size s = how many chunks of size s+1 are needed
r = [] # result
p = 0 # pointer
for i in range(n):
if i < m:
r.append(l[p : p + s + 1])
p += s + 1
else:
r.append(l[p : p + s])
p += s
return r
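
# Illustrative usage (added sketch, not part of the original snippet): 7 items
# into 3 chunks gives one chunk of size 3 and two of size 2.
assert splitit([1, 2, 3, 4, 5, 6, 7], 3) == [[1, 2, 3], [4, 5], [6, 7]]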
|
def dialect_del_fixer(values: str) -> str:
""" Fix unprintable delimiter values provided as CLI args
"""
if values == '\\t':
val = '\t'
elif values == 'tab':
val = '\t'
elif values == '\\n':
val = '\n'
else:
val = values
return val
|
def get_real_platform(single_platform: str) -> str:
"""
    Replace the different variants of the provided platform name with the two canonical ones we
    are using: amd64 and arm64.
"""
return single_platform.replace("x86_64", "amd64").replace("aarch64", "arm64").replace("/", "-")
|
def check_data(data):
"""For a 44-year-old, the api should
always return an age, a full retirement age
and a value for benefits at age 70
"""
if (
data["current_age"] == 44
and data["data"]["full retirement age"] == "67"
and data["data"]["benefits"]["age 70"]
):
return "OK"
else:
return "BAD DATA"
|
def perm(N, R):
"""
The permutation function P(N,R) = N!/(N-R)!
:param N
Total elements.
:param R
Number to choose.
:return
<int:permutations>
"""
    result = 1
    stop = N - R
    while N > stop:
        result *= N
        N -= 1
    return result
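
# Illustrative usage (added sketch, not part of the original snippet):
assert perm(5, 2) == 20   # 5 * 4
assert perm(4, 4) == 24   # 4!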
|
def supports_raw_attributes(config: dict) -> bool:
"""Get the supports_raw_attributes config setting."""
return config.get("supports_raw_attributes", False)
|
def clean_text(text: str) -> str:
"""
    Replaces newlines with spaces and strips leading/trailing whitespace from the text.
"""
return text.replace('\n', ' ').strip()
|
def flatten_material(material):
"""
Flattens a given material.
Args:
material (dict): material config.
Returns:
list
"""
lattice = material["lattice"]
return [
material["_id"],
material["name"],
", ".join(material["tags"]),
len(material["basis"]["coordinates"]),
lattice["a"],
lattice["b"],
lattice["c"],
lattice["alpha"],
lattice["beta"],
lattice["gamma"]
]
|
def getHeaders(environ):
""" Get all Headers and return them back as dictionary """
headers = {}
for key in list(environ.keys()):
if key.startswith('HTTP_'):
headers[key[5:]] = environ.get(key)
return headers
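
# Illustrative usage (added sketch, not part of the original snippet) with a
# minimal WSGI-style environ dict:
environ_example = {'HTTP_HOST': 'example.org', 'HTTP_USER_AGENT': 'curl/8.0', 'PATH_INFO': '/'}
assert getHeaders(environ_example) == {'HOST': 'example.org', 'USER_AGENT': 'curl/8.0'}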
|
def is_hex(color):
""" Method to check if a color code is hexadecimal (HEX)
This method returns the value True if the input argument corresponds to an hexadecimal color
code.
Parameters
----------
color: :obj:
Color code
Returns
-------
bool
True if the input color code takes the hexadecimal (HEX) form.
Examples
--------
>>> from uibcdf_stdlib.colors import is_hex
>>> is_hex('#008080')
True
>>> is_hex([0.0, 0.5, 0.5])
False
"""
    output = False
    if type(color) == str:
        code = color[1:] if (len(color) == 7 and color.startswith('#')) else color
        if len(code) == 6 and all(c in '0123456789abcdefABCDEF' for c in code):
            output = True
    return output
|
def csw_query(identifier):
"""
Create CSW GetRecordById query for a given identifier.
"""
return f'''
<csw:GetRecordById xmlns:csw="http://www.opengis.net/cat/csw/2.0.2" service="CSW" version="2.0.2" outputSchema="http://www.isotc211.org/2005/gmd">
<csw:Id>{identifier}</csw:Id>
<csw:ElementSetName>full</csw:ElementSetName>
</csw:GetRecordById>
'''
|
def ensure_complete_modality(modality_dict, require_rpc=False):
"""
Ensures that a certain modality (MSI, PAN, SWIR) has all of the required files for computation
through the whole pipeline.
:param modality_dict: Mapping of a certain modality to its image, rpc, and info files.
:type modality_dict: dict
:param require_rpc: Whether or not to consider the rpc file being present as a requirement for
a complete modality.
:type require_rpc: bool
"""
keys = ['image', 'info']
if require_rpc:
keys.append('rpc')
return all(key in modality_dict for key in keys)
|
def _add_workdir(path, **kwargs):
"""Return Dockerfile WORKDIR instruction to set working directory."""
return "WORKDIR {}".format(path)
|
def remove_citations(descr, citations):
"""This method is used to replace and citation in markdown format
with a link leading to the resource
"""
for citation in citations:
descr = descr.replace("(Citation: " + citation['source_name'] + ")","")
return descr
|
def value_for_key(tuple_of_tuples, key):
"""Processes a tuple of 2-element tuples and returns the value
corresponding to the given key. If no value is found, the key is returned.
"""
    for t in tuple_of_tuples:
        if t[0] == key:
            return t[1]
    return key
|
def get_label2id(labels):
"""
Get label2id mapping based on labels
Args:
labels: list of labels.
Return:
label2id map
"""
return {v: str(k) for k, v in enumerate(labels)}
|
def productDict(pairsData):
"""
    Transform the data into a dict mapping each company to its products
:param pairsData: (companyName,product)
:return: dict={companyName:[product1,...]...}
"""
product_dict = {}
for company_name,product in pairsData:
if company_name not in product_dict:
product_dict[company_name] = []
product_dict[company_name].append(product)
return product_dict
|
def test_depth(array_like):
"""Test object to see how much nested depth it has.
Compatible with arbitrarily nested list, tuple, dict, or numpy.ndarray.
Compare with numpy.ndarray.ndim.
Here we only test array_like[0], which is not thorough.
E.g.
test_depth( [1, [0, [3]]] ) == 1
test_depth( [[[3], 2], 1] ) == 3
"""
try:
# use recursive test to add up the depth, of course
len(array_like)
if len(array_like) == 0:
return 1
if type(array_like) is dict:
array_like_first = list(array_like.values())[0]
return 1 + test_depth(array_like_first)
else:
return 1 + test_depth(array_like[0])
    except TypeError:
# if len(array_like) causes error, we must be at the bottom
return 0
|
def delta_function(value, bands=''):
"""
Use a delta function to set a specific value. Alternatively you can directly set the
value of a parameter if it is going to be constant. This functionality is useful if
you want to avoid deleting the `DISTRIBUTION` entry for a parameter in your
configuration file.
Args:
        value : The value to set for this parameter
        bands : Comma-separated string of band names used in the simulation
Returns:
A list of values with one value for each band in the simulation
"""
return [value] * len(bands.split(','))
|
def linear_intersect(lambda1,lambda2):
""" Given two lambda functions corresponding to linear y=mx+b
Find the point of intersection
x_intersect = (b2-b1)/(m1 - m2)
intersect = (tuple) (x,y) intersect
"""
intersect = (None,None)
# extract b's
b1 = lambda1(0.0)
b2 = lambda2(0.0)
# sample for m's
m1 = lambda1(2.0)-lambda1(1.0)
m2 = lambda2(2.0)-lambda2(1.0)
    if m1 == m2:
        # parallel (or identical) lines: no unique intersection point
        return intersect
    x_intersect = (b2-b1)/(m1-m2)
y1_intersect = lambda1(x_intersect)
y2_intersect = lambda2(x_intersect)
if abs(y1_intersect - y2_intersect) < 1e-8:
intersect = (x_intersect,y1_intersect)
return intersect
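
# Illustrative usage (added sketch, not part of the original snippet):
# y = 2x + 1 and y = -x + 4 intersect at (1, 3).
assert linear_intersect(lambda x: 2 * x + 1, lambda x: -x + 4) == (1.0, 3.0)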
|
def has_wildcard(workflow_id_or_label):
"""Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
"""
if workflow_id_or_label is None:
return False
if isinstance(workflow_id_or_label, (list, tuple)):
for val in workflow_id_or_label:
if has_wildcard(val):
return True
return False
else:
return '?' in workflow_id_or_label or '*' in workflow_id_or_label
|
def getInterfaceRecord(networkRecords, mac):
"""
    Get the interface record of xen by its mac.
    Arguments:
    networkRecords -- Network records on the xen server.
mac -- Mac of the interface.
Returns:
network object on success else None.
"""
interfaces = [rec for rec in networkRecords if networkRecords[rec]['MAC'] == mac]
if len(interfaces) == 0:
return None
else:
return interfaces[0]
|
def makeListofDicts(StateVector):
"""
    Convert each plane entry in the StateVector into a plain dict of its attributes.
"""
data = [] # empty list. Each element will be a dict
for plane in StateVector:
data.append(plane.__dict__)
return data
|
def replace(old, new, names, count=-1):
"""Replace characters that match string."""
return [i.replace(old, new, count) for i in names]
|
def valid_bytes_128_after(valid_bytes_48_after):
"""
Fixture that yields a :class:`~bytes` that is 128 bits and is ordered "greater than" the
    result of the :func:`~valid_bytes_48_after` fixture.
"""
return valid_bytes_48_after + b'\0' * 10
|
def attempt(func, *args, **kwargs):
"""Attempt to call a function."""
if "maxattempt" in kwargs:
maxattempt = kwargs.pop("maxattempt")
else:
maxattempt = 100
test = 1
count = 1
while test:
try:
func(*args, **kwargs)
test = 0
except (ValueError, IOError):
if count <= maxattempt:
count += 1
else:
break
return count
|
def str2chksm( s ):
""" str2chksm(s) -> int,int
Keyword arguments:
s -- string with first 14 or 15 characters being IMEI
Description:
If the input string first splitable string has 14 characters then
generate the Check Digit for the IMEI code. Return id,None.
If the input string first splitable string has 15 characters then
verify the Check Digit of the IMEI. Return id,checksum.
Otherwise, return None, None as an error.
The input line can be anything as long as the first part has
14 or 15 character IMEI. The rest of the line is ignored.
"""
    if len(s) > 14:
        cd = int(s[14])
    elif len(s) > 13:
cd = None
else:
return None,None
i = 13; mul = 2; sum = 0
while i >= 0:
n = int(s[i]) * mul
if n >= 10:
            sum += (n // 10) + (n % 10)
else:
sum += n
mul ^= 3
i -= 1
id = sum % 10
if id > 0:
id = 10 - id
return id,cd
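
# Illustrative usage (added sketch, not part of the original snippet), using the
# well-known sample IMEI 49015420323751 whose Luhn check digit is 8:
assert str2chksm("49015420323751") == (8, None)
assert str2chksm("490154203237518") == (8, 8)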
|
def difference(
f_inverted_index,
s_inverted_index
) -> list:
"""
Operator "OR"
:type f_inverted_index: list
:type s_inverted_index: list
"""
if (not f_inverted_index) and (not s_inverted_index):
return []
if not f_inverted_index:
return []
if not s_inverted_index:
return f_inverted_index
differences = f_inverted_index[:]
for index in s_inverted_index:
if index in differences:
differences.remove(index)
return differences
|
def condense_border_none(css: str) -> str:
"""Condense border:none; to border:0;.
"""
return css.replace('border:none;', 'border:0;')
|
def check_substring(string, substring, normalize = False):
""" Normalize compares lower-case """
if normalize:
return substring.lower() in string.lower()
else:
return substring in string
|
def RPL_TRACELOG(sender, recipient, message):
""" Reply Code 261 """
return "<" + sender + ">: " + message
|
def count_ports(port_mask):
"""Return the number of ports of given portmask"""
ports_in_binary = format(int(port_mask, 16), 'b')
nof_ports = ports_in_binary.count('1')
return nof_ports
|
def vec_mul(a, b):
"""
Multiply two vectors or a vector and a scalar element-wise
Parameters
----------
a: list[]
A vector of scalar values
b: list[]
A vector of scalar values or a scalar value.
Returns
-------
list[]
A vector product of a and b
"""
if type(b) is list:
assert len(a) == len(b)
# return [a[n] * b[n] for n in range(len(a))]
return [*map(lambda ai, bi: ai * bi, a, b)]
else:
# return [a[n] * b for n in range(len(a))]
return [*map(lambda ai: ai * b, a)]
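
# Illustrative usage (added sketch, not part of the original snippet):
assert vec_mul([1, 2, 3], [4, 5, 6]) == [4, 10, 18]
assert vec_mul([1, 2, 3], 2) == [2, 4, 6]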
|
def _extractFileData(file, dicomMetadata, archivePath=None):
"""
Extract the useful data to be stored in the `item['dicom']['files']`.
In this way it become simpler to sort them and store them.
"""
result = {
'dicom': {
'SeriesNumber': dicomMetadata.get('SeriesNumber'),
'InstanceNumber': dicomMetadata.get('InstanceNumber'),
'SliceLocation': dicomMetadata.get('SliceLocation')
},
'name': file['name'],
'_id': file['_id']
}
if archivePath:
result['archivePath'] = archivePath
return result
|
def pythagorean_triples(n):
"""
Return list of pythagorean triples as non-descending tuples
of ints from 1 to n.
Assume n is positive.
@param int n: upper bound of pythagorean triples
>>> pythagorean_triples(5)
[(3, 4, 5)]
"""
    # brute-force search over all (i, j, k) combinations, keeping only the
    # non-descending triples that satisfy i ** 2 + j ** 2 == k ** 2
res = []
for i in range(1, n + 1):
for j in range(1, n + 1):
for k in range(1, n + 1):
if i <= j <= k and (i ** 2 + j ** 2) == k ** 2:
res.append((i, j, k))
return res
|
def gen_HttpApiLog(source, action, target):
"""Generate a Http Api Log object from action and target."""
httpapilog = {
"@type": "HttpApiLog",
"Subject": source,
"Predicate": action,
"Object": target
}
return httpapilog
|
def check_nomenclature(glycan):
"""checks whether the proposed glycan has the correct nomenclature for glycowork\n
| Arguments:
| :-
| glycan (string): glycan in IUPAC-condensed format
"""
if not isinstance(glycan, str):
print("You need to format your glycan sequences as strings.")
return
if '?' in glycan:
print("You're likely using ? somewhere, to indicate linkage uncertainty. Glycowork uses 'z' to indicate linkage uncertainty")
return
if '=' in glycan:
print("Could it be that you're using WURCS? Please convert to IUPACcondensed for using glycowork.")
if 'RES' in glycan:
print("Could it be that you're using GlycoCT? Please convert to IUPACcondensed for using glycowork.")
return True
|
def cm2inch(*tupl):
"""
Convert a value or tuple of values from cm
to inches.
Source: https://stackoverflow.com/a/22787457
Input
---
tupl : float, int or tuple of arbitrary size
Values to convert
Returns
---
Converted values in inches.
"""
inch = 2.54
if isinstance(tupl[0], tuple):
return tuple(i / inch for i in tupl[0])
else:
return tuple(i / inch for i in tupl)
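
# Illustrative usage (added sketch, not part of the original snippet): accepts
# either separate values or a single tuple; 2.54 cm is exactly one inch.
assert cm2inch(2.54) == (1.0,)
assert tuple(round(v, 6) for v in cm2inch((25.4, 12.7))) == (10.0, 5.0)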
|
def extract_terms(document):
""" document = "545,32 8:1 18:2"
extract_terms(document) => returns [[8, 1], [18, 2]]
"""
terms = [item.split(':') for item in document.split() if item.find(':') >= 0]
terms = [[int(term), int(frequency)] for term, frequency in terms]
return terms
|
def extract_nsynth_volume(filename):
"""function to extract volume level from nsynth dataset base filenames
    keyboard_acoustic_000-059-075.wav -> '75' (returned as a string)"""
return filename[-6:-4]
|
def strip_unneeded(text: str) -> str:
"""Get rid of unneeded characters in text and return it.
"""
text = text.strip().replace("+", "").replace(",", "").replace(" ", "")
if not text:
text = "0"
return text
|
def azero(seq):
"""Return True if all numbers in 'seq' are 0s."""
    return not any(seq)
|
def define_num_LM_eq(lc_list):
"""
define_num_LM_eq
    Define the number of equations needed to impose the boundary conditions
    Args:
        lc_list (list): list of all the defined constraints
    Returns:
        num_LM_eq (int): number of new equations needed to impose the boundary conditions
Examples:
num_LM_eq = lagrangeconstraints.define_num_LM_eq(lc_list)
Notes:
"""
num_LM_eq = 0
# Compute the number of equations
for lc in lc_list:
num_LM_eq += lc.get_n_eq()
return num_LM_eq
|
def hashable(obj):
"""
Return whether the `obj` is hashable.
Parameters
----------
obj : object
The object to check.
Returns
-------
bool
"""
try:
hash(obj)
except TypeError:
return False
return True
|
def _prioritify(line_of_css, css_props_text_as_list):
"""Return args priority, priority is integer and smaller means higher."""
sorted_css_properties, groups_by_alphabetic_order = css_props_text_as_list
priority_integer, group_integer = 9999, 0
for css_property in sorted_css_properties:
if css_property.lower() == line_of_css.split(":")[0].lower().strip():
priority_integer = sorted_css_properties.index(css_property)
group_integer = groups_by_alphabetic_order[priority_integer]
break
return priority_integer, group_integer
|
def try_get_dn_part(subject, oid=None):
"""
    Tries to extract the value of the given OID from the X500 name.
:param subject:
:param oid:
:return:
"""
try:
if subject is None:
return None
if oid is None:
return None
for sub in subject:
if oid is not None and sub.oid == oid:
return sub.value
except:
pass
return None
|
def _find_lines_after_prefix(text, prefix, num_lines):
"""Searches |text| for a line which starts with |prefix|.
Args:
text: String to search in.
prefix: Prefix to search for.
num_lines: Number of lines, starting with line with prefix, to return.
Returns:
Matched lines. Returns None otherwise.
"""
lines = text.split('\n')
for i, line in enumerate(lines):
if line.startswith(prefix):
return lines[i:i + num_lines]
return None
|
def get_classification(average) -> str:
"""Return a string containing the classification of the student
according to the Programme Specification."""
if average >= 70:
return "First Class Honours"
if average >= 60:
return "Second Class Honours [Upper Division]"
if average >= 50:
return "Second Class Honours [Lower Division]"
if average >= 40:
return "Third Class Honours"
return "Fail"
|
def convertnontitled(nontitledparagraphs):
"""
Convert non-titled bullets to strings
"""
string_list = []
for paragraphs in nontitledparagraphs:
lines = []
for para in paragraphs:
lines.extend(para)
string_list.append(b' '.join(lines))
return string_list
|
def getField(row, field, sheet="submissions"):
"""
Each row is an array, and this function lets you get a specific value from that row
This function is used because row size varies across time and certain rows may not have
values at certain positions
Also this function lets you use human readable keynames like timestamp instead of row[0]
Also, score's are pre-converted from strings into integers
"""
if sheet == "submissions":
fields = ["timestamp", "status", "score", "name", "proof", "tank", "gamemode", "specialSubmission", "extraDetails"]
elif sheet == "has_calculations":
fields = ["score", "name", "proof", "tank", "gamemode", "isLegacy"]
elif sheet == "airtable":
fields = ["score", "name", "tankImage", "tank", "proof", "gamemode", "build"]
else:
fields = []
fieldIndices = { field: i for i,field in enumerate(fields) }
assert field in fieldIndices
index = fieldIndices[field]
assert len(row) > index
# return an integer for the score
if field == "score":
score = row[index].strip()
assert score != ""
# some scores on Oldest don't have numerical forms, they're just strings like 5.45mil, so account for those here
if score.endswith("mil"):
score = float( score.replace("mil", "").strip() ) * 1_000_000
elif score.endswith("k"):
score = float( score.replace("k", "").strip() ) * 1000
# some scores are decimals for whatever reason, so parse them as floats first and then ints
return int(float(score))
# return a boolean for isLegacy
elif field == "isLegacy":
return row[index].strip().lower() == "k"
# airtable proof's are in a weird format like this: "imageName (imageLink)"
elif field == "proof" and sheet == "airtable":
pass
# just return a string for all other fields with surrounding whitespace removed
return row[index].strip()
|
def no_disp_settings_page(on=0):
"""Esconder a Pagina Configuracoes de Video
DESCRIPTION
Esta opcao esconde a pagina "Configuracoes" do controle de propriedades
de video.
COMPATIBILITY
Todos.
MODIFIED VALUES
NoDispSettingsPage : dword : 00000000 = Desabilitado;
00000001 = Habilitada restricao.
"""
if on:
return '''[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\\
CurrentVersion\\Policies\\System]
"NoDispSettingsPage"=dword:00000001'''
else:
return '''[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\\
CurrentVersion\\Policies\\System]
"NoDispSettingsPage"=dword:00000000'''
|
def evaluate_groups_score(groups):
"""Evaluate score of groups."""
return sum([x * y for x, y in zip(groups.keys(), groups.values())])
|
def sort_defects_on_status(defects):
"""Sort the defects on status."""
new_bucket = ["New", "To Do"]
analyzing_bucket = ["Analyzing", "To_Analyzing"]
solving_bucket = ["Solving", "Analyzing_done", "To_Solving"]
verifying_bucket = ["Verifying", "To_Reviewing", "Reviewing", "To_Verifying"]
closing_bucket = ["Closing", "Reviewing_Done", "Verifying_Done"]
postponed_bucket = ["Postponed", "Blocked"]
buckets = [
new_bucket,
analyzing_bucket,
solving_bucket,
verifying_bucket,
closing_bucket,
postponed_bucket,
]
status_buckets = {
"New": [],
"Analyzing": [],
"Solving": [],
"Verifying": [],
"Closing": [],
"Postponed": [],
}
for issue in defects:
for bucket in buckets:
if issue.status in bucket:
status_buckets[bucket[0]].append(issue)
return status_buckets
|
def Read(filename):
"""Read entire contents of file with name 'filename'."""
    with open(filename) as fp:
        return fp.read()
|
def is_cs_pubkey(provided):
"""
Check if the value is a proper WAMP-cryptosign public key.
:param provided: The value to check.
:return: True iff the value is of correct type.
"""
return type(provided) == bytes and len(provided) == 32
|
def make_url_of_flair_text(string):
"""Takes string that MAY not start with http://, if it doesn't it prepends that elsewise returns"""
if string is None:
return '' # If empty or None return empty string
if string.startswith('https://'):
return string
else:
string = "https://" + string
return string
|
def _extract_meta_instance_id(form):
"""Takes form json (as returned by xml2json)"""
if form.get('Meta'):
# bhoma, 0.9 commcare
meta = form['Meta']
elif form.get('meta'):
# commcare 1.0
meta = form['meta']
else:
return None
if meta.get('uid'):
# bhoma
return meta['uid']
elif meta.get('instanceID'):
# commcare 0.9, commcare 1.0
return meta['instanceID']
else:
return None
|
def split_string(str, limit, sep="\n"):
"""
Split string
Parameters
----------
str : str
string to split
limit : int
string length limit
sep : str
separator
default: "\n"
Returns
-------
    list
        the input split into parts no longer than `limit` (the original string is
        returned unchanged if `limit` is smaller than the longest piece)
"""
limit = int(limit)
words = str.split(sep)
if max(map(len, words)) > limit:
# limit is too small, return original string
return str
res, part, others = [], words[0], words[1:]
for word in others:
if (len(sep) + len(word)) > (limit - len(part)):
res.append(part)
part = word
else:
part += sep + word
if part:
res.append(part)
return res
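
# Illustrative usage (added sketch, not part of the original snippet): pieces are
# re-joined with `sep` until adding the next one would exceed `limit`.
assert split_string("alpha beta gamma delta", 11, sep=" ") == ["alpha beta", "gamma delta"]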
|
def solution(board, boardSize):
"""Checks to see if the board is a solution.
Args:
board (int[]) : The representation of the chess board.
boardSize (int) : The size of the n*n chess board.
"""
# If there is no board, no solution.
if not board:
return False
"""
The set() function removes duplicates (turns a list into a set).
For a board to be a solution, there needs to be exactly one Queen on every column.
So if the length of the board is the same as the length of the set of the board, that means all elements are unique.
And if all elements are unique, then there must be exactly one Queen on every row.
"""
if len(board) != len(set(board)):
return False
# list declarations
diagonal1 = []
diagonal2 = []
# The hills & dales of each Queen are calculated.
for i in range(0, boardSize):
diagonal1.append(board[i] + i)
diagonal2.append(board[i] - i)
# The diagonals are checked the same way that the rows are checked
if len(diagonal1) != len(set(diagonal1)) or len(diagonal2) != len(set(diagonal2)):
return False
# The solution works if it passed all the previous requirements
return True
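
# Illustrative usage (added sketch, not part of the original snippet): board[i]
# holds the row of the queen in column i, so [1, 3, 0, 2] is a valid 4-queens layout.
assert solution([1, 3, 0, 2], 4) is True
assert solution([0, 1, 2, 3], 4) is False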
|
def _matches_app_id(app_id, pkg_info):
"""
:param app_id: the application id
:type app_id: str
:param pkg_info: the package description
:type pkg_info: dict
:returns: True if the app id is not defined or the package matches that app
id; False otherwise
:rtype: bool
"""
return app_id is None or app_id in pkg_info.get('apps')
|
def distance(x1, y1, x2, y2):
"""
This calculates the distance between (x1, y1) and (x2, y2)
"""
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
|
def calculateReturn(arrayOfReturns):
"""Function for calculation of returns function to make code more readable
Args:
arrayOfReturns ([Float]): Array of retuns from specific month
Returns:
Float : Returns sum devided by 5
"""
return round((sum(arrayOfReturns)/5),2)
|
def stem(filename):
"""Get the stem of a filename"""
if '.' in filename:
        return '.'.join(filename.split(".")[:-1])
return filename
|
def validate(var,vals,types):
"""This function checks if an input is valid by ensuring that does not have
a restricted value and that it is of a specified type."""
return bool(var not in vals and isinstance(var,types))
|
def my_momentum(M, v):
""" Calculate total momentum.
Args:
        M (float): particle mass
        v (np.array): particle velocities, shape (natom, ndim)
    Return:
        np.array: total momentum vector, shape (ndim,)
"""
return sum(M*v)
|
def left_join(phrases: tuple) -> str:
"""
Join strings and replace "right" to "left"
"""
return ','.join(phrases).replace("right", "left")
|
def AT(y,x):
"""
Returns control codes to set the coordinates of the text cursor.
Use this in a ``PRINT`` or ``SET`` command. Example:
``PRINT("normal",AT(5,15),"row 5 column 15",AT(14,4),"row 14 column 4")``
Args:
- y - integer - the y coordinate to move to (0-23)
- x - integer - the x coordinate to move to (0-31)
"""
return "".join((chr(22),chr(int(y)),chr(int(x))))
|
def is_number(arg: str):
"""
    string is a number like 1.0, -1
    >>> is_number('1.5')
    True
"""
if len(arg) > 1 and arg[0] == '0':
return False
if arg.startswith('-'):
arg = arg[1:]
if arg.isdigit():
return True
if arg.find('.') > 0:
args = arg.split('.')
if len(args) > 0:
if args[0].isdigit() and args[1].isdigit():
return True
return False
|
def filt_dct(dct):
"""Filter None values from dict."""
    return {k: v for k, v in dct.items() if v is not None}
|