def checkSum(intList, intVal):
"""checks if the sum of elements in intList equals intVal"""
s = 0
for x in intList:
s += x
return (s == intVal)
|
def hexint_parser(arg: str) -> int:
"""Parse a hexadecimal starting with 0x into an integer."""
if not arg.startswith("0x"):
raise Exception("Received non-hex integer where hex expected")
return int(arg, 16)
|
def kreis_auf(dl, dr):
"""Kreis gegen UZS, auf Grundlinie liegend"""
# (Startpunkt ist nicht definiert)
m = [(0.25, 0), (0.5, 0.25), (0.5, 0.5), # [Q1], [Q2], (Q3/R0)
(0.5, 0.75), (0.25, 1), (0, 1), # [R1], [R2], (R3/S0)
(-0.25, 1), (-0.5, 0.75), (-0.5, 0.5), # [S1], [S2], (S3/T0)
(-0.5, 0.25), (-0.25, 0), (0, 0)] # [T1], [T2], (T3/U0)
return m
|
def ras_to_lps(point):
"""
    Convert a point from RAS (right-anterior-superior) to LPS (left-posterior-superior) coordinates.
    :param point: (x, y, z) tuple in RAS space
    :return: (x, y, z) tuple in LPS space
    """
    surf_x, surf_y, surf_z = point
    point = (-surf_x, -surf_y, surf_z)  # flip the x and y axes to convert from RAS (VTK) to LPS (ITK)
return point
|
def get_chrom_size(chrom):
"""These sizes are based on the catalog for Homosapiens in stdpopsim,
but they're exactly the same as the one given by the VCF files,
so I use them for both real and simulated data"""
chrom = str(chrom)
length = {
"1": 249250621,
"2": 243199373,
"3": 198022430,
"4": 191154276,
"5": 180915260,
"6": 171115067,
"7": 159138663,
"8": 146364022,
"9": 141213431,
"10": 135534747,
"11": 135006516,
"12": 133851895,
"13": 115169878,
"14": 107349540,
"15": 102531392,
"16": 90354753,
"17": 81195210,
"18": 78077248,
"19": 59128983,
"20": 63025520,
"21": 48129895,
"22": 51304566,
}
return length[chrom]
|
def unroll(current_key, output_map, entry, keys_to_append=None):
"""
:param current_key:
:param output_map:
:param entry:
:param keys_to_append:
:return:
"""
def unroll_dict(current_key, output_map, entry, keys_to_append=None):
for key, value in entry.items():
unroll(".".join([current_key, key]).lstrip("."),
output_map,
value,
keys_to_append=keys_to_append)
def unroll_list(current_key, output_map, entry, keys_to_append=None):
for item in entry:
unroll(current_key,
output_map,
item,
keys_to_append=keys_to_append)
if isinstance(entry, dict):
unroll_dict(current_key, output_map, entry, keys_to_append=keys_to_append)
elif isinstance(entry, list):
unroll_list(current_key, output_map, entry, keys_to_append=keys_to_append)
else: # not iterable
if not keys_to_append or current_key in keys_to_append:
output_map.append((current_key, entry))
return output_map
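# A quick usage sketch for unroll(), assuming the definition above is in scope:
# nested dicts/lists are flattened into dotted-key pairs appended to output_map.
example_entry = {"user": {"name": "ada", "tags": ["x", "y"]}, "active": True}
pairs = unroll("", [], example_entry)
# pairs == [('user.name', 'ada'), ('user.tags', 'x'), ('user.tags', 'y'), ('active', True)]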
|
def median(li):
"""Median of elements - shaonutil.stats.median(list of numbers)"""
    li = sorted(li)  # sort a copy so the caller's list is not mutated
    n = len(li)
if n % 2 == 0:
median1 = li[n//2]
median2 = li[n//2 - 1]
median = (median1 + median2)/2
else:
median = li[n//2]
return median
|
def extract_csv_links(text):
"""Get a list of csv links from the download link response text"""
links = text.replace("\r", "").split("\n")
links.remove("")
return links
|
def dict_compare_keys(d1, d2, key_path=''):
"""
Compare two dicts recursively and see if dict1 has any keys that dict2 does not
Returns: list of key paths
"""
res = []
if not d1:
return res
if not isinstance(d1, dict):
return res
for k in d1:
if k not in d2:
missing_key_path = f'{key_path}->{k}'
res.append(missing_key_path)
else:
if isinstance(d1[k], dict):
key_path1 = f'{key_path}->{k}'
res1 = dict_compare_keys(d1[k], d2[k], key_path1)
res = res + res1
elif isinstance(d1[k], list):
key_path1 = f'{key_path}->{k}[0]'
dv1 = d1[k][0] if len(d1[k]) > 0 else None
dv2 = d2[k][0] if len(d2[k]) > 0 else None
res1 = dict_compare_keys(dv1, dv2, key_path1)
res = res + res1
return res
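# Example (assuming dict_compare_keys above is in scope): keys present in d1 but
# missing from d2 are reported as '->'-separated paths.
d1 = {'a': 1, 'b': {'c': 2, 'd': 3}}
d2 = {'a': 1, 'b': {'c': 2}}
print(dict_compare_keys(d1, d2))  # ['->b->d']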
|
def has_lines(string: str, count: int) -> bool:
"""Return True if `string` has at least `count` lines."""
# Benchmarks show this is significantly faster than using str.count("\n") or a for loop & break.
split = string.split("\n", count - 1)
# Make sure the last part isn't empty, which would happen if there was a final newline.
return bool(split[-1]) and len(split) == count
|
def find_weight(word):
"""
input is bit vector
output is Hamming weight
"""
i = 0
for letter in word:
if letter != 0:
i += 1
return i
|
def join_host_strings(user, host, port=None):
"""
Turns user/host/port strings into ``user@host:port`` combined string.
This function is not responsible for handling missing user/port strings;
for that, see the ``normalize`` function.
If ``port`` is omitted, the returned string will be of the form
``user@host``.
"""
port_string = ''
if port:
port_string = ":%s" % port
return "%s@%s%s" % (user, host, port_string)
|
def latest_no_overlap(jobs, n):
"""
Find the job before the nth job which does not
overlap with the nth job
Return -1 if no such job found
"""
for j in range(n - 1, -1, -1):
if jobs[j][1] <= jobs[n][0]:
            return j
    return -1
|
def npv_f(rate, cashflows):
"""Objective : estimate NPV value
rate : discount rate
cashflows: cashflows
e.g.
    >>> npv_f(0.1, [-100.0, 60.0, 60.0, 60.0])
49.211119459053322
"""
total = 0.0
for i, cashflow in enumerate(cashflows):
total += cashflow / (1 + rate)**i
return total
|
def unique_match_from_list(list):
"""
Check the list for a potential pattern match
@param list : a list of potential matching groups
    @rtype : return the last value that matched as a string, or an empty string if nothing matched
"""
result = ''
for item in list:
        if item is not None:
result = str(item)
return result
|
def _check_possible_tree_participation(num_participation: int,
min_separation: int, start: int,
end: int, steps: int) -> bool:
"""Check if participation is possible with `min_separation` in `steps`.
This function checks if it is possible for a sample to appear
`num_participation` in `steps`, assuming there are at least `min_separation`
nodes between the appearance of the same sample in the streaming data (leaf
nodes in tree aggregation). The first appearance of the sample is after
`start` steps, and the sample won't appear in the `end` steps after the given
`steps`.
Args:
num_participation: The number of times a sample will appear.
min_separation: The minimum number of nodes between two appearance of a
sample. If a sample appears in consecutive x, y steps in a streaming
setting, then `min_separation=y-x-1`.
start: The first appearance of the sample is after `start` steps.
end: The sample won't appear in the `end` steps after the given `steps`.
steps: Total number of steps (leaf nodes in tree aggregation).
Returns:
True if a sample can appear `num_participation` with given conditions.
"""
return start + (min_separation + 1) * num_participation <= steps + end
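# A small worked check of the inequality above (values chosen for illustration):
# with start=0, end=0, min_separation=1 and steps=8, a sample can appear at most
# 4 times, since each appearance consumes (min_separation + 1) = 2 leaf slots.
assert _check_possible_tree_participation(4, min_separation=1, start=0, end=0, steps=8)
assert not _check_possible_tree_participation(5, min_separation=1, start=0, end=0, steps=8)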
|
def find_empty_col(slots):
"""Find empty col in slots.
Args:
slots (dict): slots for answer
Returns:
int: available col for insert
6 --> no available col
others --> col index
"""
index = 0
for i in list(zip(*list(slots.values())[::])):
if sum([1 for j in list(i) if j]) == 0:
return index
index += 1
return 6
|
def k2f(k: float, r: int = 2) -> float:
"""Kelvin to Fahrenheit."""
return round(((k - 273.15) * (9 / 5)) + 32, r)
|
def utf8safe(s):
"""Remove characters invalid in UTF-8."""
return s.decode('utf-8', errors='replace').encode('utf-8')
|
def split_suffix(symbol):
"""
Splits a symbol such as `__gttf2@GCC_3.0` into a triple representing its
function name (__gttf2), version name (GCC_3.0), and version number (300).
The version number acts as a priority. Since earlier versions are more
accessible and are likely to be used more, the lower the number is, the higher
    its priority. A symbol that has a '@@' instead of '@' has been designated by
the linker as the default symbol, and is awarded a priority of -1.
"""
if '@' not in symbol:
return None
data = [i for i in filter(lambda s: s, symbol.split('@'))]
_, version = data[-1].split('_')
version = version.replace('.', '')
priority = -1 if '@@' in symbol else int(version + '0' *
(3 - len(version)))
return data[0], data[1], priority
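# Example usage of split_suffix (defined above): a versioned symbol splits into
# (name, version name, priority), where the priority is the version padded to three digits.
print(split_suffix('__gttf2@GCC_3.0'))   # ('__gttf2', 'GCC_3.0', 300)
print(split_suffix('__gttf2@@GCC_3.0'))  # ('__gttf2', 'GCC_3.0', -1), '@@' marks the default symbol
print(split_suffix('printf'))            # None, no version suffix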
|
def cleanse(s):
"""
Clean a string
:s: (str) the string to clean
"""
return s.strip('"').strip("'").strip('\n').strip(' ')
|
def rotate_right(x, y):
"""
Right rotates a list x by the number of steps specified
in y.
Examples
========
>>> from sympy.utilities.iterables import rotate_right
>>> a = [0, 1, 2]
>>> rotate_right(a, 1)
[2, 0, 1]
"""
if len(x) == 0:
return []
y = len(x) - y % len(x)
return x[y:] + x[:y]
|
def containsDuplicateD(nums):
"""
:type nums: List[int]
:rtype: bool
"""
nums.sort()
for i in range(1,len(nums)):
if nums[i] == nums[i-1]:
return True
return False
|
def remove_none_func_lines(lines):
"""Doc."""
new_lines = []
for line in lines:
if line.startswith('#') and '.h' in line:
new_lines.append(line)
elif line.endswith(');') or line.endswith('{') or \
line.endswith('}') or line.endswith('};'):
if '(' in line and ')' in line and '::' not in line:
new_lines.append(line)
return new_lines
|
def __newobj__(cls, *args):
"""A compatibility pickler function.
This function is not part of the public Atom api.
"""
return cls.__new__(cls, *args)
|
def strictly_decreasing(l):
"""Check whether list l has strictly deacreasing values"""
return all(x > y for x, y in zip(l, l[1:]))
|
def get_datatype(value) -> list:
"""
Determine the data type for an object and set the type if possible. A string such as "1.23"
will result in a type "float" and "2" will result in type "int".
Args:
value: object to be typed
Returns:
list [type, value] of data type as a code and object with that type
"""
if type(value) is str:
# string integer
if value.isdigit():
attributetype = "int"
value = int(value)
return [attributetype, value]
else:
try:
value = float(value)
attributetype = "float"
return [attributetype, value]
except ValueError:
pass
attributetype = "str"
elif type(value) is int:
attributetype = "int"
value = int(value)
elif type(value) is float:
attributetype = "float"
value = float(value)
# more work here
else:
attributetype = "str"
return [attributetype, value]
|
def logical_and(a, b, unsafe=False):
"""logical and without the bloat of numpy.logical_and
could be substituted with numpy.logical_and
!important: if unsafe is set to True, a and b are not checked. This improves speed but is risky.
expects integers [0,1] or bools
"""
    if not unsafe:
        # the old sum-based check let invalid pairs such as (2, 0) slip through
        if a not in (0, 1) or b not in (0, 1):
            raise Exception(
                "The parameters for logical_and have to be boolean or integer in range [0,1]. got a: " + str(a) + ", b: " + str(b))
return a & b
|
def _closest(e, l):
""" Finds the element and its index, closest to e, in l """
closest = None
closest_idx = None
min_diff = float('inf')
for i, j in enumerate(l):
diff = abs(j - e)
if diff < min_diff:
min_diff = abs(j - e)
closest = j
closest_idx = i
return closest, closest_idx
|
def parse_keys(keys):
"""
Parse keys for complex __getitem__ and __setitem__
Parameters
----------
keys : string | tuple
key or key and slice to extract
Returns
-------
key : string
key to extract
key_slice : slice | tuple
Slice or tuple of slices of key to extract
"""
if isinstance(keys, tuple):
key = keys[0]
key_slice = keys[1:]
else:
key = keys
key_slice = (slice(None),)
return key, key_slice
|
def binary_sum(S,start,stop):
"""Return the sum of the numbers in implicitslice S[start:stop]."""
if start >= stop:
return 0
elif start == stop - 1:
return S[start]
else:
mid = (start + stop) // 2
return binary_sum(S,start, mid) + binary_sum(S, mid, stop)
|
def distanceV(vector1, vector2):
""" Returns vertical distance between 2 vectors. """
return vector1[1] - vector2[1]
|
def IsTryJobResultAtRevisionValid(result, revision):
"""Determines whether a try job's results are sufficient to be used.
Args:
result (dict): A dict expected to be in the format
{
'report': {
'result': {
'revision': (dict)
}
}
}
revision (str): The revision to ensure is in the result dict.
"""
return result and revision in result.get('report', {}).get('result', {})
|
def get_number_of_soil_levels(description):
"""
    This function identifies the number of soil levels. If there is no soil level information then this will return "0".
    Else a default value of "5" is given.
    :param description: the ["model_component"]["atmos"]["description"] entry from a [dict] of model information returned
    by get_latest_models
    :return: [str] "0" or "5"
"""
if description == "none":
return "0"
else:
return "5"
|
def is_chinese_char(char):
"""Checks whether CP is the codepoint of a CJK character."""
cp = ord(char)
if ((0x4E00 <= cp <= 0x9FFF) or #
(0x3400 <= cp <= 0x4DBF) or #
(0x20000 <= cp <= 0x2A6DF) or #
(0x2A700 <= cp <= 0x2B73F) or #
(0x2B740 <= cp <= 0x2B81F) or #
(0x2B820 <= cp <= 0x2CEAF) or
(0xF900 <= cp <= 0xFAFF) or #
(0x2F800 <= cp <= 0x2FA1F)): #
return True
return False
|
def sequences_to_bytes(sequence, value_op=None):
"""
Transforms result of rle.get_sequences() into a byte array.
Encoding:
    - repeats start with a byte whose MSB is 1 and the lower bits are the repeat count minus one,
      followed by the value to repeat.
    - sequences start with a byte whose MSB is 0 and the lower bits are the sequence length minus one,
      followed by that number of bytes of the sequence.
"""
result = []
for seq in sequence:
if seq[0] == "R":
count = seq[1]
val = seq[2]
while count != 0:
cur_reps = min(128, count)
result.append((0x80 | (cur_reps - 1)).to_bytes(1, "little"))
store_val = val.to_bytes(1, "little")
if value_op:
store_val = value_op(store_val)
result.append(store_val)
count -= cur_reps
else:
part_sequence = seq[1]
seq_len = len(part_sequence)
seq_i = 0
while seq_len != 0:
cur_len = min(128, seq_len)
result.append((cur_len - 1).to_bytes(1, "little"))
for seq_val in part_sequence[seq_i : seq_i + cur_len]:
store_val = seq_val.to_bytes(1, "little")
if value_op:
store_val = value_op(store_val)
result.append(store_val)
seq_i += cur_len
seq_len -= cur_len
return b''.join(result)
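# Illustrative call (the exact output of rle.get_sequences() is not shown in this snippet;
# the tuples below simply follow what sequences_to_bytes() reads: ("R", count, value) for a
# repeat and (tag, [values...]) for a literal run).
encoded = sequences_to_bytes([("R", 3, 0xAA), ("S", [1, 2, 3])])
# repeat header 0x80 | (3 - 1) = 0x82, value 0xAA; literal header 3 - 1 = 0x02, then 01 02 03
assert encoded == b'\x82\xaa\x02\x01\x02\x03'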
|
def first_or_default(iterable):
"""
Gets the first or default (= None) value from an iterable
:param iterable: Iterable instance
:return: First item or None
"""
return next(iter(iterable or []), None)
|
def _vectorise(value):
""" Converts singletons to length 1 lists """
if not isinstance(value, list):
return [value]
return value
|
def get_patk(gt_label, label_list, K):
"""Calculate Precision@K
Args:
gt_label: the ground truth label for query
label_list: labels for the retrieved ranked-list
K: top K in the ranked-list for computing precision.
Set to len(label_list) if compute P@N
Returns:
P@K score
"""
patk = 0
for i, pred_label in enumerate(label_list[:K]):
if gt_label == pred_label:
patk += 1
patk /= K
return patk
|
def WtoBTUhr(qW):
"""
    Converts power from W to BTU/hr
    Conversion: 1 W = 3.412142 BTU/hr
    :param qW: power [W]
    :return qBTUhr: power [BTU/hr]
"""
qBTUhr = qW * 3.412142
return qBTUhr
|
def createCarPositionLog(car, currentTime):
"""
    :param car: dictionary with specific car parameters at time currentTime
:param currentTime: the time wanted to be logged
:return: dictionary to add to car logger
"""
return {'position': car['position'], 'time': currentTime, 'target': car['target'], 'targetId': car['targetId']}
|
def jsearch(json,sfld,search,rfld):
""" return a list of values from a column based on a search """
lst=[]
for j in json:
if j[sfld]==search or search == '*':
lst.append(j[rfld].strip())
return lst
|
def uniquify_sequence(sequence):
"""Uniqify sequence.
Makes sure items in the given sequence are unique, having the original
order preserved.
:param iterable sequence:
:return list:
"""
seen = set()
seen_add = seen.add
return [x for x in sequence if x not in seen and not seen_add(x)]
|
def midpoint(point_1, point_2):
"""Middle point between two points"""
x1, y1 = point_1
x2, y2 = point_2
return ((x1 + x2) / 2, (y1 + y2) / 2)
|
def convert_patch_url_to_download_url(patch_url, patch_id,
project_name, patchset_number,
file_name):
""" Convert gerrit patch URL to URl from where
we can download the patch
:param patch_url: URL in string format
:returns: download_patch_url in a string format
"""
if 'c/' in patch_url:
url_first_part = patch_url.split('c/')[0]
else:
raise Exception("Doesn't looks like a proper gerrit patch URL: "
"we split the url on 'c/'")
second_part_url = (
f"changes/{project_name}~{patch_id}/revisions/"
f"{patchset_number}/files/{file_name}/download")
return url_first_part + second_part_url
|
def equivalent(list1, list2):
    """Ensures that two lists are equivalent, i.e., contain the same items with the same counts."""
    from collections import Counter
    if len(list1) != len(list2):
        return False
    # Counter compares multiplicities too; a plain set check would treat [1, 1, 2] and [1, 2, 2] as equal
    return Counter(list1) == Counter(list2)
|
def str2intlist(s, repeats_if_single=None):
"""Parse a config's "1,2,3"-style string into a list of ints.
Args:
s: The string to be parsed, or possibly already an int.
repeats_if_single: If s is already an int or is a single element list,
repeat it this many times to create the list.
Returns:
A list of integers based on `s`.
"""
if isinstance(s, int):
result = [s]
else:
result = [int(i.strip()) if i != "None" else None
for i in s.split(",")]
if repeats_if_single is not None and len(result) == 1:
result *= repeats_if_single
return result
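# Example usage of str2intlist (defined above):
assert str2intlist("1,2,3") == [1, 2, 3]
assert str2intlist(7, repeats_if_single=3) == [7, 7, 7]
assert str2intlist("4", repeats_if_single=2) == [4, 4]
assert str2intlist("None,5") == [None, 5]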
|
def get_continious(objects_l):
    """Flatten the list of lists into one flat list, so images and questions can be tracked per timestep."""
    fixed_objs = []
    for obj in objects_l:
        if obj:
            for _obj in obj:
                try:
                    if _obj.any():  # numpy arrays (images)
                        fixed_objs.append(_obj)
                except AttributeError:  # plain objects without .any(), e.g. questions
                    if _obj:
                        fixed_objs.append(_obj)
    return fixed_objs
|
def clamp(value, minimum=0, maximum=None):
"""Set the value to within a fixed range."""
    if maximum is not None:  # allow 0 as a valid maximum
value = min(maximum, value)
return max(value, minimum)
|
def get_deep(instance, path):
"""
Do NOT use this helper in your code.
It's extremely primitive and wouldn't work properly with most Mappings.
"""
path = path.split(".")
for path_segment in path:
if path_segment not in instance:
return None
instance = instance[path_segment]
# instance is empty ex: tickets: {}
if not instance:
return {}
return instance
|
def get_nature_avsent(inputdict):
""" Calculate nature of average sentiment score per user
Args:
inputdict (:obj:`dict`): dict of user and average sentiment compound score
Returns:
:obj:`outputdict`: dict of user and nature of average sentiment compound score
"""
outputdict = {}
for k, v in inputdict.items():
if v >= 0.05:
outputdict[k] = 'positive'
elif v <= -0.05:
outputdict[k] = 'negative'
else:
outputdict[k] = 'neutral'
return outputdict
|
def headers_to_dict(headers):
"""
Converts a sequence of (name, value) tuples into a dict where if
a given name occurs more than once its value in the dict will be
a list of values.
"""
hdrs = {}
for h, v in headers:
h = h.lower()
if h in hdrs:
if isinstance(hdrs[h], list):
hdrs[h].append(v)
else:
hdrs[h] = [hdrs[h], v]
else:
hdrs[h] = v
return hdrs
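# Example (headers_to_dict defined above): repeated header names collapse into a list.
hdrs = headers_to_dict([
    ("Content-Type", "text/html"),
    ("Set-Cookie", "a=1"),
    ("Set-Cookie", "b=2"),
])
assert hdrs == {"content-type": "text/html", "set-cookie": ["a=1", "b=2"]}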
|
def clsInd2Name(lbls, ind):
"""
Converts a cls ind to string name
"""
if ind>=0 and ind<len(lbls):
return lbls[ind]
else:
raise ValueError('unknown class')
|
def skip_whitespace(s, pos):
""" eats white space
"""
while s[pos] == " ":
pos += 1
return pos
|
def build_plot_values(gdpinfo, gdpdata):
"""
Inputs:
gdpinfo - GDP data information dictionary
gdpdata - A single country's GDP stored in a dictionary whose
keys are strings indicating a year and whose values
are strings indicating the country's corresponding GDP
for that year.
Output:
Returns a list of tuples of the form (year, GDP) for the years
between "min_year" and "max_year", inclusive, from gdpinfo that
exist in gdpdata. The year will be an integer and the GDP will
be a float.
"""
table = []
gdpdat_v2 = {}
for k, v in gdpdata.items():
try:
gdpdat_v2[int(k)] = float(v)
except ValueError:
pass
min_max = [year for year in range(gdpinfo['min_year'], gdpinfo['max_year'] + 1)]
for key in min_max:
if key in gdpdat_v2:
table.append((key, gdpdat_v2[key]))
return table
|
def contain_all_elements(lst, other_lst):
""" checking whether the second contains a list of all the elements of the first
:param lst: first list
:param other_lst: second list
:return: check result
"""
diff = set(other_lst)
diff -= frozenset(lst)
return not len(diff)
|
def remove_suffix(s, suffix):
""" Removes string suffix from end of string s. """
return s[:len(s)-len(suffix)] if s.endswith(suffix) else s
|
def gassmann_update_rho(Rho_sat, Rho_f1, Rho_f2):
"""
Update density due to change in pore fluids.
"""
Rho_sat2 = Rho_sat + (Rho_f2 - Rho_f1)
return Rho_sat2
|
def default_if_none(value, default=None):
"""Return default if a value is None."""
if value is None:
return default
return value
|
def is_triangular(k):
"""
k, a positive integer
returns True if k is triangular and False if not
"""
num = 1
triangularNo = 0
while triangularNo < k:
triangularNo += num
num += 1
if triangularNo == k:
return True
return False
|
def factorial(n):
""" berechnet n! (n-Fakultaet) fuer gegebenes n
"""
fak = 1
for k in range(2,n+1):
fak = fak * k
    return fak
|
def atom(gplus_id, page_id=None):
"""Return an Atom-format feed for the given G+ id, possibly from cache."""
if len(gplus_id) != 21:
return 'Invalid G+ user ID (must be exactly 21 digits).', 404 # Not Found
if page_id and len(page_id) != 21:
return 'Invalid G+ page ID (must be exactly 21 digits).', 404 # Not Found
# Google+ is no longer publicly available for consumers.
return 'Google+ was sunset for consumer users in April 2019. This feed is no longer available.', 410 # Gone
##### CODE BELOW FOR HISTORICAL PURPOSES ONLY #####
cache_key = ATOM_CACHE_KEY_TEMPLATE % gplus_id
if page_id:
cache_key = '%s-%s' % (cache_key, page_id)
response = Cache.get(cache_key) # A frozen Response object
if response is None:
try:
response = generate_atom(gplus_id, page_id)
except oauth2.UnavailableException as e:
app.logger.info("Feed request failed - %r", e)
flask.abort(e.status)
response.add_etag()
response.freeze()
Cache.set(cache_key, response, time=Config.getint('cache', 'stream-expire'))
return response.make_conditional(flask.request)
|
def reset_moved_bools(inp):
"""Reset the third value in our character tuples, which tracks whether they've moved in a round"""
for rowI,row in enumerate(inp):
for colI,col in enumerate(row):
if col[0] in ["G","E"]:
char_tup = (col[0],col[1],False)
inp[rowI][colI] = char_tup
return inp
|
def pick_wm_class_2(tissue_class_files):
"""
Returns the white matter tissue class file from the list of segmented tissue class files
Parameters
----------
tissue_class_files : list (string)
List of tissue class files
Returns
-------
file : string
Path to segment_seg_2.nii.gz is returned
"""
    if isinstance(tissue_class_files, str):
        # a single path was passed in; wrap it so the loop iterates over files, not characters
        tissue_class_files = [tissue_class_files]
    for filename in tissue_class_files:
        if filename.endswith("seg_2.nii.gz"):
            return filename
    return None
|
def lower(xs):
"""returns lowercase for sequence of strings"""
# """performs lowercasing on string or sequence of strings"""
# if isinstance(xs, str):
# return xs.lower()
return [x.lower() for x in xs]
|
def dyn_mean(val, prev_mean, n):
"""Dynamic mean: computes the mean based on a previous mean plus a new value. Useful when mean is built
incrementally, it saves the usage of huge arrays.
Keyword arguments:
val -- new val to add to the mean
prev_mean -- previous mean value
n -- number of total elements in the mean including the new val
"""
if n < 1:
raise ValueError("n < 1, mean only defined for a positive number of elements")
if n == 1:
return val
return (prev_mean*(n-1)+val) / n
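# Sanity-check sketch for dyn_mean (defined above): feeding values one at a time
# reproduces the ordinary arithmetic mean without storing the whole array.
values = [2.0, 4.0, 9.0, 1.0]
running = 0.0
for n, val in enumerate(values, start=1):
    running = dyn_mean(val, running, n)
assert abs(running - sum(values) / len(values)) < 1e-12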
|
def calculate_checksum(data: bytes) -> int:
"""
Checksum calculation, as per MH-Z19B datasheet, section C. Calibrate and calculate.
``data`` are either command or response, including the original/expected checksum as last byte.
Link: https://www.winsen-sensor.com/d/files/infrared-gas-sensor/mh-z19b-co2-ver1_0.pdf
"""
# -1 to skip the checksum at the end
s = sum(data[1:-1]) & 0xFF
inv = 0xFF - s
return (inv + 1) & 0xFF
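# Example: the MH-Z19B "read CO2 concentration" command from the datasheet is
# FF 01 86 00 00 00 00 00 79, whose last byte 0x79 is the checksum this function recomputes.
cmd = bytes([0xFF, 0x01, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79])
assert calculate_checksum(cmd) == cmd[-1] == 0x79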
|
def lmap(f,l):
""" given a function and a list
    map the function over the list
    and return the mapped list
"""
return list(map(f,l))
|
def spacer(value) -> str:
"""Adds leading space if value is not empty"""
return ' ' + str(value) if value is not None and value else ''
|
def to_sec(v):
"""
Convert millisecond, microsecond or nanosecond to second.
Args:
v: timestamp in int, long, float or string.
It can be a timestamp in second, millisecond(10e-3),
microsecond(10e-6) or nanosecond(10e-9).
Returns:
int: timestamp in second.
Raises:
ValueError: If `v` is not a valid timestamp.
"""
v = float(str(v))
    if v < 0:  # after float() above, only negative timestamps are invalid here
raise ValueError('invalid time to convert to second: {v}'.format(v=v))
l = len(str(int(v)))
if l == 10:
return int(v)
elif l == 13:
return int(v / 1000)
elif l == 16:
return int(v / (1000**2))
elif l == 19:
return int(v / (1000**3))
else:
raise ValueError(
'invalid time length, not 10, 13, 16 or 19: {v}'.format(v=v))
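# Example usage of to_sec (defined above): the digit count of the integer part decides the unit.
assert to_sec(1617181920) == 1617181920            # already seconds (10 digits)
assert to_sec("1617181920123") == 1617181920       # milliseconds (13 digits)
assert to_sec(1617181920123456) == 1617181920      # microseconds (16 digits)
assert to_sec(1617181920123456789) == 1617181920   # nanoseconds (19 digits)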
|
def vowels_set(word):
"""Vowels set:
Given a string representing a word, write a set comprehension that produces a set of all the vowels in that word.
    >>> sorted(vowels_set('mathematics'))
    ['a', 'e', 'i']
    """
    return {w for w in word if w in 'aeiou'}
|
def cross(p1, p2):
""" Cross product of two vectors """
x = p1[1] * p2[2] - p1[2] * p2[1]
y = p1[2] * p2[0] - p1[0] * p2[2]
z = p1[0] * p2[1] - p1[1] * p2[0]
return x, y, z
|
def format_translation_changes(old, new):
""" Return a comment stating what is different between the old and new
function prototype parts.
"""
changed = False
result = ''
# normalize C API attributes
oldargs = [x.replace('struct _', '') for x in old['args']]
oldretval = old['retval'].replace('struct _', '')
newargs = [x.replace('struct _', '') for x in new['args']]
newretval = new['retval'].replace('struct _', '')
# check if the prototype has changed
oldset = set(oldargs)
newset = set(newargs)
if len(oldset.symmetric_difference(newset)) > 0:
changed = True
result += '\n // WARNING - CHANGED ATTRIBUTES'
# in the implementation set only
oldonly = oldset.difference(newset)
for arg in oldonly:
result += '\n // REMOVED: ' + arg
# in the current set only
newonly = newset.difference(oldset)
for arg in newonly:
result += '\n // ADDED: ' + arg
# check if the return value has changed
if oldretval != newretval:
changed = True
result += '\n // WARNING - CHANGED RETURN VALUE'+ \
'\n // WAS: '+old['retval']+ \
'\n // NOW: '+new['retval']
if changed:
result += '\n #pragma message("Warning: "__FILE__": '+new['name']+ \
' prototype has changed")\n'
return result
|
def is_overlap_1d(start1: float, end1: float, start2: float, end2: float) -> bool:
"""Return whether two 1D intervals overlaps"""
assert start1 <= end1
assert start2 <= end2
return not (start1 > end2 or end1 < start2)
|
def decimalToBinaryv2(decimal):
"""assumes decimal is an int, representing a number in base 10
returns an int, representing the same number in base 2
"""
digits = []
reminder = decimal
while reminder > 0:
digits.append(str(reminder % 2))
reminder = reminder // 2
return "".join(digits[::-1])
|
def array_contains_tracking_data(array_to_check):
"""
Returns True if the array contains some tracking data.
"""
    return (array_to_check is not None
            and len(array_to_check) > 0
            and all(item is not None for item in array_to_check))
|
def magnitude2cps(magnitude, magnitude_zero_point):
"""
converts an apparent magnitude to counts per second
The zero point of an instrument, by definition, is the magnitude of an object that produces one count
(or data number, DN) per second. The magnitude of an arbitrary object producing DN counts in an observation of
length EXPTIME is therefore:
m = -2.5 x log10(DN / EXPTIME) + ZEROPOINT
:param magnitude: astronomical magnitude
:param magnitude_zero_point: magnitude zero point (astronomical magnitude with 1 count per second)
:return: counts per second of astronomical object
"""
delta_M = magnitude - magnitude_zero_point
counts = 10**(-delta_M/2.5)
return counts
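# Worked example for magnitude2cps (defined above): an object exactly at the zero point
# gives 1 count per second, and 2.5 magnitudes fainter gives a factor 10 fewer counts.
assert magnitude2cps(25.0, 25.0) == 1.0
assert abs(magnitude2cps(27.5, 25.0) - 0.1) < 1e-12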
|
def flatten(a):
"""
Flattens list of lists with arbitrary depth into a 1-D list.
Parameters
----------
    a : list
list to be flattened
Returns
-------
flat_l : list
flattened list
"""
flat_l = []
for el in a:
if isinstance(el, list):
flat_l.extend(flatten(el))
else:
flat_l.append(el)
return flat_l
|
def cmdline_remove_npools(cmdline):
"""Remove all options related to npools in the `settings.cmdline` input.
The cmdline setting is a list of strings that typically looks something like:
cmdline = ['-nk', '4', '-ntg', '8']
This function will remove all occurrences of '-nk', '-npool', '-npools', which are
all synonymous flags, and the directly following element, which should be the integer
:param cmdline: the cmdline setting which is a list of string directives
:return: the new cmdline setting
"""
    flags = ('-npools', '-npool', '-nk')
    return [
        e for i, e in enumerate(cmdline)
        if e not in flags and (i == 0 or cmdline[i - 1] not in flags)
    ]
|
def get_intersections(path1, path2):
"""returns a list of the intersection points between the two paths
Args:
path1: one path (list of tuples with consecutive integer x, y coords)
path2: second path (see above)
Returns:
a list of all overlapping tuples from the two paths
"""
intersects = []
for pt in path1:
if pt in path2 and pt != (0,0):
intersects.append(pt)
return intersects
|
def err(error_dictionary):
"""
Formats the error response as wanted by the Flask app
:param error_dictionary: name of the error dictionary
:return: tuple of error message and error number
"""
return {'error': error_dictionary['body']}, error_dictionary['number']
|
def isintlike(value):
"""
Checks if an object can be converted to an integer.
@param value: {object}
@return {bool}
"""
try:
int(value)
return True
except (TypeError, ValueError):
return False
|
def comparison_bool(str1, str2, reverse=False):
"""idk why I need to write a tag for this, it returns a bool"""
if reverse:
return str1 != str2
return str1 == str2
|
def read_lines(file_name):
"""
Read all the lines in a given file
Shortcut to avoid clumping up of many with-blocks
    Swallows read errors to return a blank list when the file is missing or unreadable
"""
lines = []
try:
with open(file_name, "r") as f:
lines = f.readlines()
except Exception:
pass
return lines
|
def convert_to_float(var):
"""
    Tries to convert a number to float.
:param var
:returns the value of the float or None if it fails
"""
try:
return float(var)
    except (TypeError, ValueError):  # e.g. None or a non-numeric string
return None
|
def rv12(mjd, hist=[], **kwargs):
"""cadence requirements for 12-visit RV plates
Request: 12 total, ~3 per month, ideally within 1 week
mjd: float, or int should be ok
hist: list, list of previous MJDs
"""
# covid 4 plug per night solution
return True
if len(hist) == 0:
return True
if len(hist) > 12:
return False
deltas = mjd - np.array(hist)
this_month = deltas[np.where(deltas < 15)]
if len(deltas) > 3:
return False
# would allow for observations more than a week after previous
return np.min(deltas) > 1
|
def is_not_logged_in(session):
"""Returns wether the user is logged in or not"""
return 'logged_in' not in session or session['logged_in'] is None
|
def jaccard_similarity(x, y):
""" Returns the Jaccard Similarity Coefficient (Jarccard Index) between two
lists.
From http://en.wikipedia.org/wiki/Jaccard_index: The Jaccard
    coefficient measures similarity between finite sample sets, and is defined as
the size of the intersection divided by the size of the union of the sample
sets.
"""
intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
union_cardinality = len(set.union(*[set(x), set(y)]))
return intersection_cardinality / float(union_cardinality)
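# Example (jaccard_similarity defined above): 2 shared items out of 4 distinct items overall.
assert jaccard_similarity([1, 2, 3], [2, 3, 4]) == 0.5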
|
def _get_connect_string(backend,
user="openstack_citest",
passwd="openstack_citest",
database="openstack_citest"):
"""
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% locals())
|
def gen_unknown_filter(skip_unknown = False):
"""
Generation of a SPARQL Filter clause to exclude unknown edge type
Produces something like
FILTER( ?controlType = "ACTIVATION"^^xsd:string || ?controlType = "INHIBITION"^^xsd:string)
"""
    if skip_unknown:
return 'FILTER( ?controlType = "ACTIVATION"^^xsd:string || ?controlType = "INHIBITION"^^xsd:string)'
else:
return ''
|
def remove_crud(string):
"""Return string without useless information.
    Return string with trailing zeros after a decimal place, trailing
    decimal points and spaces, and leading zeros and spaces removed.
"""
if "." in string:
string = string.rstrip('0')
string = string.lstrip('0 ')
string = string.rstrip(' .')
return string
|
def osc(last, type):
"""
R=[AG], Y=[CT], K=[GT], M=[AC], S=[GC], W=[AT], and the four-fold
degenerate character N=[ATCG]
"""
if type == "k":
if last == "g":
return("t")
elif last == "t":
return("g")
return "g"
elif type == "m":
if last == "a":
return("c")
elif last == "c":
return("a")
return "a"
elif type == "n":
if last == "a":
return("c")
elif last == "c":
return("g")
elif last == "g":
return("t")
elif last == "t":
return("a")
return "a"
elif type == "r":
if last == "a":
return("g")
elif last == "g":
return("a")
return "a"
elif type == "s":
if last == "c":
return("g")
elif last == "g":
return("c")
return "g"
elif type == "w":
if last == "a":
return("t")
elif last == "t":
return("a")
return "a"
elif type == "y":
if last == "c":
return("t")
elif last == "t":
return("c")
return"c"
return type
|
def find_path(src, target, passable):
"""
    >>> path = find_path((1, 1), (3, 4), lambda p, dst: True)
>>> len(path)
6
    >>> find_path((1, 1), (3, 1), lambda p, dst: True)
[(1, 1), (2, 1), (3, 1)]
:rtype: list or None if not found
"""
paths = [[src]]
visited = {src}
# search until target is reached or all possible paths are discovered
while len(paths) > 0:
current_paths = paths
paths = []
for path in current_paths:
for step in ((0, 1), (1, 0), (-1, 0), (0, -1)):
p = path[-1]
dst = p[0] + step[0], p[1] + step[1]
if passable(p, dst) and dst not in visited:
new_path = path.copy()
new_path.append(dst)
paths.append(new_path)
visited.add(dst)
if dst == target:
return new_path
return None
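# Example usage of find_path (defined above) on a small grid where one cell is blocked;
# `blocked` and `passable` below are illustrative helpers, not part of the original code.
blocked = {(2, 1)}
def passable(src_cell, dst_cell):
    x, y = dst_cell
    return 0 <= x <= 3 and 0 <= y <= 3 and dst_cell not in blocked
print(find_path((1, 1), (3, 1), passable))
# one shortest route around the blocked cell (2, 1), e.g. [(1, 1), (1, 2), (2, 2), (3, 2), (3, 1)]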
|
def info_item_cmp(x,y):
"""
Comparison function for items (k,v) in the dictionary returned
    by the frame_drop_corrector.info_all() function.
"""
x_num = int(x[0].split('_')[1])
y_num = int(y[0].split('_')[1])
if x_num > y_num:
return 1
elif x_num < y_num:
return -1
else:
return 0
|
def _locate_qubit_in_address(qubit_map, address):
"""
Returns the name of a qubit in a pulse address.
"""
if address is None:
raise ValueError("Could not resolve address '{}'".format(address))
for sub_addr in address.split(":"):
if sub_addr in qubit_map:
return sub_addr
raise ValueError("Could not resolve address '{}'".format(address))
|
def _update_imports(new_imports: list, old_imports: list) -> list:
"""Update imports.
Compare the old imports against the new ones and returns the old imports
    with the new imports that did not exist previously.
:param new_imports: All new imports.
:param old_imports: All old imports.
:return: A list of imports as strings.
"""
not_in_imports = []
for i in new_imports:
already = False
for j in old_imports:
if i == j:
already = True
if not already:
not_in_imports.append(i)
# Merge the minimum imports
imports = old_imports + not_in_imports
return imports
|
def cubic_hermite_spline(x, p0, m0, p1, m1):
"""The third order polynomial p(x) with p(0)=p0, p'(0)=m0, p(1)=p1, p'(1)=m1."""
a3 = m0 + m1 + p0 + p0 - p1 - p1
a2 = p1 - a3 - m0 - p0
return p0 + x * (m0 + x * (a2 + x * a3))
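# Quick numerical check of cubic_hermite_spline (defined above): the polynomial hits the
# prescribed endpoint values, and a forward difference approximates the starting slope.
p0, m0, p1, m1 = 1.0, 2.0, 3.0, -1.0
assert cubic_hermite_spline(0.0, p0, m0, p1, m1) == p0
assert cubic_hermite_spline(1.0, p0, m0, p1, m1) == p1
h = 1e-6
d0 = (cubic_hermite_spline(h, p0, m0, p1, m1) - cubic_hermite_spline(0.0, p0, m0, p1, m1)) / h
assert abs(d0 - m0) < 1e-4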
|
def subsequence(first_sequence, second_sequence):
"""
    Returns whether the first sequence appears as a contiguous subsequence (sublist) of the second sequence.
Inputs:
first_sequence (list): A sequence.
second_sequence (list): Another sequence.
Returns:
Boolean indicating whether first_sequence is a subsequence of second_sequence.
"""
for startidx in range(len(second_sequence) - len(first_sequence) + 1):
if second_sequence[startidx:startidx + len(first_sequence)] == first_sequence:
return True
return False
|
def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e):
"""Correct for predecessors.
Applies Algorithm 2 of [1]_.
    Input parameters are ordered by the computed OPTICS ordering.
.. [1] Schubert, Erich, Michael Gertz.
"Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
"""
while s < e:
if reachability_plot[s] > reachability_plot[e]:
return s, e
p_e = ordering[predecessor_plot[e]]
for i in range(s, e):
if p_e == ordering[i]:
return s, e
e -= 1
return None, None
|
def parse_number(string):
"""Takes a string and attempts to convert it to a number.
This function simply removes commas and underscores from the string, and
then tries to convert it to an int or a float.
>>> parse_number('10')
10
>>> parse_number('1.4')
1.4
>>> parse_number('12,345')
12345
>>> parse_number('987_654_321')
987654321
>>> parse_number('0xff')
255
>>> parse_number('0XFE')
254
>>> parse_number({})
Traceback (most recent call last):
...
TypeError: parse_number() argument must be a string
>>> parse_number('1.2.3')
Traceback (most recent call last):
...
Exception: invalid number literal: '1.2.3'
"""
if not isinstance(string, str):
raise TypeError('parse_number() argument must be a string')
string = string.replace(',', '').replace('_', '')
try:
return int(string)
except Exception:
pass
try:
return float(string)
except Exception:
pass
try:
if string.startswith(('0x', '0X')):
return int(string, 16)
except Exception:
pass
raise Exception(f'invalid number literal: {repr(string)}')
|