content stringlengths 42 6.51k |
|---|
def fix_hyphen_commands(raw_cli_arguments):
    """Rename hyphenated CLI option keys to their underscore module names.

    Mutates *raw_cli_arguments* in place and returns it.
    """
    hyphenated = ('gen-sample', 'run-aws', 'run-python', 'run-stacker')
    for option in hyphenated:
        raw_cli_arguments[option.replace('-', '_')] = raw_cli_arguments.pop(option)
    return raw_cli_arguments
def DecomposeStandardFieldName(standard_field_name):
    """Break a standard field name into its capitalized component concepts.

    Underscore-separated parts that are purely numeric are dropped.
    Example: 'run_command_1' -> ['Run', 'Command']

    Args:
      standard_field_name: a standard field name defined by Carson.

    Returns:
      list: capitalized, non-numeric parts composing the field name.
    """
    return [part.capitalize()
            for part in standard_field_name.split('_')
            if not part.isdigit()]
def human_readable_time(time_ps: int) -> str:
    """Transform *time_ps* into a human readable string.

    The value is promoted through femto/pico/nano/micro/milli-second
    units until it drops below 1000 in some unit.

    Args:
        time_ps (int): Time in picoseconds.

    Returns:
        str: Human readable time, e.g. '1.5 nano seconds'.  Values too
        large for the supported units fall back to picoseconds.
    """
    time_units = ['femto seconds', 'pico seconds', 'nano seconds',
                  'micro seconds', 'mili seconds']
    t = time_ps * 1000  # convert to femtoseconds, the smallest unit
    for tu in time_units:
        if t < 1000:
            return str(t) + ' ' + tu
        t /= 1000
    # Bug fix: the fallback previously returned a bare number with no unit.
    return str(time_ps) + ' pico seconds'
def _raw_misc_to_dict(raw):
"""
Converts text-form of misc to a dictionary.
misc will be stored in the form ["(key1, val1)", "(key2, val2)",...]
"""
ret = {}
for elem in raw:
key, _, val = elem.partition(',')
key = key[1:].strip()
val = val[:-1].strip()
ret[key] = val
return ret |
def _reducemax_pattern(kernel_info):
"""Check ReduceMax and return reduce_size when true."""
for op in kernel_info['op_desc']:
if op['name'] == 'ReduceMax':
input_shape = op['input_desc'][0][0]['shape']
batch_size = input_shape[0]
reduce_size = batch_size * input_shape[1] * input_shape[2]
return True, reduce_size
return False, 0 |
def files_belong_to_same_module(filename_cpp, filename_h):
    """Check if these two filenames belong to the same module.

    The concept of a 'module' here is as follows:
    foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to
    the same 'module' if they are in the same directory.
    some/path/public/xyzzy and some/path/internal/xyzzy are also considered
    to belong to the same module here.

    If the filename_cpp contains a longer path than the filename_h (e.g.
    '/absolute/path/to/base/sysinfo.cpp' including 'base/sysinfo.h'), the
    extra prefix is returned so the caller can open the header robustly —
    we don't have access to the real include paths here.

    Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
    according to this implementation, so some false positives occur; this
    should be sufficiently rare in practice.

    Args:
      filename_cpp: path of the .cpp file.
      filename_h: path of the header file.

    Returns:
      Tuple (bool, str): whether the files share a module, and the extra
      path prefix needed to open the header.
    """
    if not filename_cpp.endswith('.cpp'):
        return (False, '')
    filename_cpp = filename_cpp[:-len('.cpp')]
    for test_suffix in ('_unittest', '_test'):
        if filename_cpp.endswith(test_suffix):
            filename_cpp = filename_cpp[:-len(test_suffix)]
            break
    filename_cpp = filename_cpp.replace('/public/', '/')
    filename_cpp = filename_cpp.replace('/internal/', '/')

    if not filename_h.endswith('.h'):
        return (False, '')
    filename_h = filename_h[:-len('.h')]
    if filename_h.endswith('-inl'):
        filename_h = filename_h[:-len('-inl')]
    filename_h = filename_h.replace('/public/', '/')
    filename_h = filename_h.replace('/internal/', '/')

    # Bug fix: this local was previously named `files_belong_to_same_module`,
    # shadowing the function itself inside its own body.
    same_module = filename_cpp.endswith(filename_h)
    common_path = filename_cpp[:-len(filename_h)] if same_module else ''
    return same_module, common_path
def combine_labels(left, right):
    """Merge input/output label tuples for the join operator ``&``.

    When no label appears on both sides, the tuples are concatenated
    unchanged.  When *any* label conflicts, a '0' suffix is appended to
    every left label and a '1' suffix to every right label so there is
    no ambiguity.
    """
    if not set(left).isdisjoint(right):
        left = tuple(label + '0' for label in left)
        right = tuple(label + '1' for label in right)
    return left + right
def xor(a, b):
    """Replicate the XOR operation, returning "1" or "0" as a string."""
    return "1" if a != b else "0"
def findPointMax(listInd):
    """Return the first element of the right-most non-empty entry.

    Entries equal to ``[]`` or ``None`` are skipped, scanning from the
    end of the list.

    In : listInd : list of index lists (entries may be empty or None)
    Out : first element of the last non-empty entry, or None when every
          entry is empty/None.

    Bug fixes relative to the original:
      * the ``i >= 0`` guard was evaluated after indexing, so an all-empty
        list wrapped around via ``listInd[-1]``;
      * a valid entry at index 0 was incorrectly reported as None.
    """
    for entry in reversed(listInd):
        if entry:  # skips [] and None
            return entry[0]
    return None
def is_apple_item(item_pl):
    """Return True if the install/removal item appears to come from Apple.

    Apple items are detected by a 'com.apple.' prefix on any receipt
    'packageid' or any installs 'CFBundleIdentifier'.  If we handle Apple
    items in a check/install cycle, Apple Software Update checks are
    skipped so the two don't stomp on each other.
    """
    receipts_are_apple = any(
        receipt.get('packageid', '').startswith('com.apple.')
        for receipt in item_pl.get('receipts', []))
    installs_are_apple = any(
        install.get('CFBundleIdentifier', '').startswith('com.apple.')
        for install in item_pl.get('installs', []))
    return receipts_are_apple or installs_are_apple
def max_value(d):
    """ Takes a dictionary d and returns the maximum element value and its
    corresponding key. Raises a TypeError if any of the values are not
    comparable to each other.
    >>> max_value({'a': 12, 3: 45})
    (3, 45)
    >>> max_value({}) is None
    True
    >>> max_value({33: 34, -1: 600, 'xyz': 2000.4})
    ('xyz', 2000.4)
    >>> max_value({1: 'abc', 2: 'xyz', 3: 'ghijkl'})
    (2, 'xyz')
    >>> max_value({1:'a', 2:3}) # doctest:+ELLIPSIS
    Traceback (most recent call last):
    ...
    TypeError:...
    """
    if not d:
        return None
    # max() over the values also raises TypeError for incomparable values.
    best = max(d.values())
    winner = ''
    # Scan in insertion order so ties resolve to the last matching key,
    # exactly as the original `>=` loop did.
    for key, val in d.items():
        if val >= best:
            winner, best = key, val
    return (winner, best)
def load_sets(path='../data/processed/', val=False):
    """Load the locally saved train/validation/test numpy arrays.

    Parameters
    ----------
    path : str
        Folder where the arrays were saved (default: '../data/processed/')
    val : bool
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    tuple
        (X_train, y_train, X_val, y_val, X_test, y_test); each element is
        a numpy array, or None when the corresponding .npy file does not
        exist.
    """
    import numpy as np
    import os.path

    def _load(name):
        # Helper: load '<path><name>.npy' if present, else None.
        # (Replaces six copy-pasted load lines.)
        file_path = f'{path}{name}.npy'
        return np.load(file_path) if os.path.isfile(file_path) else None

    return (_load('X_train'), _load('y_train'), _load('X_val'),
            _load('y_val'), _load('X_test'), _load('y_test'))
def mean(data):
    """Return the sample arithmetic mean of *data*.

    Arguments:
        data (list):
            A list of numbers.

    Returns:
        float: Mean of the provided data.

    Raises:
        ValueError: if *data* is empty.
    """
    if not data:
        raise ValueError('mean requires at least one data point')
    return sum(data) / float(len(data))
def indexesof(l, fn, opposite=0):
    """indexesof(l, fn) -> list of indexes

    Return indexes i where fn(l[i]) is truthy, or — when *opposite* is
    truthy — where fn(l[i]) is falsy.
    """
    wanted = not opposite
    return [i for i, item in enumerate(l) if bool(fn(item)) == wanted]
def stueckweise_hermite(xx, x_list, f_list, abl_list, abl=0):
    """Piecewise cubic Hermite interpolation.

    For x in [xl, xr] := [x_list[i-1], x_list[i]] the cubic p satisfies
        p(xl)  = f_list[i-1],   p(xr)  = f_list[i],
        p'(xl) = abl_list[i-1], p'(xr) = abl_list[i].
    Points left of x_list[0] use the first interval's polynomial; points
    right of x_list[-1] use the last interval's.

    xx       -- evaluation point
    x_list   -- ascending list of nodes
    f_list   -- function values at the nodes
    abl_list -- derivatives at the nodes (same length as x_list)
    abl      -- 0 to evaluate p, 1 for p', 2 for p''

    Returns p(xx), p'(xx) or p''(xx).
    """
    last = len(x_list) - 1
    # locate the sub-interval containing xx
    idx = 1
    while xx > x_list[idx] and idx < last:
        idx += 1
    # Hermite data for this interval
    f_left, f_right = f_list[idx - 1], f_list[idx]
    d_left, d_right = abl_list[idx - 1], abl_list[idx]
    hh = x_list[idx] - x_list[idx - 1]
    # map onto the reference interval t in [0, 1]
    tt = (xx - x_list[idx - 1]) / hh
    # coefficients of p(t) = c0 + c1*t + c2*t**2 + c3*t**3
    c0 = f_left
    c1 = hh * d_left
    c2 = -3. * f_left + 3. * f_right - 2. * hh * d_left - hh * d_right
    c3 = 2. * f_left - 2. * f_right + hh * d_left + hh * d_right
    # evaluate via Horner's scheme
    if abl == 0:
        return ((c3 * tt + c2) * tt + c1) * tt + c0
    elif abl == 1:
        return ((3. * c3 * tt + 2. * c2) * tt + c1) / hh
    elif abl == 2:
        return (6. * c3 * tt + 2. * c2) / hh ** 2
def is_irreflexive(universe, relation):
    """Determine whether *relation* on *universe* is irreflexive.

    A relation is irreflexive iff it contains no pair (a, a).

    :param universe: a set
    :param relation: a set of pairs over *universe*
    :return: True iff no element is related to itself.

    Bug fix: the original only returned False when *every* diagonal pair
    was present; a relation containing just some (a, a) pairs was wrongly
    reported as irreflexive.
    """
    diagonal = {(a, a) for a in universe}
    return relation.isdisjoint(diagonal)
def to_dict(lists):
    """Map each sub-list to itself, keyed by the element at the sub-list's
    own position within *lists* (the i-th list is keyed by its i-th element).

    not needed?
    """
    return {sub[pos]: sub for pos, sub in enumerate(lists)}
def parse_instream(txt):
    """Parse the current instruction stream.

    Consumes *txt* (popping every element from the front), strips commas,
    lowercases each token, and splits constant(register) notation into two
    separate elements.
    """
    seq = []
    for _ in range(len(txt)):
        token = txt.pop(0).replace(",", "").lower()
        if token[-1] == ")":
            open_idx = token.find("(")
            close_idx = token.find(")")
            seq.append(token[:open_idx])
            seq.append(token[open_idx + 1:close_idx])
        else:
            seq.append(token)
    return seq
def img_resize_near(grid, w2, h2):
    """Nearest-neighbour resize of a 2-D grid to w2 x h2.

    From techalgorithm.com.
    """
    src_w = len(grid[0])
    src_h = len(grid)
    x_ratio = src_w / float(w2)
    y_ratio = src_h / float(h2)
    return [
        [grid[int(row * y_ratio)][int(col * x_ratio)] for col in range(w2)]
        for row in range(h2)
    ]
def is_palindrome_v2(head) -> bool:
    """Use the runner method"""
    # Reverse the first half of the linked list in place while `fast`
    # advances two nodes per step and `slow` advances one: when `fast`
    # falls off the end, `rev` is the reversed first half and `slow`
    # sits at the middle of the list.
    fast = slow = head
    rev = None
    while fast and fast.next:
        fast = fast.next.next
        # NOTE: the RHS tuple is evaluated before any target is assigned,
        # so `slow.next` is captured before `rev.next` rewrites it; do not
        # reorder these targets.
        rev, rev.next, slow = slow, rev, slow.next
    # Odd-length list: `fast` is non-None, so skip the middle node.
    if fast:
        slow = slow.next
    # Compare the reversed first half against the second half.
    while rev and rev.val == slow.val:
        rev = rev.next
        slow = slow.next
    # Every node of the first half matched iff `rev` was fully consumed.
    return not rev
def nth(n):
    """
    Formats an ordinal.
    Doesn't handle negative numbers.
    >>> nth(1)
    '1st'
    >>> nth(0)
    '0th'
    >>> [nth(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14]]
    ['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th']
    >>> [nth(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
    ['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
    >>> [nth(x) for x in [111, 112, 113, 114, 115]]
    ['111th', '112th', '113th', '114th', '115th']
    """
    assert n >= 0
    # 11/12/13 are irregular: always 'th' regardless of the last digit.
    if n % 100 in (11, 12, 13):
        return '%sth' % n
    suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return '%s%s' % (n, suffix)
def find_exacte_matching(pattern, string):
    """Return all start indexes at which *pattern* occurs in the DNA
    *string* (overlapping occurrences included)."""
    pattern_len = len(pattern)
    return [
        start
        for start in range(len(string) - pattern_len + 1)
        if string[start:start + pattern_len] == pattern
    ]
def char_to_float(c: int, f_min: float = -100., f_max: float = 50.) -> float:
    """Linearly map a signed-char value ``c`` in [-128, 127] onto the range
    [``f_min``, ``f_max``].

    Parameters
    ----------
    ``c`` : int
        value to translate
    ``f_min`` : float, optional
        (default is -100)
    ``f_max`` : float, optional
        (default is 50)

    Returns
    -------
    float
    """
    span = f_max - f_min
    return f_min + span * (c + 128.) / 255.
def both_positive(x, y):
    """Returns True if both x and y are positive.
    >>> both_positive(-1, 1)
    False
    >>> both_positive(1, 1)
    True
    """
    return min(x, y) > 0
def performance_drop_metric(results_st, results_mt):
    """Average relative performance drop of the multi-tasking system vs a
    single-tasking baseline.

    Hardcoded for PASCAL-MT 5 tasks: the sign is flipped for 'normals',
    the task key where a lower score is better.  Prints and returns the
    percentage drop.
    """
    total = 0.0
    for task, st_score in results_st.items():
        sign = -1.0 if task == "normals" else 1.0
        total += sign * (results_mt[task] - st_score) / st_score
    drop = total / len(results_st) * 100
    print(drop)
    return drop
def filter_unwanted(filename):
    """ Returns true for hidden files (names beginning with a dot) """
    return filename[:1] == "."
def _get_spm_os(swift_os):
    """Maps the Bazel OS value to a suitable SPM OS value.
    Args:
        swift_os: A `string` representing the Bazel OS value.
    Returns:
        A `string` representing the SPM OS value.
    """
    # No mapping at this time: the value passes through unchanged until a
    # real Bazel->SPM translation is needed.
    return swift_os
def linspace(start, end, num=None, step=None):
    """Return evenly spaced values between *start* and *end* (inclusive).

    Args:
        num: number of samples, like numpy.linspace(start, end, num).
        step: spacing between samples, i.e. start:step:end.

    Returns:
        list: sampled values.  When neither num nor step is given,
        returns [start, end].

    Bug fix: num == 1 previously raised ZeroDivisionError; it now
    returns [start], matching numpy.linspace.
    """
    if num:
        if num == 1:
            return [start]
        spacing = (end - start) / (num - 1)
        return [start + i * spacing for i in range(num)]
    elif step:
        count = int((end - start) / step)
        return [start + i * step for i in range(count + 1)]
    else:
        return [start, end]
def fiscal_from_calendar_year(month_num: int, calendar_year: int) -> int:
    """Return the fiscal year for the input calendar year.

    January through June (month < 7) stay in the same fiscal year; July
    onward rolls into the next one.
    """
    if month_num < 7:
        return calendar_year
    return calendar_year + 1
def _handle_string(val):
"""
Replaces Comments: and any newline found.
Input is a cell of type 'string'.
"""
return val.replace('Comments: ', '').replace('\r\n', ' ') |
def get_url(city, api_key='fda7542e1133fa0b1b312db624464cf5', units='metric'):
    """Build the OpenWeatherMap current-weather query URL for *city*.

    Args:
        city: city name to query.
        api_key: OpenWeatherMap API key.  SECURITY NOTE: the default is a
            key hard-coded in source; prefer passing your own key (e.g.
            loaded from an environment variable) rather than shipping a
            secret in code.
        units: unit system for the response ('metric' gives Celsius).

    Returns:
        str: the full query URL, e.g.
        'http://api.openweathermap.org/data/2.5/weather?q=Nairobi&units=metric&APPID=YOUR-KEY-HERE'
    """
    weather_query = 'http://api.openweathermap.org/data/2.5/weather?q='
    return weather_query + city + '&units=' + units + '&APPID=' + api_key
def _getformat(val):
"""
Get the output format for a floating point number.
The general format is used with 16 places of accuracy, except for when
the floating point value is an integer, in which case a decimal point
followed by a single zero is used.
Parameters
----------
val : float or int
the number which needs formatted.
Returns
-------
string
the format string.
"""
if int(val) == val:
return "%.1f"
else:
return "%.16g" |
def validateDir(value):
    """Validate a direction vector.

    Args:
        value: expected to be a list of 3 numeric components.

    Returns:
        list of float: the components converted to floats.

    Raises:
        ValueError: if *value* is not a 3-component list of numbers.
    """
    msg = "Direction must be a 3 component vector (list)."
    if not isinstance(value, list):
        raise ValueError(msg)
    if len(value) != 3:
        raise ValueError(msg)
    try:
        nums = list(map(float, value))
    except (TypeError, ValueError):
        # Bug fix: was a bare `except`, which also swallowed SystemExit,
        # KeyboardInterrupt, etc.
        raise ValueError(msg)
    return nums
def diff(f, x, h=1e-5):
    """
    Central-difference approximation of the derivative of *f* at *x*,
    using half-step *h*.
    """
    return (f(x + h) - f(x - h)) / (2.0 * h)
def pad(s, width, align):
    """Return *s* padded with spaces to *width*: 'l' left-justifies,
    'r' right-justifies, anything else centers."""
    methods = {'l': s.ljust, 'r': s.rjust}
    return methods.get(align, s.center)(width)
def is_special_agenda_item(assignment):
    """Is this agenda item a special item?
    Special items appear as top-level agenda entries with their own timeslot information.
    >>> from collections import namedtuple # use to build mock objects
    >>> mock_timeslot = namedtuple('t2', ['slug'])
    >>> mock_assignment = namedtuple('t1', ['slot_type']) # slot_type must be a callable
    >>> factory = lambda t: mock_assignment(slot_type=lambda: mock_timeslot(slug=t))
    >>> all(is_special_agenda_item(factory(t)) for t in ['break', 'reg', 'other', 'officehours'])
    True
    >>> any(is_special_agenda_item(factory(t)) for t in ['regular', 'plenary'])
    False
    >>> is_special_agenda_item(None)
    False
    """
    if assignment is None:
        return False
    special_slugs = ('break', 'reg', 'other', 'officehours')
    return assignment.slot_type().slug in special_slugs
def reduce_array_shape(shape):
    """ Reduce the shape of a NumPy array by trimming trailing ones.

    NOTE(review): the original docstring also promised to "check that
    remaining non-zero dimension sizes are equal", but no such check is
    performed here — confirm whether that validation happens elsewhere.

    Args:
        shape (:obj:`list` of :obj:`int`): shape of array
    Returns:
        :obj:`list` of :obj:`int`: shape with trailing 1s removed
        (possibly empty).
    """
    shape = list(shape)
    while shape and shape[-1] == 1:
        shape.pop()
    return shape
def read_dm3_image_info(original_metadata):
    """Extract essential acquisition parameters from dm3 original_metadata.

    Raises TypeError for non-dict input; returns {} when there is no 'DM'
    section.  Collected keys (when present): exposure_time, microscope,
    acceleration_voltage, convergence_angle, collection_angle.
    """
    if not isinstance(original_metadata, dict):
        raise TypeError('We need a dictionary to read')
    if 'DM' not in original_metadata:
        return {}
    chosen = str(original_metadata['DM']['chosen_image'])
    tags = original_metadata['ImageList'][chosen]['ImageTags']
    experiment = {}
    if 'Acquisition' in tags:
        # Not every file carries the full Parameters/High Level nesting.
        params = tags['Acquisition'].get('Parameters', {})
        high_level = params.get('High Level', {})
        if 'Exposure (s)' in high_level:
            experiment['exposure_time'] = high_level['Exposure (s)']
    if 'Microscope Info' in tags:
        info = tags['Microscope Info']
        if 'Microscope' in info:
            experiment['microscope'] = info['Microscope']
        if 'Voltage' in info:
            experiment['acceleration_voltage'] = info['Voltage']
        mode = info.get('Illumination Mode')
        if mode == 'TEM':
            experiment['convergence_angle'] = 0.0
            experiment['collection_angle'] = 100.0
        if mode == 'SPOT':
            experiment['convergence_angle'] = 10.0
            experiment['collection_angle'] = 50.0
    return experiment
def check_cat_symb(x: str):
    """Render *x* as a string for a query: exact-str inputs are wrapped in
    single quotes, everything else is passed through str().

    Args:
        x: value to render.
    Returns:
        str: "'x'" when x is exactly a str, str(x) otherwise.
    """
    # Intentionally `type(...) is str` (not isinstance) so str subclasses
    # keep their own str() rendering, as in the original.
    return "'{0}'".format(x) if type(x) is str else str(x)
def read_incl_input(incl_file):
    """
    Read the include file into a list of epitopes.

    :param incl_file: path to the include file, or None.
    :return: list(included_epitopes) — one entry per line — or None when
        no file was given.
    """
    if incl_file is None:
        return None
    # Bug fix: mode "rU" was deprecated and removed in Python 3.11;
    # universal newlines are the default in text mode, so "r" is
    # equivalent.  Also uses a context manager so the file is closed.
    with open(incl_file, "r") as f:
        return f.read().splitlines()
def graph_place_tooltip(work):
    """Generate place tooltip.

    Checks booktitle, journal, school, howpublished in order and returns
    the *name* of the first attribute that is set on *work* (note: the
    key, not its value), or None when none are set.
    """
    for attr in ("booktitle", "journal", "school", "howpublished"):
        if getattr(work, attr, None) is not None:
            return attr
def needs_build_output(job_name: str) -> str:
    """Returns the output ID for the flag for needing a build."""
    return "needs-build-" + job_name
def _find_class_construction_fn(cls):
"""Find the first __init__ or __new__ method in the given class's MRO."""
for base in type.mro(cls):
if '__init__' in base.__dict__:
return base.__init__
if '__new__' in base.__dict__:
return base.__new__ |
def is_comment(line):
    """Lines whose first non-whitespace character is '#' are comments."""
    return line.lstrip().startswith('#')
def wholeFieldPredicate(field):
    """Return the whole field, stringified, as a 1-tuple.
    Examples:
    .. code:: python
        > print(wholeFieldPredicate('John Woodward'))
        > ('John Woodward',)
    """
    return (str(field),)
def _encoded_str_len(l):
    """
    Compute how long a byte string of length *l* becomes if encoded to hex.

    NOTE(review): the formula (l * 4) / 3 + 2 matches base64-style
    expansion, not hex encoding (which doubles the length to 2*l) — the
    docstring and implementation disagree; confirm the intended encoding.
    Also note the result is a float due to true division.
    """
    return (l << 2) / 3 + 2
def _etextno_to_uri_subdirectory(etextno):
"""Returns the subdirectory that an etextno will be found in a gutenberg
mirror. Generally, one finds the subdirectory by separating out each digit
of the etext number, and uses it for a directory. The exception here is for
etext numbers less than 10, which are prepended with a 0 for the directory
traversal.
>>> _etextno_to_uri_subdirectory(1)
'0/1'
>>> _etextno_to_uri_subdirectory(19)
'1/19'
>>> _etextno_to_uri_subdirectory(15453)
'1/5/4/5/15453'
"""
str_etextno = str(etextno).zfill(2)
all_but_last_digit = list(str_etextno[:-1])
subdir_part = "/".join(all_but_last_digit)
subdir = "{0}/{1}".format(subdir_part, etextno) # etextno not zfilled
return subdir |
def is_float(value):
    """Return True when ``float(value)`` succeeds, else False."""
    try:
        float(value)
    except (ValueError, TypeError):
        return False
    return True
def split(a_string, seperator):
    """ Split a string using seperator. """
    parts = a_string.split(seperator)
    return parts
def distance_sqr_2d(pt0, pt1):
    """ return squared Euclidean distance between 2d points pt0 and pt1 """
    dx = pt0[0] - pt1[0]
    dy = pt0[1] - pt1[1]
    return dx * dx + dy * dy
def clean_str_HTML(html_string):
    """Return *html_string* with all HTML tags (e.g. </sub>) removed.

    Generalized: previously only <br>, <sub>, <sup> and their closing
    forms were stripped, even though the docstring promised all tags;
    now any <...> tag is removed.
    """
    import re
    return re.sub(r'<[^>]*>', '', html_string)
def get_best_rssi_per_location(sensors):
    """Find the strongest signal in each location.

    Sensors with a falsy rssi (None, 0) are ignored; returns a mapping
    {location: strongest rssi seen}.
    """
    strongest = {}
    for sensor in sensors.values():
        rssi = sensor["rssi"]
        if not rssi:
            continue
        location = sensor["location"]
        current = strongest.get(location)
        if current is None or rssi > current:
            strongest[location] = rssi
    return strongest
def relu_mul(x):
    """ReLU computed by multiplying with the positivity mask.
    [fastest method](https://stackoverflow.com/a/32109519/743078)"""
    positive_mask = x > 0
    return x * positive_mask
def normalized_device_coordinates_to_normalized_image(image):
    """Map image values from [-1, 1] -> [0, 1].

    NOTE(review): the original docstring claimed the opposite direction
    ([0, 1] -> [-1, 1]); the formula (image + 1) / 2 maps normalized
    device coordinates in [-1, 1] onto [0, 1], which also matches the
    function name.
    """
    return (image + 1.0) / 2.0
def db_begin_transaction(driver):
    """Begin transaction.

    :return: SQL command as string ('' for unrecognised drivers)
    """
    commands = {
        'sqlite': 'BEGIN',
        'pg': 'BEGIN',
        'mysql': 'START TRANSACTION',
    }
    return commands.get(driver, '')
def is_palindrome_permutation(test_string):
    """
    Return True when *test_string* is a permutation of a palindrome.

    Assumption: the incoming string has no punctuation marks.  Spaces
    are removed and case is ignored; a palindrome permutation allows at
    most one character with an odd occurrence count.

    :param test_string:
    :return:
    """
    normalized = test_string.replace(" ", "").lower()
    counts = {}
    for ch in normalized:
        counts[ch] = counts.get(ch, 0) + 1
    odd_counts = sum(1 for occurrences in counts.values() if occurrences % 2)
    return odd_counts <= 1
def red_bold(msg: str) -> str:
    """
    Wrap *msg* between ANSI red & bold escape characters.
    :param msg: Message to be wrapped.
    :return: The same message, displayed as red & bold by the terminal.
    """
    return f'\u001b[1;31m{msg}\u001b[0m'
def mk_basic_call(row, score_cols):
    """Call is pathogenic/1 iff every score in *score_cols* meets its cutoff."""
    cutoffs = {'mpc': 2, 'revel': .375, 'ccr': .9}
    all_pass = all(row[col] >= cutoffs[col] for col in score_cols)
    return 1 if all_pass else 0
def check_change(val: int, return_money: int, balance: int):
    """
    Work out how many coins of denomination *val* to dispense.

    Uses up to *balance* coins of value *val* toward *return_money* and
    returns (coins dispensed, remaining money to return, remaining
    balance of this coin).
    """
    if return_money < val:
        # This coin is too large for the outstanding change.
        return 0, return_money, balance
    wanted = return_money // val
    if wanted < balance:
        return wanted, return_money % val, balance - wanted
    # Not enough coins: dispense the whole balance.
    dispensed = balance
    return dispensed, return_money - dispensed * val, 0
def get_span_labels(sentence_tags, inv_label_mapping=None):
    """Go from token-level labels to list of entities (start, end, class).

    Tags are assumed to follow a BIOES-style scheme ('B-X', 'I-X', 'S-X',
    'E-X', or 'O'); when *inv_label_mapping* is given, tags are integer
    ids that it maps back to strings first.
    """
    if inv_label_mapping:
        sentence_tags = [inv_label_mapping[i] for i in sentence_tags]
    span_labels = []
    last = 'O'
    start = -1
    for i, tag in enumerate(sentence_tags):
        # Split "B-PER" into position prefix and class; bare 'O' has none.
        pos, _ = (None, 'O') if tag == 'O' else tag.split('-')
        # A span ends just before a new B/S tag or an O that follows one.
        if (pos == 'S' or pos == 'B' or tag == 'O') and last != 'O':
            span_labels.append((start, i - 1, last.split('-')[-1]))
        # A span starts at a B/S tag, or at the first tag after an O run.
        if pos == 'B' or pos == 'S' or last == 'O':
            start = i
        last = tag
    # Close a span that is still open at the end of the sentence.
    if len(sentence_tags) > 0 and sentence_tags[-1] != 'O':
        span_labels.append((start, len(sentence_tags) - 1,
                            sentence_tags[-1].split('-')[-1]))
    return span_labels
def arithmetic_mean(X):
    r"""Computes the arithmetic mean of the sequence `X`.
    Let:
        * `n = len(X)`.
        * `u` denote the arithmetic mean of `X`.
    .. math::
        u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
    """
    total = sum(X)
    return total / len(X)
def _stringify(elems):
"""Convert a sequence of ranges into a string.
Args:
elems (list): List of 2-tuples representing ranges.
Returns:
str: String with lo..hi ranges concatenated.
"""
return ''.join(chr(lo) + chr(hi) for (lo, hi) in elems) |
def int_ip_to_string(ip_int):
    """
    Convert ip4 address from integer into string representation.

    NOTE(review): bytes are emitted least-significant first (shifts
    0, 8, 16, 24), i.e. little-endian dotted order.  The conventional
    network representation would use shifts (24, 16, 8, 0) — confirm the
    intended byte order against the producer of *ip_int* before changing.

    Parameters
    ----------
    ip_int : int
        4-byte ip4 integer representation
    Returns
    -------
    string
        ip4 string representation
    """
    byte_mask = 0xFF
    return '.'.join(
        [str((ip_int >> shift) & byte_mask) for shift in (0, 8, 16, 24)],
    )
def sec(x):
    """Return the number of days given x seconds."""
    # divide out hours-per-day, minutes-per-hour, seconds-per-minute
    days = x / 24 / 60 / 60
    return days
def isport(value):
    """
    Return whether or not given value represents a valid port.
    If the value represents a valid port, this function returns ``True``, otherwise ``False``.
    Examples::
        >>> isport('8080')
        True
        >>> isport('65536')
        False
    :param value: string to validate port
    """
    try:
        port = int(value)
    except ValueError:
        return False
    return 0 < port < 65536
def reverse_str(numeral_str):
    """Reverses the order of a string."""
    return ''.join(reversed(numeral_str))
def complain(ctx) -> str:
    """Return the canned rant message (ctx is accepted but unused)."""
    # The message text (typos included) is preserved byte-for-byte.
    parts = [
        "What a crap bot this is! :rage: ",
        "Hours of time wasted on this useless procuct of a terrible coder and a lousy artist ",
        ":rage: :rage: Is this bot even TESTED before the updates are published... ",
        "Horrible, just HORRIBLE this spawn of incopetence. Who tf made this? A 12 year old child? ",
        "This child would probably have made it better than THAT :rage: ",
    ]
    return "".join(parts)
def get_codes(metadata):
    """Read dimension codes and their dimension names from metadata dictionary.

    Args:
        metadata: dictionary of metadata

    Returns:
        tuple: (dimensions_with_codes, dimension_codes) — the STUB then
        HEADING dimensions that have a 'CODES(<name>)' entry, and the
        corresponding lists of code values.
    """
    dimensions_with_codes = []
    dimension_codes = []

    def _collect(dimension_names):
        # Shared logic for STUB and HEADING (previously duplicated):
        # only dimensions that actually have a CODES(...) entry count.
        for name in dimension_names:
            code_key = 'CODES(' + name + ')'
            if code_key in metadata:
                dimensions_with_codes.append(name)
                dimension_codes.append(list(metadata[code_key]))

    _collect(metadata.get('STUB', []))
    _collect(metadata.get('HEADING', []))
    return dimensions_with_codes, dimension_codes
def barycentric(vector1, vector2, vector3, u, v, w):
    """
    Weighted (areal/barycentric) combination of three points.

    Barycentric coordinates are normalized (w = 1-u-v; u + v + w = 1.0)
    and are very useful in ray tracing and for finding the center of
    three-point constraints.

    :return: <tuple> barycentric vector coordinates.
    """
    combined = u * vector1 + v * vector2 + w * vector3
    return combined
def dec2bin(dec, bits=15):
    """
    Convert a decimal integer to a zero-padded binary string.

    :param dec: integer to convert
    :param bits: minimum width of the result (default 15)
    :return: binary string
    """
    return format(dec, '0{}b'.format(bits))
def range_to_number(interval_str):
    """Converts "X-Y" -> "X" (the lower bound), parsed as a number.

    Handles a leading '-' (negative X), 'M'/'B' suffixes (millions /
    billions, rounded to int) and decimal values.
    """
    if '-' not in interval_str:
        return int(interval_str)
    if interval_str.startswith('-'):
        # Leading '-' means X is negative; split()[1] holds its magnitude.
        number = '-' + interval_str.split('-')[1]
    else:
        number = interval_str.split('-')[0]
    suffix = number[-1]
    if suffix == 'M':
        return int(round(float(number[:-1]) * 10**6))
    if suffix == 'B':
        return int(round(float(number[:-1]) * 10**9))
    if '.' in number:
        return float(number)
    return int(number)
def third_order_poly(x, a, b, c, d):
    """
    Evaluate the cubic polynomial a + b*x + c*x**2 + d*x**3.

    :param x: evaluation point
    :param a: constant coefficient
    :param b: linear coefficient
    :param c: quadratic coefficient
    :param d: cubic coefficient
    :return: polynomial value at x
    """
    quadratic_term = c * x ** 2
    cubic_term = d * x ** 3
    return a + b * x + quadratic_term + cubic_term
def sign_of_weight(x, weight):
    """
    Apply a sign to *weight* based on the value of the first gene digit.

    :param x: int, range [0, 9]
        First digit of the gene.
    :param weight: int
        Weight determined from the gene.
    :return: int
        +weight when x >= 5, else -weight.
    """
    return weight if x >= 5 else -weight
def get_type_from_dict(config_dict: dict) -> str:
    """Get model type from config dict.

    Prefers the 'model_type' key, falls back to 'type', then ''.

    Args:
        config_dict: Config dict.
    Returns:
        Model type.
    """
    fallback = config_dict.get("type", "")
    return config_dict.get("model_type", fallback)
def generate_network_url(project_id, network):
    """Format the resource name as a resource URI."""
    return f'projects/{project_id}/global/networks/{network}'
def binary_dominates(a, b):
    """Returns whether binary vector *a* dominates *b*.

    a dominates b iff no component of b exceeds the corresponding
    component of a, and a != b.
    """
    if a == b:
        return False
    return all(ai >= bi for ai, bi in zip(a, b))
def _ilog2(x):
"""Compute the ceiling of the base-2 logarithm of a number."""
i = 0
while x > (1 << i):
i += 1
return i |
def log_in(user: str, pwd):
    """
    Build the login message string.

    :type pwd: object
    :type user: object
    """
    return 'The user name is: {} with password {}'.format(user, pwd)
def startGame(jsonData):
    """Extract session constants at game start and print some data."""
    gameID = jsonData['gameID']      # id of the current game session
    playerID = jsonData['playerID']  # player number
    print("json data:", jsonData)
    print("gameID: {0}\nplayerID: {1}".format(gameID, playerID))
    return gameID, playerID
def _parse_condor_submit_job_id(condor_submit_out):
"""Parse job id from condor_submit output string.
Assume format:
Submitting job(s).
1 job(s) submitted to cluster 8.
"""
return float(condor_submit_out.split()[-1]) |
def center(s, l, delim='#'):
    """Center text *s* in a field of length *l*, padding with *delim*."""
    left_pad = int(l / 2 - len(s) / 2)
    right_pad = l - (left_pad + len(s))
    return delim * left_pad + s + delim * right_pad
def RGB_to_rgb(r, g, b):
    """
    Convert RGB values to rgb values
    :param r,g,b: (0,255) range floats
    :return: a 3 element tuple of rgb values in the range (0, 1)
    """
    return tuple(float(channel) / 255 for channel in (r, g, b))
def relatively_prime(a, b):
    """
    Returns True when *a* and *b* are relatively prime (coprime): they
    share no common factor other than 1.

    Bug fix: replaces trial division with math.gcd, which also handles 0
    correctly (gcd(0, n) == n, so (0, n) is only coprime for n == 1) and
    runs in O(log min(a, b)) rather than O(min(a, b)).
    """
    from math import gcd
    return gcd(a, b) == 1
def get_alias(infos):
    """Get aliases of all parameters.

    Parameters
    ----------
    infos : list
        Content of the config header file.

    Returns
    -------
    pairs : list
        List of tuples (param alias, param name).
    """
    pairs = []
    for section in infos:
        for entry in section:
            if "alias" not in entry:
                continue
            name = entry["name"][0]
            for alias in entry["alias"][0].split(','):
                pairs.append((alias.strip(), name))
    return pairs
def selected_indices(total_number_of_indices, desired_number_of_indices=None):
    """Return indices evenly spread over the whole sequence.

    Gives exactly the desired number of indices — or all of them when the
    desired count is None, negative, or not smaller than the total."""
    desired = desired_number_of_indices
    if desired is None or desired < 0 or desired >= total_number_of_indices:
        return range(total_number_of_indices)
    stride = float(total_number_of_indices) / float(desired)
    # regular quasi-random index list: sample at the middle of each stride
    return [int((k + 0.5) * stride) for k in range(desired)]
def collect_state_action_pairs(iterator):
    """
    Overview:
        Collect (state, action) pairs from the input iterator.
    Arguments:
        - iterator (:obj:`Iterable`): Iterables with at least ``obs`` and ``action`` tensor keys.
    Returns:
        - res (:obj:`list`): List of (state, action) tuples.
    """
    return [(item['obs'], item['action']) for item in iterator]
def get_mount_info(pools, volumes, actions, fstab):
    """ Return a list of argument dicts to pass to the mount module to manage mounts.
    The overall approach is to remove existing mounts associated with file systems
    we are removing and those with changed mount points, re-adding them with the
    new mount point later.
    Removed mounts go directly into the mount_info list, which is the return value,
    while added/active mounts to a list that gets appended to the mount_info list
    at the end to ensure that removals happen first.
    """
    mount_info = list()  # result: removal entries first, add entries appended last
    mount_vols = list()  # volumes whose mounts get (re)added after all removals
    # account for mounts removed by removing or reformatting volumes
    if actions:
        for action in actions:
            # NOTE(review): `action` looks like a blivet-style action object
            # (is_destroy / is_format / format.type / device.path) -- confirm.
            if action.is_destroy and action.is_format and action.format.type is not None:
                # fstab.lookup presumably returns a dict-like row or None -- verify.
                mount = fstab.lookup('device_path', action.device.path)
                if mount is not None:
                    mount_info.append({"src": mount['device_id'], "path": mount['mount_point'],
                                       'state': 'absent', 'fstype': mount['fs_type']})
    def handle_new_mount(volume, fstab):
        # Decide whether `volume` should be mounted, and whether an existing
        # fstab entry for its device must first be removed (mount point moved,
        # or an old swap entry being replaced).
        replace = None
        mounted = False
        mount = fstab.lookup('device_path', volume['_device'])
        # Swap "mounts" have no mount point but still count as mounted.
        if volume['mount_point'] or volume['fs_type'] == 'swap':
            mounted = True
        # handle removal of existing mounts of this volume
        if mount and mount['fs_type'] != 'swap' and mount['mount_point'] != volume['mount_point']:
            replace = dict(path=mount['mount_point'], state="absent")
        elif mount and mount['fs_type'] == 'swap':
            replace = dict(src=mount['device_id'], fstype="swap", path="none", state="absent")
        return mounted, replace
    # account for mounts that we set up or are replacing in pools
    for pool in pools:
        for volume in pool['volumes']:
            if pool['state'] == 'present' and volume['state'] == 'present':
                mounted, replace = handle_new_mount(volume, fstab)
                if replace:
                    mount_info.append(replace)
                if mounted:
                    mount_vols.append(volume)
    # account for mounts that we set up or are replacing in standalone volumes
    for volume in volumes:
        if volume['state'] == 'present':
            mounted, replace = handle_new_mount(volume, fstab)
            if replace:
                mount_info.append(replace)
            if mounted:
                mount_vols.append(volume)
    # append the adds last so the removal entries above are processed first
    for volume in mount_vols:
        mount_info.append({'src': volume['_mount_id'],
                           'path': volume['mount_point'] if volume['fs_type'] != "swap" else "none",
                           'fstype': volume['fs_type'],
                           'opts': volume['mount_options'],
                           'dump': volume['mount_check'],
                           'passno': volume['mount_passno'],
                           'state': 'mounted' if volume['fs_type'] != "swap" else "present"})
    return mount_info
def make_links(line):
    """Split a line into parts, return a dictionary of chain links."""
    chain = {}
    for pair in line.split(";"):
        key, value = pair.split('-')
        chain[key] = value
    return chain
def updateHand(hand, word):
    """
    Assumes that 'hand' has all the letters in word.
    In other words, this assumes that however many times
    a letter appears in 'word', 'hand' has at least as
    many of that letter in it.
    Updates the hand: uses up the letters in the given word
    and returns the new hand, without those letters in it.
    Has no side effects: does not modify hand.
    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    new_hand = dict(hand)  # work on a copy so the caller's hand is untouched
    for ch in word:
        remaining = new_hand.get(ch, 0)
        if remaining > 0:
            new_hand[ch] = remaining - 1
    return new_hand
def iseven(n):
    """
    Return True if n is an even number and False otherwise.
    """
    return divmod(n, 2)[1] == 0
def binary_srch_exact(a, t, lo, hi):
    """Binary search for `t` in the rotated sorted array `a` within [lo, hi].

    The original docstring promised support for rotated sorted arrays,
    but the body was a plain binary search that fails whenever the
    rotation point lies inside [lo, hi].  This version checks which half
    of the current window is sorted and narrows into the correct side,
    so it works for rotated AND ordinary ascending-sorted input.

    NOTE(review): assumes elements are distinct; with duplicates the
    sorted-half test is ambiguous -- confirm against callers.

    :param a: rotated (or plain) ascending-sorted sequence.
    :param t: target value.
    :param lo: inclusive lower bound of the search window.
    :param hi: inclusive upper bound of the search window.
    :returns: index of `t` in `a`, or -1 when not present.
    """
    while hi >= lo:
        mid = (lo + hi) // 2
        if a[mid] == t:
            return mid
        if a[lo] <= a[mid]:  # left half [lo, mid] is sorted
            if a[lo] <= t < a[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:  # right half [mid, hi] is sorted
            if a[mid] < t <= a[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return -1
def get_azimuth(angle):
    """Convert a heading angle in degrees (180 to -180) to an azimuth angle.

    Also normalizes out-of-range results into 0-360.

    :param angle: heading in degrees.
    :returns: azimuth angle in degrees.
    """
    if 90 < angle <= 180:
        azimuth_angles = 360.0 - (angle - 90)
    else:
        azimuth_angles = abs(angle - 90)
    if abs(azimuth_angles) > 360:
        # Fix: the original computed `azimuth_angles % 360` but discarded
        # the result, so values above 360 were returned unnormalized.
        azimuth_angles %= 360
    return azimuth_angles
def check_features_name(columns_dict, features_dict, features):
    """
    Convert a list of feature names (strings) or feature ids into feature ids.
    Feature names can come either from columns_dict or from features_dict.

    Parameters
    ----------
    columns_dict: dict
        Dictionary mapping integer column number to technical feature names.
    features_dict: dict
        Dictionary mapping technical feature names to domain names.
    features : List
        List of ints (columns ids) or of strings (business names).

    Returns
    -------
    list of ints
        Columns ids compatible with var_dict.

    Raises
    ------
    ValueError
        If the features mix types, or are not all found in a single dict.
    """
    if all(isinstance(f, int) for f in features):
        # Already column ids -- nothing to translate.
        features_ids = features
    elif all(isinstance(f, str) for f in features):
        inv_columns_dict = {v: k for k, v in columns_dict.items()}
        inv_features_dict = {v: k for k, v in features_dict.items()}
        if features_dict and all(f in features_dict.values() for f in features):
            # Domain names -> technical names -> column ids.
            columns_list = [inv_features_dict[f] for f in features]
            features_ids = [inv_columns_dict[c] for c in columns_list]
        elif inv_columns_dict and all(f in columns_dict.values() for f in features):
            # Technical names -> column ids.
            features_ids = [inv_columns_dict[f] for f in features]
        else:
            # Fix: corrected grammar in the error message ("must came" -> "must come").
            raise ValueError(
                'All features must come from the same dict of features (technical names or domain names).'
            )
    else:
        raise ValueError(
            """
            features must be a list of ints (representing ids of columns)
            or a list of string from technical features names or from domain names.
            """
        )
    return features_ids
def get_image_modality(image_modality):
    """Change image_modality (string) to rgb (bool) and flow (bool) for efficiency"""
    if image_modality == "joint":
        return True, True
    if image_modality in ("rgb", "flow"):
        return image_modality == "rgb", image_modality == "flow"
    raise Exception("Invalid modality option: {}".format(image_modality))
def _iterate(arrays, cur_depth, iterators, n):
"""
dfs algorithm for returning the next iterator value
Args:
arrays: A list of 1-D arrays
cur_depth: the depth of the dfs tree in current call
iterators: a list of iterators
n: number of arrays
Returns:
new iterator value
"""
if cur_depth >= 0 and cur_depth < n - 1:
iterators = _iterate(arrays, cur_depth + 1, iterators, n)
iterators[cur_depth] += (iterators[cur_depth + 1] // len(arrays[cur_depth + 1]))
iterators[cur_depth + 1] %= len(arrays[cur_depth + 1])
return iterators
elif cur_depth == n - 1:
iterators[cur_depth] += 1
return iterators |
def is_alive(cell):
    """Return True if a cell is alive (equals 1) and False otherwise."""
    return cell == 1
def ami_architecture(ami_info):
    """
    Finds source AMI architecture AMI tag.
    Parameters
    ----------
    ami_info : dict
        AMI information.
    Returns
    -------
    string
        Architecture of source AMI, or None when no 'Architecture' tag exists.
    """
    return next(
        (tag['Value'] for tag in ami_info['Tags'] if tag['Key'] == 'Architecture'),
        None,
    )
def mimic_preprocessing(text):
    """Drop '(Over)'...'(Cont)' continuation blocks that concatenate
    multiple notes, returning the cleaned text.

    :param text: raw note text.
    :return: text with the junk continuation headers and the lines
        between them removed.
    """
    kept = []
    dropping = False
    for line in text.split('\n'):
        marker = line.strip()
        if marker == '(Over)':
            dropping = True          # start skipping (this line included)
        elif marker == '(Cont)':
            dropping = False
            continue                 # skip the '(Cont)' marker line itself
        if not dropping:
            kept.append(line)
    return '\n'.join(kept)
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
    """Converts an integer to a base36 string."""
    if not isinstance(number, int):
        number = int(number)
    prefix = ''
    if number < 0:
        prefix, number = '-', -number
    base = len(alphabet)
    # Single-digit fast path (also covers zero).
    if number < base:
        return prefix + alphabet[number]
    digits = []
    while number:
        number, rem = divmod(number, base)
        digits.append(alphabet[rem])
    return prefix + ''.join(reversed(digits))
def ord_prio(prio):
    """Compute the ordinal number of a text priority
    :param prio: string
    :rtype: integer
    """
    priority_rank = {
        "urgmust": 1,
        "must": 2,
        "high": 3,
        "medium": 4,
        "low": 5,
    }
    # Unknown priorities rank lowest.
    return priority_rank.get(prio, 5)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.