content stringlengths 42 6.51k |
|---|
def get_color_from_similarity(similarity_score: float) -> str:
    """Return a CSS color/style fragment for a similarity score.

    :param similarity_score: similarity value (anything coercible to float)
    :return: CSS fragment — bold red above 15, orange above 10,
             yellow above 5, otherwise "green"
    """
    # Convert once instead of on every comparison.
    score = float(similarity_score)
    if score > 15:
        return "#990033; font-weight: bold"
    if score > 10:
        return "#ff6600"
    if score > 5:
        return "#ffcc00"
    return "green"
def get_string(element):
    """Helper for safely pulling a string from an XML element.

    :param element: an XML element object (or None)
    :return: ``element.string``, or None when ``element`` is None
    """
    # `is None` instead of `== None`: identity check avoids invoking a
    # custom __eq__ that XML/soup element classes may define.
    return None if element is None else element.string
def sort_by_points(hn_list):
    """Return the given list of story dictionaries ordered by "score",
    highest score first.
    """
    return sorted(hn_list, key=lambda entry: entry["score"], reverse=True)
def and_join(strings):
    """Join ``strings`` with commas, prefixing the final item with ``'and '``.

    >>> and_join(['Korea', 'Japan', 'China', 'Taiwan'])
    'Korea, Japan, China, and Taiwan'

    :param strings: a sequence of words to join
    :returns: the joined string ('' for an empty sequence)
    :rtype: :class:`str`
    """
    if not strings:
        return ''
    if len(strings) == 1:
        return strings[0]
    last_index = len(strings) - 1
    parts = ['and ' + item if position == last_index else item
             for position, item in enumerate(strings)]
    return ', '.join(parts)
def convert_to_aws_federated_user_format(string):
    """Make a string compatible with AWS ECR repository naming.

    Spaces become dashes; any character that is not alphanumeric or one of
    ``= , . @ -`` is dropped.

    Arguments:
        string {string} -- Desired ECR repository name
    Returns:
        string -- Valid ECR repository name
    """
    allowed_punct = {"=", ",", ".", "@", "-"}
    dashed = string.replace(" ", "-")
    return "".join(ch for ch in dashed if ch.isalnum() or ch in allowed_punct)
def in_flight_entertainment(flight_length, movie_lengths):
    """Determine whether two movies sum exactly to the flight length.

    Single pass: for each movie we check whether its complement
    (flight_length minus this movie) was already seen, then record the
    movie itself — so a movie is never paired with itself.

    :param flight_length: length of the flight in minutes
    :param movie_lengths: list of movie lengths in minutes
    :return: True when two distinct entries sum to ``flight_length``
    :rtype: bool
    """
    seen = set()
    for length in movie_lengths:
        complement = flight_length - length
        if complement in seen:
            return True
        seen.add(length)
    return False
def fibonacci_matrix_mul(n):
    """Compute the n-th Fibonacci number via fast matrix exponentiation.

    Uses the identity [[0,1],[1,1]]**k = [[F(k-1), F(k)], [F(k), F(k+1)]],
    so F(n) is entry [1][1] of the matrix raised to the (n-1)-th power.
    Runs in O(log n) 2x2 matrix multiplications.

    :param n: index of the Fibonacci number to return, F(n)
    :return: F(n) as an int
    """
    # Base cases: F(0) = 0, F(1) = 1 (also avoids matrix_pow(mul, 0)).
    if n == 0:
        return 0
    if n == 1:
        return 1
    # The Fibonacci Q-matrix.
    mul = [[0,1],[1,1]]
    def matrix_mul(matrix1, matrix2):
        """Multiply two 2x2 matrices (row-major nested lists).

        :param matrix1: 2*2 matrix
        :param matrix2: 2*2 matrix
        :return: 2*2 matrix product matrix1 @ matrix2
        """
        (a11, a12), (a21, a22) = matrix1
        (b11, b12), (b21, b22) = matrix2
        c11 = a11 * b11 + a12 * b21
        c12 = a11 * b12 + a12 * b22
        c21 = a21 * b11 + a22 * b21
        c22 = a21 * b12 + a22 * b22
        mul_matrix =[[c11, c12], [c21, c22]]
        return mul_matrix
    def matrix_pow(mul,n):
        """Raise a 2x2 matrix to the n-th power by repeated squaring.

        :param mul: 2*2 matrix
        :param n: exponent, n >= 1
        :return: 2*2 matrix mul**n
        """
        if n == 1:
            return mul
        if n == 2:
            return matrix_mul(mul, mul)
        # Square the half-power; multiply once more when n is odd.
        temp = matrix_pow(mul, n // 2)
        pow = matrix_mul(temp, temp)
        if n % 2:
            return matrix_mul(pow, mul)
        else:
            return pow
    # mul**(n-1) has F(n) in its bottom-right entry.
    return matrix_pow(mul, n-1)[1][1]
def convertRomanNumeral(romNumeral):
    """Convert a Roman numeral into an ordinary integer.

    Scans left to right, consuming two-character subtractive pairs
    (IV, IX, XL, XC, CD, CM) before single symbols. Input is
    case-insensitive.

    Params:
        romNumeral (String) - the Roman numeral string to be converted
    Returns:
        num (int): ordinary-number equivalent of the Roman numeral.
    Examples:
        >>> convertRomanNumeral("M")
        1000
        >>> convertRomanNumeral("DX")
        510
        >>> convertRomanNumeral("XL")
        40
    """
    romNumeral = romNumeral.upper()
    rom_dict = {"M": 1000, "D": 500, "C": 100, "L": 50, "X": 10, "V": 5,
                "I": 1, 'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400,
                'CM': 900}
    i = 0
    num = 0
    while i < len(romNumeral):
        # Prefer a two-character subtractive pair when one matches.
        if i + 1 < len(romNumeral) and romNumeral[i:i + 2] in rom_dict:
            num += rom_dict[romNumeral[i:i + 2]]
            i += 2
        else:
            num += rom_dict[romNumeral[i]]
            i += 1
    return num
def test_get_early_out(hour_out, check_out, tolerance):
    """Compute how long an employee clocked out early.

    (Original docstring was Indonesian: "menghitung berapa lama pegawai
    pulang lebih awal".)

    Returns the difference ``hour_out - check_out`` when it is at least
    ``tolerance``; otherwise returns a single-space string (meaning
    "not early enough to report").
    """
    if hour_out > check_out and (hour_out - check_out) >= tolerance:
        return hour_out - check_out
    # Within tolerance, or not early at all.
    return ' '
def retrograde(motif):
    """Reverse the order of notes in a motif.

    None of the notes' parameters are altered; a new list is returned.

    Arguments:
        motif (list of ints)
    Returns:
        A list of ints
    """
    return list(reversed(motif))
def sn_temp_ratio(signal_Tant, noise_Trms,
                  output=None,
                  verbose=0):
    """
    Returns the signal-to-noise ratio.

    Parameters
    ----------
    signal_Tant : signal (antenna) temperature [K]
    noise_Trms : noise rms temperature [K]
    output : output dictionary to store the result under 'S/N_temp'
        (default: None — result is not stored)
    verbose : verbosity (default: 0)
    """
    res = signal_Tant / noise_Trms
    # isinstance instead of `type(...) == dict` so dict subclasses
    # (OrderedDict, defaultdict, ...) are also accepted.
    if isinstance(output, dict):
        output['S/N_temp'] = res
        if verbose > 0:
            print("S/N_temp computed and saved in output['S/N_temp'].")
    return res
def extract_metadata_from_query_results(query_results):
    """
    Given a SPARQL query result, extract nationality, gender and birthdate.

    :param query_results: SPARQL JSON result with results/bindings structure
    :return: tuple (birth_date, gender, nationality); nationality is ''
             when neither dbp nor dbo nationality is bound
    :raises ValueError: when the result contains no bindings
    """
    bindings = query_results["results"]["bindings"]
    if not bindings:
        # Explicit message instead of a bare `raise ValueError`.
        raise ValueError("query results contain no bindings")
    raw_metadata = bindings[0]
    gender = raw_metadata['gender']['value'].lower()
    birth_date = raw_metadata['birthdate']['value']
    # Prefer the dbpedia-property nationality, fall back to the ontology one.
    if "nationality_dbp" in raw_metadata:
        nationality = raw_metadata['nationality_dbp']['value'].lower()
    elif "nationality_dbo" in raw_metadata:
        nationality = raw_metadata['nationality_dbo']['value'].lower()
    else:
        nationality = ""
    return birth_date, gender, nationality
def actionIndexInt2Tuple(actionIdx, numActionList):
    """Transforms an action index to tuple of action indices.

    Args:
        actionIdx (int): action index of the discrete action set.
        numActionList (list): consists of the number of actions in evader and
            pursuer's action sets.

    Returns:
        tuple of int: indices of the Q-matrix.
    """
    numJoinAction = int(numActionList[0] * numActionList[1])
    assert (actionIdx < numJoinAction), (
        "The size of joint action set is "
        "{:d} but get index {:d}".format(numJoinAction, actionIdx)
    )
    # Row-major decomposition of the flat joint-action index.
    rowIdx, colIdx = divmod(actionIdx, numActionList[1])
    return (rowIdx, colIdx)
def _title_to_filename(title: str) -> str:
    """Formats a title to be a safe filename.

    Keeps only alphanumeric characters and spaces, then replaces each
    space with an underscore.
    """
    kept = [ch for ch in title if ch.isalnum() or ch == " "]
    return "".join(kept).replace(" ", "_")
def indent(t, indent=0):
    """Indent every line of ``t`` by ``indent`` spaces.

    Note: the parameter deliberately shares the function's name; it is
    part of the public signature and must stay.
    """
    pad = ' ' * indent
    return '\n'.join(pad + line for line in t.split('\n'))
def isfloat(value):
    """
    Determine if a value can be converted to a float. Return True if
    value can be converted to a float and False otherwise.

    Parameters
    ----------
    value : string
        String value to try to convert to a float.

    Returns
    -------
    bool : bool

    Examples
    --------
    >>> isfloat(value = "2.5")
    True
    >>> isfloat(value = "hello world")
    False
    >>> isfloat(value = "5.5_")
    False
    """
    try:
        float(value)
        return True
    # TypeError added so non-string/non-numeric inputs (None, lists, ...)
    # report False instead of raising.
    except (ValueError, TypeError):
        return False
def format_range(low, high, width):
    """Format a range from low to high inclusively, zero-padded to ``width``.

    A degenerate range (low == high) is formatted as a single number.
    """
    if low == high:
        return f"{low:0{width}d}"
    return f"{low:0{width}d}-{high:0{width}d}"
def contains(node, value):
    """
    Return whether tree rooted at node contains value.

    @param BinaryTree|None node: binary tree to search for value
    @param object value: value to search for
    @rtype: bool

    >>> contains(None, 5)
    False
    >>> contains(BinaryTree(5, BinaryTree(7), BinaryTree(9)), 7)
    True
    """
    # An empty subtree contains nothing (base case of the recursion).
    if node is None:
        return False
    return (node.data == value
            or contains(node.left, value)
            or contains(node.right, value))
def fuzzy_name(str_in):
    """Lower-case and strip common delimiters ('_' and '-') for fuzzy matching.

    Accepts either a single string or an iterable of items; non-string
    items in an iterable are passed through unchanged.
    """
    def _normalize(text):
        # Lower-case then drop underscores and dashes.
        return text.lower().replace('_', '').replace('-', '')

    if isinstance(str_in, str):
        return _normalize(str_in)
    return [_normalize(item) if isinstance(item, str) else item
            for item in str_in]
def get_stretch(image_name, sample_name):
    """Extract the stretch value encoded in an image filename.

    ex: hpr10p100031.TIF --> 0.1%
    The sample-name prefix is removed, the extension dropped, and the
    remainder split on 'p': units before the 'p', first digit after it
    becomes the tenths place.
    """
    stem = image_name.replace(sample_name, '').split('.')[0]
    units, decimals = stem.split('p')
    # Only the first digit after 'p' carries the fractional part.
    return float(units) + float(decimals[:1]) / 10
def fancy_ind(inds, shape):
    """
    Flatten a multi-dimensional index into a single flat-array index
    (row-major / C order).

    Parameters
    ----------
    inds : list- or array-like
        List of indices in the unflattened array.
        For sensical results, must all be less than value at corresponding
        index in 'shape'.
    shape : list- or array-like
        Dimension sizes in the unflattened array.
    """
    flat_index = 0
    stride = 1
    # Walk axes right-to-left, accumulating the stride as we go.
    for axis in range(len(inds) - 1, -1, -1):
        flat_index += inds[axis] * stride
        stride *= shape[axis]
    return flat_index
def range(actual_value, lower_limit, higher_limit):
    """Assert that actual_value is within range (inclusive).

    NOTE(review): this deliberately shadows the builtin ``range`` —
    it is part of an assertion DSL and the name is its public interface.
    Returns True on success, raises AssertionError otherwise.
    """
    if lower_limit <= actual_value <= higher_limit:
        return True
    raise AssertionError(
        "{!r} is OUTSIDE RANGE of {!r} to {!r} inclusive".format(
            actual_value, lower_limit, higher_limit
        )
    )
def decode_text(text):
    """Decodes a string from HTML.

    Unescapes literal backslash sequences (\\n, \\t, \\s) and the basic
    HTML entities for angle brackets and double quotes.

    NOTE(review): the original entity replacements had been reduced to
    no-ops (e.g. replace('<', '<')) — almost certainly '&lt;'/'&gt;'/
    '&quot;' garbled by an earlier HTML-unescaping pass; restored here.
    """
    output = text
    output = output.replace('\\n', '\n')
    output = output.replace('\\t', '\t')
    # '\\s' is the two characters backslash + s; it decodes to a backslash.
    output = output.replace('\\s', '\\')
    output = output.replace('&lt;', '<')
    output = output.replace('&gt;', '>')
    output = output.replace('&quot;', '"')
    return output
def add_dictionnaries(dict_1, dict_2):
    """Add two dictionaries element-wise.

    Assumes both dictionaries have the same keys.

    Parameters
    ----------
    dict_1 : dict
        First dictionary.
    dict_2 : dict
        Second dictionary.

    Returns
    -------
    dict
        Sum of both dictionaries (value-wise, per key).
    """
    # keys-view equality is set equality — replaces the redundant
    # `sorted(sorted(...)) == sorted(sorted(...))` check.
    assert dict_1.keys() == dict_2.keys()
    return {key: dict_1[key] + dict_2[key] for key in dict_1}
def pop_file(in_files):
    """
    Select the first file from a list of filenames.

    Used to grab the first echo's file when processing
    multi-echo data through workflows that only accept
    a single file.

    Examples
    --------
    >>> pop_file('some/file.nii.gz')
    'some/file.nii.gz'
    >>> pop_file(['some/file1.nii.gz', 'some/file2.nii.gz'])
    'some/file1.nii.gz'
    """
    # Single filenames pass through untouched; sequences yield the head.
    return in_files[0] if isinstance(in_files, (list, tuple)) else in_files
def scan_year(visit, studyid='TON'):
    """
    Retrieve the year in which a scan was collected.

    Parameters
    ----------
    visit : str or int
        Visit number; when a string, only the final character is used.
    studyid: str, optional
        Specifies the study from which files will be retrieved. Valid
        values are 'THD' and 'TON'.

    Returns
    -------
    sc_year : int
        Actual scan year
    """
    if isinstance(visit, str):
        visit = int(visit[-1:])
    # TON scans span 2012-2014; anything else falls back to THD years.
    study_years = [2012, 2013, 2014] if studyid == 'TON' else [2008, 2009, 2010, 2011]
    return study_years[visit - 1]
def is_p2tr(script: bytes) -> bool:
    """
    Determine whether a script is a P2TR output script.

    A P2TR output is exactly 34 bytes: OP_1 (0x51), a 0x20 push opcode,
    and the 32-byte x-only public key.

    :param script: The script
    :returns: Whether the script is a P2TR output script
    """
    return len(script) == 34 and script[:2] == b'\x51\x20'
def cc_bad_mock(url, request):
    """
    Mock for carrier checking, worst case.

    Both parameters are ignored; a canned XML response body is returned.
    """
    return (
        '<?xml version="1.0" encoding="UTF-8"?>'
        '<response ttl="600000"><country id="222" name="United States"/>'
        '<carrier id="0" name="default" icon="-1" downloadlimit="50" '
        'allowedoverride="false"/></response>'
    )
def n_blocks(n_frames, block_length):
    """Return how many blocks of ``block_length`` consecutive frames fit
    in ``n_frames`` (one new block per frame shift of 1).

    NOTE(review): the original docstring mentioned a ``frame_shift``
    parameter that does not exist; the formula implies a shift of 1.
    """
    return n_frames - (block_length - 1)
def move_down(rows, t):
    """Return the coordinates of the neighbour directly below a bomb.

    Takes the number of rows in the matrix and the bomb's (x, y)
    position; returns None when there is no neighbour below.

    NOTE(review): the boundary test is ``x == rows`` — for 0-indexed
    rows one would expect ``x == rows - 1``; preserved as-is, confirm
    against callers' indexing convention.
    """
    x, y = t
    if x == rows:
        return None
    return (x + 1, y)
def reverse(lst):
    """Return a reversed copy of the sequence.

    Works on any sliceable sequence (list, tuple, str) and preserves
    the input type via slicing.
    """
    reversed_seq = lst[::-1]
    return reversed_seq
def parse_list_to_string(tags):
    """
    Parses a list of tags into a single comma-separated string.

    :param tags: A list of tags
    :return: A string with tags separated by ", "
    """
    separator = ', '
    return separator.join(tags)
def comp2dict(composition):
    """Takes composition: Si20 O10, returns dict of atoms {'Si':20,'O':10}.

    Alternating element-symbol / count tokens are paired up.
    """
    import re
    # [A-Za-z] instead of the buggy [A-z], which also matched the
    # punctuation characters between 'Z' and 'a' ([, \, ], ^, _, `).
    pat = re.compile('([A-Za-z]+|[0-9]+)')
    tokens = re.findall(pat, composition)
    return dict(zip(tokens[::2], map(int, tokens[1::2])))
def dictsize(value):
    """Return the length of the dict's string representation.

    Empty/falsy input yields the placeholder '(n/a)'.
    """
    if value:
        return len(str(value))
    return '(n/a)'
def _int_to_tuple_conv(axes):
    """
    Converts ints to tuples in input axes, expected by most validation checks.

    Mutates (and returns) ``axes``: positions 0 and 1 are wrapped in a
    1-tuple when they are plain ints.
    """
    for position in (0, 1):
        value = axes[position]
        if isinstance(value, int):
            axes[position] = (value,)
    return axes
def sort_separation(separation):
    """Sort a separation.

    Each of the three parts is sorted; the first and third parts are
    swapped when the first is longer than the third.

    :param separation: Initial separation (three-element sequence).
    :return: Sorted list of separation.
    """
    first, middle, last = separation
    if len(first) > len(last):
        first, last = last, first
    return [sorted(first), sorted(middle), sorted(last)]
def rgb_color_wheel(wheel_pos):
    """Color wheel to allow for cycling through the rainbow of RGB colors.

    The position is folded into [0, 255) and mapped through three 85-step
    segments: red->blue, blue->green, green->red.
    """
    pos = wheel_pos % 255
    if pos < 85:
        return 255 - pos * 3, 0, pos * 3
    if pos < 170:
        offset = pos - 85
        return 0, offset * 3, 255 - offset * 3
    offset = pos - 170
    return offset * 3, 255 - offset * 3, 0
def capitalize_all(x):
    """Capitalize every space-separated word of a sentence."""
    return ' '.join(word.capitalize() for word in x.split(' '))
def get_data_config(config, spec_len):
    """Prepare the configuration dictionary for a validation dataset.

    Copies the relevant keys out of ``config`` (renaming 'mel_bands' to
    'n_mels') and records the spectrum length.
    """
    return {
        'fps': config['fps'],
        'sample_rate': config['sample_rate'],
        'frame_len': config['frame_len'],
        'mel_min': config['mel_min'],
        'mel_max': config['mel_max'],
        'blocklen': config['blocklen'],
        'batchsize': config['batchsize'],
        'n_mels': config['mel_bands'],
        'spec_len': spec_len,
    }
def is_symbol(s):
    """A string s is a symbol if it starts with an alphabetic char.

    >>> is_symbol('R2D2')
    True
    """
    if not isinstance(s, str):
        return False
    # s[:1] is '' for an empty string, and ''.isalpha() is False.
    return s[:1].isalpha()
def chunk_data(data, size):
    """Creates a list of chunks of the specified `size`.

    Args:
        data (list): self-explanatory.
        size (int): desired size of each chunk, the last one can be <= `size`.

    Returns:
        coll (list): lists of lists.

    Raises:
        ValueError: if `size` is not positive (the original code would
            loop forever on size <= 0).
    """
    if size <= 0:
        raise ValueError("size must be a positive integer")
    coll = []
    # Slicing per chunk replaces the per-index copy loop.
    for start in range(0, len(data), size):
        coll.append(list(data[start:start + size]))
    return coll
def f(x: int, n: int) -> float:
    """
    Calcula el resultado de la serie: sum_{i=1..n} (x - i)**n / i.
    """
    # Generator expression — no intermediate list needed.
    return sum(((x - i) ** n) / i for i in range(1, n + 1))
def prepend(string, prefix):
    """Append something to the beginning of another string.

    Parameters
    ----------
    string : str
        String to prepend to.
    prefix : str
        String to add to the beginning.

    Returns
    -------
    str
        String with the addition to the beginning.

    Notes
    -----
    An empty (falsy) ``string`` is returned unchanged — no prefix is added.
    """
    if not string:
        return string
    return prefix + string
def safe_print_list_integers(my_list=None, _x=0):
    """Print up to ``_x`` leading elements of ``my_list`` as integers,
    skipping values that cannot be formatted with ``{:d}``.

    :param my_list: list of values to print (default: empty list)
    :param _x: number of leading positions to attempt
    :return: count of values successfully printed
    """
    # None sentinel instead of the mutable-default-argument anti-pattern.
    if my_list is None:
        my_list = []
    counter = 0
    for i in range(_x):
        try:
            print("{:d}".format(my_list[i]), end="")
            counter += 1
        # Non-integer values are skipped; IndexError still propagates,
        # matching the original contract (caller must pass _x <= len).
        except (ValueError, TypeError):
            continue
    print()
    return counter
def get_width(x: int, gw: float, divisor: int = 8):
    """Scale ``x`` by the width multiplier ``gw``, rounding the result
    up to the nearest multiple of ``divisor``.

    Used to keep kernel counts as multiples of 8.
    """
    scaled = x * gw
    if scaled % divisor == 0:
        return int(scaled)
    # Round up to the next multiple of divisor.
    return (int(scaled / divisor) + 1) * divisor
def selectSort(list1, list2):
    """
    Sort two lists simultaneously (selection sort), in place.

    ``list1`` drives the ordering (descending — the inner scan selects
    the larger element); every swap in ``list1`` is mirrored in ``list2``
    so positions stay paired.

    (Original docstring/comments were Czech; translated.)

    input:
        list1 - primary list that is actually sorted
        list2 - secondary list whose elements follow list1's swaps
    returns:
        the two sorted lists (also mutated in place)
    """
    length = len(list1)
    for position in range(length):
        selected = position
        # Find the largest remaining element of list1.
        for candidate in range(position + 1, length):
            if list1[candidate] > list1[selected]:
                selected = candidate
        # Swap in the primary list...
        list1[position], list1[selected] = list1[selected], list1[position]
        # ...and mirror the swap in the secondary list.
        list2[position], list2[selected] = list2[selected], list2[position]
    return list1, list2
def proxy_exception(host, list):
    """Return 1 if host is contained in list or host's suffix matches
    an entry in list that begins with a leading dot.

    (The parameter is named ``list`` in the original API; kept to avoid
    breaking keyword callers.)
    """
    for exception in list:
        if host == exception:
            return 1
        # Leading-dot entries match as domain suffixes; the truthiness
        # guard skips empty entries (original swallowed IndexError).
        if exception and exception[0] == '.' and host.endswith(exception):
            return 1
    return 0
def unique(it):
    """Return a list of unique elements in the iterable, preserving the order.

    Usage::

        >>> unique([None, "spam", 2, "spam", "A", "spam", "spam", "eggs", "spam"])
        [None, 'spam', 2, 'A', 'eggs']
    """
    seen = set()
    result = []
    for item in it:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    return result
def _identity_decorator(func, *args, **kwargs):
    """Identity decorator.

    This isn't as useless as it sounds: given a function with a
    ``__signature__`` attribute, it generates a wrapper that really
    does have that signature.

    NOTE(review): despite the name, this invokes ``func`` immediately
    with the given arguments rather than returning a wrapper.
    """
    result = func(*args, **kwargs)
    return result
def items_sum(items, field):
    """Take a list of items, and return the sum of a given field.

    Only attributes whose exact type is int or float are counted
    (bool and numeric subclasses are deliberately excluded, matching
    the original ``type(...) in (int, float)`` check).
    Returns None for an empty/falsy ``items``.
    """
    if not items:
        return None
    total = 0
    for item in items:
        value = getattr(item, field)
        if type(value) in (int, float):
            total += value
    return total
def _compute_fans(shape):
    """
    Taken from https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/ops/init_ops_v2.py#L994

    Computes the number of input and output units for a weight shape.

    Args:
        shape: Integer shape tuple or TF tensor shape.
    Returns:
        A tuple of scalars (fan_in, fan_out).
    """
    rank = len(shape)
    if rank == 0:
        # Constants: both fans default to 1.
        return 1., 1.
    if rank == 1:
        return shape[0], shape[0]
    if rank == 2:
        return shape[0], shape[1]
    # Convolution kernels (2D, 3D, or more): shape = (..., input_depth, depth).
    receptive_field_size = 1.
    for dim in shape[:-2]:
        receptive_field_size *= dim
    return shape[-2] * receptive_field_size, shape[-1] * receptive_field_size
def _get_sqa_table_id(wtq_table_id):
    """Goes from 'csv/123-csv/123.csv' to 'table_csv/123-123.csv'."""
    # Drop the leading 'csv/', flatten the path, and strip the inner '-csv'.
    suffix = wtq_table_id[4:].replace('/', '-').replace('-csv', '')
    return u'table_csv/' + suffix
def get_num_frames(dur, anal):
    """Given the duration of a track and a dictionary containing analysis
    info ('sample_rate' and 'hop_size'), return the number of frames."""
    samples = dur * anal["sample_rate"]
    return int(samples / anal["hop_size"])
def __bounding_box(p1, p2):
    """Return (left, bottom, right, top) coordinate values for the
    axis-aligned box spanned by two points."""
    left = min(p1[0], p2[0])
    right = max(p1[0], p2[0])
    bottom = min(p1[1], p2[1])
    top = max(p1[1], p2[1])
    return (left, bottom, right, top)
def count_sign_changes(values):
    """Return the number of sign changes in a list of values.

    A change is counted when consecutive values have strictly opposite
    signs (product < 0); zeros never trigger a change.
    """
    changes = 0
    previous = 0
    for index, current in enumerate(values):
        if index and previous * current < 0:
            changes += 1
        previous = current
    return changes
def find_char(pixel, weighted_chars):
    """Return the character whose weight is closest to the pixel value.

    Ties resolve to the first key in dict order, as with ``min``.
    """
    def distance(candidate):
        return abs(pixel - weighted_chars[candidate])
    return min(weighted_chars.keys(), key=distance)
def bCOM(r, ra, b0):
    """
    Anisotropy profile from (Cuddeford 1991; Osipkov 1979; Merritt 1985) inversion.

    b(r) = (b0 + (r/ra)^2) / (1 + (r/ra)^2)

    Parameters
    ----------
    r : array_like, float
        Distance from center of the system.
    ra : float
        Anisotropy radius.
    b0 : float
        Anisotropy at r = 0.

    Returns
    -------
    b : array_like, float
        Anisotropy profile.
    """
    x2 = (r / ra) * (r / ra)
    return (b0 + x2) / (1 + x2)
def _mat_mat_add_fp(x, y):
    """Element-wise sum of two matrices given as nested lists."""
    result = []
    for x_row, y_row in zip(x, y):
        result.append([a + b for a, b in zip(x_row, y_row)])
    return result
def underscored(s: str) -> str:
    """Turn spaces in the string ``s`` into underscores.

    Used primarily for filename formatting.
    """
    return s.replace(' ', '_')
def unprocess_args(args):
    """Unprocesses processed config args.

    Given a dictionary of arguments, returns a dictionary where every
    value has been converted to a string representation: lists join
    their elements with ', ', dicts join their stringified values with
    ', ', everything else goes through ``str``.

    Returns:
        dict mapping each arg to its string representation.
    """
    def _stringify(value):
        if isinstance(value, list):
            return ', '.join(value)
        if isinstance(value, dict):
            return ', '.join(str(v) for v in value.values())
        return str(value)

    return {arg: _stringify(value) for arg, value in args.items()}
def decrypt_seiga_drm(enc_bytes, key):
    """Decrypt the light DRM applied to certain Seiga images.

    The 16-hex-digit key is parsed into 8 bytes, which are XORed
    cyclically over the encrypted data.
    """
    key_bytes = [int(key[pos:pos + 2], 16) for pos in range(0, 16, 2)]
    return bytearray(byte ^ key_bytes[index % 8]
                     for index, byte in enumerate(enc_bytes))
def build_filename(artifact_id, version, extension, classifier=None):
    """
    Return a filename for a Maven artifact built from its coordinates.

    Falsy extension/classifier are treated as empty; a non-empty
    classifier is inserted with a leading dash.
    """
    ext = extension if extension else ''
    cls = f'-{classifier}' if classifier else ''
    return f'{artifact_id}-{version}{cls}.{ext}'
def filter_dict_by_key_value(dict_, key, value):
    """
    helper function to filter a dict by a dict key

    Keeps entries whose (dict-valued) entry maps ``key`` to ``value``.

    :param dict_: ``dict``
    :param key: dict key
    :param value: dict key value
    :returns: filtered ``dict``
    """
    filtered = {}
    for name, entry in dict_.items():
        if entry[key] == value:
            filtered[name] = entry
    return filtered
def average(measurements):
    """
    Quick average using the builtin ``sum`` and ``len``.

    Returns -1 for an empty input so callers can treat -1 as the
    "invalid" sentinel, e.g.::

        sensor_average = average(sensor_measurements)
        if conditions and sensor_average > -1: ...
    """
    if not measurements:
        return -1
    return sum(measurements) / len(measurements)
def comm(lhs, rhs):
    """Return (left-only, common, right-only) for two sets."""
    common = lhs & rhs
    left_only = lhs - common
    right_only = rhs - common
    return left_only, common, right_only
def int_to_binstr(i, bits):
    """Convert integer to a '01' string.

    Args:
        i (int): the integer to render.
        bits (int): Number of bits for this integer (low ``bits`` bits
            are emitted, most-significant first).

    Returns:
        str: binary representation of the integer.
    """
    return ''.join(str((i >> j) & 1) for j in reversed(range(bits)))
def is_str_empty(string):
    """Check whether a string is None, empty, or whitespace-only.

    :param string: the string to test (or None)
    :return: True when there is no meaningful content
    """
    # None first (no .strip() on None); stripping '' yields '' (falsy),
    # so the single check covers both empty and whitespace-only strings.
    return string is None or not string.strip()
def fibonacci_recursive(n):
    """
    Compute the Fibonacci number at the given index by naive recursion.

    :param n: given number (negative input yields the sentinel -1)
    :type n: int
    :return: the Fibonacci number, or -1 for negative input
    :rtype: int
    """
    if n < 0:
        return -1
    if n in (0, 1):
        return n
    return fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)
def produced_by(entry):
    """
    Modify source activity names to clarify data meaning.

    The first token (in declaration order) found as a substring of
    ``entry`` determines the returned label; unmatched entries yield
    None (implicitly), as before.

    :param entry: original source name
    :return: modified activity name, or None when nothing matches
    """
    # Insertion order preserves the original if-chain's precedence.
    labels = {
        "ArtsEntRec": "Arts Entertainment Recreation",
        "DurableWholesaleTrucking": "Durable Wholesale Trucking",
        "Education": "Education",
        "ElectronicEquipment": "Electronic Equipment",
        "FoodBeverageStores": "Food Beverage Stores",
        "FoodNondurableWholesale": "Food Nondurable Wholesale",
        "HotelLodging": "Hotel Lodging",
        "MedicalHealth": "Medical Health",
        "Multifamily": "Multifamily",
        "NotElsewhereClassified": "Not Elsewhere Classified",
        "OtherManufacturing": "Other Manufacturing",
        "OtherRetailTrade": "Other Retail Trade",
        "PublicAdministration": "Public Administration",
        "Restaurants": "Restaurants",
        "ServicesManagementAdminSupportSocial":
            "Services Management Administration Support Social",
        "ServicesProfessionalTechFinancial":
            "Services Professional Technical Financial",
        "ServicesRepairPersonal": "Services Repair Personal",
    }
    for token, label in labels.items():
        if token in entry:
            return label
def authorize_header(auth_token) -> str:
    """create a properly formatted `authorization` header for Pomerium

    Args:
        auth_token: string format service account credentials
    """
    return f'Pomerium {auth_token}'
def create_metadata(
    input_name_list,
    input_type_list,
    input_shape_list,
    output_name_list,
    output_type_list,
    output_shape_list,
    model_input_list=None,
    model_output_list=None,
    custom_meta_dict=None,
):
    """
    Build a standardized metadata dictionary for model packaging (MAR).

    Renders user-supplied model I/O information into the conventional
    metadata format accepted across the Eisen ecosystem: each input and
    output is described by a ``{'name', 'type', 'shape'}`` entry, and
    custom json-serializable information is carried under ``'custom'``.

    :param input_name_list: model input names, Eg. ``['input']``
    :type input_name_list: list
    :param input_type_list: input types, Eg. ``['ndarray']``
    :type input_type_list: list
    :param input_shape_list: expected input shapes, Eg. ``[[-1, 3, 244, 244]]``
    :type input_shape_list: list
    :param output_name_list: model output names, Eg. ``['logits', 'prediction']``
    :type output_name_list: list
    :param output_type_list: output types, Eg. ``['ndarray', 'str']``
    :type output_type_list: list
    :param output_shape_list: output shapes, Eg. ``[[-1, 10], [-1]]``
    :type output_shape_list: list
    :param model_input_list: names used as model inputs (default: all inputs)
    :type model_input_list: list
    :param model_output_list: names obtained from the model (default: all outputs)
    :type model_output_list: list
    :param custom_meta_dict: json-serializable dict with custom information
    :type custom_meta_dict: dict
    :return: dictionary containing metadata in standardized format
    """
    if model_input_list is None:
        model_input_list = input_name_list
    if model_output_list is None:
        model_output_list = output_name_list
    if custom_meta_dict is None:
        custom_meta_dict = {}
    # Every name/type/shape triple must line up one-to-one.
    assert len(input_name_list) == len(input_type_list) == len(input_shape_list)
    assert len(output_name_list) == len(output_type_list) == len(output_shape_list)
    input_entries = [
        {'name': name, 'type': typ, 'shape': shape}
        for name, typ, shape in zip(input_name_list, input_type_list, input_shape_list)
    ]
    output_entries = [
        {'name': name, 'type': typ, 'shape': shape}
        for name, typ, shape in zip(output_name_list, output_type_list, output_shape_list)
    ]
    return {
        'inputs': input_entries,
        'outputs': output_entries,
        'model_input_list': model_input_list,
        'model_output_list': model_output_list,
        'custom': custom_meta_dict,
    }
def calc_points_hit(num_transfers, free_transfers):
    """
    Current rules say we lose 4 points for every transfer beyond
    the number of free transfers we have.

    Num transfers can be an integer, or "W", "F", "Bx", or "Tx"
    (wildcard, free hit, bench-boost or triple-caption).
    For Bx and Tx the "x" corresponds to the number of transfers
    in addition to the chip being played.
    """
    # Wildcard / free hit: transfers are free.
    if num_transfers in ["W", "F"]:
        return 0
    if isinstance(num_transfers, int):
        return max(0, 4 * (num_transfers - free_transfers))
    is_chip_with_count = (
        len(num_transfers) == 2
        and num_transfers.startswith(("B", "T"))
    )
    if is_chip_with_count:
        extra = int(num_transfers[-1])
        return max(0, 4 * (extra - free_transfers))
    raise RuntimeError(
        "Unexpected argument for num_transfers {}".format(num_transfers)
    )
def numberofdupes(string, idx):
    """Return how many times in a row the letter at index idx is repeated
    immediately after that position.

    # "abccdefgh", 2 returns 1
    """
    target = string[idx]
    count = 0
    while idx + count + 1 < len(string) and string[idx + count + 1] == target:
        count += 1
    return count
def merge_values(list1, list2):
    """Merge two selection value lists and dedup.

    All selection values should be simple (hashable) value types.
    When list1 is empty, list2 is returned as-is (not deduped),
    matching the original behavior.
    """
    if not list1:
        return list2
    return list(set(list1) | set(list2))
def filter_fields(header, data, fields):
    """Filter (header, data) with selected header fields.

    Fields not present in the header are silently skipped; the
    surviving columns keep the order given by ``fields``.

    Args:
        header ([str]): The header.
        data ([[float]]): The data, with the same number of columns as header.
        fields ([str]): The fields that need to be written.

    Returns:
        (header ([str]), data ([[float]]))

    Examples:
        >>> filter_fields(['a','b','c'], [[0,1,2], [3,4,5]], ['d','b','a'])
        (['b', 'a'], [[1, 0], [4, 3]])
    """
    picked = [header.index(field) for field in fields if field in header]
    filtered_header = [header[col] for col in picked]
    filtered_data = [[row[col] for col in picked] for row in data]
    return (filtered_header, filtered_data)
def calculate(below):
    """Return (as a string) the sum of all multiples of 3 or 5 below
    the specified number."""
    total = sum(n for n in range(below) if n % 3 == 0 or n % 5 == 0)
    return str(total)
def huber_loss(r, delta):
    """Huber loss function, refer to wiki https://en.wikipedia.org/wiki/Huber_loss

    Quadratic for |r| <= delta, linear beyond.
    """
    magnitude = abs(r)
    if magnitude <= delta:
        return r ** 2 / 2
    return delta * (magnitude - delta / 2)
def createJDL(jooID, directory, jobCE):
    """
    _createJDL_

    Create a simple JDL string list for a Condor/globus submission.
    """
    return [
        "universe = globus\n",
        "should_transfer_executable = TRUE\n",
        "notification = NEVER\n",
        "Executable = %s/submit.sh\n" % (directory),
        "Output = condor.$(Cluster).$(Process).out\n",
        "Error = condor.$(Cluster).$(Process).err\n",
        "Log = condor.$(Cluster).$(Process).log\n",
        "initialdir = %s\n" % directory,
        "globusscheduler = %s\n" % (jobCE),
        "+WMAgent_JobID = %s\n" % jooID,
        "+WMAgent_AgentName = testAgent\n",
        "Queue 1",
    ]
def safestr(value):
    """
    Turns ``None`` into the string "<None>".

    NOTE(review): any falsy value (e.g. the empty string) is also mapped
    to "<None>", because the implementation tests truthiness.

    :param str value: The value to safely stringify.
    :returns: The stringified version of ``value``.
    """
    return value if value else '<None>'
def convert_mac_colon_to_dot_format(mac_addr):
    """
    Convert a MAC address from colon format to dot format.

    For e.g. convert aa:bb:cc:dd:ee:ff to aabb.ccdd.eeff

    Args(str):
        mac address in colon format
    Returns(str):
        mac address in dot format
    """
    octets = mac_addr.split(":")
    # Fuse octet pairs, then join the three groups with dots.
    groups = ["".join(octets[pos:pos + 2]) for pos in range(0, 6, 2)]
    return ".".join(groups)
def humanize_bytes(num, suffix='B'):
    """Render a byte count in human-readable form.

    Via https://stackoverflow.com/a/1094933
    """
    for unit in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(num) < 1024.0:
            return f"{num:3.1f}{unit}{suffix}"
        num /= 1024.0
    # Beyond zetta: fall through to yotta (note the extra space).
    return f"{num:.1f} Yi{suffix}"
def func_xy_ab_kwargs(x, y, a=2, b=3, **kwargs):
    """Echo positional/keyword arguments back to the caller.

    Parameters
    ----------
    x, y: float
    a, b: int
    kwargs: dict

    Returns
    -------
    tuple: (x, y, a, b, None, None, None, kwargs) — the three Nones are
    placeholders kept for signature compatibility with sibling funcs.
    """
    placeholders = (None, None, None)
    return (x, y, a, b) + placeholders + (kwargs,)
def filter_more_characters(words, anagram):
    """Filter out words that use any character more times than it
    appears in the anagram.

    :param words: candidate words
    :param anagram: the reference letter pool
    :return: list of words buildable from the anagram's letters
    """
    kept = []
    for word in words:
        # Every distinct character of the word must fit within the
        # anagram's supply of that character.
        if all(word.count(ch) <= anagram.count(ch) for ch in set(word)):
            kept.append(word)
    return kept
def _find_all_pairs(list):
    """Return all ordered pairs drawn from the given list, as a set.

    (a, b) is not the same as (b, a); self-pairs (a, a) are included.
    The parameter keeps its original (builtin-shadowing) name to avoid
    breaking keyword callers.
    """
    return {(first, second) for first in list for second in list}
def mul(array):
    """
    Return the product of all elements of the array (1 for empty input).
    """
    product = 1
    for value in array:
        product = product * value
    return product
def compute_cos2phi(dxs, dys, square_radius):
    """Compute cos(2*phi) as ``(dxs - dys) / square_radius``.

    NOTE(review): presumably dxs/dys are squared offsets and
    ``square_radius == dxs + dys`` (the standard second-moment identity) —
    confirm against the caller. Works element-wise for array inputs.
    """
    return (dxs - dys) / square_radius
def get_clarifai_tags(clarifai_response, probability):
    """Get the response from the Clarifai API and return results filtered by
    concepts with a confidence set by probability parameter (default 50%)

    NOTE(review): the Color, Logo and General branches below *reassign*
    ``concepts`` (discarding anything a previous branch collected), while the
    Celebrity/Demographics and Video branches *extend* it. This looks
    intentional for single-model responses — confirm whether one response can
    ever match more than one branch.
    """
    results = []
    concepts = []
    # Parse response for Color model
    try:
        concepts = [
            {concept.get('w3c', {}).get('name').lower(): concept.get('value')}
            for concept in clarifai_response['data']['colors']
        ]
    except KeyError:
        pass
    # Parse response for Celebrity and Demographics models
    try:
        for value in clarifai_response['data']['regions']:
            for face in value['data']['face'].values():
                concepts.extend(
                    [
                        {concept.get('name').lower(): concept.get('value')}
                        for concept in face['concepts']
                    ]
                )
    except KeyError:
        pass
    # Parse response for Logo model
    try:
        concepts = [
            {concept.get('name').lower(): concept.get('value')}
            for concept in
            clarifai_response['data']['regions'][0]['data']['concepts']
        ]
    except KeyError:
        pass
    # Parse response for General model and similarly structured responses
    try:
        concepts = [
            {concept.get('name').lower(): concept.get('value')}
            for concept in clarifai_response['data']['concepts']
        ]
    except KeyError:
        pass
    # Parse response for Video input
    try:
        for frame in clarifai_response['data']['frames']:
            concepts.extend(
                [
                    {concept.get('name').lower(): concept.get('value')}
                    for concept in frame['data']['concepts']
                ]
            )
    except KeyError:
        pass
    # Keep only the concept names whose confidence exceeds the threshold.
    # Each entry is a single-pair dict, hence the values()/keys() unpacking.
    for concept in concepts:
        if float([x for x in concept.values()][0]) > probability:
            results.append(str([x for x in concept.keys()][0]))
    return results
def V_tank_Reflux(Reflux_mass, tau, rho_Reflux_20, dzeta_reserve):
    """
    Calculates the tank volume for the reflux (waste).

    Parameters
    ----------
    Reflux_mass : float
        The mass flowrate of reflux, [kg/s]
    tau : float
        The time, [s]
    rho_Reflux_20 : float
        The density of the waste at 20 degrees Celsius, [kg/m**3]
    dzeta_reserve : float
        The coefficient of reserve, [dimensionless]

    Returns
    -------
    V_tank_Reflux : float
        The tank volume for reflux, [m**3]

    References
    ----------
    (reference not provided in the original source)
    """
    # volume = mass flow * time * reserve factor / density
    return Reflux_mass * tau * dzeta_reserve / rho_Reflux_20
def build_obj_ref_list(objects):
    """
    :param objects: Python list of requested objects.
    :returns: Tcl list (space-separated string) of all requested objects references.
    """
    refs = (item.ref for item in objects)
    return ' '.join(refs)
def is_prime(n):
    """Return True if ``n`` is a prime number.

    Trial division up to sqrt(n) — O(sqrt(n)) instead of the original O(n).

    :param n: integer to test.
    :returns: bool.
    """
    if n < 2:
        # Fix: the original returned True for 0, 1 and negatives, none of
        # which are prime.
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def get_all_refs(schema):
    """Get all ref links in a schema.

    Traverses a schema and extracts all relative ref links from the schema,
    returning a set containing the results.

    Parameters:
        schema: An OAS schema in the form of nested dicts to be traversed.
    Returns:
        set: All of the ref links found during traversal.
    """
    all_refs = set()
    # isinstance instead of `type(...) is ...` so dict/list/str subclasses
    # (e.g. OrderedDict) are traversed too — a backward-compatible widening.
    if isinstance(schema, dict):
        for key, val in schema.items():
            if key == "$ref" and isinstance(val, str):
                all_refs.add(val)
            # Recurse into the value regardless; nested schemas may hold refs.
            all_refs.update(get_all_refs(val))
    elif isinstance(schema, list):
        for item in schema:
            all_refs.update(get_all_refs(item))
    return all_refs
def max_gini(n, ys):
    """Calculates the normalisation coefficient for the generalised Gini index.

    NOTE(review): the formula looks like it expects ``ys`` sorted in
    ascending order — confirm against the caller.

    Parameters
    ----------
    n : int
        The number of agents in the simulation.
    ys : list of int
        The agents' cumulative utilities.

    Returns
    -------
    float
        The normalisation coefficient for the generalised Gini index
        (true division makes the result a float).
    """
    total = 0      # running sum of utilities accumulated so far
    aggregate = 0  # running sum of (1-based rank) * utility
    k_p1 = 0       # 1-based index of the first element that would turn the running total positive
    for i, y in enumerate(ys):
        # Stop at the first element whose addition makes the running total
        # positive; note that element itself is NOT folded into the sums.
        if total + y > 0:
            k_p1 = i + 1
            break
        aggregate += (i+1) * y
        total += y
    return 1 + 2*(aggregate - k_p1*total)/n
def ElfHash(name):
    """Compute the ELF hash of a given input string."""
    acc = 0
    for ch in name:
        acc = (acc << 4) + ord(ch)
        high = acc & 0xf0000000
        # XOR with ``high`` clears exactly the top-nibble bits just set
        # (equivalent to h &= ~g in the canonical ELF hash), then the
        # nibble is folded back in lower down.
        acc = (acc ^ high) ^ (high >> 24)
    return acc & 0xffffffff
def group_arg_and_key(parameter_arg_and_keys):
    """
    Group argnames based on key
    :param parameter_arg_and_keys: [{"name": <arg_name>, "key": <arg_key>}]
    :return: {"<arg_key>": [<arg_name1>, <arg_name2>]}
    """
    grouped = {}
    for entry in parameter_arg_and_keys:
        # setdefault creates the bucket on first sight of a key.
        grouped.setdefault(entry["key"], []).append(entry["name"])
    return grouped
def dictFromTokenList(paramlist):
    """Return a dictionary formed from the terms in paramlist.

    paramlist is a sequence of items in the form
    parametername, =, value.
    If there are duplicates, the last one wins.

    Raises ValueError if the list length is not a multiple of three or any
    middle token is not "=".
    """
    msg = "Ill-formed parameter list: keywords and values must have the form kw = value"
    if len(paramlist) % 3 != 0:
        raise ValueError(msg)
    result = {}
    # Walk the flat list as (keyword, "=", value) triples.
    for kw, eq, value in zip(paramlist[0::3], paramlist[1::3], paramlist[2::3]):
        if eq != "=":
            raise ValueError(msg)
        result[kw] = value
    return result
def longest (s1, s2, s3):
    """Longest of three strings.

    You may assume that the longest string is unique.
    (If it is not, the first of the tied strings in argument order wins.)

    Params:
        s1 (string)
        s2 (string)
        s3 (string)

    Returns: (string) longest string
    """
    # Fix: the original overwrote all three arguments with '' and never
    # compared lengths; max(..., key=len) implements the intended behavior.
    return max((s1, s2, s3), key=len)
def clean_dict(target, remove=None):
    """Recursively remove items matching a value 'remove' from the dictionary
    (in place); nested dicts are descended into rather than removed.

    :type target: dict
    :raises ValueError: if target is not a dict.
    :returns: the same (mutated) target, for chaining.
    """
    if type(target) is not dict:
        raise ValueError("Target is required to be a dict")
    doomed = []
    for key, value in target.items():
        if type(value) is dict:
            clean_dict(value, remove)
        elif value == remove:
            # Defer deletion: mutating while iterating is not allowed.
            doomed.append(key)
    for key in doomed:
        del target[key]
    return target
def apply_from_data(act_fun, d):
    """
    Computes the effect (substitution) of an action.

    :param act_fun: process action (a dictionary mapping variables to
        expressions, i.e. callables taking the data space)
    :param d: a data space
    :return: a substitution — a dict mapping each variable to its expression
        evaluated on ``d``
    """
    # Dict comprehension instead of dict([...]): same result, no throwaway
    # intermediate list (flake8-comprehensions C404).
    return {var: expr(d) for var, expr in act_fun.items()}
def flag_warning(voltage, check):
    """Return True when the voltage is strictly within the +/-299.99 band
    and ``check`` is truthy; False otherwise.

    Args:
        voltage (float): float value to be checked
        check (bool): gate flag; when falsy the result is always False

    Returns:
        bool: bool of threshold check
    """
    within_threshold = -299.99 < voltage < 299.99
    return bool(within_threshold and check)
def ubtou(str_in):
    """ Shorthand for converting unicode bytes to UTF-8 (non-bytes pass through) """
    return str_in.decode('utf-8') if isinstance(str_in, bytes) else str_in
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.