content stringlengths 42 6.51k |
|---|
def cast_to_number_or_bool(inputstr):
    """Convert a string to bool, int, or float, in that order of preference.

    'true'/'false' (case-insensitive, surrounding whitespace ignored) map to
    booleans; scientific notation parses as float.  The original string is
    returned unchanged when no conversion applies.
    """
    normalized = inputstr.strip().lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    for caster in (int, float):
        try:
            return caster(inputstr)
        except ValueError:
            pass
    return inputstr
def str_evra(instance):
    """
    Convert evra (epoch, version, release, arch) dict entries to a string.

    Missing fields default to '*'.  The "epoch:" prefix is omitted when the
    epoch is the wildcard '*' or None.
    """
    epoch = instance.get('epoch', '*')
    evra = '%s-%s.%s' % (instance.get('version', '*'),
                         instance.get('release', '*'),
                         instance.get('arch', '*'))
    # `== None` replaced with the idiomatic identity check `is None`.
    if epoch == '*' or epoch is None:
        return evra
    return '%s:%s' % (epoch, evra)
def sillyText(word, num):
    """
    Purpose: To duplicate each character in a string
    Parameters: word - the string in question, num - the number of duplications
    Return: word - the word with every character repeated num times
    """
    if len(word) == 0:
        return ""
    # Bug fix: the repetition count was hard-coded to 2 and `num` was
    # ignored, contradicting the documented contract.
    return word[0] * num + sillyText(word[1:], num)
def concat_sequence(sequences):
    """Concatenate sequences of tokens into a single flattened list of tokens.

    Parameters
    ----------
    sequences : list of list of object
        Sequences of tokens, each of which is an iterable of tokens.

    Returns
    -------
    Flattened list of tokens; falsy tokens (e.g. '' or None) are skipped.
    """
    flattened = []
    for seq in sequences:
        flattened.extend(token for token in seq if token)
    return flattened
def get_insert_many_query(table_name: str) -> str:
    """Build a SQL query to insert several RDF triples into a MVCC-PostgreSQL table.

    Argument: Name of the SQL table in which the triples will be inserted.
    Returns: A prepared SQL query that can be executed with a list of tuples
    (subject, predicate, object).

    NOTE(review): table_name is interpolated directly into the SQL text, so it
    must come from trusted code, never from user input.
    """
    columns = "(subject, predicate, object, insert_t, delete_t)"
    return f"INSERT INTO {table_name} {columns} VALUES %s ON CONFLICT DO NOTHING"
def dict_to_string(some_dict):
    """
    Serialize a dict to a key/value concatenation with keys sorted, e.g.
    {"A": some_value, "B": some_other_value} becomes
    "Asome_valueBsome_other_value".
    This can then be used as a key for a CPT table.
    """
    return "".join(str(key) + str(some_dict[key]) for key in sorted(some_dict))
def quant_to_qual_lst(lst):
    """Quantitative List to Qualitative List.

    Purpose: convert each count to "YES" when it is non-zero, "NO" otherwise.
    """
    return ["NO" if count == 0 else "YES" for count in lst]
def determine_flexibility(histogram):
    """
    Simple estimation of torsion flexibility: twice the number of non-zero
    (positive) bins in the torsional histogram.

    :type histogram: list(int)
    """
    return 2 * sum(1 for count in histogram if count > 0)
def poly_old(*args):
    """
    Evaluate f(x) = a * x + b * x**2.

    *args = (x, a, b); any extra positional arguments are ignored.
    """
    # Leftover debug `print(args)` removed.
    x, a, b = args[0], args[1], args[2]
    return a * x + b * (x ** 2)
def isint(integer):
    """Checks if something can be converted with int().

    Returns True for ints, numeric strings and floats; False otherwise.
    """
    try:
        int(integer)
        return True
    except (ValueError, TypeError):
        # TypeError added so non-numeric objects (e.g. None, lists)
        # return False instead of raising.
        return False
def format_codepoint(codepoint):
    """Format a codepoint (integer) to a USV (at least 4 hex digits).

    Returns '' when codepoint is None.  Codepoint 0 is a valid scalar
    value and formats as '0000' (the previous truthiness check silently
    dropped it).
    """
    if codepoint is None:
        return ''
    return f'{codepoint:04X}'
def _reencode_string(string):
"""Ensure that the string is encodable into JSON."""
if isinstance(string, bytes):
return string.decode("utf-8", errors="__sqreen_ascii_to_hex")
return string |
def _collate_bytes(msb, lsb):
"""
Helper function for our helper functions.
Collates msb and lsb into one 16-bit value.
:type msb: str
:param msb: Single byte (most significant).
:type lsb: str
:param lsb: Single byte (least significant).
:return: msb and lsb all together in one 16 bit value.
"""
return (ord(msb) << 8) + ord(lsb) |
def _get_horizontalalignment(angle, location, side, is_vertical, is_flipped_x,
                             is_flipped_y):
    """Return horizontal alignment of a text.

    Chooses 'left' or 'right' from the text rotation, its location on the
    axis, which side it is on, the axis orientation, and whether either
    axis direction is flipped.

    Parameters
    ----------
    angle : {0, 90, -90}
        Text rotation in degrees.
    location : {'first', 'last', 'inner', 'outer'}
    side : {'first', 'last'}
    is_vertical : bool
    is_flipped_x : bool
    is_flipped_y : bool

    Returns
    -------
    {'left', 'right'}
    """
    # Vertical axis: alignment at angle 0 depends on the x-flip and on the
    # location/side combination; at +/-90 degrees only the y-flip matters.
    if is_vertical:
        if angle == 0:
            if is_flipped_x:
                if (location == "first"
                        or (side == "first" and location == "outer")
                        or (side == "last" and location == "inner")):
                    return "left"
                else:
                    return "right"
            else:
                # Mirror image of the flipped case: the qualifying
                # location/side combinations are swapped.
                if (location == "last"
                        or (side == "first" and location == "inner")
                        or (side == "last" and location == "outer")):
                    return "left"
                else:
                    return "right"
        elif angle == 90:
            if is_flipped_y:
                return "right"
            else:
                return "left"
        elif angle == -90:
            # Opposite of the +90 case.
            if is_flipped_y:
                return "left"
            else:
                return "right"
    # Horizontal axis: the roles are reversed -- angle 0 depends only on the
    # x-flip, while +/-90 degrees depend on the y-flip and location/side.
    else:
        if angle == 0:
            if is_flipped_x:
                return "right"
            else:
                return "left"
        elif angle == 90:
            if is_flipped_y:
                if (location == "first"
                        or (side == "first" and location == "outer")
                        or (side == "last" and location == "inner")):
                    return "left"
                else:
                    return "right"
            else:
                if (location == "last"
                        or (side == "first" and location == "inner")
                        or (side == "last" and location == "outer")):
                    return "left"
                else:
                    return "right"
        elif angle == -90:
            # Same tables as +90 but with the y-flip branches exchanged.
            if is_flipped_y:
                if (location == "last"
                        or (side == "first" and location == "inner")
                        or (side == "last" and location == "outer")):
                    return "left"
                else:
                    return "right"
            else:
                if (location == "first"
                        or (side == "first" and location == "outer")
                        or (side == "last" and location == "inner")):
                    return "left"
                else:
                    return "right"
def sanitize_eta(eta, tol=1.e-10, exception='error'):
    """
    If 'eta' is slightly outside the physically allowed range for
    symmetric mass ratio, push it back in. If 'eta' is further
    outside the physically allowed range, throw an error
    or return a special value.

    Explicitly:
    - If 'eta' is in [tol, 0.25], return eta.
    - If 'eta' is in [0, tol], return tol.
    - If 'eta' is in (0.25, 0.25+tol], return 0.25.
    - If 'eta' < 0 OR eta > 0.25+tol:
      - if exception=='error' raise a ValueError
      - if exception is anything else, return exception
    """
    MIN = 0.
    MAX = 0.25
    if eta < MIN or eta > MAX + tol:
        if exception == 'error':
            # Typo fix in message: "physicaly" -> "physically".
            raise ValueError("Value of eta outside the physically-allowed range of symmetric mass ratio.")
        else:
            return exception
    elif eta < tol:
        return tol
    elif eta > MAX:
        return MAX
    else:
        return eta
def dazzler_get_nblocks(db_stream):
    """Return #blocks in dazzler-db.

    Scans the stream for a "blocks = N" line and returns N; defaults to 1
    when no such line is found.
    """
    for line in db_stream:
        fields = line.strip().split()
        # Length guard so blank or short lines cannot raise IndexError.
        if len(fields) >= 3 and fields[0] == "blocks" and fields[1] == "=":
            return int(fields[2])
    return 1
def format_auto_pi_set(pi_set):
    """
    Convert a pi set from the automatic Buckingham DataTable format
    ('|'-separated entries) to the Simple Buckingham format
    (one stripped entry per line, with ' = ' around equals signs).
    """
    lines = []
    for entry in pi_set.split('|'):
        lines.append(entry.strip().replace('=', ' = ') + "\n")
    return "".join(lines)
def serialize_yesno(value):
    """Serialize a boolean (yes or no) value as '1' (truthy) or '0' (falsy)."""
    if value:
        return '1'
    return '0'
def largest_rectangle_area(heights):
    """Return the area of the largest rectangle in a histogram.

    Classic monotonic-stack algorithm: the stack holds indices of bars
    with increasing heights; -1 is the sentinel left boundary.
    """
    best = 0
    stack = [-1]
    for right, height in enumerate(heights):
        # Pop every bar at least as tall as the current one; each popped
        # bar's rectangle is bounded by the new stack top and `right`.
        while stack[-1] != -1 and heights[stack[-1]] >= height:
            top = stack.pop()
            best = max(best, heights[top] * (right - stack[-1] - 1))
        stack.append(right)
    # Flush bars still on the stack; their right boundary is the array end.
    right = len(heights)
    while stack[-1] != -1:
        top = stack.pop()
        best = max(best, heights[top] * (right - stack[-1] - 1))
    return best
def is_sorted(t: list) -> bool:
    """Takes a list and returns True if the list is sorted (non-decreasing),
    False otherwise.

    Runs in O(n) and short-circuits on the first out-of-order pair,
    replacing the previous O(n log n) ``t == sorted(t)`` comparison.
    """
    return all(t[i - 1] <= t[i] for i in range(1, len(t)))
def _get_high_res_img_url(img_url):
""" Returns a modified url pointing to the high resolution version of
the image
>>> print(_get_high_res_img_url("https://images-na.ssl-images-amazon.com/\
images/I/513gErH1dML._AC_SX236_SY340_FMwebp_QL65_.jpg"))
https://images-na.ssl-images-amazon.com/\
images/I/513gErH1dML.jpg
>>> print(_get_high_res_img_url("https://images-na.ssl-images-amazon.com/\
images/I/51F48HFHq6L._AC_SX118_SY170_QL70_.jpg"))
https://images-na.ssl-images-amazon.com/\
images/I/51F48HFHq6L.jpg
"""
high_res_url = img_url.split("._")[0] + ".jpg"
return high_res_url |
def calculateGRC(LRC):
    """
    Calculate GRC (Global Reaching Centrality):
    GRC = sum(max(LRC) - lrc) / (N - 1), where N = len(LRC).
    """
    nodes = len(LRC)
    peak = max(LRC)
    total = sum(peak - lrc for lrc in LRC)
    return total / (nodes - 1)
def _ensure_echarts_is_in_the_front(dependencies):
"""
make sure echarts is the item in the list
require(['echarts'....], function(ec) {..}) need it to
be first but dependencies is a set so has no sequence
"""
if len(dependencies) > 1:
dependencies.remove("echarts")
dependencies = ["echarts"] + list(dependencies)
elif len(dependencies) == 1:
# make a new list
dependencies = list(dependencies)
else:
raise Exception("No js library found. Nothing works!")
return dependencies |
def chunks(l, n):
    """Return a list of successive n-sized chunks from l."""
    return [l[start:start + n] for start in range(0, len(l), n)]
def hashpjw(s):
    """A simple and reasonable string hash function due to Peter Weinberger.

    NOTE: unlike the classic C version, Python ints are unbounded, so the
    value is not truncated to 32 bits between iterations.
    """
    val = 0
    for ch in s:
        val = (val << 4) + ord(ch)
        top = val & 0xF0000000
        if top:
            # Fold the high nibble back into the low bits and clear it.
            val ^= (top >> 24) ^ top
    return val
def calculate_overlap(word_token1: list, word_token2: list) -> float:
    """Get similarity percentage from usage of similar words in two strings.

    Returns the percentage (rounded to 3 decimals) of tokens in
    word_token1 that also appear in word_token2; 0.0 when word_token1 is
    empty (previously a ZeroDivisionError).
    """
    if not word_token1:
        return 0.0
    shared = sum(1 for word in word_token1 if word in word_token2)
    return round(shared / len(word_token1) * 100, 3)
def regularize_filename(f):
    """
    Regularize filename so that it's valid on Windows: each reserved
    character (< > : " / \\ | ? *) is replaced with '_'.
    """
    table = str.maketrans({ch: "_" for ch in '<>:"/\\|?*'})
    return str(f).translate(table)
def get_default_lines():
    """Default Lines for testing"""
    return [
        "This is a test line",
        "This is another test line",
        "Look here, another test line",
        "Probably should have another test line that ends differently",
        "You want it to be one way",
        "But it's the other way",
        "I need more test lines",
        "Need to increase randomness",
        "Who knows how to do that",
        "Maybe some way to only chooses 'starter' words",
        "Like the first word in a line",
        "I could potentially get into a circular line with this implementation",
    ]
def decode_inpt(inpt_str):
    """
    Converts given chess piece string from form "a1" to integers row, col.

    :param inpt_str: string of length 2
    :return: integers row, col
    """
    letter, num = inpt_str  # unpack requires exactly two characters
    return int(num) - 1, ord(letter) - ord("a")
def is_camel_case_with_acronyms(s: str):
    """
    Checks if the string passed is Camel Case (with capitalised acronyms allowed).

    :param s: string to check
    :return: true if the name looks cool as Class name.
    """
    if s == s.lower() or s == s.upper():
        # All-lower or all-upper (includes the empty string) is not CamelCase.
        return False
    if "_" in s:
        return False
    return s[0] == s[0].upper()
def get_distances(sequence):
    """
    Get distance between the last two occurrences of each unique element
    in sequence.

    Returns 0 for an element that occurs only once; otherwise the gap
    between its final two occurrences.

    :param sequence: list
    :return: dictionary mapping element -> distance
    """
    last_index = {}
    distance = {}
    for position, element in enumerate(sequence):
        distance[element] = position - last_index.get(element, position)
        last_index[element] = position
    return distance
def string_handler(item):
    """
    Create a string out of an item if it isn't one already.

    Parameters:
        - item: The variable to make sure it is a string.
    Returns:
        The input as a string.
    """
    if isinstance(item, str):
        return item
    return str(item)
def permutation_sign(p):
    """Determines the sign of a permutation, given as a sequence of integers.

    Repeatedly swaps each misplaced element into its home position; every
    swap flips the sign, so the result is the permutation's parity.
    """
    work = list(p)  # copy so the input is not mutated
    sign = 1
    for idx in range(len(work)):
        while work[idx] != idx:
            target = work[idx]
            work[idx], work[target] = work[target], work[idx]
            sign = -sign
    return sign
def get_darwin_os_drive(lr_drive):
    """
    Function: Gets the mount point for volume Label LR
    :param lr_drive:
    :return mount_point:

    NOTE(review): the returned path uses backslashes, which is unusual for
    Darwin/OSX -- confirm whether "/mnt/lr" was intended.
    """
    # Raw string avoids the invalid "\m" / "\l" escape-sequence warnings
    # while keeping the returned value byte-identical.
    mount_point = r"\mnt\lr"
    print("DEBUG: Getting OSX LR Drive", lr_drive)
    return mount_point
def bind_twos_complement(val):
    """
    Handle over/underflows for 32-bit two's complement numbers by wrapping
    val by one modulus of 2**32 when it leaves [-2**31, 2**31 - 1].

    :param val: integer, possibly just outside the 32-bit signed range
    :return: val wrapped back into the 32-bit signed range
    """
    lower, upper, modulus = -2 ** 31, 2 ** 31 - 1, 2 ** 32
    if val < lower:
        return val + modulus
    if val > upper:
        return val - modulus
    return val
def translate_values(iterable, key, func):
    """Return copies of the dicts in *iterable*, with *func* applied to the
    value under *key* wherever that key is present.  The input dicts are
    not mutated.
    """
    translated = []
    for entry in iterable:
        copy = dict(entry)
        if key in copy:
            copy[key] = func(copy[key])
        translated.append(copy)
    return translated
def option_to_clblas(x):
    """As above, but for clBLAS data-types"""
    mapping = {
        'layout': "clblasOrder",
        'a_transpose': "clblasTranspose",
        'b_transpose': "clblasTranspose",
        'ab_transpose': "clblasTranspose",
        'side': "clblasSide",
        'triangle': "clblasUplo",
        'diagonal': "clblasDiag",
    }
    # Unknown options raise KeyError, matching the original behavior.
    return mapping[x]
def coord_to_name(coord):
    """
    Takes a 2d-array coordinate and converts it to a chess board square name,
    e.g. (0,0) to 'a8' and (4,7) to 'h4'.

    coord: (row, col), both values between 0 and 7
    returns string name: the name of a square, e.g. 'a1' or 'e4'
    """
    row, col = coord
    files = 'abcdefgh'
    return files[col] + str(8 - row)
def WalkGyp(functor, gypdata):
    """Walk |gypdata| recursively, calling |functor| on every dict key/value
    pair and concatenating the lists the functor returns."""
    def _walk(node):
        collected = []
        if isinstance(node, dict):
            for key, value in node.items():
                collected += functor(key, value)
                collected += _walk(value)
        elif isinstance(node, (tuple, list)):
            for value in node:
                collected += _walk(value)
        return collected
    return _walk(gypdata)
def get_constant_attrs(obj: object) -> tuple:
    """Get the constant attributes of an object (all-uppercase attr names)."""
    constants = []
    for attr in dir(obj):
        if attr.isupper():
            constants.append(attr)
    return tuple(constants)
def zip_longest_middle(list1, list2, fillvalue=None):
    """Zip longest but spread in the middle.

    Like zip_longest, but instead of padding the shorter list at the end,
    the fill values are inserted in its middle so both halves stay aligned.
    Returns a list of 2-tuples whose length is max(len(list1), len(list2)).
    """
    len1 = len(list1)
    len2 = len(list2)
    if len1 == len2:
        out1 = zip(list1, list2)
    elif len2 > len1:
        # Pad list1 in its middle with enough fill values to match list2.
        tmp = [fillvalue] * (len2 - len1)
        # Split point: first half keeps ceil(len1 / 2) elements.
        _ = (len1 + 1) // 2
        out1 = list1[: (len1 + 1) // 2] + tmp + list1[_:]
        out1 = zip(out1, list2)
    else:
        # Symmetric case: list2 is shorter, so pad it in the middle instead.
        tmp = [fillvalue] * (len1 - len2)
        _ = (len2 + 1) // 2
        out1 = list2[: (len2 + 1) // 2] + tmp + list2[_:]
        out1 = zip(list1, out1)
    out = []
    for elm in out1:
        # out += list(elm)
        # out += elm  # list of numbers
        out.append(elm)  # list of tuples
    return out
def twocs_to_int(val, bits):
    """Compute the signed int value of a two's complement number.

    ``val`` must fit in ``bits`` bits (asserted).
    """
    assert len(bin(val)) - 2 <= bits, (val, bits) # check if it fits
    sign_bit = 1 << (bits - 1)
    if val & sign_bit:
        # Sign bit set: subtract the full range to get the negative value.
        return val - (1 << bits)
    return val
def isfloat(s):
    """
    Checks whether the string ``s`` represents a float.

    :param s: the candidate string to test
    :type s: ``str``
    :return: True if s is the string representation of a number
    :rtype: ``bool``
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; TypeError covers non-string input.
        return False
def ext_checker(fname, ext):
    """Replaces the extension of fname with ext, or adds ext to fname
    if fname doesn't already have an extension."""
    dot = fname.rfind(".")
    base = fname if dot == -1 else fname[:dot]
    return base + "." + ext
def get_greatest_common_arch(archs):
    """Get the RISC-V ISA string which contains as many extensions as are supported
    by all harts in the design.

    NOTE(review): arch strings are sliced as arch[:4] / arch[4:], which
    assumes a fixed 4-character base prefix (e.g. "rv32"/"rv64") followed
    by single-letter extensions -- confirm against the callers.
    """
    if len(archs) == 1:
        return archs[0]
    # Get all ISA extensions implemented by any hart
    extensions = ''.join(set(''.join([arch[4:] for arch in archs])))
    # Get a list of any extensions which aren't supported by all harts
    disallowed_extensions = ""
    for extension in extensions:
        if not all([extension in arch[4:] for arch in archs]):
            disallowed_extensions += extension
    # Get the longest arch from the list
    arch = max(archs, key=len)
    # Filter out any disallowed extensions
    for extension in disallowed_extensions:
        base = arch[:4]
        extensions = arch[4:].replace(extension, "")
        arch = base + extensions
    return arch
def format_role_order(roles):
    """Given roles, returns them in the format: role_arn,principal_arn.

    The format of the attribute value should be role_arn,principal_arn
    but lots of blogs list it as principal_arn,role_arn so let's reverse
    them if needed.

    Args:
        roles: List of roles.
    Returns:
        List of roles in the format: role_arn,principal_arn
    """
    for index, role in enumerate(roles):
        chunks = role.split(',')
        if 'saml-provider' in chunks[0]:
            # Rewrite in place by index: the previous insert/remove pair
            # mutated the list while iterating and used roles.index(),
            # which picks the wrong position when duplicates exist.
            roles[index] = chunks[1] + ',' + chunks[0]
    return roles
def ignore_exception(exc):
    """Used with @retrying.retry to ignore exceptions in a retry loop.

    ex. @retrying.retry(retry_on_exception=ignore_exception)
    It does verify that the object passed is an exception.
    """
    is_exception = isinstance(exc, Exception)
    return is_exception
def get_trajectory(fnc, x0, steps):
    """
    Calculate a trajectory by iterating *fnc* from the starting value *x0*
    for *steps* steps.  Returns a pair of lists: (time, trajectory), where
    time is [0..steps] and trajectory[i] is fnc applied i times to x0.
    """
    trajectory = [x0]
    current = x0
    for _ in range(steps):
        current = fnc(current)
        trajectory.append(current)
    return (list(range(steps + 1)), trajectory)
def make_integer_cut(resultDict, outputfname):
    """Generate a GAMS-readable integer-cut file from the resultDict data.

    Writes one "'<index>'.'<reaction_id>' 1" line per reaction of every
    complete pathway (entries lacking "num_reaction" are incomplete and
    skipped), followed by a blank line after each pathway.

    Returns 1 on completion.
    """
    # Context manager guarantees the file is closed even if a write fails.
    with open(outputfname, 'w+') as fid:
        for ind, res in sorted(resultDict.items()):
            if "pathway" in res:
                if "num_reaction" not in res:
                    # pathway is incomplete
                    continue
                for rid in sorted(res['pathway'].keys()):
                    fid.write("'%d'.'%s' 1\n" % (ind, rid))
                fid.write("\n")
    return 1
def midi_note_to_frequency(note):
    """Maps a MIDI note index to a frequency in Hz (A4 = note 69 = 440 Hz);
    returns None for a None note."""
    if note is None:
        return None
    semitones_from_a4 = (note - 69.0) / 12
    return 440.0 * 2 ** semitones_from_a4
def error404(e):
    """
    404 error handler: returns an (html_body, 404) pair.
    """
    body = (
        '<html><body>'
        '<h1>D20 - Page Not Found</h1>'
        '<p>The only endpoint available on this entropy micro-service is <a href="/api/entropy">/api/entropy</a>.</p>'
        '<p>For more information including the complete source code, visit <a href="https://github.com/AgalmicVentures/D20">the D20 repository</a>.</p>'
        '</body></html>'
    )
    return body, 404
def sort_arcs(arcs):
    """
    Sort arcs by their second element.

    Parameters
    ----------
    arcs: list[(int, int)] or list[(int, int, str)]

    Returns
    -------
    list[(int, int)] or list[(int, int, str)]
    """
    def second(arc):
        return arc[1]
    return sorted(arcs, key=second)
def decode(state):
    """
    Decodes the taxi env state (int in [0,500)) to the tuple
    (taxirow, taxicol, passloc, destidx).

    :param state: int in [0,500)
    :return: (taxirow, taxicol, passloc, destidx) tuple
    """
    state, destidx = divmod(state, 4)
    state, passloc = divmod(state, 5)
    taxirow, taxicol = divmod(state, 5)
    assert 0 <= taxirow < 5
    return taxirow, taxicol, passloc, destidx
def fatorial(num=0, show=False):
    """
    -> Compute the factorial of a number.

    :num: number whose factorial is computed
    :show: if False, do not print the calculation; if True, print it
    :return: the computed factorial
    """
    result = 1
    for term in range(num, 0, -1):
        if show:
            print(term, end='')
            # " X " between factors, " = " after the final one.
            if term > 1:
                print(" X ", end='',)
            else:
                print(f' = ', end='')
        result *= term
    return result
def flatten(tuple_entry):
    """
    Given a tuple of tuples and objects, flatten into one tuple of objects.

    @param {Tuple} tuple_entry Tuple containing possibly nested tuples
    @return {Tuple} One dimensional tuple of objects
    """
    # Iterative depth-first walk instead of recursion.
    flat = []
    pending = list(tuple_entry)
    while pending:
        item = pending.pop(0)
        if isinstance(item, tuple):
            pending = list(item) + pending
        else:
            flat.append(item)
    return tuple(flat)
def calc_angle_between_two_locs(lon1_deg, lat1_deg, lon2_deg, lat2_deg):
    """
    Read two coordinates (in degrees) on the surface of a sphere and
    return the angle (in degrees) between them, via the haversine formula.
    """
    # Import modules ...
    import math

    # Work in radians throughout ...
    lon1, lat1, lon2, lat2 = map(
        math.radians, (lon1_deg, lat1_deg, lon2_deg, lat2_deg)
    )
    # Half-chord length between the two points ...
    half_chord = math.hypot(
        math.sin((lat1 - lat2) / 2.0),
        math.cos(lat1) * math.cos(lat2) * math.sin((lon1 - lon2) / 2.0),
    )
    # Convert the central angle back to degrees ...
    return math.degrees(2.0 * math.asin(half_chord))
def get_module_name(name):
    """Get the top-level module name (the text before the first '.')."""
    return name.partition('.')[0]
def ensure_bytes(value):
    """Converts value to bytes.

    bytearray values are wrapped as bytes and str values are UTF-8
    encoded; anything else is returned unchanged.  Scanners may create
    child files of these types, so this is applied to every file object
    to ensure the file data is always bytes.

    Args:
        value: Value that needs conversion to bytes.
    Returns:
        A byte representation of value.
    """
    if isinstance(value, str):
        return value.encode('utf-8')
    if isinstance(value, bytearray):
        return bytes(value)
    return value
def array_dim(arr):
    """Return the size of a multidimensional array as a list of lengths,
    descending through arr[0] until an element without len()/indexing
    is reached.
    """
    dims = []
    while True:
        try:
            dims.append(len(arr))
            arr = arr[0]
        except TypeError:
            # Reached a scalar (or an unindexable container): stop here.
            return dims
def url_add_api_key(url_dict: dict, api_key: str) -> str:
    """Attaches the api key to a given url.

    Args:
        url_dict: Dict with the request url and it's relevant metadata.
        api_key: User's API key provided by US Census.
    Returns:
        URL with attached API key information.
    """
    base_url = url_dict['url']
    return f"{base_url}&key={api_key}"
def int_to_bitstring(n: int, desired_len=None) -> str:
    """Converts an integer into a bitstring (unicode representation) of
    desired length.  MSB leftmost, LSB rightmost; asserts if the value
    does not fit in desired_len bits.
    """
    bits = format(n, 'b')
    if desired_len:
        assert len(bits) <= desired_len, 'Desired length too short and would result in cut-off'
        return bits.zfill(desired_len)
    return bits
def encode_url(url):
    """Percent-encode characters (space, &, {, }) so a shell can open the url.

    :rtype: str
    """
    replacements = (
        (" ", "%20"),
        ("&", "%26"),
        ("{", "%7B"),
        ("}", "%7D"),
    )
    for raw, encoded in replacements:
        url = url.replace(raw, encoded)
    return url
def isBefore(dateA, dateB):
    """Returns true if date a is before date b or if a and b are the same day.

    Dates are (year, month, day) sequences; tuple comparison gives the
    same lexicographic year/month/day ordering as the original chain.
    """
    return tuple(dateA[:3]) <= tuple(dateB[:3])
def reducer(acc, elem):
    """Sum of the same elements: add int(elem[0]) to acc when elem's first
    two items are equal, otherwise return acc unchanged."""
    if elem[0] == elem[1]:
        return acc + int(elem[0])
    return acc
def print_board_number(board_number: tuple) -> str:
    """Formats the number (width 2), with a trailing '*' if that number has
    been found and a trailing space otherwise."""
    marker = "*" if board_number[1] else " "
    return f"{board_number[0]:2}{marker}"
def custom_project_directory(answer):
    """Setup custom project directory - enables prompt for path definition."""
    use_default_root = answer['rootdir']
    return not use_default_root
def extract_event(event):
    """Extract path, HTTP method and item_id from an event shaped like:

    'path': '/inventory',
    'httpMethod': 'GET',
    'queryStringParameters': {'item_id': '00000001'},
    """
    return {
        'path': event['path'],
        'http_method': event['httpMethod'],
        'item_id': event['queryStringParameters']['item_id'],
    }
def ang_mesh(frac, fineness, alevs=None):
    """\
    Determine the number of points in the angular mesh based on the
    fraction of the total radial grid index frac in (0,1).

    You can optionally pass in the number of points for the 5 different
    regions via `alevs`; otherwise a preset table indexed by `fineness`
    (0=coarse, 1=medium, 2=fine, 3=ultrafine) is used.
    """
    if not alevs:
        presets = [
            [  6,  14,  26,  26,  14],  # Coarse
            [ 50,  50, 110,  50,  26],  # Medium
            [ 50, 110, 194, 110,  50],  # Fine
            [194, 194, 194, 194, 194],  # ultrafine
        ]
        alevs = presets[fineness]
    # Pick the level for the largest threshold that frac exceeds.
    for cutoff, level in ((0.8, 4), (0.7, 3), (0.5, 2), (0.4, 1)):
        if frac > cutoff:
            return alevs[level]
    return alevs[0]
def make_cached_ssd_store_options(
    cache_budget_mb,
    persistent_path,
    capacity=None,
    size_factor=1,
    physical_block_size=512,
    host_cache_budget_mb=0,
):
    """Make SSD store_options (using GPU and host memory as cache) for
    MultiTableEmbedding.  If both cache_budget_mb > 0 and
    host_cache_budget_mb > 0, GPU and host memory form a multi-level cache.

    Args:
        cache_budget_mb (int): MB budget of per-GPU cache.
        persistent_path (str, list): persistent storage path of Embedding; must be a
            fast SSD because of frequent random disk access during training.  A str
            means the current rank's Embedding is saved under path/rank_id-num_ranks;
            a list must have one entry per rank.
        capacity (int): total capacity of Embedding.
        size_factor (int, optional): store size factor of embedding_dim
            (SGD momentum=0: 1, momentum>0: 2, Adam: 3).  Defaults to 1.
        physical_block_size (int, optional): should be the sector size.  Defaults to 512.
        host_cache_budget_mb (int): MB budget of host-memory cache per rank.  Defaults to 0.

    Returns:
        dict: store_options param for flow.one_embedding.MultiTableEmbedding.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> store_options = flow.one_embedding.make_cached_ssd_store_options(
        >>>     cache_budget_mb=8192, persistent_path="/your_path_to_ssd", capacity=vocab_size,
        >>> )
        >>> # pass store_options to the "store_options" param of flow.one_embedding.MultiTableEmbedding
    """
    assert isinstance(persistent_path, (str, list, tuple))
    assert cache_budget_mb > 0 or host_cache_budget_mb > 0
    if capacity is not None:
        assert capacity > 0
    capacity_hint = int(capacity) if capacity is not None else 0

    caches = []
    # GPU-memory cache level, present only when a device budget is given.
    if cache_budget_mb > 0:
        caches.append(
            {
                "policy": "lru",
                "cache_memory_budget_mb": cache_budget_mb,
                "value_memory_kind": "device",
            }
        )
    # Host-memory cache level, present only when a host budget is given.
    if host_cache_budget_mb > 0:
        caches.append(
            {
                "policy": "lru",
                "cache_memory_budget_mb": host_cache_budget_mb,
                "value_memory_kind": "host",
            }
        )

    return {
        "kv_store": {
            "caches": caches,
            "persistent_table": {
                "path": persistent_path,
                "physical_block_size": physical_block_size,
                "capacity_hint": capacity_hint,
            },
        },
        "size_factor": size_factor,
    }
def show_bits(string: int, nbits: int = 16) -> str:
    """Return a string showing the occupations of the bitstring.

    Args:
        string (int) - bit string
        nbits (int) - the number of bits to show (zero-padded on the left)
    """
    bits = bin(string)[2:]
    return bits.zfill(nbits)
def get_label_for_integer(integer):
    """
    :param integer: int that represents a label index in the dataset
    :return: string label (digit 0-9 or an operator/paren symbol)
    """
    label_table = list('0123456789') + ['+', '-', '/', '*', '(', ')']
    return label_table[integer]
def validate_string(data):
    """
    Method to validate data of type string.

    :params: user input
    :response: True, False
    """
    return isinstance(data, str)
def validate_int_arg(args, name, low=None, high=None):
    """Validate an integer command-line argument.

    `args` may be a dict (the value is looked up under `name`) or the
    value itself.  A missing/falsy value yields 0; unparsable or
    out-of-range values raise ValueError.
    """
    value = args.get(name) if isinstance(args, dict) else args
    if not value:
        return 0
    try:
        parsed = int(value)
    except ValueError:
        raise ValueError('invalid integer for {0}: {1}'.format(name, value))
    if low is not None and parsed < low:
        raise ValueError('invalid value for {0}: minimum is {1}, got {2}'
                         .format(name, low, value))
    if high is not None and parsed > high:
        raise ValueError('invalid value for {0}: maximum is {1}, got {2}'
                         .format(name, high, value))
    return parsed
def get_name(pbs_file):
    """
    Extract the CMIP5 short variable name from a benchmark data filename.

    Parameters
    ----------
    pbs_file : str
        A filename that looks like other ILAMB data filenames.

    Returns
    -------
    str
        The name of the variable: the part before the first '.' and
        before any '_'.

    Notes
    -----
    File names are expected in the form:

    .. code-block:: bash

        nep.nc.MDP
        gpp_0.5x0.5.nc.CSDMS
    """
    stem = pbs_file.split('.', 1)[0]
    return stem.split('_', 1)[0]
def lerp(pos_x, x0, x1, fx0, fx1):
    """Integer linear interpolation of f at pos_x between (x0, fx0) and
    (x1, fx1), using floor division."""
    rise_times_run = (fx1 - fx0) * (pos_x - x0)
    return fx0 + rise_times_run // (x1 - x0)
def is_int(s):
    """Is this string an int?  Use for detecting array indexes in paths."""
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        # TypeError added for consistency with isint(): non-string,
        # non-numeric objects (e.g. None) now return False instead of raising.
        return False
def get_next_free_ind(d: dict):
    """Given a dictionary of indices, get the next free (max + 1) index."""
    highest = max(d.values())
    return highest + 1
def clenshaw_curtis_rule_growth(level):
    """
    The number of samples in the 1D Clenshaw-Curtis quadrature rule of a given
    level: 1 at level 0, then 2**level + 1.

    Parameters
    ----------
    level : integer
        The level of the quadrature rule

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    return 1 if level == 0 else 2 ** level + 1
def validate_replicate_to(replicate_to):
    """
    Property: DeploymentStrategy.ReplicateTo
    """
    valid_destinations = ("NONE", "SSM_DOCUMENT")
    if replicate_to in valid_destinations:
        return replicate_to
    raise ValueError(
        "DeploymentStrategy ReplicateTo must be one of: %s"
        % ", ".join(valid_destinations)
    )
def _interval(from_, to, interval, value, tolerance=1e-9):
"""clamp value to an interval between from_ and to range"""
if interval > (to - from_):
raise ValueError("Invalid increment")
if value < from_ or value > to:
raise ValueError("Invalid value")
if abs(value - from_) < tolerance or abs(value - to) < tolerance:
return value
quotient, remainder = divmod(value, interval)
if remainder < tolerance:
return quotient * interval
half_increment = interval / 2
if remainder > half_increment:
return interval * (quotient + 1)
else:
return interval * quotient |
def _get_chunk_bounds(arr_sizes, chunk_size):
    """Get regular chunks from multiple concatenated NumPy-like arrays.

    Returns the sorted list of boundary offsets into the concatenation:
    within each array the offsets advance by `chunk_size`, and every
    array's own end offset is always included (so chunks never straddle
    two arrays).
    """
    assert chunk_size > 0
    b = []
    n = 0
    for arr_size in arr_sizes:
        # Candidate boundaries for this array, stepping by chunk_size.
        ch = list(range(n, n + arr_size + 1, chunk_size))
        # Drop a boundary that duplicates the previous array's end.
        if b and ch and ch[0] == b[-1]:
            ch = ch[1:]
        b.extend(ch)
        # Force the array's end to be a boundary even when arr_size is
        # not a multiple of chunk_size.
        if b[-1] != n + arr_size:
            b.append(n + arr_size)
        n += arr_size
    return b
def recall(ground, found):
    """Fraction of ground-truth community members found."""
    truth = set(ground)
    hits = truth.intersection(found)
    return len(hits) / float(len(truth))
def cleanfieldlower(value):
    """
    Remove surrounding spaces and convert to lower case (coercing to str
    first); falsy input yields None so errors can be flagged.
    """
    if not value:
        return None
    return str(value).strip().lower()
def get_blend_param(season_frac):
    """
    When <season_frac> of the season is completed, this returns the fraction
    of the prediction to take from preseason.

    Derived empirically by maximizing performance at a variety of points in
    the season and fitting a power law (exponent 2.6).
    """
    remaining = max(0, 1 - season_frac)
    return remaining ** 2.6
def get_fun_name(fun):
"""
It's useless
"""
return str(fun).split()[1] |
def wildcard_target_ports(jobs):
    """Fetch list of wildcard target ports from a job list.

    Args:
        jobs: list of jobs to search for wildcard target ports.
    Returns:
        possibly empty list of ports taken from targets starting with "*".
    """
    return [
        target.split(":")[-1]
        for job in jobs
        for static_config in job["static_configs"]
        for target in static_config["targets"]
        if target.startswith("*")
    ]
def collapse(values):
    """Collapse multiple values to a semicolon-separated list of values.

    Strings pass through, None becomes 'all', lists are collapsed
    recursively and joined with ';', and anything else is str()-ed.
    """
    if values is None:
        return 'all'
    if isinstance(values, str):
        return values
    if isinstance(values, list):
        return ';'.join(collapse(v) for v in values)
    return str(values)
def name(path):
    """Extracts the resource name (the text after the final '/')."""
    return path.rsplit('/', 1)[-1]
def get_cost_by_distance(distance):
    """
    Lookup cost by distance: the triangular number distance*(distance+1)/2.

    {0: 0, 1: 1, 2: 3, 3: 6, 4: 10, 5: 15, 6: 21, 7: 28, 8: 36, 9: 45, 10: 55, 11: 66, 12: 78}

    Example
    << 9
    >> 45
    """
    if distance < 2:
        return distance
    # Closed form replaces the previous O(n) recursion, which could hit
    # the recursion limit for large distances.
    return distance * (distance + 1) // 2
def huntingBehaviorVerification(behList, huntBehList, sigh):
    """
    Check whether any behavior ID in the hunting section does not exist in
    the sighting's behavior section; returns one error row per missing ID.
    """
    return [
        ["-", "BehaviorId in huntingQuery that does not exist in behavior section", bid, sigh]
        for bid in huntBehList
        if bid not in behList
    ]
def _find(root, word):
"""Find the node after following the path in a trie given by {word}.
:arg dict root: Root of the trie.
:arg str word: A word.
:returns dict: The node if found, {} otherwise.
"""
node = root
for char in word:
if char not in node:
return {}
node = node[char]
return node |
def precision(reference, test):
    """Return the fraction of *test* values present in *reference*,
    i.e. |reference ∩ test| / |test|.

    :param reference: set of reference values.
    :param test: set of values to compare against the reference set.
    :returns: float precision, or None when *test* is empty.
    """
    if not test:
        return None
    matched = reference.intersection(test)
    return float(len(matched)) / len(test)
def eh_posicao_marcada(pm):
    """Tell whether the argument is a marked position.

    :param pm: an int marked position (-1 or 1).
    :return: bool, True only for the plain ints 1 and -1.
    """
    # type(pm) == int deliberately rejects bool (type(True) is bool).
    return type(pm) == int and (pm == 1 or pm == -1)
def process_claim(claim):
    """Parse a claim row like '#1 @ 2,3: 4x5' into its number and the
    set of (row, col) points it covers."""
    number_part, details = (part.strip() for part in claim.split('@'))
    # Drop the leading '#'.
    claim_number = int(number_part[1:])
    coords_part, size_part = (part.strip() for part in details.split(':'))
    column, row = map(int, coords_part.split(','))
    width, height = map(int, size_part.split('x'))
    points = set()
    for x in range(row, row + height):
        for y in range(column, column + width):
            points.add((x, y))
    return claim_number, points
def _str_reduce(strings, pattern, h_pattern):
    """
    Reduces the given strings to a regexp by extracting the
    common prefix and suffix and sandwiching {pattern} (and
    {h_pattern}) between them.

    Returns a 2-tuple: (prefix+pattern+suffix, prefix+h_pattern+suffix).

    NOTE(review): the prefix loop runs ix over range(1, len(strings[0])),
    so even when every string shares the full first string as a prefix,
    at most len(strings[0]) - 1 characters are taken — presumably
    intentional so pattern always has something to match; confirm.
    Assumes strings is non-empty (strings[0] is indexed unconditionally).
    """
    prefix = ''
    # Grow the candidate prefix one character at a time until some
    # string stops matching, then back off by one character.
    for ix in range(1, len(strings[0])):
        prefix = strings[0][:ix]
        matching = len(strings) == len([x for x in strings if x[:ix] == prefix])
        if not matching:
            prefix = strings[0][:ix-1]
            break
    suffix = ''
    # Same idea from the right-hand end, but only over the characters
    # not already consumed by the prefix.
    for ix in range(1, len(strings[0]) - len(prefix)):
        suffix = strings[0][-ix:]
        matching = len(strings) == len([x for x in strings if x[-ix:] == suffix])
        if not matching:
            if ix > 1:
                suffix = strings[0][-(ix-1):]
            else:
                # First character from the right already differs:
                # there is no common suffix at all.
                suffix = ''
            break
    return (u"%s%s%s" % (prefix, pattern, suffix),
            u"%s%s%s" % (prefix, h_pattern, suffix))
def chunk(line, chunksize, delimiter=None):
    """Chop a string into 'chunks' no greater than a specified size.

    Given a string, return a list where each item is a substring
    no longer than the specified 'chunksize'.

    If 'delimiter' is not None then the chunking will attempt to
    chop the substrings on that delimiter. If a delimiter can't be
    located (or none is specified) then the substrings will all
    be of length 'chunksize'.

    Arguments:
      line: the string to split.
      chunksize: maximum length of each returned substring.
      delimiter: optional character to prefer as a split point.

    Returns:
      List of substrings of 'line'.
    """
    chunks = []
    # Loop over and chop up the string until the remainder is shorter
    # than chunksize
    while len(line) > chunksize:
        if delimiter is not None:
            try:
                # Locate nearest delimiter before the chunksize limit.
                # (Bug fix: this previously searched for a hard-coded
                # ' ' instead of the supplied delimiter.)
                i = line[:chunksize].rindex(delimiter)
            except ValueError:
                # Unable to locate delimiter so split on the chunksize
                # limit
                i = chunksize
            else:
                if i == 0:
                    # A delimiter only at position 0 would make no
                    # progress (infinite loop); fall back to a hard
                    # split at the chunksize limit.
                    i = chunksize
        else:
            i = chunksize
        chunks.append(line[:i])
        line = line[i:]
    # Append the remainder and return
    chunks.append(line)
    return chunks
def hsl2rgb(h, s, l):
    """
    HSL in range 0-1 to rgb in range 0-1 (H-hue, S-saturation, L-lightness)
    """
    # see: http://geekymonkey.com/Programming/CSharp/RGB2HSL_HSL2RGB.htm
    # Default (also returned when lightness/value collapses to 0).
    rgb = [1.0, 1.0, 1.0]
    v = l * (1.0 + s) if l <= 0.5 else l + s - l * s
    if v > 0:
        m = 2.0 * l - v
        sv = (v - m) / v
        h6 = h * 6.0
        sextant = int(h6)
        fract = h6 - sextant
        vsf = v * sv * fract
        mid1 = m + vsf
        mid2 = v - vsf
        # Channel arrangement per 60-degree hue sextant; an
        # out-of-range sextant (e.g. h == 1.0) keeps the default,
        # matching the original if/elif fallthrough.
        arrangements = {
            0: [v, mid1, m],
            1: [mid2, v, m],
            2: [m, v, mid1],
            3: [m, mid2, v],
            4: [mid1, m, v],
            5: [v, m, mid2],
        }
        rgb = arrangements.get(sextant, rgb)
    return rgb
def int2hex(number):
    """Returns the number as an uppercase hex string, zero-padded to
    at least four digits.
    """
    return format(number, "04X")
def flatten(xs):
    """Flatten a 2D list into a single flat list."""
    result = []
    for row in xs:
        result.extend(row)
    return result
def flatten(list_of_lists):
    """flatten([[A]]) -> [A]

    Flatten a list of lists, raising ValueError on the first element
    that is not itself a list.
    """
    ret = []
    for element in list_of_lists:
        if not isinstance(element, list):
            raise ValueError('%r is not a list' % element)
        ret.extend(element)
    return ret
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.