content stringlengths 42 6.51k |
|---|
def unshift_token(text):
    """Remove a token from the front of a string.
    :param str text:
    :returns: {'text': str, 'separator': str, 'remainder': str}
    """
    if not text:
        return {'text': text, 'separator': '', 'remainder': ''}
    token = ''
    for pos, char in enumerate(text):
        last = token[-1] if token else None
        if char == ' ' and last == ' ':
            # Extend a run of leading spaces.
            token += char
        elif char == ' ' and not token:
            token += char
        elif char == ' ':
            # Word followed by a space: the space is the separator.
            return {'text': token, 'separator': ' ', 'remainder': text[pos + 1:]}
        elif char == '\n':
            return {
                'text': token,
                'separator': '\n',
                'remainder': text[pos + 1:],
            }
        elif last == ' ':
            # Leading spaces followed by a non-space: the spaces themselves
            # form the token and no separator is consumed.
            return {
                'text': token,
                'separator': '',
                'remainder': text[len(token):],
            }
        else:
            token += char
    return {'text': token, 'separator': '', 'remainder': ''}
def _convert_bytes(content):
"""
TypeError: str() takes at most 1 argument (2 given) # python2
"""
try:
return str(content, "utf-8")
except TypeError:
return str(content) |
def date_name_converter(date):
    """Convert date strings like "DD-MonthName3Letters-YY" to "MM-DD-YY" """
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    for number, name in enumerate(months, start=1):
        # Substitute the month abbreviation with its zero-padded number.
        date = date.replace(name, str(number).zfill(2))
    day, month, year = date.split('-')
    return '%s-%s-%s' % (month, day, year)
def uses_only(word, letters):
    """return true if word only use a set letters"""
    allowed = letters.lower()
    # Case-insensitive containment check for every character of the word.
    return all(ch.lower() in allowed for ch in word)
def unravel_index(index, shape):
    """ Analog of numpy.unravel_index. """
    coords = []
    # Peel dimensions from the innermost (last) outward.
    for dim in shape[::-1]:
        index, remainder = divmod(index, dim)
        coords.insert(0, remainder)
    return tuple(coords)
def duck_type_collection(specimen, default=None):
    """Given an instance or class, guess if it is or is acting as one of
    the basic collection types: list, set and dict. If the __emulates__
    property is present, return that preferentially.
    """
    if hasattr(specimen, '__emulates__'):
        emulated = specimen.__emulates__
        # canonicalize set vs sets.Set to a standard: the builtin set
        if emulated is not None and issubclass(emulated, set):
            return set
        return emulated
    # Classes are tested with issubclass, instances with isinstance.
    isa = issubclass if isinstance(specimen, type) else isinstance
    for collection_type in (list, set, dict):
        if isa(specimen, collection_type):
            return collection_type
    # Fall back to duck typing on characteristic mutator methods.
    if hasattr(specimen, 'append'):
        return list
    if hasattr(specimen, 'add'):
        return set
    if hasattr(specimen, 'set'):
        return dict
    return default
def getVals( assembliesList, key ):
    """ returns a list of values given a key and the list of assemblies.
    """
    # Collect the keyed entry from each assembly's valuesDict.
    return [assembly.valuesDict[key] for assembly in assembliesList]
def _urls(repository, commit, mirrors):
"""Compute the urls from which an archive of the provided GitHub
repository and commit may be downloaded.
Args:
repository: GitHub repository name in the form organization/project.
commit: git revision for which the archive should be downloaded.
mirrors: dictionary of mirrors, see mirrors.bzl in this directory for
an example.
"""
return [
x.format(
repository = repository,
commit = commit,
)
for x in mirrors.get("github")
] |
def find_least_most( lol, index ):
    """
    - finds the number of the min, and the max of a column from a list of lists
    - returns ( m_count, b_count )
    """
    # Pull the requested CSV column, skipping any header line that
    # mentions "diagnosis".
    column = [row.split(",")[index] for row in lol if "diagnosis" not in row]
    return ( column.count("M"), column.count("B") )
def _pictLstToDict(lst):
"""Convert picture from list to dictionary."""
return {i:lst[i] for i in range(len(lst))} |
def parse_filters(filter_list):
    """Extract "key=value" filters from a list of raw filter strings.

    Args:
        filter_list (list): raw filter strings, each either "key=value" or a
            bare flag name.

    Returns:
        dict: mapping of filter name to value; bare flags map to True.
        An empty dict is returned for an empty or None input.
    """
    if not filter_list:
        return {}
    filters = {}
    for filter_item in filter_list:
        if '=' in filter_item:
            # Split only on the first '=' so values may themselves
            # contain '=' (the original dropped everything after the
            # second '=').
            key, value = filter_item.split('=', 1)
            filters[key] = value
        else:
            filters[filter_item] = True
    return filters
def _get_payload(instream, identifier):
    """Find the identifier and get the part between {curly brackets}.

    Scans *instream* line by line (stripping ``//`` line comments) until a
    line that starts with *identifier*, then returns the text found between
    the following ``{`` and ``}``, with newlines removed.

    :param instream: iterable of text lines (e.g. an open file).
    :param identifier: token the declaration line must start with.
    :returns: concatenated payload text between the braces.
    :raises ValueError: if *identifier* never appears in the stream.
    """
    for line in instream:
        if '//' in line:
            line, _ = line.split('//', 1)
        line = line.strip(' \r\n')
        # for this to work the declaration must be at the start of the line
        # (whitespace excluded). compound statements would break this.
        if line.startswith(identifier):
            break
    else:
        raise ValueError('Identifier `{}` not found in file'.format(identifier))
    # The opening brace may sit on the identifier line itself...
    if '{' in line[len(identifier):]:
        _, line = line.split('{')
        if '}' in line:
            # ...and the whole payload may even fit on that single line.
            line, _ = line.split('}', 1)
            return line
        payload = [line]
    else:
        payload = []
    # Keep consuming (comment-stripped) lines until the closing brace.
    for line in instream:
        if '//' in line:
            line, _ = line.split('//', 1)
        line = line.strip(' \r\n')
        if '{' in line:
            _, line = line.split('{', 1)
        if '}' in line:
            line, _ = line.split('}', 1)
            payload.append(line)
            break
        if line:
            payload.append(line)
    return ''.join(payload) |
def encode_complex(obj):
    """Convert a complex number object into a list containing the real and imaginary values."""
    real_part = obj.real
    imag_part = obj.imag
    return [real_part, imag_part]
def params_schedule_fn_interval(outside_info):
    """
    outside_information (dict):
        progress (float in [0, 1] interval) a number that indicate progress
    """
    assert outside_info != {} and "progress" in outside_info, \
        "if this happens during initialization, please add initial_info to env_params to address the issue"
    progress = outside_info["progress"]
    # Linearly trade empty space for features as training progresses.
    return {
        "inner_shape": (7, 5),
        "prop_empty": 0.9 - 0.4 * progress,
        "prop_feats": 0.1 + 0.4 * progress,
        "start_all_orders": [
            {"ingredients": ["onion", "onion", "onion"]}
        ],
        "display": False,
        "rew_shaping_params": None
    }
def is_prime_v1(n):
    """Return 'True' if 'n' is a prime number. False otherwise.

    Trial division over [2, n). Values below 2 (including 0 and negative
    numbers, which the original wrongly reported as prime) return False.
    """
    if n < 2:
        return False
    for d in range(2, n):
        if n % d == 0:
            return False
    return True
def calculate_total(books):
    """
    Returns the minimum cost of books for any conceivable shopping cart

    ``books`` is a sequence of book-type identifiers (ints 1..5). Two
    grouping strategies are priced and the cheaper total returned:
    (1) greedily forming the largest possible distinct-type groups, and
    (2) preferring groups of four, which can beat a 5-group + 3-group split.
    """
    # We store the discounted price of occurences of each type
    # (price for a bundle of N distinct titles).
    d = {
        0: 0,
        1: 800,
        2: 1520,
        3: 2160,
        4: 2560,
        5: 3000
    }
    total_cost = 0
    total_cost_lst = 0
    counts = [books.count(i) for i in range(1, 6)] # Tells us the number or each type of book in the shopping cart.
    counts.sort(reverse=True)
    lst = counts.copy()
    # First we find the total cost descending from highest groupings of 5 to the lowest groupings of 1.
    # We do this since higher groupings have larger absolute discounts.
    while counts:
        counts = [x for x in counts if x != 0] # Removes zeroes
        uniques = len(counts) # This the current highest unique combination
        counts = [x - 1 if x > 0 else 0 for x in counts] # Reduce 1 item from entire basket.
        total_cost += d[uniques] # Store the total cost here
    # Now we calculate costs using preferred groupings of 4! In some cases this yields better discounts.
    while lst:
        lst = [x for x in lst if x != 0] # Remove zeroes
        if len(lst) >= 4:
            total_cost_lst += d[4]
            for i in range(4):
                lst[i] -= 1 # Reduce 1 item from first four elements.
        else:
            total_cost_lst += d[len(lst)]
            lst = [x - 1 if x > 0 else 0 for x in lst] # Reduce 1 item from entire basket.
    return min(total_cost_lst, total_cost) |
def _get_id_from_api_filter_link(filter_link):
"""Get the id from an api filter link.
Expects the id to come after the "/service/apifilter/" part of the link.
Example filter_link: '/admin/service/apifilter/12345/'
Example return: '12345'
:return: the api filter id
:rtype: str
"""
link_dirs = filter_link.split('/')
num_dirs = len(link_dirs)
ind = 0
while ind < num_dirs:
if link_dirs[ind] == 'service' and ind < num_dirs - 2 and \
link_dirs[ind + 1] == 'apifilter':
return link_dirs[ind + 2]
ind += 1
return '' |
def Builder(*_args, **_kw):
    """Fake Builder"""
    # Stub standing in for the real Builder; all arguments are ignored.
    fake_result = ["fake"]
    return fake_result
def shape(data):
    """
    Given a nested list or a numpy array,
    return the shape.

    Scalars yield []; an empty sequence yields [0] (the original raised
    IndexError on empty lists). Nesting is inferred from the first element.
    """
    if hasattr(data, "shape"):
        # numpy arrays (and anything array-like) expose .shape directly.
        return list(data.shape)
    try:
        length = len(data)
        if length == 0:
            # Empty sequence: one dimension of size zero.
            return [0]
        return [length] + shape(data[0])
    except TypeError:
        # Not a sequence (or not indexable): treat as a scalar.
        return []
def get_filtered_filename(filename, filename_key):
    """
    Return the 'filtered filename' (according to `filename_key`)
    in the following format:
    `filename`__`filename_key`__.ext
    """
    if '.' in filename:
        image_name, ext = filename.rsplit('.', 1)
    else:
        # No extension present: default to jpg.
        image_name, ext = filename, 'jpg'
    return f"{image_name}__{filename_key}__.{ext}"
def EscapeDelimiters(s):
    """
    Replaces each ";" with a backslash and each "|" with "|" followed by a
    backslash in the input string.

    :param `s`: the string to be analyzed.
    :note: This is an internal functions which is used for saving perspectives.

    NOTE(review): the original docstring claimed '""' -> '\\' and
    '|' -> '\\|', which does not match the code below. Confirm the intended
    escaping against the perspective-loading counterpart before changing
    either the code or this description.
    """
    result = s.replace(";", "\\")
    result = result.replace("|", "|\\")
    return result |
def is_unique(s):
    """Check if the list s has no duplicate."""
    # A set collapses duplicates, so equal sizes mean all elements differ.
    distinct = set(s)
    return len(distinct) == len(s)
def sentence_id(json_sentence):
    """
    Return the unique id of a sentence, built from its document,
    paragraph and sentence identifiers joined by underscores.
    """
    parts = (str(json_sentence[field]) for field in ('did', 'pid', 'sid'))
    return '_'.join(parts)
def fmt_usd(my_price):
    """
    Converts a numeric value to US dollar-formatted string, for printing and display purposes.
    Param: my_price (int or float) like 4000.444444
    Returns: $4,000.44
    """
    return "${:,.2f}".format(my_price)
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.
    This is used to disallow proposed requirements from being added when
    they weren't present before.
    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    # `repo` is accepted for interface parity but not consulted here.
    allowed = ('dotencode', 'fncache', 'generaldelta')
    return set(allowed)
def str2bool(string):
    """
    converts a string into a bool
    """
    # Loose == comparisons are intentional: real booleans (and 1/0)
    # short-circuit before any string slicing happens.
    if string == True:
        return True
    if string == False:
        return False
    # Anything starting with 'f'/'F' or 'n'/'N' counts as false.
    return string[:1].lower() not in ('f', 'n')
def _aggregate(k, oldValue, newValue, summary):
"""
Apply to numeric values, aummarize the diffence
:param k:
:param oldValue:
:param newValue:
:param summary:
:return:
"""
try:
num1 = float(oldValue)
num2 = float(newValue)
if k in summary:
summary[k] = (summary[k][0]+(num2 - num1),summary[k][1]+1)
else:
summary[k] = ((num2 - num1), 1)
return True
except:
return False |
def _get_set_name(test_data):
"""Get the set_name from test_data
Return the value of 'set_name' key if present in the data
If set_name it is not present in data, return the value of the first key.
If there's no data, leave set_name as ''
"""
set_name = ''
if 'set_name' in test_data:
set_name = test_data['set_name']
else:
data_without_env = dict(test_data)
data_without_env.pop('env', None)
if data_without_env:
set_name = test_data[next(iter(data_without_env))]
return set_name |
def set_true_for_empty_dict(d):
    """
    Recursively set value of empty dicts from a dictionary.
    For some of entity G Suite API return {} (blank dictionary) which indicates some actions on resource.
    Eg. Here, new or upload indicates resource is newly created or uploaded on the server.
    {
        "new": {}, // An object was created from scratch.
        "upload": {}, // An object was uploaded into Drive.
    }
    :param d: Input dictionary.
    :return: Dictionary with all empty dictionary's value set as True.
    """
    if isinstance(d, list):
        return [set_true_for_empty_dict(item) for item in d]
    if not isinstance(d, dict):
        return d
    if not d:
        # An empty dict marks an action flag: represent it as True.
        return True
    converted = {}
    for key, value in d.items():
        new_value = set_true_for_empty_dict(value)
        converted[key] = True if new_value == {} else new_value
    return converted
def _is_none(s: str) -> bool:
"""Check if a value is a text None."""
if s == 'None':
return True
return False |
def format_isbn_list(isbn_list, isbn_version):
    """
    Formats the list for on-wiki publication

    Each entry is a (item, isbn) pair rendered as a numbered {{Q|...}} line
    under a heading; an empty input yields an empty string.
    """
    if not isbn_list:
        return ""
    lines = [u'== Wrong {}s =='.format(isbn_version)]
    for entry in sorted(isbn_list):
        lines.append(u'# {{{{Q|{}}}}}: {}'.format(entry[0], entry[1]))
    return '\n'.join(lines) + '\n'
def compute_indentation(props):
    """
    Compute the indentation in inches from the properties of a paragraph style.

    Sums the 'margin-left' and 'text-indent' entries of *props*, each
    expected as a string like '0.5in'. Malformed or non-string values are
    skipped (best-effort, as before).
    """
    res = 0
    for k, v in props.items():
        if k in ('margin-left', 'text-indent'):
            try:
                res += float(v.replace('in', ''))
            except (AttributeError, ValueError):
                # Narrowed from a bare except: non-strings raise
                # AttributeError, unparsable numbers ValueError.
                pass
    return res
def num_bytes_to_struct_char(n: int):
    """
    Given number of bytes, return the struct char that can hold those bytes.
    For example,
    2 = H
    4 = I
    Returns None when no single struct code fits (n < 1 or n > 8).
    """
    if n < 1 or n > 8:
        return None
    if n <= 1:
        return "B"
    if n <= 2:
        return "H"
    if n <= 4:
        return "I"
    return "Q"
def spend_utxo(utxo: str) -> dict:
    """
    Get spend UTXO action.
    :param utxo: Bytom utxo id.
    :type utxo: str
    :returns: dict -- Bytom spend utxo action.
    >>> from pybytom.transaction.actions import spend_utxo
    >>> spend_utxo("169a45be47583f7240115c9059cd0d03e4d4fab70a41536cf298d6f261c0a1ac")
    {'type': 'spend_utxo', 'output_id': '169a45be47583f7240115c9059cd0d03e4d4fab70a41536cf298d6f261c0a1ac'}
    """
    return {"type": "spend_utxo", "output_id": utxo}
def fact(number):
    """
    Calculating the factorial of a number
    """
    # Multiplying from 2 upward (starting at 1 would be a no-op).
    product = 1
    for factor in range(2, number + 1):
        product *= factor
    return product
def unique_list(non_unique_list):
    """
    Return list with unique subset of provided list, maintaining list order.
    Source: https://stackoverflow.com/a/480227/1069467
    """
    seen = set()
    result = []
    for item in non_unique_list:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def sort_tuple_lists_by_timestamp(norm_lists):
    """Sort each list of (timestamp, ...) tuples in *norm_lists* by timestamp.

    The dict is updated in place and also returned.
    """
    for key in norm_lists:
        norm_lists[key] = sorted(norm_lists[key], key=lambda pair: pair[0])
    return norm_lists
def search_for_pod_info(details, operator_id):
    """
    Get operator pod info, such as: name, status and message error (if failed).
    Parameters
    ----------
    details : dict
        Workflow manifest from pipeline runtime.
    operator_id : str
    Returns
    -------
    dict
        Pod informations ({} when no node matches or the manifest is
        missing expected keys).
    """
    info = {}
    try:
        nodes = details["status"].get("nodes") or {}
        for node in nodes.values():
            if node["displayName"] != operator_id:
                continue
            # Last matching node wins, mirroring the original loop.
            info = {
                "name": node["id"],
                "status": node["phase"],
                "message": node["message"],
            }
    except KeyError:
        # Malformed manifest: fall through with whatever was gathered.
        pass
    return info
def cut_prefix(s, prefix):
    """Cuts prefix from given string if it's present."""
    if s.startswith(prefix):
        return s[len(prefix):]
    return s
def aggregate_initial_architecture(hparams):
    """Helper function to aggregate initial architecture into an array hparam."""
    output = hparams.copy()
    # Count the indexed "initial_architecture_<i>" entries...
    count = sum(1 for name in hparams if name.startswith("initial_architecture"))
    # ...and gather them, in index order, into a single list-valued hparam.
    output["initial_architecture"] = [
        hparams["initial_architecture_{}".format(idx)] for idx in range(count)
    ]
    return output
def prob_zipf_distrib(q, t, m, alpha):
    """
    Probability that a block pattern of q + t blocks contains another block
    pattern of q blocks, assuming that all blocks are i.i.d. according to a
    zipf distribution with decay parameter alpha. Parameter m represents the
    total number of blocks.

    NOTE: despite the name, the current implementation assumes a *uniform*
    block distribution (per-pair match probability 1/m); `alpha` is unused.
    """
    p_match = float(1) / m
    # Chance that none of the q + t blocks equals a given block.
    p_no_match_all = (1 - p_match) ** (q + t)
    # All q pattern blocks must each find at least one match.
    return (1 - p_no_match_all) ** q
def c_string_literal(env, string):
    """
    Escapes string and adds quotes.
    """
    # Warning: Order Matters! Replace '\\' first!
    replacements = (
        ("\\", "\\\\"),
        ("\'", "\\\'"),
        ("\"", "\\\""),
        ("\t", "\\t"),
        ("\n", "\\n"),
        ("\f", ""),  # form feeds and carriage returns are dropped entirely
        ("\r", ""),
    )
    for old, new in replacements:
        string = string.replace(old, new)
    return "\"" + string + "\""
def _split_list_by_function(l, func):
"""For each item in l, if func(l) is truthy, func(l) will be added to l1.
Otherwise, l will be added to l2.
"""
l1 = []
l2 = []
for item in l:
res = func(item)
if res:
l1.append(res)
else:
l2.append(item)
return l1, l2 |
def format_sec(sec):
    """
    format a time
    Parameters
    ----------
    sec : float
        time in seconds
    Returns
    -------
    string :
        formatted time in days, hours, minutes and seconds
    """
    minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    # Use the largest non-zero unit to pick the format.
    if days:
        return '%d d %02d h %02d m %02d s' % (days, hours, minutes, seconds)
    if hours:
        return '%02d h %02d m %02d s' % (hours, minutes, seconds)
    if minutes:
        return '%02d m %02d s' % (minutes, seconds)
    return '%.3f s' % seconds
def convert_str_facet_to_list(facet):
    """Normalize a 3-character facet string (e.g. "111") into a list of
    ints ([1, 1, 1]); any non-string value is passed through unchanged.
    """
    #| - convert_str_facet_to_list
    if type(facet) == str:
        assert len(facet) == 3, "Facet string must be only 3 in lenght, otherwise I'm not sure how to process"
        return [int(digit) for digit in facet]
    return facet
    #__|
def sn2numZ3(scenario: list) -> str:
    """
    Convert scenario number to str (zero padding of 3)
    Parameters
    ----------
    scenario: list
        use only index 0
        0: number, 1: items (order and recipe), 2:judge
    Returns
    ----------
    str
        str of scenario number (zero padding of 3)
    """
    number_text = str(scenario[0])
    return number_text.zfill(3)
def heaviside(x, bias=0):
    """
    Heaviside function Theta(x - bias)
    returns 1 if x >= bias else 0
    :param x: input value
    :param bias: threshold (default 0)
    :return: 1 or 0
    """
    return 1 if x >= bias else 0
def make_matrix(num_rows, num_cols, entry_fn):
    """returns a num_rows x num_cols matrix
    whose (i,j)th entry is entry_fn(i, j)"""
    matrix = []
    for row in range(num_rows):
        matrix.append([entry_fn(row, col) for col in range(num_cols)])
    return matrix
def object_to_dict(xmp):
    """
    Extracts all XMP data from a given XMPMeta instance organizing it into a
    standard Python dictionary.

    Iterating *xmp* yields tuples whose last element is an option dict;
    entries flagged IS_SCHEMA open a new list keyed by the schema name
    (item[0]), and subsequent property tuples are appended (minus the
    leading schema name) to that list.

    NOTE(review): assumes every non-schema item is preceded by its schema
    entry — a property arriving first would raise KeyError on the append.
    Confirm XMPMeta's iteration order guarantees this.
    """
    dxmp = dict()
    if not xmp:
        return {}
    for item in xmp:
        if item[-1]['IS_SCHEMA']:
            # Start a bucket for this schema namespace.
            dxmp[item[0]] = []
        else:
            # Append the property tuple (without the leading schema name).
            dxmp[item[0]].append(item[1:])
    return dxmp |
def absolute_value(num: int):
    """
    This function returns the absolute value of the provided number
    """
    return num if num >= 0 else -num
def time_recommendation(move_num, seconds_per_move=5, time_limit=15*60,
                        decay_factor=0.98):
    """
    Given current move number and "desired" seconds per move,
    return how much time should actually be used. To be used specifically
    for CGOS time controls, which are absolute 15 minute time.
    The strategy is to spend the maximum time possible using seconds_per_move,
    and then switch to an exponentially decaying time usage, calibrated so that
    we have enough time for an infinite number of moves.
    """
    # The player only plays every other move in a game.
    half_move = move_num / 2
    # A geometric series with ratio decay_factor sums to this many seconds.
    tail_budget = seconds_per_move / (1 - decay_factor)
    if tail_budget > time_limit:
        # Main time is so short we are in "endgame" decay from move one.
        return (time_limit * (1 - decay_factor)) * decay_factor ** half_move
    # Flat-rate phase: how many of the player's moves fit before decay starts.
    flat_moves = (time_limit - tail_budget) / seconds_per_move
    if half_move < flat_moves:
        return seconds_per_move
    return seconds_per_move * decay_factor ** (half_move - flat_moves)
def _check_user_data_match(cmd_ud, user_data):
"""Check if the command user_data matches the input user_data."""
if cmd_ud is None:
return True
if not user_data and not cmd_ud:
return True
if user_data and not cmd_ud:
return False
if cmd_ud and not user_data:
return False
for field in cmd_ud:
if cmd_ud[field] != user_data.get(field):
return False
return True |
def detuplelize(item):
    """If item is a tuple, return first element, otherwise the item itself.
    The tuple syntax is used to implement prejoins, so we have to hide from
    the user the fact that more than a single object are being selected at
    once.
    """
    # Exact type check on purpose: tuple subclasses (e.g. namedtuples)
    # pass through untouched.
    return item[0] if type(item) is tuple else item
def IsListlike(arg):
    """
    Return True when *arg* behaves like a list (is iterable) and is not a
    string; strings are excluded even though they are iterable.
    """
    # `str` replaces six.string_types: this file already uses f-strings,
    # so it requires Python 3, where six.string_types is simply (str,).
    # Removing six also removes a third-party dependency.
    if isinstance(arg, str):
        return False
    try:
        # iter() probes the iteration protocols without consuming the
        # argument (the original exhausted one-shot iterators).
        iter(arg)
        return True
    except TypeError:  # raised for non-iterables
        return False
def alignment_zeros(data_len) -> bytearray:
    """Return array of 0s to align to 4."""
    # (-n) % 4 equals (4 - n % 4) % 4: bytes needed to reach the next
    # multiple of four.
    return bytearray(-data_len % 4)
def calculate_score(set_of_tags_1, set_of_tags_2):
    """
    :param set_of_tags_1: collection of unique strings
    :param set_of_tags_2: same as above
    :return: score based on hashcode scoring
    """
    shared = len(set_of_tags_1.intersection(set_of_tags_2))
    only_first = len(set_of_tags_1.difference(set_of_tags_2))
    only_second = len(set_of_tags_2.difference(set_of_tags_1))
    # The score is limited by the weakest of the three components.
    return min(shared, only_first, only_second)
def format_time(hour: int, minute: int) -> str:
    """Turns hours and minutes to a string with the format 'HH:MM'. Assumes 24h clock"""
    return f"{hour:02d}:{minute:02d}"
def merge(line):
    """
    Function that merges a single row or column in 2048.

    Non-zero tiles slide toward the front, equal adjacent tiles combine
    once (front to back), and the result is padded with zeros back to the
    original length.
    """
    size = len(line)

    def _slide(values):
        # Move all non-zero tiles to the front, zero-padding the tail.
        packed = [v for v in values if v != 0]
        return packed + [0] * (size - len(packed))

    slid = _slide(line)
    merged = [0] * size
    previous = 0
    for pos, value in enumerate(slid):
        if value == previous and value != 0:
            # Combine with the tile placed just before this one.
            merged[pos - 1] = value * 2
            merged[pos] = 0
            previous = 0  # a tile may only merge once per move
        else:
            merged[pos] = value
            previous = value
    # Close the gaps opened by merging.
    return _slide(merged)
def _touch(high, low, level, open, close):
"""
was the given level touched
:param high:
:param low:
:param level:
:return:
"""
if high > level and low < level:
if open >= close:
return -1
else:
return 1
else:
return 0 |
def skip_add(n):
    """ Takes a number x and returns x + x-2 + x-4 + x-6 + ... + 0.
    >>> skip_add(5) # 5 + 3 + 1 + 0
    9
    >>> skip_add(10) # 10 + 8 + 6 + 4 + 2 + 0
    30
    """
    if n <= 0:
        # Guard n <= 0: the original recursed forever on negative input.
        return 0
    if n == 1:
        return 1
    return n + skip_add(n - 2)
def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alphanumeric
    characters, and converts spaces and hyphens to underscores.
    """
    import re
    import unicodedata
    # str() replaces six.text_type: the file already uses f-strings, so it
    # requires Python 3, where six.text_type is simply str. This drops the
    # third-party six dependency.
    value = str(value)
    # Decompose accented characters and strip anything non-ASCII.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('utf8')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '_', value)
    return value
def radial_blur(blur_width=1.0, blur_height=1.0, sample_num_x=4, sample_num_y=4, center_area=0.0):
    """
    Blur filter: returns GLSL fragment-shader source (str) with the given
    parameters baked in as literal constants.
    distance = distance apart each sample is
    percentage = amount of blurring to apply
    sample_num_x = number of samples to apply on the X axis
    sample_num_y = number of samples to apply on the Y axis
    center_area = what amount of the screen to leave unblurred, from the center outwards. 0.5 = entire screen
    Author: SolarLune
    """
    # NOTE(review): the generated shader assigns gl_TexCoord[0] (a vec4) to
    # a vec2 and accumulates into `color` without initializing it — some
    # GLSL compilers reject or miscompile this; verify on the target driver.
    return ("""
    // Name: Simple 16-Sample (Box?) Blur Effect
    // Author: SolarLune
    // Date Updated: 6/6/11
    uniform sampler2D bgl_RenderedTexture;
    void main(void)
    {
        vec2 xy = gl_TexCoord[0];
        float blur_width = 0.002 * """ + str(blur_width) + """;
        float blur_height = 0.002 * """ + str(blur_height) + """;
        int sample_num_x = """ + str(sample_num_x) + """;
        int sample_num_y = """ + str(sample_num_y) + """;
        vec4 color;
        float blurriness = max(abs(xy.x - 0.5), abs(xy.y - 0.5));
        blurriness -= """ + str(center_area) + """;
        blurriness = max(blurriness, 0.0);
        for (int i = -sample_num_x; i < sample_num_x; i++)
        {
            for (int j = -sample_num_y; j < sample_num_y; j++)
            {
                color += texture2D(bgl_RenderedTexture, vec2(xy) + vec2((i * blurriness) *
                blur_width, (j * blurriness) * blur_height));
            }
        }
        gl_FragColor = color / (sample_num_x*sample_num_y*4);
    }
    """) |
def is_gif(data: bytes) -> bool:
    """
    Check if the given data is a GIF.
    Parameters:
        data (bytes): The data to check.
    Returns:
        True if the data is a GIF, False otherwise.
    """
    # Both GIF signature variants, tested in one call.
    return data.startswith((b"GIF87a", b"GIF89a"))
def is_number(value):
    """
    Determine whether a value is a number or not
    >>> is_number(5)
    True
    >>> is_number("1")
    True
    """
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        # TypeError added: int(None), int([]) etc. used to escape the
        # handler and crash callers instead of returning False.
        return False
def get_columns(filters):
    """return columns based on filters"""
    # The leading Item link column, followed by four identically-shaped
    # Float quantity columns.
    item_column = {
        "fieldname": "item",
        "fieldtype": "Link",
        "label": "Item",
        "options": "Item",
        "width": 250,
    }
    quantity_fields = [
        ("open_qty", "Opening Qty"),
        ("in_qty", "In Qty"),
        ("out_qty", "Out Qty"),
        ("balance_qty", "Balance Qty"),
    ]
    quantity_columns = [
        {"fieldname": fieldname, "fieldtype": "Float", "label": label, "width": 150}
        for fieldname, label in quantity_fields
    ]
    return [item_column] + quantity_columns
def pack_layers(i, hiddens, o):
    """Create the full NN topology from input size, hidden layers, and output."""
    return [i, *hiddens, o]
def _get_package_uri_props(package_uri):
"""
Gets the properties of a debian package from its URI.
"""
uri_filename = package_uri.rsplit("/", 1)[1]
uri_basename = uri_filename.rsplit(".", 1)[0]
uri_name, uri_version, uri_arch = uri_basename.split("_")
return uri_filename, uri_name, uri_version, uri_arch |
def is_numeric(value):
    """Test if a value is numeric."""
    # Exact type check: bool (a subclass of int) is deliberately excluded.
    return type(value) is int or type(value) is float
def complement_base(base):
    """Returns the Watson-Crick complement of a base."""
    # Case-insensitive lookup; anything other than a/t/g maps to 'G',
    # matching the original catch-all else branch (so 'c' -> 'G').
    return {'a': 'T', 't': 'A', 'g': 'C'}.get(base.lower(), 'G')
def quadrant_update(quadrant_dict, current_angle):
    """
    Return the first quadrant whose (low, high) bounds contain the angle.

    Args:
        quadrant_dict: maps quadrant number (starting at 1) to an
            inclusive (low, high) angle pair.
        current_angle: angle in degrees; must not exceed 360.

    Returns:
        The first matching quadrant key (1, 2, ...).

    Raises:
        ValueError: if current_angle is greater than 360.
    """
    if current_angle > 360:
        raise ValueError('You have left the circle, my fiend.')
    quadrant = 1
    while True:
        low, high = quadrant_dict[quadrant]
        if low <= current_angle <= high:
            return quadrant
        quadrant += 1
def _bytes_from_hexstring(hexstr: str) -> bytes:
"""Convert a hex string to a bytes array"""
return bytes(bytearray.fromhex(hexstr)) |
def decay_across_unit_interval(v, p, d):
    """ Generalized decay function over unit interval.
    Returns: initial value rescaled based on decay factor
    Parameters:
        v: Starting value
        p: Percent completed   must be in a unit interval [0,1]
        d: Decay trajectory    must be in a unit interval [0,1]
    Example values for d:
        d = 0.00   No decay         return starting value
        d = 0.25   Slow onset       decay slowly and then accelerate
        d = 0.50   Linear decay     45 degree decay across interval
        d = 0.75   Fast onset       decay fast and then deccelerate
        d = 1.00   Immediate decay  return 0
    Author:  KenYounge@gmail.com
    License: GNU General Public License with attribution
    """
    # No decay at all.
    if d == 0.0:
        return v
    # Slow onset. At d == 0.5 this reduces to v * (1 - p), i.e. linear
    # decay, so the original's separate — and unreachable — linear and
    # immediate-decay branches were removed; results are identical.
    if d <= 0.5:
        return v * (1 - p ** (1.0 / (d * 2)))
    # Fast onset: mirror the slow-onset curve around the linear diagonal.
    # At d == 1.0 this evaluates to 0 (immediate decay).
    return v * (decay_across_unit_interval(1, p, 0.5)
                - (decay_across_unit_interval(1, 1 - p, 1 - d)
                   - decay_across_unit_interval(1, 1 - p, 0.5)))
def get_unique_pairs(es):
    """ Query to directly get matching pairs of PS nodes.
    Args:
        es (Elasticsearch object): Current elasticsearch connection object.
    Returns:
        dict: An double aggregation of sources and corresponding destinations.
        On failure the exception is printed and None is returned implicitly;
        callers must handle a None result.
    """
    # Filter to traces with an average hop count above 1 and at least 1000
    # samples, then bucket up to 60 sources x 59 destinations.
    query = {
        "query": {
            "bool": {
                "filter": [
                    {
                        "range": {
                            "n_hops.avg": {
                                "gt": 1
                            }
                        }
                    },
                    {
                        "range": {
                            "n_hops.value_count": {
                                "gt": 1000
                            }
                        }
                    }
                ]
            }
        },
        "aggs": {
            "sources": {
                "terms": {
                    "field": "src",
                    "size": 60
                },
                "aggs": {
                    "destinations": {
                        "terms": {
                            "field": "dest",
                            "size": 59
                        }
                    }
                }
            }
        }
    }
    try:
        return es.search(index="trace_derived_v2", body=query)
    except Exception as e:
        # NOTE(review): broad catch that only prints and falls through to an
        # implicit None return; consider logging and re-raising instead.
        print(e) |
def ValidateParameter_PRODRES(query_para):#{{{
    """Validate the input parameters for PRODRES
    query_para is a dictionary

    Exactly one of each (E-value, bit-score) pair may be set; violations
    are appended to query_para['errinfo']. Sets query_para['isValidSeq']
    and returns the validity flag.
    """
    is_valid = True
    query_para.setdefault('errinfo', "")
    checks = [
        ('pfamscan_evalue', 'pfamscan_bitscore',
         "Both PfamScan E-value and PfamScan Bit-score "
         "are set! One and only one of them should be set!"),
        ('jackhmmer_bitscore', 'jackhmmer_evalue',
         "Both Jackhmmer E-value and Jackhmmer Bit-score "
         "are set! One and only one of them should be set!"),
    ]
    for first, second, message in checks:
        if query_para[first] != "" and query_para[second] != "":
            query_para['errinfo'] += "Parameter setting error!"
            query_para['errinfo'] += message
            is_valid = False
    query_para['isValidSeq'] = is_valid
    return is_valid
def to_bytes(url):
    """to_bytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    # XXX get rid of to_bytes()
    if not isinstance(url, str):
        # Non-strings (e.g. bytes) pass through untouched.
        return url
    try:
        return url.encode("ASCII").decode()
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
def jobs_as_dict(raw_jobs):
    """Construct a dictionary with job name as key and job status as value.

    Jobs without a "color" entry are skipped.
    """
    return {job["name"]: job["color"] for job in raw_jobs if "color" in job}
def get_page_key(page_url):
    """
    Get the page key.
    Used to prepend a unique key to internal anchorlinks,
    so when we combine all pages into one, we don't get conflicting (duplicate) URLs
    Works the same when use_directory_urls is set to true or false in docums.yml
    Examples
        get_page_key('index.html') --> 'index'
        get_page_key('/') --> 'index'
        get_page_key('abc/') --> 'abc'
        get_page_key('abc.html') --> 'abc'
    Args:
        page_url (str): The Docums url of the page
    """
    key = page_url.lower().strip()
    key = key.rstrip("/")
    key = key.replace(".html", "")
    key = key.replace("/", "-").lstrip("-")
    # An empty key (e.g. the site root '/') maps to the index page.
    return key if key else "index"
def test_response(resp):
    """ some abstract collections raise ValueErrors. Ignore these """
    try:
        converted = float(resp)
    except ValueError:
        return False
    # Falsy when the value is exactly 0.0, matching the original contract.
    return converted
def get_ep_size_tuple(packages):
    """return a list of (ep, size) of the packages"""
    # Deduplicated (endpoint, size-of-first-package) pairs. The inner
    # loop variable is renamed to avoid shadowing the argument; result
    # order follows set iteration order, as before.
    pairs = {(ep, pkg_list[0]["size"]) for ep, pkg_list in packages.items()}
    return list(pairs)
def increasing(digits):
    """ True if the digits are increasing (non-decreasing), e.g. '123489'.

    Generalized from the original hard-coded six-digit check (range(5)),
    so digit strings of any length work; results are unchanged for
    six-digit inputs.
    """
    return all(int(a) <= int(b) for a, b in zip(digits, digits[1:]))
def convtransp_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1, output_padding=0):
    """
    SOURCE: https://discuss.pytorch.org/t/utility-function-for-calculating-the-shape-of-a-conv-output/11173/6
    Utility function for computing output size of convTransposes given the input size and the convT layer parameters.
    Args:
        h_w (Union[int, Tuple[int]]): The input height and width, either as a single integer number or as a tuple.
        kernel_size (int): The layer's kernel size.
        stride (int): The layer's stride.
        pad (int): The layer's padding.
        dilation (int): The layer's dilation.
        output_padding (int): The layer's output padding (new, defaults to 0).
    Returns: A tuple (height, width) with the resulting height and width after layer application.
    """
    def _pair(value):
        # Broadcast a scalar to a (h, w) pair; tuples pass through.
        return value if isinstance(value, tuple) else (value, value)

    h_w = _pair(h_w)
    kernel_size = _pair(kernel_size)
    stride = _pair(stride)
    pad = _pair(pad)
    dilation = _pair(dilation)
    output_padding = _pair(output_padding)
    # PyTorch ConvTranspose2d formula:
    #   out = (in - 1)*stride - 2*pad + dilation*(kernel - 1) + output_padding + 1
    # The original used "+ pad" in place of "+ 1" and ignored dilation.
    h = (h_w[0] - 1) * stride[0] - 2 * pad[0] + dilation[0] * (kernel_size[0] - 1) + output_padding[0] + 1
    w = (h_w[1] - 1) * stride[1] - 2 * pad[1] + dilation[1] * (kernel_size[1] - 1) + output_padding[1] + 1
    return h, w
def filter_keys(dct, keys) -> dict:
    """Return filtered dict by given keys"""
    result = {}
    for key in dct:
        if key in keys:
            result[key] = dct[key]
    return result
def check_column(col, sep=":"):
    """Convert input column string to list of columns
    :param col: input string
    :param sep: default ":"
    :return: list of columns
    :raises TypeError: when col is neither a string nor a list
    """
    if isinstance(col, list):
        return col
    if isinstance(col, str):
        return col.split(sep)
    raise TypeError(f'Columns "{col}" needs to be a string or list of strings')
def re(rm, rf, beta):
    """Returns cost of equity using CAPM formula.

    NOTE(review): the function name shadows the stdlib `re` module at
    import sites; kept unchanged for interface compatibility.
    """
    # CAPM: required return = risk-free rate + beta * market risk premium.
    market_premium = rm - rf
    return rf + beta * market_premium
def liquidDensity(T, lDP):
    """
    liquidDensity(T, lDP)
    liquidDensity (kg/m^3) = A*B^-(1-T/critT)^n*1000.0
    Parameters
        T, temperature in Kelvin
        lDP, A=lDP[0], B=lDP[1], n=lDP[2], critT=lDP[3]
            A, B, and n are regression coefficients, critT: critical temperature
    Returns
        liquid density in kg/m^3 at T
    """
    A, B, n, critT = lDP[0], lDP[1], lDP[2], lDP[3]
    return 1000.0 * A * B ** -((1 - T / critT) ** n)
def get_query_string(query_map):
    """Return the query string given a map of key-value pairs."""
    # An empty (or falsy) map yields None, matching the original contract.
    if not query_map:
        return None
    pairs = [
        name + "=" + value
        for name, values in query_map.items()
        for value in values
    ]
    return "&".join(pairs)
def _string_label_to_class_id_postprocessor(
string_label, label_classes, default=-1, **unused_kwargs):
"""Returns index of string_label in label_classes or default if not found."""
if string_label in label_classes:
return label_classes.index(string_label)
else:
return default |
def ListTrueOnly(adict):
    """Return a list of strings for which their values were True in the dict.

    Args:
      adict: The original dictionary, with string keys and boolean values.

    Returns:
      A list of strings for which the boolean values were True in the dictionary.
    """
    true_keys = []
    for key, value in adict.items():
        if value:
            true_keys.append(key)
    return true_keys
def whether_prefix(coords):
    """Determine whether gene IDs should be prefixed with nucleotide IDs.

    Parameters
    ----------
    coords : dict
        Gene coordinates table.

    Returns
    -------
    bool
        Whether gene IDs should be prefixed.

    See Also
    --------
    read_gene_coords

    Notes
    -----
    It is based on a simple mechanism which checks whether there are duplicate
    gene IDs, and if so, all gene IDs should be prefixed to avoid confusion.
    """
    # Remember the start/end flag first seen for each gene ID; a second
    # record with the SAME flag means the ID occurs twice (a duplicate).
    seen = {}
    for queue in coords.values():
        for record in queue:
            is_start, gid = record[1], record[3]
            if gid in seen:
                if seen[gid] == is_start:
                    return True
            else:
                seen[gid] = is_start
    return False
def list_objects(s3_resource, bucket, prefix, suffix=None):
    """
    Get list of keys in an S3 bucket, filtering by prefix and suffix. Function
    developed by Kaixi Zhang as part of AWS_S3 class and adapted slightly here.
    This function retrieves all matching objects, and is not subject to the 1000
    item limit.

    Params:
        s3_resource (object): A boto3 s3 resource object
        bucket (str): Name of s3 bucket to list
        prefix (str): Prefix within bucket to search
        suffix (str, list): Optional string or string list of file endings

    Returns:
        List of s3 keys
    """
    keys = []
    if s3_resource is None:
        # Bug fix: the original had `print` and its message on separate
        # lines (a bare-expression no-op), so the warning never printed.
        print('Warning: please first create an s3 resource')
        return keys
    s3_bucket = s3_resource.Bucket(bucket)
    for obj in s3_bucket.objects.filter(Prefix=prefix):
        key = str(obj.key)
        if suffix is None:
            # No suffix filter: keep every object under the prefix.
            keys.append(key)
        elif isinstance(suffix, list):
            # Keep the key if it ends with ANY of the listed suffixes
            # (each key is appended at most once).
            if any(key.endswith(s) for s in suffix):
                keys.append(key)
        elif key.endswith(suffix):
            # suffix is a single string
            keys.append(key)
    return keys
def get_key(udict, key, missing_value=""):
    """Return a key:value pair as dict

    The input mapping is copied first, so ``udict`` is never mutated.
    """
    working_copy = dict(udict)
    value = working_copy.pop(key, missing_value)
    return {key: value}
def page_number2image_name(number, string="image", padding_size=4):
    """
    Utility function to format a number with a padding of size n.

    :param number: the number to format (int)
    :param string: the prefix to prepend (str)
    :param padding_size: the desired lenght of the resulting string (int)
    :return: a string, whose length == padding_size and in which the number
        of zeroes == padding_size - len(number)

    Usage example:
    >>> page_number2image_name(24)
    'image-0024'
    >>> page_number2image_name(24,padding_size=5)
    'image-00024'
    """
    padded_number = str(number).zfill(padding_size)
    return "-".join([string, padded_number])
def safefloat(value):
    """Safely convert ``value`` to a float, returning None on failure.

    Bug fix: the original caught only ValueError, so non-numeric objects
    such as ``None`` raised TypeError instead of returning None — which
    contradicted the "safely converts value to float or none" contract.
    """
    try:
        return float(value)
    except (ValueError, TypeError):
        return None
def truncate(text, width=50):
    """
    Truncates text to the provided width. Adds a '..' at the end if truncated.

    :param text:
    :param width:
    :return: truncated text if necessary else the same text
    """
    if len(text) <= width:
        return text
    return text[:width] + '..'
def get_human_readable_size(sizeinbytes):
    """generate human-readable size representation like du command

    Plain bytes are printed with no suffix (du-style); larger sizes get
    one decimal place and a K/M/G/T/P/E suffix.
    """
    size = abs(sizeinbytes)
    # Bytes: raw number, no suffix.
    if size < 1024.0:
        return '{0}'.format(size)
    size /= 1024.0
    for suffix in ['K', 'M', 'G', 'T', 'P']:
        if size < 1024.0:
            return '{0:.1f}{1}'.format(size, suffix)
        size /= 1024.0
    # Exabytes and beyond.
    return '{0:.1f}E'.format(size)
def get_region(reg_str):
    """Transform a string of the form X-Y into a region

    Either bound may be empty: an empty start means 0 and an empty end
    means infinity.
    """
    parts = reg_str.split('-')
    start = 0 if parts[0] == '' else int(parts[0])
    end = float('inf') if parts[1] == '' else int(parts[1])
    return [start, end]
def masked_by_quotechar(S, quotechar, escapechar, test_char):
    """Test if a character is always masked by quote characters

    Scans ``S`` with a small state machine tracking whether the current
    position is inside a section delimited by ``quotechar``; returns False
    as soon as ``test_char`` is seen outside quotes, True otherwise.

    >>> masked_by_quotechar('A"B&C"A', '"', '', '&')
    True
    >>> masked_by_quotechar('A"B&C"A&A', '"', '', '&')
    False
    >>> masked_by_quotechar('A|"B&C"A', '"', '|', '&')
    False
    >>> masked_by_quotechar('A"B"C', '"', '', '')
    False
    """
    # An empty test character can never be "masked".
    if test_char == "":
        return False
    escape_next = False  # previous char was the escape character
    in_quotes = False    # currently inside a quoted section
    i = 0
    while i < len(S):
        s = S[i]
        if s == quotechar:
            if escape_next:
                # Escaped quote: skip it without toggling the quote state.
                # NOTE(review): escape_next is never reset to False, so
                # every LATER quotechar is also skipped once an escape has
                # been seen — confirm this is intended.
                i += 1
                continue
            if not in_quotes:
                in_quotes = True
            else:
                if i + 1 < len(S) and S[i + 1] == quotechar:
                    # Doubled quote inside quotes: literal quote, stay inside.
                    i += 1
                else:
                    in_quotes = False
        elif s == test_char and not in_quotes:
            # Unmasked occurrence of the test character.
            return False
        elif s == escapechar:
            escape_next = True
        i += 1
    return True
def board_rows(board, rows, cols):
    """Return the board (a flat sequence of cells) as a list of row strings."""
    result = []
    for r in range(rows):
        start = r * cols
        result.append(''.join(board[start:start + cols]))
    return result
def is_rev_dir(dir_i):
    """Classify a directory name of the form ``_<int>`` as a revision dir.

    Returns a dict with keys ``is_rev_dir`` (bool) and ``rev_num``
    (int, or None when the name is not a revision directory).
    """
    assert dir_i is not None, "dir_i is None"

    result = {"is_rev_dir": False, "rev_num": None}
    # Revision directories look like "_<number>": a leading underscore
    # followed by exactly one numeric token.
    if dir_i[0] == "_":
        parts = dir_i.split("_")
        if len(parts) == 2 and parts[1].isnumeric():
            result["is_rev_dir"] = True
            result["rev_num"] = int(parts[1])
    return result
def getSQM(header=None):
    """Extract the sky-quality (SQM) value from a header mapping.

    Checks the three header keys the code recognizes (``SQM``,
    ``SKY-QLTY``, ``MPSAS``) and returns the largest value found,
    converted to float. Missing keys count as 0.

    :param header: mapping of header keyword -> value (default: empty)
    :return: largest sky-quality value as float; 0.0 if no key is present
    """
    # header=None sentinel replaces the original mutable default
    # argument (header={}); call behavior is unchanged.
    if header is None:
        header = {}
    sqm = max(float(header.get('SQM', 0)),
              float(header.get('SKY-QLTY', 0)),
              float(header.get('MPSAS', 0)),
              )
    return sqm
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.