content stringlengths 42 6.51k |
|---|
def circle_coordinates(x, y, radius):
    """Return the bounding box (x1, y1, x2, y2) of a circle given its centre and radius."""
    return (x - radius, y - radius, x + radius, y + radius)
def check_property_differences(spec_properties, dev_properties, bidirectional):
    """Compare property names parsed from the specification and device YAML.

    Parameters
    ----------
    spec_properties : list
        Dicts with specification properties, e.g. [{'name': 'AdminModeDefault'}, ...]
    dev_properties : list
        Dicts with device properties, same shape as ``spec_properties``
    bidirectional : bool
        Also report properties present on the device but absent from the spec.

    Returns
    -------
    issues : list
        Strings describing the differences; empty list when the name sets match.
    """
    issues = []
    spec_names = {prop["name"] for prop in spec_properties}
    dev_names = {prop["name"] for prop in dev_properties}
    if spec_names != dev_names:
        missing_on_device = sorted(spec_names - dev_names)
        if missing_on_device:
            issues.append(
                "Property [{}] differs, specified but missing in device".format(
                    ",".join(missing_on_device)
                )
            )
        if bidirectional:
            extra_on_device = sorted(dev_names - spec_names)
            if extra_on_device:
                issues.append(
                    "Property [{}] differs, present in device but not specified".format(
                        ",".join(extra_on_device)
                    )
                )
    return issues
def bool_str(val):
    """CloudFormation Template formatted boolean string.

    CloudFormation uses all-lowercase booleans, so plain str() on a bool
    does not produce the right text.

    Args:
        val (bool): Boolean value to convert to a string
    Returns:
        (str): "true" or "false"
    """
    if val:
        return "true"
    return "false"
def kb_ids2known_facts(kb_ids):
    """Collect the set of all known fact tuples from a kb dict.

    Each value's first element is a list of parallel arrays; a fact is the
    tuple formed by one position across all arrays.
    """
    facts = set()
    for entry in kb_ids.values():
        arrays = entry[0]
        for position in range(len(arrays[0])):
            facts.add(tuple(column[position] for column in arrays))
    return facts
def is_casava_v180_or_later(header_line):
    """Check if the header looks like it is Illumina software post-casava v1.8

    Parameters
    ----------
    header_line : bytes
        A header line

    Returns
    -------
    bool
        ``True`` for if casava v1.8+, otherwise ``False``

    Raises
    ------
    ValueError
        If ``header_line`` does not start with ``@``.

    Examples
    --------
    >>> is_casava_v180_or_later(b'@foo')
    False
    >>> id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
    >>> is_casava_v180_or_later(id_)
    True
    """
    if not header_line.startswith(b'@'):
        raise ValueError("Non-header line passed in.")
    fields = header_line.split(b':')
    # Bug fix: `fields[7] in b'YN'` was a substring test, so an empty
    # filtered-flag field (b'') incorrectly matched. Compare against the
    # two legal flag values explicitly.
    return len(fields) == 10 and fields[7] in (b'Y', b'N')
def combine_dictionaries(dict1, dict2):
    """Merge two dicts: concatenate lists under shared keys, add new keys.

    Bug fix: previously this aliased and mutated ``dict1`` (and extended its
    list values in place). It now builds and returns a new dict, leaving both
    inputs untouched.

    WARNING: values under keys shared by both dicts must be lists.
    """
    outdict = dict(dict1)  # shallow copy so dict1 itself is never mutated
    for key, val in dict2.items():
        if key in outdict:
            assert isinstance(val, list)
            assert isinstance(outdict[key], list)
            # `+` builds a fresh list instead of extending dict1's list in place.
            outdict[key] = outdict[key] + val
        else:
            outdict[key] = val
    return outdict
def filter_training_seats_only(queryset, name, seats):
    """Limit Memberships to only entries with some training seats allowed.

    When *seats* is falsy the queryset is returned unchanged.
    """
    if not seats:
        return queryset
    return queryset.filter(instructor_training_seats_total__gt=0)
def cleanup_data(obj):
    """Recursively remove ``None`` entries from containers.

    Args:
        obj: object to clean up
    Returns:
        object: a new container of the same type without ``None`` members
        (dict entries are dropped when either key or value is ``None``);
        non-container objects are returned unchanged.
    """
    if isinstance(obj, dict):
        return type(obj)(
            (cleanup_data(key), cleanup_data(value))
            for key, value in obj.items()
            if key is not None and value is not None
        )
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(cleanup_data(item) for item in obj if item is not None)
    return obj
def _flatten_nested_iterable(struct):
"""
:param struct:
:return:
"""
result = []
for item in struct:
if hasattr(item, '__iter__') and not isinstance(item, str):
result.extend(_flatten_nested_iterable(item))
else:
result.append(item)
return result |
def fixed_width_repr_of_int(value, width, pad_left=True):
    """
    Format the given integer so the result string is exactly *width* wide.

    The number is padded with spaces — on the left by default, on the right
    when ``pad_left`` is False — or replaced with asterisks when it is too
    big to fit.

    :param int value: An integer number to format
    :param int width: The result string must have the exact width
    :param bool pad_left: pad on the left (right-align) when True
    :return: A string representation of the given integer.
    """
    # Bug fix: the pad_left=False branch previously also used '>' so the
    # value was always right-aligned; use '<' for left alignment.
    ret = '{:{pad_dir}{width}d}'.format(value, pad_dir='>' if pad_left else '<', width=width)
    return '*' * width if len(ret) > width else ret
def min_cats(sorted_counts, N=1):
    """Given (cat, count) tuples, return cats occurring more than *N* times.

    Returns both the list of category names and the list of (cat, count)
    tuples, preserving input order. ``N`` defaults to 1.
    """
    kept_tuples = [(item[0], item[1]) for item in sorted_counts if item[1] > N]
    kept_names = [cat for cat, _ in kept_tuples]
    return kept_names, kept_tuples
def strip_if_string(val):
    """Strip a string value, mapping whitespace-only strings to None.

    :param val: any
    :return: stripped str, None, or the value unchanged when not a str
    """
    if not isinstance(val, str):
        return val
    stripped = val.strip()
    return stripped if stripped != '' else None
def _build_hasheable_corpus(corpus):
"""Hashes and get `corpus`.
Parameters
----------
corpus : list of list of (int, int)
Given corpus.
Returns
-------
list of list of (int, int)
Hashable corpus.
"""
return [tuple(doc) for doc in corpus] |
def join_neighs_XOR_notrelpos(idxs0_ki, idxs1_ki):
    """Join neighs with XOR (symmetric difference, input order kept).

    Parameters
    ----------
    idxs0_ki: list or np.ndarray
        the indices of the neighs of neighbourhood0
    idxs1_ki: list or np.ndarray
        the indices of the neighs of neighbourhood1

    Returns
    -------
    neighs: list
        the joined neighbourhood: indices present in exactly one of the
        inputs, neighbourhood0's survivors first.
    """
    # Build sets once so membership tests are O(1) instead of O(n) list scans.
    set0, set1 = set(idxs0_ki), set(idxs1_ki)
    neighs = [idx for idx in idxs0_ki if idx not in set1]
    neighs.extend(idx for idx in idxs1_ki if idx not in set0)
    return neighs
def hasProperty(propertyName, typeObj):
    """Check whether the given type object has a property with the given name.

    Keyword arguments:
    propertyName -- name of the property to look for
    typeObj -- type object to check up
    """
    if not hasattr(typeObj, 'properties'):
        return False
    return any(prop.name == propertyName for prop in typeObj.properties)
def _get_short_cid( container_id ):
"""returns a shortened container id. Useful for logging, where using a full length container id
is not necessary and would just add noise to the log.
The shortened container id will contain enough information to uniquely
identify the container for most situations. Note: the returned value
should never be used as a key in a dict for containers because there is
always the remote possibility of a conflict (given a large enough number
of containers).
"""
# return the first 8 chars of the container id.
# we don't need to check for length because even if len( container_id ) < 8
# it's still valid to slice beyond the end of a string. See:
# https://docs.python.org/2/reference/expressions.html#slicings
return container_id[:8] |
def compress(nodelist):
    """Return a hostlist string for a list of hostnames.

    A string input is treated as an existing hostlist: its brackets are
    stripped and it is re-split on commas first.

    :param: nodelist: The hostlist string or a list of hostnames.
    :return: The hostlist string.
    """
    if isinstance(nodelist, str):
        nodelist = nodelist.replace("[", "").replace("]", "").split(',')
    return '[%s]' % ','.join(map(str, nodelist))
def get_clf_mode(train, test):
    """Detect whether we are in single-label to single-label mode or not.

    An example without a "labeled" key counts as multi-label.
    Returns a (train_mode, test_mode) pair of "single"/"multi".
    """
    def mode(examples):
        for example in examples:
            if example.get("labeled", "multi") == "multi":
                return "multi"
        return "single"

    return mode(train), mode(test)
def get_dict_properties(item, fields, mixed_case_fields=None, formatters=None):
    """Return a tuple containing the item properties.

    :param item: a single dict resource
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: tuple of field names to preserve case
    :param formatters: dictionary mapping field names to callables
        to format the values
    """
    # Fix: avoid mutable default arguments ([] / {}) shared across calls.
    mixed_case_fields = mixed_case_fields or []
    formatters = formatters or {}
    row = []
    for field in fields:
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        data = item.get(field_name, '')
        if field in formatters:
            row.append(formatters[field](data))
        else:
            row.append(data)
    return tuple(row)
def click_dfs(field, num_rows, num_cols, given_i, given_j):
    """Flood-fill from (given_i, given_j), marking reachable 0 cells as -2.

    Time: O(num_rows * num_cols)
    Space: O(num_rows * num_cols) (recursion depth)
    """
    in_bounds = 0 <= given_i < num_rows and 0 <= given_j < num_cols
    if not (in_bounds and field[given_i][given_j] == 0):
        return field
    field[given_i][given_j] = -2
    # Recurse into the full 3x3 neighbourhood; visited cells return at once.
    for ni in (given_i - 1, given_i, given_i + 1):
        for nj in (given_j - 1, given_j, given_j + 1):
            click_dfs(field, num_rows, num_cols, ni, nj)
    return field
def rgb2htmlcolor(rgb_tuple):
    """Convert an (R, G, B) tuple to an HTML '#RRGGBB' colour string."""
    red, green, blue = rgb_tuple
    return '#%02x%02x%02x' % (red, green, blue)
def get_area(ptlist):
    """Calculate the area of a polygon defined by a list of (x, y) points.

    Uses the trapezoid form of the shoelace formula; be careful, the
    implementation can give unexpected results with self-intersecting
    polygons. The output is always non-negative.

    Created: 2015 April 29, msswan
    """
    def trapezoid(pt1, pt2):
        return (pt2[1] + pt1[1]) * (pt2[0] - pt1[0]) / 2.0

    area = trapezoid(ptlist[-1], ptlist[0])
    for prev_pt, next_pt in zip(ptlist, ptlist[1:]):
        area += trapezoid(prev_pt, next_pt)
    return abs(area)
def get_cleaned_log(raw_log):
    """Clean the log of stray spaces and newlines.

    :param raw_log: iterable of log lines
    :return: list of non-empty stripped strings
    """
    cleaned = []
    for line in raw_log:
        stripped = line.strip()
        if stripped:
            cleaned.append(stripped)
    return cleaned
def find_active_desktop(desktops):
    """Returns the currently active desktop.

    Args:
        desktops: A list containing all Desktop instances.
    Returns:
        The first Desktop whose ``active`` flag is truthy, or None if
        none is found.
    """
    return next((desktop for desktop in desktops if desktop.active), None)
def check_if_hits(row, column, fleet):
    """
    This method checks if the shot of the human player at the square represented by row and column
    hits any of the ships of fleet.
    :param row: int
    :param column: int
    :param fleet: list
    :returns result: bool - True if so and False otherwise
    """
    # Each ship appears to be a tuple of
    # (row, column, is_horizontal, length, set_of_already_hit_squares) --
    # inferred from the indexing below; TODO confirm against the caller.
    result = False
    for i in range(len(fleet)):
        # check if guess already in hits set:
        if (row, column) in fleet[i][4]:
            # NOTE(review): this `break` exits the whole fleet loop, so ships
            # after an already-hit one are never examined -- confirm intended.
            break
        for j in range(fleet[i][3]):
            # if horizontal
            if fleet[i][2]:
                # Hit when the shot lies on the ship's row within its span.
                if row == fleet[i][0] and column == fleet[i][1] + j:
                    result = True
                    break
            # if vertical
            else:
                if row == fleet[i][0] + j and column == fleet[i][1]:
                    result = True
                    break
    return result
def format_internal_tas(row):
    """ Concatenate TAS components into a single field for internal use.
    Args:
        row: row of data with TAS elements
    Returns:
        TAS components concatenated into a single string
    """
    # This formatting should match formatting in dataactcore.models.stagingModels concat_tas
    def component(key, default):
        # Use the stripped value when it is non-empty, else the padded default.
        value = row[key]
        stripped = value.strip() if value else ''
        return stripped if stripped else default

    return ''.join([
        component('allocation_transfer_agency', '000'),
        component('agency_identifier', '000'),
        component('beginning_period_of_availa', '0000'),
        component('ending_period_of_availabil', '0000'),
        component('availability_type_code', ' '),
        component('main_account_code', '0000'),
        component('sub_account_code', '000'),
    ])
def parse_fixture_line(line):
    """ Parses hold specifications.
    Line format is <universe> <address> <panel number> <x> <y> [<route number>]
    Lines whose first word is "#" are comments and yield None.
    """
    required_fields = 5
    words = line.split()
    if words and words[0] == "#":
        return None
    assert len(words) >= required_fields
    return {
        "strand": int(words[0]),
        "address": int(words[1]),
        "panel_number": int(words[2]),
        "grid_loc": (int(words[3]), int(words[4])),
        # Duplicate route numbers are collapsed via the intermediate set.
        "routes": list(set(map(int, words[required_fields:]))),
    }
def str_to_float(item: str) -> float:
    """Convert a string to a float.

    Args:
        item (str): decimal string representation of a number
    Returns:
        float: the parsed value
    """
    # Fix: merged the placeholder "[summary]" docstring and the stray
    # second string-literal statement into one real docstring.
    return float(item)
def get_1rep_max(weight, reps=1):
    """Reduces weight and rep combination to 1-rep-max weight.

    With reps == 1 the weight is returned unchanged (truncated to int).
    """
    extra_reps = reps - 1
    return int(weight * extra_reps * .033 + weight)
def validate_passphrases(passphrases, validator):
    """Validate passphrases with validator function, keeping the valid ones."""
    valid = []
    for passphrase in passphrases:
        if validator(passphrase):
            valid.append(passphrase)
    return valid
def apply_rules(l, m, r, rules):
    """Apply selected rule to cell

    Look up the next state for a cell with state ``m`` given neighbour
    states ``l`` (left) and ``r`` (right).

    Args:
        l: left neighbour cell state.
        m: current cell state.
        r: right neighbour cell state.
        rules: array current rule.

    Returns:
        The matching rule entry, or None for non-binary input states.
    """
    # Wolfram-style ordering: (1,1,1) -> rules[0] ... (0,0,0) -> rules[7].
    rule_index = {
        (1, 1, 1): 0, (1, 1, 0): 1, (1, 0, 1): 2, (1, 0, 0): 3,
        (0, 1, 1): 4, (0, 1, 0): 5, (0, 0, 1): 6, (0, 0, 0): 7,
    }.get((l, m, r))
    return rules[rule_index] if rule_index is not None else None
def dev_dupe_dicter(finals):
    """
    Prepare dictionary to clean duplicate autoloaders.

    Inverts the URL -> content-length mapping into
    content-length -> set of URLs.

    :param finals: Dict of URL:content-length pairs.
    :type finals: dict(str: str)
    """
    inverted = {}
    for url, length in finals.items():
        if length not in inverted:
            inverted[length] = set()
        inverted[length].add(url)
    return inverted
def _roitmpl2roiname(roitmpl):
"""
generate roiNames out of roitempl
written in an amazing function to easyly change pattern for roiNames
in all further function at once. This way keep the
featureAttributes (fa) consistent
"""
roiRname = 'Right'+roitmpl
roiLname = 'Left'+roitmpl
return roiLname,roiRname |
def compresoneadjtuple(s):
    """useful to compress adjacent entries

    Scans the interval list for the FIRST pair of adjacent tuples whose
    ranges touch exactly (first's end == next's start), merges just that
    one pair and returns (new_list, False). When nothing is merged the
    list is returned with True, signalling a fixed point, so callers can
    iterate until stable.
    """
    if len(s) < 1: return s, True
    finals=[]
    for pos in range(len(s)-1):
        firstt, secondt = s[pos],s[pos+1]
        # if (firstt[1]==secondt[0]) or (firstt[1]+1==secondt[0]):
        if (firstt[1] == secondt[0]):
            # Merge this single adjacent pair, append the untouched tail,
            # and report False (one merge happened).
            finals.append((firstt[0],secondt[1]))
            finals.extend(s[pos+2:])
            return finals, False
        else:
            finals.append(firstt)
    # No adjacent pair merged: the rebuilt list equals the input.
    finals.append(s[-1])
    return finals, True
def find_base_style(masters):
    """Find a base style shared between all masters.

    Return empty string if none is found.
    """
    if not masters:
        return ""
    common = (masters[0].name or "").split()
    for master in masters:
        master_words = master.name.split()
        # Keep only words present in every master seen so far, in this
        # master's word order.
        common = [word for word in master_words if word in common]
    return " ".join(common)
def country_list(cities_data):
    """Returns a list of all the countries represented in cities_data.

    Order of first appearance is preserved and each country appears once.
    """
    countries = []
    seen = set()  # O(1) membership instead of O(n) scans of the result list
    for record in cities_data:
        country = record['country']
        if country not in seen:
            seen.add(country)
            countries.append(country)
    return countries
def get_list_from_config(name, config):
    """ Gets a list item from config. A falsy value becomes an empty list.
    If the value is not a list, wrap it in a list. """
    value = config[name] or []
    return value if isinstance(value, list) else [value]
def dot(x, y):
    """
    Sum of multiplying X and Y elementwise.
    :param list or tuple x: 1st array.
    :param list or tuple y: 2nd array.
    :return: sum of multiplied array.
    :rtype: int or float
    :raise ValueError: when x or y is empty
    """
    if not x or not y:
        raise ValueError('x or y is empty')
    return sum(a * b for a, b in zip(x, y))
def standard_codec_name(name: str) -> str:
    """
    Map a codec name to the preferred standardized version.
    The preferred names were taken from this list published by IANA:
    U{http://www.iana.org/assignments/character-sets/character-sets.xhtml}
    @param name:
        Text encoding name, in lower case.
    """
    if name.startswith("iso8859"):
        return "iso-8859" + name[7:]
    preferred = {
        "ascii": "us-ascii",
        "euc_jp": "euc-jp",
        "euc_kr": "euc-kr",
        "iso2022_jp": "iso-2022-jp",
        "iso2022_jp_2": "iso-2022-jp-2",
        "iso2022_kr": "iso-2022-kr",
    }
    # Names without a preferred alias pass through unchanged.
    return preferred.get(name, name)
def get_title(video):
    """Return title"""
    print('Trying: ' + video['title'])
    title = video['title']
    if ' | ' in title:
        return title.split(' | ')[1]
    if ' - ' in title:
        return title.split(' - ')[1]
    return title
def getValueTryRemoveTailZero(float_num):
    """
    try to remove .0 from an float number, 2.00 -> 2
    keep other float as it was, 2.02 -> 2.02
    :param float float_num
    :return float/int
    """
    as_int = int(float_num)
    return as_int if as_int == float_num else float_num
def ndvi(b4, b8):
    """
    Normalized Difference Vegetation Index (Rouse Jr et al., 1974).
    .. math:: NDVI = (b8 - b4)/(b8 + b4)
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float
    :returns NDVI: Index value
    .. Tip::
        Rouse, J. W., Haas, R. H., Schell, J. A., Deering, D. W. 1974. \
        Monitoring vegetation systems in the great plains with ERTS. \
        In: Proceedings of the Third Earth Resources Technology Satellite-1 \
        Symposium; NASA SP-351 (pp. 309-317).
    """
    return (b8 - b4) / (b8 + b4)
def constant_time_compare(val1, val2):
    """
    Returns True if the two strings are equal, False otherwise.
    The time taken is independent of the number of characters that match.
    For the sake of simplicity, this function executes in constant time only
    when the two strings have the same length. It short-circuits when they
    have different lengths.
    """
    if len(val1) != len(val2):
        return False
    # Accumulate XOR differences; never branch on a character comparison.
    mismatch = 0
    for ch1, ch2 in zip(val1, val2):
        mismatch |= ord(ch1) ^ ord(ch2)
    return mismatch == 0
def accuracy(y, yhat):
    """Returns the accuracy. Higher is better.
    :param y: true function values
    :param yhat: predicted function values
    """
    correct = sum(1 for truth, pred in zip(y, yhat) if truth == pred)
    return float(correct) / len(y)
def potatoes(p0, w0, p1):
    """
    Return the integer weight after the water percentage drops p0 -> p1.

    Derivation:
    - p1/100 = water1 / (water1 + (1 - p0/100) * w0)
      => water1 = w0 * p1/100 * (1 - p0/100) / (1 - p1/100)
    - dry = w0 * (1 - p0/100)
    - w1 = water1 + dry = w0 * (100 - p0) / (100 - p1)

    Example:
        98/100 = water1 / (water1 + (1 - 99/100) * 100)
        water1 = 49; w1 = 49 + 1 = 50
    """
    return int(w0 * (100 - p0) / (100 - p1))
def decode_string(string, encoding=None):
    """Decode a string with specified encoding

    :type string: str or bytes
    :param string: string to decode
    :param str encoding: encoding of string to decode (defaults to utf-8)
    :rtype: str
    :return: decoded string
    :raises ValueError: when string is neither str nor bytes
    """
    if isinstance(string, str):
        return string
    if isinstance(string, bytes):
        if encoding is None:
            encoding = 'utf-8'
        return string.decode(encoding)
    raise ValueError('invalid string type: {}'.format(type(string)))
def deep_get(target_dict, *args, **kwargs):
    """
    Get a value from target_dict entering in the nested keys. If keys does not exist, it returns None
    Example target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both key_list=[a,b,c] and key_list=[f,h] return None
    :param target_dict: dictionary to be read
    :param args: list of keys to read from target_dict
    :param kwargs: only can contain default=value to return if key is not present in the nested dictionary
    :return: The wanted value if exist, None or default otherwise
    """
    current = target_dict
    for key in args:
        if isinstance(current, dict) and key in current:
            current = current[key]
        else:
            return kwargs.get("default")
    return current
def concat_block_texts(blocks: list) -> str:
    """Combine child block texts to get the text for an abstract block."""
    texts = [block["Text"] for block in blocks]
    return " ".join(texts)
def compare(lhs, rhs):
    """Implements cmp() for Python 2 and 3 alike.

    None sorts before everything else; two Nones compare equal. Otherwise
    returns -1, 0 or 1 according to ordering.
    """
    # Fix: use identity checks for None (PEP 8) instead of `== None`,
    # which can misbehave with objects defining a custom __eq__.
    if lhs is None:
        return 0 if rhs is None else -1
    if rhs is None:
        return 1
    return (lhs > rhs) - (lhs < rhs)
def _merge_peaks(l):
    """
    Merge signals if the difference of nuclear to cytoplasmic ratio is 1

    Groups the index lists in ``l`` into connected components: any two
    lists that share at least one element end up merged. Returns the list
    of merged components, each as a sorted list of indices.
    """
    idx = []
    while len(l)>0:
        # Seed a new component with the first remaining group.
        first, *rest = l
        first = set(first)
        lf = -1
        # Keep absorbing overlapping groups until the component stops growing.
        while len(first)>lf:
            lf = len(first)
            rest2 = []
            for r in rest:
                if len(first.intersection(set(r)))>0:
                    first |= set(r)
                else:
                    rest2.append(r)
            rest = rest2
        first = list(first)
        first.sort()
        idx.append(first)
        # Continue with the groups that did not touch this component.
        l = rest
    return idx
def get_shared_link_header(shared_link, password=None):
    """
    Gets the HTTP header required to use a shared link to grant access to a shared item.
    :param shared_link:
        The shared link.
    :type shared_link:
        `unicode`
    :param password:
        The password for the shared link.
    :type password:
        `unicode`
    :return:
        The 'BoxApi' header dict referring to the shared item.
    :rtype:
        `dict`
    """
    header_value = 'shared_link={0}'.format(shared_link)
    if password is not None:
        header_value += '&shared_link_password={0}'.format(password)
    return {'BoxApi': header_value}
def get_url_without_scheme(url: str) -> str:
    """Get the target url without scheme
    @type url: str
    @param url: The target URL
    @returns str: The url without scheme
    """
    _, separator, remainder = url.partition('://')
    return remainder if separator else url
def depth_to_sample(depth, depth_data):
    """Convert a depth value to its sample index.

    Expects depth_data to provide 'depth_start' and 'depth_per_pixel'.
    """
    offset = depth - depth_data['depth_start']
    return int(offset / depth_data['depth_per_pixel'] - 0.5)
def backbone_edges(dot_bracket: str):
    """Return the RNA backbone edges of the dot-bracket string.

    Each edge is a (i, i+1) pair of adjacent positions.
    """
    length = len(dot_bracket)
    return [(i, i + 1) for i in range(length - 1)]
def load_result_to_json(keywords, segmentation_result):
    """Convert segmentation results (keywords and text blocks) into a json-able dict.

    :param keywords: resulting or put keywords
    :param segmentation_result: mapping of block id to its list of (start, end) segments
    :return: dict with "keywords" and "textBlocks" entries
    """
    text_blocks = [
        {"id": block_id, "startIndex": segment[0], "endIndex": segment[1]}
        for block_id, segments in segmentation_result.items()
        for segment in segments
    ]
    return {"keywords": keywords, "textBlocks": text_blocks}
def get_tiles(tile_rangs):
    """get each scene id and the tile x y bounds

    Args:
        tile_rangs(list): entries of [scene_id, [[x_min, x_max], [y_min, y_max]]],
            e.g. [['20200529_003832_100d', [[53910, 53961], [24896, 24921]]]]
    Returns:
        tile_xyz(list): strings of the form '{scene_id}-{x}-{y}-16'.
    """
    tile_xyz = []
    for item in tile_rangs:
        scene_id = item[0]
        x_bounds, y_bounds = item[1][0], item[1][1]
        # NOTE(review): the x upper bound is inclusive (+1) while the y upper
        # bound is exclusive -- confirm this asymmetry is intended upstream.
        for x in range(x_bounds[0], x_bounds[1] + 1):
            for y in range(y_bounds[0], y_bounds[1]):
                tile_xyz.append(f'{scene_id}-{x}-{y}-16')
    return tile_xyz
def max_cmp(L, cmp=None):
    """
    Returns the largest item of a list (or iterable) with respect to a
    comparison function.
    INPUT:
    ``L`` -- an iterable
    ``cmp`` -- an optional comparison function.
    ``cmp(x, y)`` should return a negative value if `x < y`, `0` if
    `x == y`, and a positive value if `x > y`.
    OUTPUT: the largest item of ``L`` with respect to ``cmp``.
    EXAMPLES::
        sage: from sage.misc.sage_itertools import max_cmp
        sage: L = [1,-1,3,-1,3,2]
        sage: max_cmp(L)
        3
        sage: def mycmp(x,y): return y - x
        sage: max_cmp(L, mycmp)
        -1
    The input can be any iterable::
        sage: max_cmp( (x^2 for x in L) )
        9
        sage: max_cmp( (x^2 for x in L), mycmp)
        1
    Computing the max of an empty iterable raises and error::
        sage: max_cmp([])
        Traceback (most recent call last):
        ...
        ValueError: max() arg is an empty sequence
        sage: max_cmp([], mycmp)
        Traceback (most recent call last):
        ...
        ValueError: max_cmp() arg is an empty sequence
    """
    if cmp is None:
        return max(L) # Resort to Python's standard max
    iterator = iter(L)
    try:
        # Seed the running maximum with the first item; an empty iterable
        # has no maximum, matching built-in max() behaviour.
        m = next(iterator)
    except StopIteration:
        raise ValueError("max_cmp() arg is an empty sequence")
    for item in iterator:
        # Replace the running maximum whenever cmp says item exceeds it.
        if cmp(item, m) > 0:
            m = item
    return m
def format_by_line_length(possible_votes, max_length=60):
    """
    Join vote options with spaces, wrapping once a line exceeds max_length.

    Note: I've tried to format with a nice aligned table but it's not
    possible to get it right (unless you hardcode it maybe)
    because the font used in the game does not have consistent characters (varying width)
    """
    lines = []
    current = ""
    for vote in possible_votes:
        current += vote + " "
        if len(current) > max_length:
            lines.append(current)
            current = ""
    # The (possibly empty) trailing line is always kept, as before.
    lines.append(current)
    return "\n".join(lines)
def _NormalizeArgd(rawDict):
"""
Normalize the argument dictionary. All parameters start with '#' will be
checked whether it is not None(client did not pass it). After checking a
new dictionary will be generated and returned.
:param rawDict: a dict, contains request args with required attribute flag
:return: A pair. First flag is bool to signal whether this request is valid,
Second is a dict for the generated dict or check failure key.
"""
pureDict = {}
for k in rawDict:
if k[0] == '#' and rawDict[k] is None:
return False, {"key": k[1:len(k)]}
pureDict[k[1:len(k)]] = rawDict[k] if rawDict[k] != "None" else None
return True, pureDict |
def _count(bs) -> int:
"""Given a sequence of bools, count the Trues"""
return sum(1 for b in bs if b) |
def sub_brackets(x):
    """Reformats 'a[0]' to 'a_0' (replace '[' with '_', drop ']')."""
    return x.translate(str.maketrans({'[': '_', ']': ''}))
def escape_perl_string(v):
    """Escape characters with special meaning in perl"""
    result = str(v)
    for special in ('$', '"', '@'):
        result = result.replace(special, '\\' + special)
    return result
def parse_slide_pipes(slide_desc_str):
    """
    Parse slide content.

    Splits the slide description on "-" into its tile tokens.
    """
    return slide_desc_str.split("-")
def get_video_url(array):
    """
    get video url from list
    :param array: candidate list of urls
    :return: the last entry, or None when array is not a non-empty list
    """
    if not isinstance(array, list) or not array:
        return None
    return array[-1]
def unigram(wordcount_dict, one_doc_length):
    """Compute per-word probabilities in one document.

    Args:
        wordcount_dict: mapping word -> count within the document.
        one_doc_length: total number of tokens in the document.
    Returns:
        dict mapping each word to count / one_doc_length.
    """
    # Removed the unused vocabulary-size variable and the commented-out
    # Lidstone-smoothing call; this is plain maximum-likelihood estimation.
    return {word: count / one_doc_length
            for word, count in wordcount_dict.items()}
def replace_line_breaks(string):
    """replaces: '\r\n' and '\n\r' and '\r' and '\n' with '<br />'"""
    br = '<br />'
    # Replace the two-char sequences first so they yield a single <br />.
    for token in ('\r\n', '\n\r', '\r', '\n'):
        string = string.replace(token, br)
    return string
def lstrip_list(s):
    """
    Return list with empty (falsy) items from start of list removed.

    Returns [] when the list is empty or entirely falsy.
    """
    index = 0
    while index < len(s) and not s[index]:
        index += 1
    if index == len(s):
        return []
    return s[index:]
def _escape_token(token, alphabet=None):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, str):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace("\\", "\\\\").replace("_", "\\u")
if alphabet is not None:
chars = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
token = ''.join(chars)
return token + "_" |
def count_occurences(data, feature):
    """Counts how often each unique value of a feature occurs in the whole
    dataset.

    Args:
        data: The dataset to count (iterable of dict-like records).
        feature: The feature to count.

    Returns:
        A dictionary where the keys are the data values and the values are the
        occurences.
    """
    # Single O(n) pass instead of calling list.count once per unique value
    # (which was O(n * unique)).
    counts = {}
    for record in data:
        value = record[feature]
        counts[value] = counts.get(value, 0) + 1
    return counts
def get_hsc_psf_url(ra, dec, band='i', rerun='', tract='', patch='', imgtype='coadd'):
    """
    Build the bulk-mode PSF query URL; see the hsc query manual:
    https://hscdata.mtk.nao.ac.jp/psf/4/manual.html#Bulk_mode
    """
    base = 'https://hscdata.mtk.nao.ac.jp/psf/4/cgi/getpsf'
    query = ('?ra={ra}&dec={dec}&filter={band}&rerun={rerun}'
             '&tract={tract}&patch={patch}&type={imgtype}').format(
        ra=ra, dec=dec, band=band, rerun=rerun, tract=tract,
        patch=patch, imgtype=imgtype)
    return base + query
def convolution_size_equation(size, filter_size, padding, stride):
    """Output size of a convolutional layer (floor division by stride)."""
    effective = size - filter_size + 2 * padding
    return effective // stride + 1
def learning_rate_with_decay(lr, global_step, discount_step, discount_factor):
    """
    Near-optimal step decay learning rate schedule as proposed by https://arxiv.org/abs/1904.12838.

    Scales lr by discount_factor only at positive multiples of discount_step.
    """
    at_decay_step = global_step > 0 and global_step % discount_step == 0
    if at_decay_step:
        return lr * discount_factor
    return lr
def fix_type(argv):  # Fix TypeError caused by argv; argv items arrive as str
    """Get a list and convert string items to int/float/bool in place.

    Bug fix: ``bool('False')`` is True (any non-empty string is truthy), so
    'False' previously became True. Now 'True'/'False' are compared to the
    literal text instead.
    """
    for index, value in enumerate(argv):
        if isinstance(value, str):
            if value.isnumeric():
                argv[index] = int(value)
            elif '.' in value:
                argv[index] = float(value)
            elif value in ('True', 'False'):
                argv[index] = value == 'True'
    return argv
def recvFixedLength(open_socket, lengthMsg):
    """
    Description
        Receive a fixed length message on an open socket.
    Parm
        lengthMsg, an integer, which is the length in characters of the message to receive.
    Return
        the message received as a string (possibly shorter than requested
        when the peer closes the connection early).
    """
    pieces = []
    nbBytesRecv = 0
    while nbBytesRecv < lengthMsg:
        piece = open_socket.recv(min(lengthMsg - nbBytesRecv, 4096))
        # Bug fix: recv() returns bytes, so the old `piece == ''` comparison
        # never matched and a closed connection caused an infinite loop.
        if not piece:
            # Give up now because nothing was received.
            break
        pieces.append(piece.decode())
        nbBytesRecv += len(piece)
    return ''.join(pieces)
def nested_get(ind, coll, lazy=False):
    """ Get nested index from collection

    When ``ind`` is a list, indexes recursively and returns a tuple (or a
    generator when ``lazy`` is True); otherwise returns ``coll[ind]``.

    Examples
    --------
    >>> nested_get(1, 'abc')
    'b'
    >>> nested_get([1, 0], 'abc')
    ('b', 'a')
    >>> nested_get([[1, 0], [0, 1]], 'abc')
    (('b', 'a'), ('a', 'b'))
    """
    if isinstance(ind, list):
        if lazy:
            return (nested_get(i, coll, lazy=lazy) for i in ind)
        # Bug fix: removed the unreachable `return seq` that followed —
        # `seq` was never defined and would have raised NameError.
        return tuple([nested_get(i, coll, lazy=lazy) for i in ind])
    else:
        return coll[ind]
def midi2freq(note):
    """
    convert a midi note to its frequency in hertz
    (A4 = MIDI 69 = 440 Hz; equal temperament)
    https://en.wikipedia.org/wiki/Scientific_pitch_notation
    """
    semitones_from_a4 = note - 69
    return 440 * 2.0 ** (semitones_from_a4 / 12)
def firstNN(*args):
    """
    Return the first argument that is not None, or None when all arguments
    are None (or there are no arguments).

    Example
    -------
    >>> firstNN(None, False, True)
    False
    >>> firstNN(True, False, True)
    True
    >>> firstNN(None, 2, True)
    2
    """
    for arg in args:
        if arg is not None:
            return arg
    return None
def convert_list_to_dict(input_list):
    """
    Convert a list of values into a dict with int as keys
    :param input_list: list, list to convert
    :return: dict -> {<int_keys>: <list_elements>}
    """
    return dict(enumerate(input_list))
def marker_func(label):
    """Given a label, returns the decided marker ("o" when nothing matches)."""
    marker_rules = [
        (("paw", "hoof"), "^"),
        (("ankle", "middle"), "D"),
        (("knee", "top"), "s"),
    ]
    for keywords, marker in marker_rules:
        if any(keyword in label for keyword in keywords):
            return marker
    return "o"
def get_prop_cen_z_offset(class_str):
    """Get the proposal z centroid offset depending on the class.

    Raises ValueError for any class other than Car/Pedestrian/Cyclist.
    """
    offsets = {
        'Car': 2.17799973487854,
        'Pedestrian': 0.351921409368515,
        'Cyclist': 0.8944902420043945,
    }
    if class_str not in offsets:
        raise ValueError('Invalid class_str', class_str)
    return offsets[class_str]
def get_2pttype_for_dictkey(dictkey):
    """
    Convert key in blinding factor dictionary to sets of strings used
    in the fits file to designate which kind of 2pt function is being analyzed.
    """
    mapping = {
        'gal_gal_cl': ('GPF', 'GPF'),
        'gal_shear_cl': ('GPF', 'GEF'),
        'shear_shear_cl': ('GEF', 'GEF'),
        'gal_gal_xi': ('GPR', 'GPR'),
        'gal_shear_xi': ('GPR', 'G+R'),
        'shear_shear_xip': ('G+R', 'G+R'),
        'shear_shear_xim': ('G-R', 'G-R'),
    }
    # Unknown keys yield None, matching the original if-chain fall-through.
    return mapping.get(dictkey)
def make_one_if_possible(shape):
    """
    Format layer's input or output shape.

    Parameters
    ----------
    shape : int or tuple

    Returns
    -------
    int or tuple
        The single element when shape is a 1-element tuple/list,
        otherwise shape unchanged.
    """
    is_singleton = isinstance(shape, (tuple, list)) and len(shape) == 1
    return shape[0] if is_singleton else shape
def mask_out_bits(segbits, mask, tags_to_mask=None):
    """
    Given a set of bits and a list of tags to affect (optional) removes all
    the bits from each tag that are present (and equal) in the masking set.

    Note: each affected tag's value is replaced by a set of bits. Mutates
    and returns ``segbits``.
    """
    if tags_to_mask is None:
        tags_to_mask = segbits.keys()
    mask_set = set(mask)
    for tag in tags_to_mask:
        segbits[tag] = set(segbits[tag]) - mask_set
    return segbits
def tobytes(s):
    """tobytes(s) -> bytes

    Ensures the given argument is of type bytes; str input is encoded
    as UTF-8, anything else raises TypeError.

    (Fix: the docstring previously documented a function named
    ``force_bytes``, so the examples below could never run as doctests.)

    Example:
       >>> tobytes(b'abc')
       b'abc'
       >>> tobytes('abc')
       b'abc'
       >>> tobytes(1)
       Traceback (most recent call last):
           ...
       TypeError: Expecting a value of type bytes or str, got 1
    """
    if isinstance(s, bytes):
        return s
    elif isinstance(s, str):
        return s.encode('utf8')
    else:
        raise TypeError('Expecting a value of type bytes or str, got %r' % s)
def report_from_raw_data(lang, data):
    """
    Basic report on raw, flat data from the API (not parsed into a tree yet).
    """
    videos = data['videos']
    report = {'lang': lang}
    # general counts
    report['#topics'] = len(data['topics'])
    report['#videos'] = len(videos)
    report['#exercises'] = len(data['exercises'])
    # video stats: count translations and available download formats
    translated_count = sum(
        1 for v in videos if v['translatedYoutubeLang'] != 'en')
    report['#translated_videos'] = translated_count
    report['#untranslated_videos'] = len(videos) - translated_count
    report['#has_mp4'] = sum(
        1 for v in videos if 'mp4' in v['downloadUrls'])
    report['#has_mp4_low'] = sum(
        1 for v in videos if 'mp4-low' in v['downloadUrls'])
    report['#has_mp4_low_ios'] = sum(
        1 for v in videos if 'mp4-low-ios' in v['downloadUrls'])
    # Keys <k> that can be used in https://{lang}.khanacademy.org/?curriculum=<k>
    # (deduplicated, first-seen order preserved)
    curriculum_keys = []
    for topic in data['topics']:
        key = topic.get("curriculumKey", None)
        if key and key not in curriculum_keys:
            curriculum_keys.append(key)
    report['curriculum_keys'] = curriculum_keys
    return report
def _getLastRevision(lines):
"""Returns last revision of the PDB entry, if applicable."""
if lines['REVDAT']:
for i, line in lines['REVDAT']:
return line[13:22]
break
else:
return "No revision yet" |
def escape(s):
    """Escape template string syntax.

    Backslash, backtick, and dollar each gain a leading backslash.
    A single translate() pass is equivalent to the chained replaces,
    since the inserted backslashes are never re-substituted.
    """
    table = str.maketrans({"\\": r"\\", "`": r"\`", "$": r"\$"})
    return s.translate(table)
def fib(n):
    """
    Return the nth number in the Fibonacci sequence (fib(0)=0, fib(1)=1).

    Iterative O(n) implementation; the previous naive recursion was
    O(phi^n) and blew up for even modest n. Results are identical,
    including returning n unchanged for n <= 1.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def sign_of(x):
    """
    The sign function for a real or integer parameter.

    Returns 1 for positive, -1 for negative; a falsy x (0, 0.0, ...) is
    returned as-is, matching the short-circuit in the original expression.
    """
    if not x:
        return x
    return -1 if x < 0 else 1
def rigSideSep(text):
    """
    @param text: string to be split by '/'
    @return: returns string array were split by '/' character

    Empty input yields []; input without '/' is returned whole in a
    one-element list; otherwise the last two path components are
    returned, each suffixed with '_'.
    """
    value = str(text)
    if value == '':
        return []
    pieces = value.split('/')
    if len(pieces) <= 1:
        return [value]
    return [pieces[-2] + '_', pieces[-1] + '_']
def flat_header_val_to_dict(header_val):
    """
    Transform a header string of comma separated key=value parameters
    into a dict.

    Values are stripped of whitespace and surrounding double quotes.
    A repeated key collects its values into a list. Any component
    without '=' makes the whole parse fail, returning {}.
    """
    result = {}
    for component in header_val.split(','):
        key, sep, val = component.partition("=")
        if sep != "=":
            # Malformed component invalidates the entire header.
            return {}
        key = key.strip()
        val = val.strip().strip('"')
        if key in result:
            previous = result[key]
            if isinstance(previous, list):
                previous.append(val)
            else:
                result[key] = [previous, val]
        else:
            result[key] = val
    return result
def type_factor(NT, NP, SC=30.0):
    """
    NT - Number of planet types in area
    NP - Number of planets in area
    SC - a number used to scale how bad it is to differ from the
         optimal number of planet types. Lower number means less bad

    Returns a number between 0.0 and 1.0 indicating how good the ratio
    of different planet types is compared to the optimal ratio.
    The optimal is to have as many different planet types as possible
    (capped at 9 types).
    """
    # Cap the achievable type count at 9 (or at NP when fewer planets).
    max_types = min(NP, 9.0)
    ratio = NT / max_types if max_types > 0.0 else 0.0
    diff_from_opt = 1.0 - ratio
    # Gaussian-like falloff as the ratio departs from the optimum.
    exponent = -SC * diff_from_opt * diff_from_opt
    return pow(2.718281828, exponent)
def calc_product_matrices(m_1_ij, m_2_ij, *argv):
    """Product of two or more 3x3 matrices.

    Each matrix is a flat 9-tuple in row-major order; the result is a
    flat 9-tuple as well. Extra matrices in *argv* are multiplied in
    left-to-right order via recursion.
    """
    if len(argv) != 0:
        # Fold left: (m1 @ m2) @ rest...
        head = calc_product_matrices(m_1_ij, m_2_ij)
        return calc_product_matrices(head, *argv)
    # Reshape the flat 9-tuples into rows for index arithmetic.
    a = (m_1_ij[0:3], m_1_ij[3:6], m_1_ij[6:9])
    b = (m_2_ij[0:3], m_2_ij[3:6], m_2_ij[6:9])
    product = []
    for i in range(3):
        for j in range(3):
            # Same left-to-right addition order as the unrolled original.
            product.append(
                a[i][0] * b[0][j] + a[i][1] * b[1][j] + a[i][2] * b[2][j])
    return tuple(product)
def horner(x0: float, coefficients: list) -> float:
    """Evaluate a polynomial at x = x0 using Horner's method.

    ``coefficients`` are in ascending-degree order: coefficients[i] is
    the coefficient of x**i (the reversed iteration starts from the
    leading term). Time complexity: O(n), where n = len(coefficients).
    """
    assert isinstance(coefficients, list)
    assert all(isinstance(c, (float, int)) for c in coefficients)
    assert isinstance(x0, (float, int))
    result = 0
    for coeff in coefficients[::-1]:
        result = result * x0 + coeff
    return result
def convert_uint16_to_array(value):
    """Convert a 16-bit number into [low_byte, high_byte] (LSB first)."""
    low_byte = value & 0xFF
    high_byte = (value >> 8) & 0xFF
    return [low_byte, high_byte]
def bitwise_or(value, mask):
    """Peforms a bitwise-or operation on `value` and `mask` leaving any
    "bits" marked "X" unchanged. Note that `value` and `mask` are both
    strings containing '0', '1', or 'X' and a string result is
    returned.
    """
    out_bits = []
    for value_bit, mask_bit in zip(value, mask):
        # A '0' mask bit passes the value bit through; anything else
        # ('1' or 'X') takes the mask bit itself.
        out_bits.append(value_bit if mask_bit == '0' else mask_bit)
    return ''.join(out_bits)
def util_key_exists(keys, key):
    """Return True if *key* compares equal to any element of *keys*.

    Uses the ``in`` operator, which applies the same ``==`` comparison
    as the original hand-rolled loop but short-circuits on the first
    match instead of always scanning the whole list.
    """
    return key in keys
def check_uniqueness_in_rows(board: list, row=True):
    """
    Check buildings of unique height in each row.

    Return True if buildings in a row have unique length, False otherwise.
    When ``row`` is True the first and last board lines (hint rows) are
    skipped; each line's first and last characters are edge hints, not
    buildings. A line also fails when BOTH of its edge hints are digits.

    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
    False
    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*553215', \
'*35214*', '*41532*', '*2*1***'])
    False
    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '441532*', '*22222*'])
    True
    >>> check_uniqueness_in_rows(['***212*', '412453*', '423145*', '*543215', \
'*35214*', '441532*', '*22222*'])
    True
    """
    lines_to_check = board[1:-1] if row else board
    for line in lines_to_check:
        buildings = line[1:-1]
        if len(set(buildings)) != len(buildings):
            return False
        if line[0].isdigit() and line[-1].isdigit():
            return False
    return True
def ForceKernel(r, h):
    """
    Returns the quantity equivalent to (fraction of mass enclosed)/ r^3
    for a cubic-spline mass distribution of compact support radius h.
    Used to calculate the softened gravitational force.

    Arguments:
    r - radius
    h - softening

    Outside the support radius (r > h) this reduces to the exact
    Newtonian 1/r^3.
    """
    # Beyond the kernel support: unsoftened point-mass form.
    if r > h:
        return 1./(r*r*r)
    hinv = 1./h
    q = r*hinv
    # Inner piece of the cubic spline (q <= 1/2).
    if q <= 0.5:
        return (10.666666666666666666 + q*q*(-38.4 + 32.*q))*hinv*hinv*hinv
    # Outer piece (1/2 < q <= 1).
    return (21.333333333333 - 48.0 * q + 38.4 * q * q - 10.666666666667 * q * q * q - 0.066666666667 / (q * q * q))*hinv*hinv*hinv
def asbytes(s):
    """Turns unicode into bytes, if needed.

    Assumes UTF-8. Bytes input is returned unchanged.
    """
    return s if isinstance(s, bytes) else s.encode("utf-8")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.