content stringlengths 42 6.51k |
|---|
def get_start_end_from_string(start_end):
    """Parse a dotted pair like "111.222" into the ints (111, 222)."""
    first, second = start_end.split(".")
    return (int(first), int(second))
def get_full_policy_path(arn):
    """
    Extract the resource path from an IAM ARN, dropping the leading
    resource-type segment.
    Case 1:
      Input: arn:aws:iam::aws:policy/aws-service-role/AmazonGuardDutyServiceRolePolicy
      Output: aws-service-role/AmazonGuardDutyServiceRolePolicy
    Case 2:
      Input: arn:aws:iam::123456789012:role/ExampleRole
      Output: ExampleRole
    :param arn: full ARN string
    :return: resource path with its type prefix removed
    """
    # everything after the 5th ':' is the resource string (rejoin in case
    # the resource itself contained ':')
    resource = ":".join(arn.split(":")[5:])
    # drop the first '/'-separated segment (the resource type)
    return "/".join(resource.split("/")[1:])
def fib(n):
    """Fibonacci example function
    Args:
        n (int): integer, 1-indexed position (fib(1) == fib(2) == 1)
    Returns:
        int: n-th Fibonacci number
    """
    assert n > 0
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return prev
def get_copies(rdoc, pformat):
    """
    Number of copies (1=Original, 2=Duplicate, 3=Triplicate), pdf only;
    any other output format always gets a single copy.
    """
    if pformat != "pdf":
        return 1
    labels = ["Original", "Duplicate", "Triplicate"]
    return labels.index(rdoc.jasper_report_number_copies) + 1
def remove_prefixes_of_others(values):
    """
    Remove every string that is a proper prefix of another string in
    ``values``; return the survivors sorted and de-duplicated.
    """
    survivors = [
        candidate for candidate in set(values)
        if not any(other != candidate and other.startswith(candidate)
                   for other in values)
    ]
    return sorted(survivors)
def decimalToRoman(index):
    """Converts a positive int to its Roman-numeral string."""
    assert isinstance(index, int) and index > 0
    table = ((1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
             (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
             (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'))
    symbols = []
    for value, numeral in table:
        # greedily emit the largest numeral that still fits
        while index >= value:
            symbols.append(numeral)
            index -= value
    return ''.join(symbols)
def calc_partial_interia_dat(contrib_dat, ev):
    """
    Partial inertias for datasets/tables: the (element-wise) product of
    the dataset contributions and the eigenvalues.
    :return: ``contrib_dat * ev``
    """
    print("Calculating partial inertias for the datasets... ", end='')
    partial_inertias = contrib_dat * ev
    print('Done!')
    return partial_inertias
def reverse_words_in_a_string(string):
    """
    Reverse the order of the space-separated words in a string.
    :param string: given string
    :type string: str
    :return: words in reverse order, single-space separated
    :rtype: str
    """
    # filter out the empty tokens produced by runs of spaces
    words = [w for w in string.strip().split(' ') if w]
    words.reverse()
    return ' '.join(words)
def tokenize_english(sentence):
    """English language tokenizer: naive split on single spaces."""
    tokens = sentence.split(" ")
    return tokens
def split(seq, length):
    """
    Split a sequence into consecutive chunks of at most ``length`` items.
    """
    chunks = []
    for start in range(0, len(seq), length):
        chunks.append(seq[start:start + length])
    return chunks
def OCEANprice(firm_valuation: float, OCEAN_supply: float) -> float:
    """Price of one OCEAN token in USD: firm valuation divided by supply."""
    assert OCEAN_supply > 0
    price = firm_valuation / OCEAN_supply
    return price
def _str_to_list(str_or_list):
"""try to return a non-string iterable in either case"""
if isinstance(str_or_list, (tuple, list, set)):
return str_or_list
if str(str_or_list) == str_or_list:
return [str_or_list]
raise ValueError(str_or_list) |
def find_uniques(
    test_list,
    expected_list
):
    """checks for unique values between two lists.
    Args:
        test_list (:obj:`list`): values found in test
        expected_list (:obj:`list`): values expected
    Returns:
        (:obj:`list`): unique_test -- values only present in ``test_list``
        (:obj:`list`): unique_expected -- values only present in ``expected_list``
    """
    # set difference in each direction; dead commented-out removal loop deleted
    unique_test = list(set(test_list) - set(expected_list))
    print('Unique test vals: {}'.format(unique_test))
    unique_expected = list(set(expected_list) - set(test_list))
    print('Unique expected vals: {}'.format(unique_expected))
    return unique_test, unique_expected
def validate_rule_association_id(value):
    """Validate the length of a resolver rule association id.

    Note: despite the original wording, this does not raise -- it returns
    the error-message fragment "have length less than or equal to 64" when
    ``value`` is non-empty and longer than 64 characters, and an empty
    string (meaning "no error") otherwise.
    """
    if value and len(value) > 64:
        return "have length less than or equal to 64"
    return ""
def dictget(d, l):
    """
    Lookup item in nested dict using a list of keys, or None if non-existent
    d: nested dict
    l: list of keys, or None
    """
    try:
        # KeyError: key missing; TypeError: d not indexable or l is None
        child = d[l[0]]
    except (KeyError, TypeError):
        return None
    rest = l[1:]
    return dictget(child, rest) if rest else child
def _mask_dict(in_dict, mask):
"""Given a dict and a list of fields removes all fields not in the list"""
for key in (set(in_dict.keys()) - set(mask)):
in_dict.pop(key)
return in_dict |
def sub(value):
    """Subtracts one from `value`.
    Args:
        value (int): A number.
    Returns:
        int: `value` minus one.
    """
    return (value - 1)
def reverse_line(line: list) -> list:
    """
    Reverse a polyline given as edges: [(p0, p1), (p1, p2), ..., (pn_m_1, pn)]
    becomes [(pn, pn_m_1), ..., (p1, p0)], where the p's are (x, y) in map
    coordinates.
    Parameters
    ----------
    line : list
        List of edges (p0, p1), where p0, p1 in R^2
    Returns
    -------
    list:
        List of edges, each flipped, in reverse order
    """
    flipped = []
    for start, end in line:
        flipped.append((end, start))
    flipped.reverse()
    return flipped
def remove_string_escapes(value: str) -> str:
    """Used when parsing string-literal defaults to prevent escaping the string to write arbitrary Python
    **REMOVING OR CHANGING THE USAGE OF THIS FUNCTION HAS SECURITY IMPLICATIONS**
    See Also:
        - https://github.com/openapi-generators/openapi-python-client/security/advisories/GHSA-9x4c-63pf-525f
    """
    # Escape embedded double quotes so the value cannot break out of the
    # generated string literal. Deliberately left untouched (see advisory).
    return value.replace('"', r"\"")
def _split_by_comma(s, length=50):
"""Group a comma-separated string into a list of at-most
``length``-length words each."""
str_split = s.split(',')
str_list = []
for i in range(0, len(str_split) + length, length):
temp_str = ','.join(str_split[i:i+length])
if temp_str:
str_list.append(temp_str)
return str_list |
def get_dimensions(matrix):
    """
    A helper function to get the dimensions of the matrix
    Args:
        matrix (2D array): A 2D array (list of rows) representing a matrix
    Returns:
        tuple : (number of rows, number of columns) -- columns taken from
        the first row
    """
    n_rows = len(matrix)
    n_cols = len(matrix[0])
    return (n_rows, n_cols)
def format_query_params(request, config):
    """Attach query parameters from ``config`` to the request URL (in place).

    Appends the configured query string to the raw URL and mirrors the
    result into the host list; no-op when the config defines no query.
    Returns the (possibly mutated) request.
    """
    req_conf = config["request"]
    if "query" in req_conf:
        url = request["request"]["url"]
        url["raw"] += req_conf["query"]
        url["host"] = [url["raw"]]
    return request
def mul_vector_by_scalar(vector, scalar):
    """
    Multiplies a vector by a scalar, component-wise.
    :param vector: vector
    :param scalar: scalar
    :return: tuple with every component multiplied by ``scalar``
    """
    return tuple(component * scalar for component in vector)
def format_columns(rows, sep=None, align=None):
    """Convert a list (rows) of lists (columns) to a formatted list of lines.
    When joined with newlines and printed, the output is similar to
    `column -t`.
    The optional align may be a list of alignment formatters
    ('>'/'r' right, '^'/'c' center, anything else left).
    The last (right-most) column will not have any trailing whitespace so that
    it wraps as cleanly as possible.
    Based on MIT licensed:
    https://github.com/ClockworkNet/OpScripts/blob/master/opscripts/utils/v8.py
    Based on solution provided by antak in http://stackoverflow.com/a/12065663
    """
    lines = list()
    if sep is None:
        sep = " "
    # column width = widest stringified cell in that column
    # NOTE(review): zip truncates to the shortest row, so ragged rows would
    # lose widths for their extra cells -- confirm all rows share a length
    widths = [max(map(len, map(str, col))) for col in zip(*rows)]
    for row in rows:
        formatted = list()
        last_col = len(row) - 1
        for i, col in enumerate(row):
            # right aligned ('>' or 'r'); rjust never adds trailing spaces
            if align and align[i].lower() in (">", "r"):
                formatted.append(str(col).rjust(widths[i]))
            # center aligned ('^' or 'c')
            elif align and align[i].lower() in ("^", "c"):
                col_formatted = str(col).center(widths[i])
                if i == last_col:
                    col_formatted = col_formatted.rstrip()
                formatted.append(col_formatted)
            # left aligned (default)
            else:
                if i == last_col:
                    # last column: no padding so the line wraps cleanly
                    formatted.append(str(col))
                else:
                    formatted.append(str(col).ljust(widths[i]))
        lines.append("| {} |".format(sep.join(formatted)))
    return lines
def pointsToList(points):
    """Convert query response from point in time, value format to list with values only.
    :param points: query response
    :return: list[oldest value, -> , newest value]
    """
    return [point for point in points]
def ms_to_hours(ms):
    """Convert milliseconds to hours, rounded to 2 decimal places."""
    hours = ms / 60.0 / 60.0 / 1000.0
    return round(hours, 2)
def makeGameBoard(n):
    """
    Create a gameboard represented as a dictionary.
    The board consists of numeric placeholders starting at zero and going
    up to (and including) ``n``, e.g. for an 8x8 board cells 0..63:
    -------------------------
    | 0| 1| 2| 3| 4| 5| 6| 7|
    -------------------------
    ...
    |56|57|58|59|60|61|62|63|
    -------------------------
    Parameters:
        n (int): number of last gameboard cell
    Returns:
        dict: keys are cell numbers 0..n, values are None (not yet visited)
    """
    return {cell: None for cell in range(n + 1)}
def TropicalWeight(param):
    """
    Returns the emulated fst TropicalWeight
    Args:
        param: the input weight value
    Returns:
        bool: False for an infinite weight, True otherwise
    """
    return param != float('inf')
def _diag(m):
"""Return the diagonal of a matrix."""
return [m[j][j] for j in range(len(m))] |
def reflection_normal(n1, n2):
    """
    Fresnel reflection losses for normal incidence.
    For normal incidence there is no difference between s and p polarisation.
    Inputs:
        n1 : Refractive index of medium 1 (input)
        n2 : Refractive index of medium 2 (output)
    Returns:
        R : The Fresnel reflectance
    Doctests:
        >>> '%.2f' % reflection_normal(1.5,1)
        '0.04'
    """
    amplitude_ratio = (n1 - n2) / (n1 + n2)
    return amplitude_ratio ** 2.
def migrate_instrument_config(instrument_config):
    """utility function to generate the old-style list of per-detector
    config dictionaries, each paired with the shared oscillation stage."""
    detectors = instrument_config['detectors']
    stage = instrument_config['oscillation_stage']
    return [
        dict(detector=detectors[detector_id], oscillation_stage=stage)
        for detector_id in detectors
    ]
def get_current_next_indicator(data):
    """Gets the current/next indicator from the given section data.

    Reads the least-significant bit of byte 5. True means this is the
    currently applicable table; False means it becomes applicable some
    time in the future.
    """
    return bool(data[5] & 0x01)
def seconds_to_timeleft(seconds):
    """Utility converting seconds to a compact 'XdYhZmWs' string
    (zero-valued units are omitted)."""
    parts = []
    for unit_seconds, suffix in ((86400, 'd'), (3600, 'h'), (60, 'm')):
        amount, seconds = divmod(seconds, unit_seconds)
        if amount:
            parts.append('{0}{1}'.format(amount, suffix))
    if seconds:
        parts.append('{0}s'.format(seconds))
    return ''.join(parts)
def find_matching_parenthesis(left, equation):
    """
    Find the index of the ')' matching the '(' at index ``left``.
    The running nesting depth hits 0 exactly at the matching paren.
    :param left: The parenthesis to match
    :param equation: The equation to match it in
    :return: int. Index of right paren
    :raises SyntaxWarning: when no matching parenthesis exists
    """
    depth = 0
    for idx in range(left, len(equation)):
        ch = equation[idx]
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
        # checked after every character, matching the original behaviour
        if depth == 0:
            return idx
    raise SyntaxWarning("No matching parenthesis found")
def space_replacer(string):
    """Replace spaces with underscores, then collapse any run of
    underscores (including pre-existing ones) down to a single one.
    :type string: str
    :rtype: str
    """
    result = string.replace(" ", "_")
    while "__" in result:
        result = result.replace("__", "_")
    return result
def determine_archive_version_generic(name, leading_terms, trailing_terms):
    """
    Given an archive file name, tries to get version information. Generic
    version that lower-cases the name and cuts off leading and trailing
    terms. Give the most special terms first in the list; every term that
    matches is cut.
    """
    name = name.lower()
    for term in leading_terms:
        if name.startswith(term):
            name = name[len(term):]
    for term in trailing_terms:
        if name.endswith(term):
            name = name[:-len(term)]
    return name
def is_internal(ip_address):
    """Determine if the address is an internal (private/link-local) ip address.

    Fix: the old prefix heuristic matched only '172.16.' and so missed most
    of 172.16.0.0/12 (172.17.x .. 172.31.x). Proper network membership is now
    delegated to the ``ipaddress`` module; the prefix heuristic is kept as a
    fallback for inputs that are not parseable bare IP literals.
    """
    import ipaddress
    try:
        addr = ipaddress.ip_address(ip_address)
    except ValueError:
        # Not a plain IP literal -- keep the historical prefix matching
        local_nets = '10.', '172.16.', '192.168.', '169.254', 'fd', 'fe80::'
        return any(ip_address.startswith(local) for local in local_nets)
    return addr.is_private or addr.is_link_local
def get_unique_in_array(lst: list):
    """Returns a list of the unique values within the given list
    (order not guaranteed).
    Examples:
        >>> sorted(get_unique_in_array([1,1,2,2,3,2,3,4,5,6]))
        [1, 2, 3, 4, 5, 6]
    """
    unique_values = set(lst)
    return list(unique_values)
def get_symminfo(newsymms: dict) -> str:
    """
    Adds text about the symmetry generators used in order to add symmetry
    generated atoms. Items are ';'-separated; the last item gets no
    separator. Returns '' when no generators were used.
    """
    if not newsymms:
        return ''
    line = 'Symmetry transformations used to generate equivalent atoms:\n'
    nitems = len(newsymms)
    for n, (key, value) in enumerate(newsymms.items()):
        # fix: the last item previously still got a ';' because the counter
        # was compared against nitems before it could ever reach it
        sep = '' if n == nitems - 1 else ';'
        line += "#{}: {}{} ".format(key, value, sep)
    return line
def _convert_byte32_arr_to_hex_arr(byte32_arr):
"""
This function takes in an array of byte32 strings and
returns an array of hex strings
"""
hex_ids = []
for byte32_str in byte32_arr:
hex_ids = hex_ids + [byte32_str.hex()]
return hex_ids |
def emote(name, action):
    """Emote an action: the actor's name followed by the action text."""
    return "%s %s" % (name, action)
def faces_indices_vectors_to_matrix(indices, faces):
    """faces_indices_vectors_to_matrix(indices, faces) -> List[List[int]]
    PyPRT outputs the GeneratedModel face information as a flat list of
    vertex indices plus a list of per-face index counts. This converts the
    two lists into one list of vertex-index lists, one per face.
    Parameters:
        indices: List[int] -- flat vertex indices for all faces
        faces: List[int] -- number of vertex indices in each face
    Returns:
        List[List[int]]
    Example:
        ``[[1, 0, 3, 2], [4, 5, 6, 7]]
        = faces_indices_vectors_to_matrix([1, 0, 3, 2, 4, 5, 6, 7], [4, 4])``
    """
    per_face = []
    cursor = 0
    for count in faces:
        per_face.append(indices[cursor:cursor + count])
        cursor += count
    return per_face
def is_valid_url(url: str) -> bool:
    """
    Minimal validation of URLs.

    Accepts str or bytes: True when the value is longer than 8 characters
    and uses an http/https scheme. (urllib.parse.urlparse is too permissive
    to identify correct URLs, hence the manual check.)
    """
    if not isinstance(url, (str, bytes)) or len(url) <= 8:
        return False
    # fix: bytes input previously raised TypeError because a str separator
    # was passed to bytes.split
    if isinstance(url, bytes):
        return url.split(b'://', 1)[0] in (b'http', b'https')
    return url.split('://', 1)[0] in ('http', 'https')
def tweet_location(tweet):
    """Return a (latitude, longitude) position representing a tweet's location."""
    return (tweet['latitude'], tweet['longitude'])
def _get_unique_split_data(split_test_targets):
    """Returns all split_test_target 'data' dependencies without duplicates.

    NOTE(review): this looks like Bazel Starlark rather than plain Python --
    ``dict.get(..., default = [])`` with a keyword argument is valid in
    Starlark but would raise TypeError in CPython; confirm the file's build
    context. First-seen order of the dependencies is preserved.
    """
    data = []
    for split_name in split_test_targets:
        split_data = split_test_targets[split_name].get("data", default = [])
        # list comprehension used only for its append side effect; the
        # membership test de-duplicates while keeping order
        [data.append(d) for d in split_data if d not in data]
    return data
def get_batch_asset_task_info(ctx):
    """Parses context data from webpublisher's batch metadata
    Returns:
        (tuple): asset, task_name (None for non-task contexts), task_type
    """
    if ctx["type"] == "task":
        path_items = ctx["path"].split('/')
        asset = path_items[-2]          # asset = parent folder of the task
        task_name = ctx["name"]
        task_type = ctx["attributes"]["type"]
        return asset, task_name, task_type
    return ctx["name"], None, "default_task_type"
def klucb(x, d, kl, upperbound, lowerbound=float('-inf'), precision=1e-6, max_iterations=50):
    """ The generic KL-UCB index computation, via bisection search.
    - x: value of the cum reward,
    - d: upper bound on the divergence,
    - kl: the KL divergence to be used (:func:`klBern`, :func:`klGauss`, etc),
    - upperbound, lowerbound=float('-inf'): the known bounds of the values x,
    - precision=1e-6: stop once the bisection bracket is narrower than this,
    - max_iterations=50: hard cap on bisection steps (bounds time complexity).
    .. note:: Each bisection step costs exactly one call to ``kl``.
       Returns the midpoint of the final bracket.
    """
    lo = max(x, lowerbound)
    hi = upperbound
    for _ in range(max_iterations):
        if hi - lo <= precision:
            break
        mid = (lo + hi) / 2.
        if kl(x, mid) > d:
            hi = mid        # divergence too large: shrink from above
        else:
            lo = mid        # still feasible: raise the lower end
    return (lo + hi) / 2.
def golomb(n: int, map={}):
    """Memoized recursive generation of the n-th number in Golomb's sequence.

    Recurrence: a(1) = 1; a(n) = 1 + a(n - a(a(n - 1))).

    Note: ``map`` intentionally uses a shared mutable default as the memo
    cache. Fix: recursive calls now pass the cache through, so a
    caller-supplied cache is actually used (previously inner calls silently
    fell back to the shared default dict).
    """
    if n == 1:
        return 1
    if n not in map:
        map[n] = 1 + golomb(n - golomb(golomb(n - 1, map), map), map)
    return map[n]
def to_alpha(anumber):
    """Convert a non-negative number to its digit representation in base 26
    using the letters A-Z as digits (A = 0, so 26 -> 'BA'; 0 -> '').
    """
    digits = []
    # removed the no-op `anumber = anumber` statement and the redundant
    # explicit zero branch (the while loop already yields '' for 0)
    while anumber > 0:
        digits.append(chr(anumber % 26 + ord('A')))
        anumber //= 26
    return ''.join(reversed(digits))
def check_chef_recipe(rec):
    """
    Checks if the given file is a chef recipe (a .rb file, case-insensitive)
    :param rec: file path
    :return: check result
    """
    lowered = rec.lower()
    return lowered.endswith(".rb")
def oneD_linear_interpolation(desired_x, known):
    """
    utility function that performs 1D linear interpolation with a known energy value
    :param desired_x: integer value of the desired attribute/argument
    :param known: list of two dicts [{x: <value>, y: <energy>}]
    :return: energy value at the desired attribute/argument
    """
    # E = slope * x + intercept; anchor the line on the lower-x point
    if known[1]['x'] < known[0]['x']:
        anchor = known[1]
    else:
        anchor = known[0]
    slope = (known[1]['y'] - known[0]['y']) / (known[1]['x'] - known[0]['x'])
    return slope * (desired_x - anchor['x']) + anchor['y']
def make_unique_name(base, existing=[], format="%s_%s"):
    """
    Return a name, unique within a context, based on the specified name.
    base: the desired base name of the generated unique name.
    existing: a sequence of the existing names to avoid returning
        (read-only; never mutated here).
    format: a formatting specification for how the name is made unique.
    """
    candidate = base
    suffix = 2
    while candidate in existing:
        candidate = format % (base, suffix)
        suffix += 1
    return candidate
def has_module(modName):
    """Check if the module is installed
    Args:
        modName (str): module name to check
    Returns:
        bool: True if installed, otherwise False
    """
    from pkgutil import iter_modules
    return any(info[1] == modName for info in iter_modules())
def _get_main_object_id_mappings(main_object_ids, all_object_ids,
                                 output_actions, alias_object_id_to_old_object_id):
    """
    Return a list of main object IDs, and a mapping from all object Ids to the main ones
    :param main_object_ids: Main ids identified by the sampler
    :param all_object_ids: All object IDs ever seen
    :param output_actions: All output actions -- we might need to add more main object IDs if needed
    :param alias_object_id_to_old_object_id: Aliases - e.g. if we chop somethign it changes ID. ugh
    :return: new list of main object IDs, and a mapping of objectId to main ind (or -1 otherwise).
    """
    # Create a mapping of objectId -> mainObjectId ind (or nothing!)
    # Tack on enough things to main object ids if they're referenced
    if isinstance(main_object_ids, str):  # Not sure what's going on here
        main_object_ids = [main_object_ids]
    # collect every object id referenced by any action key ending in
    # 'bjectId' (matches both 'objectId' and 'receptacleObjectId' etc.)
    ref_oids = set([v for a in output_actions for k, v in a.items() if k.endswith('bjectId')])
    # sorted() makes the append order deterministic across runs
    for roid in sorted(ref_oids):
        if roid not in sorted(alias_object_id_to_old_object_id.keys()) + main_object_ids:
            main_object_ids.append(roid)
    # print("{} objects: {}".format(len(main_object_ids), main_object_ids), flush=True)
    # default every known object to -1 ("not a main object")
    object_id_to_main_ind = {oid: -1 for oid in all_object_ids}
    for i, mi in enumerate(main_object_ids):
        object_id_to_main_ind[mi] = i
        # aliases of a main object map to the same index as the original
        for k, v in alias_object_id_to_old_object_id.items():
            if v == mi:
                object_id_to_main_ind[k] = i
    return main_object_ids, object_id_to_main_ind
def v0_tail(sequence, n):
    """Return the last n items of given sequence.

    The return value keeps the given sequence's type: a string yields a
    string, a tuple yields a tuple, and so on.

    Fix: for n == 0 (or negative) this now returns an empty slice --
    ``sequence[-0:]`` used to return the WHOLE sequence.
    """
    if n <= 0:
        return sequence[:0]
    return sequence[-n:]
def are_all_equal(tiles):
    """
    Checks if all tiles are the same (an empty collection counts as equal).
    """
    distinct = set(tiles)
    return len(distinct) <= 1
def _parse_semicolon_separated_data(input_data):
"""Reads semicolon-separated Unicode data from an input string.
Reads a Unicode data file already imported into a string. The format is
the Unicode data file format with a list of values separated by
semicolons. The number of the values on different lines may be different
from another.
Example source data file:
http://www.unicode.org/Public/UNIDATA/PropertyValueAliases.txt
Example data:
sc; Cher ; Cherokee
sc; Copt ; Coptic ; Qaac
Args:
input_data: An input string, containing the data.
Returns:
A list of lists corresponding to the input data, with each individual
list containing the values as strings. For example:
[['sc', 'Cher', 'Cherokee'], ['sc', 'Copt', 'Coptic', 'Qaac']]
"""
all_data = []
for line in input_data.split('\n'):
line = line.split('#', 1)[0].strip() # remove the comment
if not line:
continue
fields = line.split(';')
fields = [field.strip() for field in fields]
all_data.append(fields)
return all_data |
def get_addr(host, port):
    """Return the port formatted as a decimal string.

    NOTE(review): ``host`` is accepted but unused -- despite the name, the
    result is only the port. Confirm callers really expect port-only here.
    """
    return "%d" % (port)
def clamp(x, _min, _max):
    """Clamp a value between a minimum and a maximum (inclusive)."""
    lower_bounded = max(_min, x)
    return min(_max, lower_bounded)
def all_(collection, predicate=None):
    """:yaql:all
    Returns true if all the elements of a collection evaluate to true.
    If a predicate is specified, returns true if the predicate is true for all
    elements in the collection.
    :signature: collection.all(predicate => null)
    :receiverArg collection: input collection
    :argType collection: iterable
    :arg predicate: lambda function to apply to every collection value. null
        by default, which means evaluating collections elements to boolean
        with no predicate
    :argType predicate: lambda
    :returnType: boolean
    .. code::
        yaql> [1, [], ''].all()
        false
        yaql> [1, [0], 'a'].all()
        true
    """
    if predicate is None:
        predicate = bool
    return all(predicate(item) for item in collection)
def list_all(node):
    """Retrieve all the key-value pairs in a BST in asc sorted order of keys."""
    # Inorder traversal: left subtree, this node, right subtree. By the BST
    # definition (cf is_bst()) this visits keys in ascending order. O(N)
    if node is None:
        return []
    left_pairs = list_all(node.left)
    right_pairs = list_all(node.right)
    return left_pairs + [(node.key, node.value)] + right_pairs
def symmetric(l):
    """Returns whether a list is symmetric.
    >>> symmetric([])
    True
    >>> symmetric([1])
    True
    >>> symmetric([1, 4, 5, 1])
    False
    >>> symmetric([1, 4, 4, 1])
    True
    >>> symmetric(['l', 'o', 'l'])
    True
    """
    # recurse inward: the ends must match and the interior must be symmetric
    if len(l) < 2:
        return True
    if l[0] != l[-1]:
        return False
    return symmetric(l[1:-1])
def dict_compare(d1, d2):
    """Compare two dicts; returns (added, removed, modified, same).
    Taken from: https://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python"""
    keys1, keys2 = set(d1), set(d2)
    shared = keys1 & keys2
    added = keys1 - keys2
    removed = keys2 - keys1
    modified = {key: (d1[key], d2[key]) for key in shared if d1[key] != d2[key]}
    same = {key for key in shared if d1[key] == d2[key]}
    return added, removed, modified, same
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10 |
def _make_worker_node_script(module_name, function_name, environ):
    """
    Returns a string that is a python-script.
    This python-script will be executed on the worker-node.
    In here, the environment variables are set explicitly.
    It reads the job, runs result = function(job), and writes the result.
    The script will be called on the worker-node with a single argument:
        python script.py /some/path/to/work_dir/{idx:09d}.pkl
    On environment-variables
    ------------------------
    There is the '-V' option in qsub which is meant to export ALL environment-
    variables in the batch-job's context. And on many clusters this works fine.
    However, I encountered clusters where this does not work.
    For example ```LD_LIBRARY_PATH``` is often forced to be empty for reasons
    of security. So the admins say.
    This is why we set the einvironment-variables here in the
    worker-node-script.
    """
    # Build one `os.environ[...] = ...` line per variable; unicode_escape
    # keeps special characters safe inside the generated double-quoted strings.
    add_environ = ""
    for key in environ:
        add_environ += 'os.environ["{key:s}"] = "{value:s}"\n'.format(
            key=key.encode("unicode_escape").decode(),
            value=environ[key].encode("unicode_escape").decode(),
        )
    # The generated script: import the target function, restore the environ,
    # unpickle the job from argv[1], run it, and pickle the result next to
    # the input file with a ".out" suffix.
    return (
        ""
        "# I was generated automatically by queue_map_reduce.\n"
        "# I will be executed on the worker-nodes.\n"
        "# Do not modify me.\n"
        "from {module_name:s} import {function_name:s}\n"
        "import pickle\n"
        "import sys\n"
        "import os\n"
        "from queue_map_reduce import network_file_system as nfs\n"
        "\n"
        "{add_environ:s}"
        "\n"
        "assert(len(sys.argv) == 2)\n"
        'job = pickle.loads(nfs.read(sys.argv[1], mode="rb"))\n'
        "\n"
        "result = {function_name:s}(job)\n"
        "\n"
        'nfs.write(pickle.dumps(result), sys.argv[1]+".out", mode="wb")\n'
        "".format(
            module_name=module_name,
            function_name=function_name,
            add_environ=add_environ,
        )
    )
def patch_dict(d, p):
    """Patches the dict `d` (in place) with values from the "patcher" dict `p`.

    Keys present in both dicts: plain-dict values are patched recursively,
    everything else is overwritten. Keys only in `p` are ignored.
    """
    for key in p:
        if key not in d.keys():
            continue
        current = d[key]
        if type(current) == dict:
            d[key] = patch_dict(current, p[key])
        else:
            d[key] = p[key]
    return d
def count_fixxations(fixxations):
    """
    Count the number of distinct fixxation runs.
    :param fixxations: list of 0/1 flags; 1 indicates the move from the
        previous sample is part of a fixxation.
    :return: number of separate runs of 1s
    """
    total = 0
    inside_run = False
    for flag in fixxations:
        if flag == 1 and not inside_run:
            total += 1
            inside_run = True
        if flag == 0 and inside_run:
            inside_run = False
    return total
def ugly_number(n):
    """
    Returns the n'th Ugly number.
    Ugly Numbers are numbers whose only prime factors are 2,3 or 5.
    Parameters
    ----------
    n : int
        represent the position of ugly number (1-based; ugly_number(1) is 1)
    Raises
    ------
    NotImplementedError
        for n < 1 (NOTE(review): ValueError would be the conventional choice)
    """
    if(n<1):
        raise NotImplementedError(
            "Enter a valid natural number"
        )
    # classic three-pointer dynamic programming: i2/i3/i5 index the smallest
    # already-generated ugly number whose multiple by 2/3/5 has not been
    # used yet
    ugly = [0]*n
    ugly[0] = 1
    i2 = i3 = i5 = 0
    next_multiple_of_2 = 2
    next_multiple_of_3 = 3
    next_multiple_of_5 = 5
    for l in range(1, n):
        # the next ugly number is the smallest pending multiple
        ugly[l] = min(next_multiple_of_2,
                      next_multiple_of_3,
                      next_multiple_of_5)
        # advance every pointer that produced this value (handles duplicates
        # such as 6 = 2*3 exactly once)
        if ugly[l] == next_multiple_of_2:
            i2 += 1
            next_multiple_of_2 = ugly[i2] * 2
        if ugly[l] == next_multiple_of_3:
            i3 += 1
            next_multiple_of_3 = ugly[i3] * 3
        if ugly[l] == next_multiple_of_5:
            i5 += 1
            next_multiple_of_5 = ugly[i5] * 5
    return ugly[-1]
def parse_content_type(data):
    """
    Parses the provided content type string retrieving both the multiple
    mime types associated with the resource and the extra key to value
    items associated with the string in case they are defined (it's optional).
    :type data: String
    :param data: The content type data that is going to be parsed to
    obtain the structure of values for the content type string, this must
    be a plain unicode string and not a binary string.
    :rtype: Tuple
    :return: The sequence of mime types of the the content and the multiple
    extra values associated with the content type (eg: charset, boundary, etc.)
    """
    # creates the list of final normalized mime types and the
    # dictionary to store the extra values.
    types = []
    extra_m = dict()
    # in case no valid type has been sent returns the values
    # immediately to avoid further problems
    if not data: return types, extra_m
    # extracts the mime and the extra parts from the data string
    # they are the basis of the processing method
    data = data.strip(";")
    parts = data.split(";")
    mime = parts[0]
    extra = parts[1:]
    mime = mime.strip()
    # runs a series of verifications on the base mime value and in
    # case it's not valid returns the default values immediately
    if not "/" in mime: return types, extra_m
    # strips the complete set of extra values, dropping the empty ones
    # (fix: the previous filter `if extra` was a constant and never
    # filtered anything)
    extra = [value.strip() for value in extra if value.strip()]
    # splits the complete mime type into its type and sub
    # type components (first step of normalization)
    type, sub_type = mime.split("/", 1)
    sub_types = sub_type.split("+")
    # iterates over the complete set of sub types to
    # create the full mime type for each of them and
    # add the new full items to the types list (normalization)
    for sub_type in sub_types:
        types.append(type + "/" + sub_type)
    # goes through all of the extra key to value items
    # and converts them into proper dictionary values
    for extra_item in extra:
        if not "=" in extra_item: continue
        # fix: split only on the first '=' so values that themselves
        # contain '=' (eg: base64 padded MIME boundaries) survive intact;
        # previously this raised ValueError on unpacking
        key, value = extra_item.split("=", 1)
        extra_m[key] = value
    # returns the final tuple containing both the normalized
    # mime types for the content and the extra key to value items
    return types, extra_m
def get_configs(configs: dict, operator: str, configs_type: str):
    """
    :param configs: the main config file
    :param operator: the id of the operator
    :param configs_type: the type of configs you want: 'image' or 'text' etc
    :return: the first config item listing the operator; falls back to the
        'pavlov' entry when no item matches
    """
    for config_item in configs.values():
        if operator in config_item.get('operators', []):
            return config_item.get(configs_type, {})
    return configs['pavlov'].get(configs_type, {})
def normalize_houndsfield(data_):
    """Normalizes houndsfield values (ranging from -1024 to ~+4000) toward
    the (0, 1) range via ``(data_ + 1024) / 3000``; the input is not mutated.

    NOTE(review): values above 1976 still map slightly above 1 -- confirm
    whether clipping is expected downstream.
    """
    # single expression instead of the two-step `cpy /= 3000`, whose in-place
    # division raised TypeError on integer numpy arrays (unsafe cast)
    return (data_ + 1024) / 3000
def apk(actual, predicted, k=10):
    """
    Computes the average precision at k.
    This function computes the average precision at k between two lists of
    items.
    Parameters
    ----------
    actual : list
        A list of elements that are to be predicted (order doesn't matter)
    predicted : list
        A list of predicted elements (order does matter)
    k : int, optional
        The maximum number of predicted elements
    Returns
    -------
    score : double
        The average precision at k over the input lists
    """
    if not actual:
        return 0.0
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        # fix: membership test (p in actual), not equality against the whole
        # list -- `p == actual` could never be true for scalar predictions
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    # fix: normalize by min(len(actual), k) per the standard AP@k definition,
    # not min(1, k) which over-weighted multi-element ground truths
    return score / min(len(actual), k)
def remove_artifacts(tree):
    """Removes doubled-space artifacts introduced by the preprocessing steps
    from each post's 'body' in ``tree`` (mutated in place).
    Additionally, modify this function to distort/modify post-wise text from
    the training files."""
    for post in tree:
        body = post["body"]
        post["body"] = body.replace("  ", " ")
    return tree
def int2uint64(value):
    """
    Convert a signed 64 bits integer into an unsigned 64 bits integer.
    >>> print(int2uint64(1))
    1
    >>> print(int2uint64(2**64 + 1)) # ignore bits larger than 64 bits
    1
    >>> print(int2uint64(-1))
    18446744073709551615
    >>> print(int2uint64(-2))
    18446744073709551614
    """
    mask = 0xffffffffffffffff  # keep only the low 64 bits
    return value & mask
def get_day_suffix(day):
    """
    Returns the ordinal suffix of a day of the month (1st, 2nd, 3rd, 4th, ...).

    Fix: 11, 12 and 13 take 'th' in English (11th, 12th, 13th); the old
    lookup tables wrongly gave them 'st'/'nd'/'rd'.
    """
    if day in (11, 12, 13):
        return 'th'
    last_digit = day % 10
    if last_digit == 1:
        return 'st'
    if last_digit == 2:
        return 'nd'
    if last_digit == 3:
        return 'rd'
    return 'th'
def multiplicity(p, n):
    """
    Return the multiplicity of the number p in n; that is, the greatest
    number m such that p**m divides n.

    Raises ValueError for inputs with no finite answer — n == 0 (every
    power of p divides 0) or p in (1, -1) — which previously made the
    while-loop spin forever.

    Example usage
    =============

    >>> multiplicity(5, 8)
    0
    >>> multiplicity(5, 5)
    1
    >>> multiplicity(5, 25)
    2
    >>> multiplicity(5, 125)
    3
    >>> multiplicity(5, 250)
    3
    """
    if n == 0:
        raise ValueError("multiplicity of p in 0 is undefined (every p**m divides 0)")
    if p in (1, -1):
        raise ValueError("p must have absolute value greater than 1")
    m = 0
    quot, rem = divmod(n, p)
    while rem == 0:
        quot, rem = divmod(quot, p)
        m += 1
    return m
def find_direct_conflicts(pull_ops, unversioned_ops):
    """
    Detect conflicts where there's both unversioned and pulled
    operations, update or delete ones, referering to the same tracked
    object. This procedure relies on the uniqueness of the primary
    keys through time.
    """
    conflicts = []
    for remote_op in pull_ops:
        if remote_op.command not in ('u', 'd'):
            continue
        for local_op in unversioned_ops:
            if local_op.command not in ('u', 'd'):
                continue
            same_row = remote_op.row_id == local_op.row_id
            same_type = remote_op.content_type_id == local_op.content_type_id
            if same_row and same_type:
                conflicts.append((remote_op, local_op))
    return conflicts
def ReorderListByIndices(reorder_list:list, ordering_indices:list):
    """
    Reorder a list according to a parallel list of ordering indices.

    Best-effort: on any failure the exception is reported to stdout and
    None is returned instead of raising.

    :param reorder_list:list: list you want to reorder
    :param ordering_indices:list: list of indices with desired value order
    """
    try:
        paired = sorted(zip(ordering_indices, reorder_list))
        return [value for _, value in paired]
    except Exception as ex:
        template = "An exception of type {0} occurred in [ContentSupport.ReorderListByIndices]. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
def tagcloud_opacity(n):
    """
    Opacity for the tag cloud on some pages - between 0 and 1.

    Falsy counts (0, None) print an error and yield None; counts of 10 or
    more are fully opaque.
    """
    if not n:
        print(
            "ERROR: tags tot count needs to be a number (did you run python manage.py tags_totcount ?"
        )
        return None
    if n <= 9:
        # add 0.3 so small counts are still visible
        return n / 10.0 + 0.3
    if n >= 10:
        return 1
def _prep_sge_resource(resource):
"""Prepare SGE resource specifications from the command line handling special cases.
"""
resource = resource.strip()
k, v = resource.split("=")
if k in set(["ar"]):
return "#$ -%s %s" % (k, v)
else:
return "#$ -l %s" % resource |
def asn_is_in_ranges(asn, ranges):
    """
    Test if an asn falls within any of the ranges provided

    Arguments:
        - asn<int>
        - ranges<list[tuple(min,max)]>

    Return:
        - bool
    """
    asn = int(asn)
    return any(r[0] <= asn <= r[1] for r in ranges)
def check_on_not_eq_vals(val1, val2):
    """
    Return both values as a tuple when they differ, else a 1-tuple.

    :param val1: first value
    :param val2: second value
    :return: (val1, val2) if unequal, otherwise (val1,)
    """
    if val1 == val2:
        return (val1,)
    return (val1, val2)
def generate_result_dics(videos, parents, channel_videos):
    """Create a dictionary for video search results.

    ``videos`` and ``parents`` are parallel lists; ``channel_videos`` maps
    a channel id to its video count (or similar payload).
    """
    all_results = []
    for position, video_id in enumerate(videos):
        channel_id = parents[position]
        all_results.append({
            "video_id": video_id,
            "position": position,
            "channel_id": channel_id,
            "channel_videos": channel_videos[channel_id],
        })
    return all_results
def to_str(matrix):
    """
    :param matrix: the matrix to render
    :type matrix: matrix
    :return: a string representation of the matrix, one transposed row
             per newline-terminated line
    :rtype: str
    """
    # by design all matrix cols have same size; zip(*matrix) walks columns
    return "".join(
        " ".join(str(cell) for cell in column) + "\n"
        for column in zip(*matrix)
    )
def check_validity(board, number, pos):
    """
    Check whether placing ``number`` at ``pos`` keeps the sudoku board valid.

    Arguments:
        board: The sudoku board (list of rows).
        number: Number to insert.
        pos: (row, col) tuple of the target cell.

    Returns True when no other cell in the same row, column or 3x3 box
    already holds ``number``.
    """
    row, col = pos
    size = len(board[0])
    # Row: no other cell in this row may hold the same number.
    for c in range(size):
        if c != col and board[row][c] == number:
            return False
    # Column: no other cell in this column may hold the same number.
    for r in range(size):
        if r != row and board[r][col] == number:
            return False
    # 3x3 box containing pos.
    box_top = 3 * (row // 3)
    box_left = 3 * (col // 3)
    for r in range(box_top, box_top + 3):
        for c in range(box_left, box_left + 3):
            if (r, c) != (row, col) and board[r][c] == number:
                return False
    return True
def _method_with_key_reference(fake_value, other_value):
"""
:type other_value: [list, dict, str]
"""
return other_value.lower() |
def point_line_nearest_point(p1, l1, l2) -> tuple:
    """Returns the point on segment (l1, l2) that is closest to p1.

    Projects p1 onto the segment; the projection parameter is clamped so
    the result never lies outside the endpoints. A zero-length segment
    falls back to l1.
    """
    dx = float(p1[0] - l1[0])
    dy = float(p1[1] - l1[1])
    vx = float(l2[0] - l1[0])
    vy = float(l2[1] - l1[1])
    seg_len_sq = vx * vx + vy * vy
    # Degenerate (zero-length) segment: force the l1 branch below.
    t = (dx * vx + dy * vy) / seg_len_sq if seg_len_sq != 0 else -1
    if t < 0:
        return l1
    if t > 1:
        return l2
    return l1[0] + t * vx, l1[1] + t * vy
def sec2hms(sec):
    """
    Convert seconds to hours, minutes and seconds.
    """
    hours = int(sec / 3600)
    leftover = sec - 3600 * hours
    minutes = int(leftover / 60)
    seconds = int(leftover - 60 * minutes)
    return hours, minutes, seconds
def get_control_variation(campaign):
    """Returns control variation from a given campaign

    Args:
        campaign (dict): Running campaign

    Returns:
        variation (dict): Control variation from the campaign, ie having
        id = 1, or None when no such variation exists
    """
    return next(
        (v for v in campaign.get("variations") if int(v.get("id")) == 1),
        None,
    )
def no_net_connect_disconnect(on=0):
    """Remove the "Map Network Drive" option (registry snippet).

    DESCRIPTION
        Prevents users from making additional configuration through the
        "Map Network Drive" option: the entry is removed from the Windows
        Explorer toolbar and from the "My Computer" context menu.

    COMPATIBILITY
        Windows 2000/Me/XP

    MODIFIED VALUES
        NoNetConnectDisconnect : dword : 00000000 = restriction disabled;
        00000001 = restriction enabled.
    """
    value = '00000001' if on else '00000000'
    return ('[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\'
            'CurrentVersion\\Policies\\Explorer]\n'
            '"NoNetConnectDisconnect"=dword:' + value)
def white_backward_continue(i, text):
    """Scan backward and skip characters.

    Skipped characters are those that should be included in the current
    atom. Type casting can have spaces before the variable, e.g.
    ``(NSString *) variable``, so a closing paren just before ``i`` is
    absorbed into the atom.
    """
    return i - 1 if text[i - 1] == ')' else i
def flatten_dict(d):
    """Recursively flatten nested dicts (and lists of dicts) into one level.

    Nested keys are joined with '_'. Scalars are copied through unchanged.

    https://stackoverflow.com/questions/52081545/python-3-flattening-nested-dictionaries-and-lists-within-dictionaries
    """
    flat = {}
    for key, val in d.items():
        # Treat a nested dict as a one-element list so both cases share a path.
        if isinstance(val, dict):
            val = [val]
        if not isinstance(val, list):
            flat[key] = val
            continue
        for subdict in val:
            for inner_key, inner_val in flatten_dict(subdict).items():
                flat[key + '_' + inner_key] = inner_val
    return flat
def extract_interaction_labels(fasta_file):
    """
    Extracts the binary epitope and ppi annotations from the previously generated fasta files in epitope_dir and/or ppi_dir.

    fasta_file format (3 lines):
        >1HEZ_E (pdb identifier + optionally UNP <uniprot_id>)
        EVTIKVNLIFADGKIQTAEFKGTFEEATAEAYRYADLLAKVNGEYTADLEDGGNHMNIKFA (protein sequence)
        0000000000000000000000011001100110011000000011111101100000000 (ppi/epitope annotations)

    Returns (annotations_list, sequence), or (None, None) when no file is given.
    """
    if fasta_file is None:
        # No annotations available.
        return None, None
    with open(fasta_file, 'r') as handle:
        content = handle.readlines()
    labels = [float(char) for char in content[2].strip()]
    sequence = content[1].strip()
    return labels, sequence
def hash_1(key, size):
    """ simple hash 1 function """
    total = 0
    # Fold position and character code of the stringified key together.
    for index, char in enumerate(str(key)):
        total += index * 256 + ord(char)
    return total % size
def _gnurl( clientID ):
"""
Helper function to form URL to Gracenote_ API service.
:param str clientID: the Gracenote_ client ID.
:returns: the lower level URL to the Gracenote_ API.
:rtype: str
"""
clientIDprefix = clientID.split('-')[0]
return 'https://c%s.web.cddbp.net/webapi/xml/1.0/' % clientIDprefix |
def format_file_size(size):
    """
    Formats the file size as string.

    @param size numeric value
    @return string (something + unit)
    """
    # Largest-first so the first matching threshold wins.
    for threshold, unit in ((2 ** 30, "Gb"), (2 ** 20, "Mb"), (2 ** 10, "Kb")):
        if size >= threshold:
            return "%1.2f %s" % (size / threshold, unit)
    return "%d" % size
def float_to_bin(num, length):
    """
    Convert the fractional part of a float to a fixed-length binary string.

    :param num: Number to convert (intended for values in [0, 1))
    :type num: float
    :param length: Number of binary digits to emit
    :type length: int
    :return: the first ``length`` bits of the binary fraction expansion
    :rtype: string
    """
    bits = []
    frac = float(num)
    for _ in range(length):
        # Double-and-take-the-integer-bit expansion.
        frac *= 2
        if frac < 1:
            bits.append("0")
        else:
            bits.append("1")
            frac -= 1
    return "".join(bits)
def parse_query_string(query_string: str) -> dict:
    """
    Query strings basically look like "q=my-query&search=true". This parses
    the whole string and breaks it into a dictionary.

    Only the first '=' separates key from value, so values may themselves
    contain '=' (the previous ``split('=')[1]`` truncated them). Pairs
    without any '=' are skipped, as before.
    """
    _query_dict = {}
    for pair in query_string.split('&'):
        key, sep, value = pair.partition('=')
        if sep:
            _query_dict[key] = value
    return _query_dict
def multi(q, r):
    """Return the product of the two numbers ``q`` and ``r``."""
    return q * r
def some_function(x):
    """Example placeholder: evaluates the quadratic x**2 + x + 3."""
    total = x ** 2
    total += x
    return total + 3
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.