content stringlengths 42 6.51k |
|---|
def multi_byte_to_hex(the_input):
    """Convert a sequence of byte values into one hex string.

    Each value is rendered as two lowercase hex digits, e.g.
    [1, 10, 100, 255] -> "010a64ff".
    """
    return ''.join('{:02x}'.format(int(value)) for value in the_input)
def delta4(a, b, c, d):
    """Delta function (4 variables).

    Returns 1 when all four arguments are equal, else 0.
    """
    # Chained comparison with short-circuiting replaces the bitwise `&`
    # of comparison results, which evaluated every comparison eagerly.
    return 1 if a == b == c == d else 0
def deindent(text, numtabs=None, spacespertab=4, docstring=False):
    """
    Returns a copy of the string with the common indentation removed.
    Note that all tab characters are replaced with ``spacespertab`` spaces.
    If the ``docstring`` flag is set, the first line is treated differently and
    is assumed to be already correctly tabulated.
    If the ``numtabs`` option is given, the amount of indentation to remove is
    given explicitly and not the common indentation.
    Examples
    --------
    Normal strings, e.g. function definitions:
    >>> multiline = ''' def f(x):
    ... return x**2'''
    >>> print(multiline)
    def f(x):
    return x**2
    >>> print(deindent(multiline))
    def f(x):
    return x**2
    >>> print(deindent(multiline, docstring=True))
    def f(x):
    return x**2
    >>> print(deindent(multiline, numtabs=1, spacespertab=2))
    def f(x):
    return x**2
    Docstrings:
    >>> docstring = '''First docstring line.
    ... This line determines the indentation.'''
    >>> print(docstring)
    First docstring line.
    This line determines the indentation.
    >>> print(deindent(docstring, docstring=True))
    First docstring line.
    This line determines the indentation.
    """
    # Expand tabs first so column counting is unambiguous.
    text = text.replace('\t', ' '*spacespertab)
    lines = text.split('\n')
    # if it's a docstring, we search for the common tabulation starting from
    # line 1, otherwise we use all lines
    if docstring:
        start = 1
    else:
        start = 0
    if docstring and len(lines)<2: # nothing to do
        return text
    # Find the minimum indentation level
    if numtabs is not None:
        # Explicit amount requested: remove exactly numtabs tab-widths.
        indentlevel = numtabs*spacespertab
    else:
        # Measure leading whitespace of every non-blank line and take the
        # smallest as the common indentation.
        lineseq = [len(line)-len(line.lstrip()) for line in lines[start:] if len(line.strip())]
        if len(lineseq)==0:
            indentlevel = 0
        else:
            indentlevel = min(lineseq)
    # remove the common indentation
    lines[start:] = [line[indentlevel:] for line in lines[start:]]
    return '\n'.join(lines)
def parse_stiff_dict(element):
    """Parse an element dictionary from a configuration file.

    Args:
        element (dict): element dictionary with keys 'start_node',
            'end_node' and either 'stiffness' or the truss properties
            'E' (elasticity), 'A' (area) and 'L' (length).
    Returns:
        tuple: (start_node, end_node, stiffness), with node ids
        converted to 0-based indices.
    """
    start_node = element.get('start_node')
    end_node = element.get('end_node')
    stiffness = element.get('stiffness')
    E = element.get('E')
    A = element.get('A')
    L = element.get('L')
    # Removed the stray debug print of (E, A, L).
    if start_node is not None and end_node is not None:
        # the node notation in the configuration starts from 1
        start_node -= 1
        end_node -= 1
    if stiffness is None:
        # No explicit stiffness: assume a truss element, k = E*A/L.
        # (`is None` instead of `not stiffness`, so an explicit
        # stiffness of 0 is no longer silently recomputed.)
        stiffness = E * A / L
    return start_node, end_node, stiffness
def partition(arr, start, end, pivot_index):
    """Partition arr[start:end+1] around the value at pivot_index.

    After the call, elements left of the returned index are < pivot and
    elements right of it are >= pivot; the pivot sits at its final
    sorted position. Returns that final index.
    """
    # Move the pivot to the front, then sweep with a boundary pointer.
    arr[start], arr[pivot_index] = arr[pivot_index], arr[start]
    boundary = start
    for scan in range(start + 1, end + 1):
        if arr[scan] < arr[start]:
            boundary += 1
            arr[boundary], arr[scan] = arr[scan], arr[boundary]
    # Finally drop the pivot between the two partitions.
    arr[boundary], arr[start] = arr[start], arr[boundary]
    return boundary
def entry_is_empty(data):
    """Return True when a data field is null (None) or an empty string."""
    # `is None` instead of `== None`: identity test, immune to objects
    # with a custom __eq__.
    return data is None or data == ""
def abbrev_timestr(s):
    """Chop milliseconds off of a time string, if present."""
    pieces = s.split("s")
    if len(pieces) >= 3:
        return pieces[0] + "s"
    return "0s"
def norm2_sqr(x, y):
    """Return the squared Euclidean (2-)norm of the 2-d vector (x, y).

    :param x: first component
    :param y: second component
    :return: x**2 + y**2
    """
    return x ** 2 + y ** 2
def FMScore(x, p, d):
    """Score a frequency/monetary value x against quantiles in d[p].

    d maps column p to its 0.2/0.4/0.6/0.8 quantile values; the result
    is the 1-5 bucket that x falls into.
    """
    quantiles = d[p]
    for score, q in ((1, 0.20), (2, 0.4), (3, 0.6), (4, 0.8)):
        if x <= quantiles[q]:
            return score
    return 5
def name_from_dimensions(dimensions):
    """Build the name of a unit from its dimensions.

    Param:
        dimensions: list of dicts, each with a 'base' unit name and an
            integer 'power'; negative powers get a leading 'per '.
    """
    pieces = []
    for unit in dimensions:
        part = 'per ' if unit['power'] < 0 else ''
        power = abs(unit['power'])
        if power == 1:
            part += unit['base']
        elif power == 2:
            part += 'square ' + unit['base']
        elif power == 3:
            part += 'cubic ' + unit['base']
        elif power > 3:
            part += unit['base'] + ' to the %g' % power
        pieces.append(part)
    return ' '.join(pieces).strip()
def modularity(G, communities):
    """Compute the (Louvain) modularity of graph G under a partition.

    :param dict G: adjacency mapping {node: {neighbor: weight}}; each
        edge is expected to appear in both endpoints' neighbor dicts.
    :param dict communities: community id each node is assigned to
    :return: modularity value rounded to 3 decimals
    """
    # Total edge weight; halved because every edge is stored twice.
    # (Debug prints of m and ks removed.)
    m = sum(w for neighbors in G.values() for w in neighbors.values()) / 2.0
    # Weighted degree of each node.
    ks = {k: sum(G[k].values()) for k in G}
    Q = 0
    for i in G:
        for j in G:
            if communities[i] != communities[j]:
                # delta function: pairs in different communities do not
                # contribute
                continue
            Aij = G[i].get(j, 0)
            Q += 1 / (2 * m) * (
                Aij - ks[i] * ks[j] / (2 * m)
            )
    return round(Q, 3)
def _from_Hex(color):
"""
Converts #RRGGBB hex code into (r,g,b) tuple
"""
color = color.lstrip('#')
return [int(color[i:i+2], 16) for i in range(0, 6, 2)] |
def writeFile(filename, data, mode):
    """Write (or append) text to a file.

    Parameters
    ----------
    filename : str
        name of the file
    data : str
        text to write to the file
    mode : str
        file mode, e.g. 'w' to overwrite or 'a' to append

    Returns
    -------
    True
    """
    with open(filename, mode) as handle:
        handle.write(data)
    return True
def jwt_response_payload_handler(token, user=None, request=None):
    """
    Build the response payload for both the JWT login and refresh views.

    Override this to return a richer response, e.g. including a
    serialized representation of the user alongside the token.
    """
    return {'token': token}
def findDefault(template_list, key):
    """Look up key in a template mapping.

    return: the mapped value, or the empty string when the key is absent.
    """
    if key in template_list:
        return template_list[key]
    return ''
def __gen_mac(id):
"""
Generate a MAC address
Args:
id (int): IXIA port ID
Returns:
MAC address (string)
"""
return '00:11:22:33:44:{:02d}'.format(id) |
def parse_output(output_list):
    """Format pairs like [[1, 4], [2, 3]] as the string '1,4;2,3'."""
    return ";".join(",".join(str(num) for num in pair)
                    for pair in output_list)
def secondSmallestElement(a_list):
    """Return the second smallest element of a list of numbers.

    Duplicates count separately: the second smallest of [1, 1, 2] is 1.
    Unlike the previous implementation this does not mutate the input
    list (the old version removed the minimum from the caller's list).
    """
    return sorted(a_list)[1]
def build_scenario_evidence_map(utterances):
    """Map each utterance's scenario to its evidence (last one wins)."""
    return {u['scenario']: u['evidence'] for u in utterances}
def flatten_lists(list_of_lists):
    """Flatten one level of nesting.

    Parameters
    ----------
    list_of_lists : list[list[Any]]

    Returns
    -------
    list[Any]
    """
    flattened = []
    for sub in list_of_lists:
        flattened.extend(sub)
    return flattened
def split_on_uppercase(s):
    """Split a string at uppercase letters that border a lowercase one.

    From: https://stackoverflow.com/a/40382663
    """
    n = len(s)

    def lower_neighbour(i):
        # A boundary only counts when the previous or next char is
        # lowercase, so runs of capitals (acronyms) stay together.
        return s[i - 1].islower() or (n > i + 1 and s[i + 1].islower())

    parts = []
    start = 0
    for i in range(1, n):
        if s[i].isupper() and lower_neighbour(i):
            parts.append(s[start:i])
            start = i
    parts.append(s[start:])
    return parts
def gwrap(some_string):
    """Wrap text in ANSI escape codes so it renders green."""
    return f"\033[92m{some_string}\033[0m"
def next_event_name(trace: list, prefix_length: int):
    """Return the event name at position prefix_length, or 0 if out of range.

    :param trace: list of event dicts with a 'concept:name' key
    :param prefix_length: index of the event to look up
    :return: the event name, or the integer 0 when past the end
    """
    if prefix_length >= len(trace):
        return 0
    return trace[prefix_length]['concept:name']
def countRun(s, c, maxRun, count):
    """Count leading consecutive occurrences of c in s, capped at maxRun.

    parameter s: a string
    parameter c: the substring we're counting
    parameter maxRun: maximum length of run to count up to
    parameter count: occurrences counted so far (pass 0 initially)
    returns: the number of times c occurs in a row at the start of s
    """
    # (Removed a large block of dead, commented-out "trial" code.)
    # Base cases: exhausted input, hit the cap, or the next chunk differs.
    if s == '' or count >= maxRun or s[:len(c)] != c:
        return 0
    return 1 + countRun(s[len(c):], c, maxRun, count + 1)
def get_interval_from_list(input_list):
    """
    Created 20180619 by Magnus Wenzer
    Updated

    Reduce a list to its interval [first, last]; lists of lists are
    reduced element-wise. Example: [3, 4, 5, 6] => [3, 6].
    Returns None for non-lists and for lists whose elements are not all
    of one type (including the empty list).
    """
    if type(input_list) is not list:
        return None
    if len({type(item) for item in input_list}) != 1:
        # mixed element types (or an empty list): no interval
        return None
    if type(input_list[0]) is list:
        return [get_interval_from_list(sub) for sub in input_list]
    return [input_list[0], input_list[-1]]
def merge(dict1, dict2):
    """Merge two dicts; on key collisions dict2's value wins."""
    merged = dict(dict1)
    merged.update(dict2)
    return merged
def autocomplete(corpus, prefix):
    """
    Given a text corpus (a list of words) and a prefix, return a list
    of all words that start with that prefix.
    """
    class Trie:
        def __init__(self, value):
            self.value = value      # character this node represents
            self.children = {}      # char -> child Trie node
            # allows for quick access of resulting words,
            # rather than having cumbersome traversal
            self.words = []
    # An empty prefix matches every word in the corpus.
    if not len(prefix):
        return corpus
    # construct trie; every node records all words that pass through it
    root = Trie('')
    for word in corpus:
        cur = root
        cur.words.append(word)
        for char in word:
            if char not in cur.children:
                cur.children[char] = Trie(char)
            cur = cur.children[char]
            cur.words.append(word)
    # Walk down the trie along the prefix; a missing edge means no match.
    for char in prefix:
        if char in root.children:
            root = root.children[char]
        else:
            return []
    return root.words
def _floats_from_string(line):
"""
Split a string using blank spaces and convert the elements to float. If an element
cannot be converted it is skipped.
"""
line_float = []
for value in line.split():
try: line_float.append(float(value))
except ValueError: pass
return line_float |
def commutator(a, b):
    """
    Return the commutator: [a, b] = a*b - b*a
    """
    product_ab = a * b
    product_ba = b * a
    return product_ab - product_ba
def floor(x, unit=1):
    """Return the greatest multiple of `unit` that is <= `x`.

    The previous `int(x/unit)` truncated toward zero, which rounded the
    wrong way for negative x (floor(-3, 2) gave -2 instead of -4);
    math.floor rounds toward negative infinity as the docstring promises.
    """
    import math
    return unit * math.floor(x / unit)
def join_path(base, *parts: str):
    """Create a url from a base path plus additional segments,
    normalizing the slashes between them."""
    tail = "/".join(part.strip("/") for part in parts)
    separator = "" if base.endswith("/") else "/"
    return base + separator + tail
def make_field(name, value):
    """Return a Mastodon-style dict of the name and value."""
    return dict(name=name, value=value)
def make_url(photo: dict):
    """
    Build the download URL ('=d' suffix) for a photo.

    :param photo: photo data as returned from the API; must contain 'baseUrl'
    """
    return "{}=d".format(photo["baseUrl"])
def bin_to_hexadecimal(binary_str: str) -> str:
    """
    Converting a binary string into hexadecimal using Grouping Method
    >>> bin_to_hexadecimal('101011111')
    '0x15f'
    >>> bin_to_hexadecimal(' 1010 ')
    '0x0a'
    >>> bin_to_hexadecimal('-11101')
    '-0x1d'
    >>> bin_to_hexadecimal('a')
    Traceback (most recent call last):
    ...
    ValueError: Non-binary value was passed to the function
    >>> bin_to_hexadecimal('')
    Traceback (most recent call last):
    ...
    ValueError: Empty string was passed to the function
    """
    bits_to_hex = {
        "0000": "0", "0001": "1", "0010": "2", "0011": "3",
        "0100": "4", "0101": "5", "0110": "6", "0111": "7",
        "1000": "8", "1001": "9", "1010": "a", "1011": "b",
        "1100": "c", "1101": "d", "1110": "e", "1111": "f",
    }
    # Sanitise the parameter, then validate.
    cleaned = str(binary_str).strip()
    if not cleaned:
        raise ValueError("Empty string was passed to the function")
    negative = cleaned.startswith("-")
    if negative:
        cleaned = cleaned[1:]
    if any(char not in "01" for char in cleaned):
        raise ValueError("Non-binary value was passed to the function")
    # Zero-pad up to the next group boundary; len % 4 == 0 still gains a
    # full leading "0000" group (matches the doctest for ' 1010 ').
    width = 4 * (len(cleaned) // 4 + 1)
    padded = cleaned.zfill(width)
    digits = "".join(bits_to_hex[padded[pos:pos + 4]]
                     for pos in range(0, width, 4))
    result = "0x" + digits
    return "-" + result if negative else result
def _filter_idxs(idxs_lst, filterlst=()):
""" Filter out a tuple
"""
filtered_lst = tuple()
for idxs in idxs_lst:
if not any(set(idxs) <= set(fidxs) for fidxs in filterlst):
filtered_lst += (idxs,)
return filtered_lst |
def key_generator(value):
    """Simple key generator: normalizes a value by lower-casing it."""
    lowered = value.lower()
    return lowered
def _stmts_dict_to_json(stmt_dict):
"""Make a json representation from dict of statements
This function is the inverse of _json_to_stmts_dict()
Parameters
----------
stmt_dict : dict
Dict with statements keyed by their uuid's: {uuid: stmt}
Returns
-------
list(json)
A list of json statements
"""
return [s.to_json() for _, s in stmt_dict.items()] |
def to_list(head):
    """Convert a linked list into a Python list (for easier printing).

    Iterative instead of recursive, so very long lists can no longer
    hit the recursion limit; also uses `is None` rather than `== None`.
    """
    out = []
    node = head
    while node is not None:
        out.append(node.value)
        node = node.next
    return out
def match_title(event_title):
    """
    Return True when event_title contains any of the known bill-event
    phrases.
    """
    bill_titles = (
        "Bill passed by National Assembly",
        "Bill passed by both Houses",
        "Bill revived on this date",
        "The NCOP rescinded",
        "Bill remitted",
    )
    return any(title in event_title for title in bill_titles)
def overlap(a1, a2, b1, b2):
    """
    Check if the closed ranges a1-a2 and b1-b2 overlap.

    Endpoint order within each pair does not matter.
    """
    a_lo, a_hi = min(a1, a2), max(a1, a2)
    b_lo, b_hi = min(b1, b2), max(b1, b2)
    # Two intervals intersect iff each one starts before the other ends.
    return a_lo <= b_hi and b_lo <= a_hi
def get_event_abi(abi, event_name):
    """Extract a specific event's entry from a contract abi.

    Args:
        abi (list): the contract abi
        event_name (str): the event name
    Returns:
        dict: the event specific abi
    Raises:
        ValueError: when no matching event entry exists
    """
    for entry in abi:
        named_match = 'name' in entry and entry['name'] == event_name
        if named_match and entry['type'] == "event":
            return entry
    raise ValueError(
        'Event `{0}` not found in the contract abi'.format(event_name))
def _getNsec ( nsecStr ) :
"""
Turn string into nanoseconds, strings is everything that
appears after decimal dot.
"1" -> 100000000 ns
"123" -> 123000000 ns
"123456789987654321" -> 123456789ns (truncation, no rounding)
"""
ndig = min(len(nsecStr),9)
nsecStr = nsecStr[:ndig] + '0'*(9-ndig)
return int(nsecStr) |
def should_remove_line(line, blacklist):
    """
    Return True when any blacklisted image name occurs in the line.

    :param line: line in datalist file
    :param blacklist: list of images to be removed
    """
    for image in blacklist:
        if image in line:
            return True
    return False
def add_comma_rs(_index, _value, _row_separator):
    """Prefix value with ',' + row separator for every item after the
    first; used in loops when building lists of references."""
    return _value if _index <= 0 else ',' + _row_separator + _value
# Some database functions |
def get_sub_section_letter_from_str(subsection: str) -> str:
    """Return the subsection letter used to file a downloaded ROM into
    its alphanumeric directory ('number' for the numeric section)."""
    marker: str = '§ion=number'
    if marker in subsection.lower():
        return 'number'
    return subsection[-1]
def variantMetadata_object(processed_request):
    """
    Build the beacon variantMetadata (variantAnnotation) object.

    There is only one model for this object, so the structure is static
    and `processed_request` is currently unused.
    """
    return {
        "default": {
            "version": "beacon-variant-metadata-v1.0",
            "value": {
                "geneId": "",
                "HGVSId": "",
                "transcriptId": "",
                "alleleId": "",
                "variantClassification": "",
                "variantType": "",
                "disease": "",
                "proteinChange": "",
                "clinVarId": "",
                "pubmedId": "",
                "timestamp": "",
                "info": {}
            }
        },
        "alternativeSchemas": []
    }
def twos_comp(val, bits):
    """Interpret val as a `bits`-wide two's-complement integer."""
    sign_bit = 1 << (bits - 1)
    if val & sign_bit:
        # sign bit set (e.g. 128-255 for 8 bits): wrap into negative range
        return val - (1 << bits)
    return val
def bisect_search2(L, e):
    """
    >>> bisect_search2([2,3,4,5, 'andile', 'mzie'], 2)
    True
    >>> bisect_search2([2,3,4,5, 'andile', 'mzie'], 1)
    False
    >>> bisect_search2([2,3,4,5], 5)
    True
    """
    if len(L) == 0:
        return False
    lo, hi = 0, len(L) - 1
    # Iterative equivalent of the recursive helper; the branch order is
    # kept identical so the same comparisons happen in the same order.
    while True:
        if hi == lo:
            return L[lo] == e
        mid = (lo + hi) // 2
        if L[mid] == e:
            return True
        if L[mid] > e:
            if lo == mid:  # nothing left to search for
                return False
            hi = mid - 1
        else:
            lo = mid + 1
def truncate_size(size):
    """Round size to its leading digit's magnitude.

    This is needed to anonymize the statistics, so they can't be traced
    back to some dataset.
    """
    magnitude = len(str(size)) - 1
    return round(size, -magnitude)
def _subs(value):
"""Return a list of subclass strings.
The strings represent the ldap objectclass plus any subclasses that
inherit from it. Fakeldap doesn't know about the ldap object structure,
so subclasses need to be defined manually in the dictionary below.
"""
subs = {'groupOfNames': ['keystoneProject',
'keystoneRole',
'keystoneProjectRole']}
if value in subs:
return [value] + subs[value]
return [value] |
def get_compatible_vep_version(vep_cache_version, ftp_source):
    """
    Return the VEP version compatible with the given cache version.

    Caches from Ensembl Genomes ('genomes') are offset by +53 from the
    main Ensembl release numbering; otherwise the versions are equal.
    """
    if ftp_source == 'genomes':
        return vep_cache_version + 53
    return vep_cache_version
def format_time(time, formatter=None):
    """
    Render a time value (in seconds) as a string using a formatter.
    The formatter may contain the following placeholders:
    - %D : days
    - %H : hours
    - %M : minutes
    - %S : seconds
    - %m : milliseconds
    Examples :
    format_time(time=600, formatter="%H hours and %M minutes") -> "10 hours and 0 minutes"
    When no formatter is given, "%D:%H:%M:%S:%m" is used and leading
    zero components are trimmed.
    """
    formatter_is_unset = formatter is None
    if formatter is None:
        formatter = "%D:%H:%M:%S:%m"
    # Which components the formatter actually asks for.
    require_days = formatter.count("%D")
    require_hours = formatter.count("%H")
    require_minutes = formatter.count("%M")
    require_seconds = formatter.count("%S")
    require_milliseconds = formatter.count("%m")
    # Days
    days = int(time // 86400)
    if require_days:
        formatter = formatter.replace("%D", "{:0>2}".format(days))
        # Only consume the days from `time` when they were displayed,
        # so e.g. "%H" alone shows the total number of hours.
        time = time % 86400
    # Hours
    hours = int(time // 3600)
    if require_hours:
        formatter = formatter.replace("%H", "{:0>2}".format(hours))
        time = time % 3600
    # Minutes
    minutes = int(time // 60)
    if require_minutes:
        formatter = formatter.replace("%M", "{:0>2}".format(minutes))
        time = time % 60
    # Seconds
    if require_seconds:
        formatter = formatter.replace("%S", "{:0>2}".format(int(time)))
        time = time % 1
    # milliseconds
    if require_milliseconds:
        formatter = formatter.replace("%m", "{:0>3}".format(int(time * 1000)))
    if formatter_is_unset:
        # Trim leading "00:" components of the default format, keeping
        # at least "MM:SS"-sized output.
        while formatter.startswith("00:") and len(formatter) > 6:
            formatter = formatter[3:]
    return formatter
def clamp(x: float, lower=0., upper=1.) -> float:
    """
    Clamp x into the range [lower, upper] (default [0, 1]); NaN raises.
    """
    from math import isnan
    if x <= lower:
        return lower
    if x >= upper:
        return upper
    if isnan(x):
        # NaN fails both comparisons above, so it must be rejected here.
        raise FloatingPointError('clamp is undefined for NaN')
    return x
def expression_split(src):
    """
    Find matching parenthesis pairs in a string.

    Pairs are generated in closing order, so the inner-most and
    left-most pair (which logically should be processed first) comes
    first in the result.

    :param:
        src: input string
    :return:
        list of (open_index, close_index) tuples
    """
    pairs = []
    pending_opens = []
    for position, char in enumerate(src):
        if char == "(":
            pending_opens.append(position)
        elif char == ")":
            pairs.append((pending_opens.pop(), position))
    return pairs
def rebuild_command(args):
    """Join args into one newline-terminated command string, escaping
    backslashes so it can be stored in a file."""
    joined = " ".join(args).replace("\\", "\\\\")
    return joined + "\n"
def clean_invite_embed(line):
    """Insert a zero-width space after 'discord.gg/' so invites don't embed."""
    return "discord.gg/\u200b".join(line.split("discord.gg/"))
def create_mapping(start_offsets, end_offsets, context_to_plaintext_offset):
    """Map context-relative offsets to plaintext offsets.

    Args:
        start_offsets: List of offsets relative to a TyDi entry's `contexts`.
        end_offsets: List of offsets relative to a TyDi entry's `contexts`.
        context_to_plaintext_offset: Dict mapping `contexts` offsets to
            plaintext offsets.

    Returns:
        Tuple of (start, end) offset lists relative to the original
        corpus plaintext; negative input offsets stay -1.
    """
    def translate(offsets):
        # Sentinel offsets (< 0) are passed through as -1, not looked up.
        return [context_to_plaintext_offset[i] if i >= 0 else -1
                for i in offsets]

    return translate(start_offsets), translate(end_offsets)
def binary_search_two(item, lst, lo, hi):
    """Recursive binary search returning the index of item, or -1."""
    if len(lst) == 0 or hi < lo:
        # empty list, or no more numbers to search
        return -1
    mid = (lo + hi) // 2  # midpoint in array
    if lst[mid] == item:
        return mid
    if item < lst[mid]:
        return binary_search_two(item, lst, lo, mid - 1)  # left side
    return binary_search_two(item, lst, mid + 1, hi)      # right side
def naive_matrix_product(A, B):
    """Naive matrix multiplication C = A @ B.

    Generalized from the original square-only version: the inner loop
    now runs over len(B) (the shared dimension) instead of len(A), so
    rectangular matrices with compatible shapes also work. Square
    inputs behave exactly as before.
    """
    rows = len(A)
    cols = len(B[0])
    inner = len(B)
    # start from an all-zero result matrix
    C = [[0 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        for j in range(cols):
            for k in range(inner):
                C[i][j] += A[i][k] * B[k][j]
    return C
def print_point_accuracy_info(accuracy_x_list, accuracy_y_list, boundary_index_list):
    """
    Print per-axis accuracy info, split into edge points (indices in
    boundary_index_list) and center points, and return a pass flag:
    1 when center errors are < 1.0 mm and edge errors are < 1.5 mm,
    else 0.
    """
    # Track the worst (largest) error seen in each region and axis.
    max_edge_x = 0.0
    max_edge_y = 0.0
    max_center_x = 0.0
    max_center_y = 0.0
    for i in range(len(accuracy_x_list)):
        if i in boundary_index_list:
            if accuracy_x_list[i] > max_edge_x:
                max_edge_x = accuracy_x_list[i]
            if accuracy_y_list[i] > max_edge_y:
                max_edge_y = accuracy_y_list[i]
        else:
            if accuracy_x_list[i] > max_center_x:
                max_center_x = accuracy_x_list[i]
            if accuracy_y_list[i] > max_center_y:
                max_center_y = accuracy_y_list[i]
    # print("\tEdge max accuracy error (%f, %f), point index (%d, %d)"
    #       % (max_edge_x, max_edge_y,
    #          accuracy_x_list.index(max_edge_x), accuracy_y_list.index(max_edge_y)))
    # print("\tCenter max accuracy error (%f, %f), point index (%d, %d)"
    #       % (max_center_x, max_center_y,
    #          accuracy_x_list.index(max_center_x), accuracy_y_list.index(max_center_y)))
    print("Accuracy: Center X: %.3fmm, Center Y: %.3fmm" % (max_center_x, max_center_y))
    print("          Edge X: %.3fmm, Edge Y: %.3fmm\n" % (max_edge_x, max_edge_y))
    # Tolerances: 1.5 mm at the edges, 1.0 mm in the center.
    if max_edge_x < 1.5 and max_edge_y < 1.5 and max_center_x < 1.0 and max_center_y < 1.0:
        return 1
    else:
        return 0
def getPropertyNames(metadataFile):
    """Return the upper-cased, trimmed column names from the first
    (tab-separated) line of a metadata file.

    Here we could add more complex behaviour later on.
    """
    return [name.strip().upper() for name in metadataFile[0].split("\t")]
def mcd(a, b):
    """Euclidean algorithm: return the greatest common divisor of a and b."""
    while b != 0:
        a, b = b, a % b
    return a
def climbing_stairs(n):
    """Number of distinct ways to climb n stairs taking 1 or 2 steps.

    O(n) time, O(1) space — the previous dict-based memo kept every
    intermediate value although only the last two are ever needed.
    """
    prev, cur = 1, 1  # ways to climb 0 and 1 stairs
    for _ in range(2, n + 1):
        prev, cur = cur, prev + cur
    return cur
def convert_groups_to_crashes(groups):
    """Convert crash groups to an array of crash dicts for JobRun."""
    return [
        {
            'is_new': group.is_new(),
            'count': len(group.crashes),
            'crash_type': group.main_crash.crash_type,
            'crash_state': group.main_crash.crash_state,
            'security_flag': group.main_crash.security_flag,
        }
        for group in groups
    ]
def xor(a, b):
    """Logical XOR for function arguments (truthiness-based).

    Equivalent to `(a or b) and not (a and b)`, including the
    short-circuit return values for falsy inputs.
    """
    either = a or b
    both = a and b
    return either and not both
def sizeof_fmt(num, suffix="B"):
    """Format a byte count as a human-readable string (-1 means 'large')."""
    if num == -1:
        return "large"
    for unit in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
        if abs(num) < 1024.0:
            return f"{num:3.1f}{unit}{suffix}"
        num /= 1024.0
    return f"{num:.1f}Yi{suffix}"
def get_authn_ctx(session_info):
    """
    Get the SAML2 AuthnContext of the currently logged in user's session.

    session_info is a dict like
        {'authn_info': [('http://www.swamid.se/policy/assurance/al1',
                         ['https://dev.idp.eduid.se/idp.xml'])],
         ...
        }

    :param session_info: The SAML2 session_info
    :return: The first AuthnContext, or None when absent
    :rtype: string | None
    """
    try:
        return session_info['authn_info'][0][0]
    except (KeyError, IndexError):
        # KeyError: no 'authn_info' key at all; IndexError: the list is
        # empty — the original caught only KeyError and crashed on [].
        return None
def split_routes(route):
    """
    Split [node1, node2, node3, ...] into consecutive pairs
    [[node1, node2], [node2, node3], ...] so node distances are easy to
    look up.
    """
    return [route[i:i + 2] for i in range(len(route) - 1)]
def get_plain_from_html(html):
    """Extract plain text from html.

    >>> test_html = "<div><h1>Hey<h1><p>This is some text</p></div>"
    >>> get_plain_from_html(test_html)
    'Hey\\nThis is some text'
    """
    # import here to avoid high startup cost
    from html.parser import HTMLParser

    class TextCollector(HTMLParser):
        """Accumulates the text chunks found between tags."""
        def __init__(self):
            HTMLParser.__init__(self)
            self.chunks = []

        def handle_data(self, data):
            self.chunks.append(data)

    collector = TextCollector()
    collector.feed(html)
    collector.close()
    return '\n'.join(collector.chunks)
def get_item(a, i):
    """
    Return a[i], or None when the container has no such item.

    The original `getattr(a, '__getitem__')` had no default and raised
    AttributeError for non-indexable objects; those now return None too.
    """
    if getattr(a, '__getitem__', None) is not None:
        try:
            return a[i]
        except (KeyError, IndexError):
            pass
    return None
def find_files(search_path, include=('*', ), exclude=('.*', )):
    """
    Recursively find files under a directory.

    :param str search_path: Path to search for files.
    :param tuple include: Patterns a file name must match.
    :param tuple exclude: Patterns that disqualify a file name.
    :return: Sorted list of normalized paths that matched the include
     collection but not the exclude collection.
    :rtype: list
    """
    from os import walk
    from fnmatch import fnmatch
    from os.path import join, normpath

    def matches_any(fname, patterns):
        return any(fnmatch(fname, pattern) for pattern in patterns)

    found = []
    for root, _folders, files in walk(search_path):
        for fname in files:
            if matches_any(fname, include) and not matches_any(fname, exclude):
                found.append(normpath(join(root, fname)))
    return sorted(found)
def _get_api_version_from_accept_header(api_version_header: str):
"""Split input string and return the api version.
Input must be of the format:
[something];version=[some version]
:param str api_version_header: value of 'Accept' header
:returns: api version specified in the input string.
Returns 0.0 by default.
"""
api_version = '0.0'
if api_version_header:
tokens = api_version_header.split(";")
if len(tokens) == 2:
tokens = tokens[1].split("=")
if len(tokens) == 2:
api_version = tokens[1]
return api_version |
def get_toolbox_app_channel(app_path: str) -> str:
    """Return the 'ch-…' channel suffix of a toolbox app path, or ''.

    A marker at index 0 (path starting with 'ch-') yields '', matching
    the original `pos > 0` test.
    """
    marker_at = app_path.find('ch-')
    return app_path[marker_at:] if marker_at > 0 else ''
def to_units(number):
    """Render a number with binary-magnitude unit suffixes (k, m, g, ...)."""
    suffixes = ('', 'k', 'm', 'g', 't', 'p', 'e', 'z', 'y')
    index = 0
    # Divide down by 1024 until the value fits, capping at the last unit.
    while number >= 1024. and index < len(suffixes) - 1:
        index += 1
        number /= 1024.
    if index:
        return '%.2f%s' % (number, suffixes[index])
    return '%d' % number
def gettime(seconds):
    """
    Return `seconds` expressed in its largest whole unit,
    e.g. 90 -> '1 minute', 7200 -> '2 hours'.
    """
    seconds = int(seconds)
    plural = lambda x: '' if x == 1 else 's'
    minutes = seconds // 60
    hours = minutes // 60
    days = hours // 24
    months = days // 30
    years = months // 12
    if seconds < 60:
        return f'{seconds} second{plural(seconds)}'
    elif seconds < 60*60:
        return f'{minutes} minute{plural(minutes)}'
    elif seconds < 60*60*24:
        return f'{hours} hour{plural(hours)}'
    elif seconds < 60*60*24*30:
        return f'{days} day{plural(days)}'
    elif seconds < 60*60*24*30*12:
        # Fixed: was '{months} months{...}', which yielded "1 months"
        # and "2 monthss".
        return f'{months} month{plural(months)}'
    else:
        return f'{years} year{plural(years)}'
def word_counter(text):
    """
    Count words by their character multiset, so any ordering of the
    same characters groups together: foo, ofo, oof all count toward
    the same key.

    Returns {sorted_lowercase_key: [count, last_seen_word]}.
    """
    counter = {}
    for delimiter in ('.', ',', '\n'):
        text = text.replace(delimiter, ' ')
    for raw in text.split(' '):
        word = raw.strip()
        if not word:
            continue
        key = ''.join(sorted(word)).lower()
        count = counter[key][0] + 1 if key in counter else 1
        counter[key] = [count, word]
    return counter
def prepare_lists(listen_by_party_and_bundesland):
    """Add a Sitz_Bundestag column, initialized to 0, to every party list.

    Input:
        listen_by_party_and_bundesland (dict): for each Bundesland, a
        dictionary of parties to their candidate lists
    Output:
        the same dict, with the Sitz_Bundestag column added in place
    """
    for parteien in listen_by_party_and_bundesland.values():
        for liste in parteien.values():
            liste["Sitz_Bundestag"] = 0
    return listen_by_party_and_bundesland
def Cubeularity(lengths):
    """
    A measure of how cubic a molecule is, with 1 being a perfect cube:
    (shortest * medium * longest) / longest**3, where `lengths` is
    ordered ascending.
    """
    shortest, medium, longest = lengths[0], lengths[1], lengths[2]
    return shortest * medium * longest / longest ** 3
def next_turn(events):
    """
    Given (turn, move, returnvalue) tuples, return the returnvalue of
    the event with the smallest turn; ties broken by the largest move.
    """
    return min(events, key=lambda event: (event[0], -event[1]))[2]
def resolve_pattern(pattern, args):
    """
    Fill a pattern's slots with args, supporting both %-style and
    str.format-style placeholders; a pattern without slots (or empty
    args) is returned unchanged (no copy).

    :param pattern: the (possibly templated) string
    :param args: tuple/list of substitution values, or None
    :return: the resolved string
    """
    if not args:
        return pattern
    if '%' in pattern:
        return pattern % args
    if '{' in pattern:
        # star-unpack: %-style single-arg magic does not apply to format
        return pattern.format(*args)
    # fixed pattern, no placeholders
    return pattern
def get_pose(rig_object):
    """
    Collect the transforms of the pose bones on an armature object.

    :param object rig_object: An armature object (may be None/falsy).
    :return dict: bone name -> dict of its transform channels
    """
    if not rig_object:
        return {}
    return {
        bone.name: {
            'location': bone.location,
            'rotation_quaternion': bone.rotation_quaternion,
            'rotation_euler': bone.rotation_euler,
            'scale': bone.scale,
        }
        for bone in rig_object.pose.bones
    }
def buildInsertCmd(numfields):
    """
    Build an INSERT query for t_user_info with the right number of
    %s placeholders.

    example:
    >>> buildInsertCmd(3)
    'insert into t_user_info(user_name,crt_id,gender) values (%s, %s, %s)'
    """
    # The old docstring example showed a (table, numfields) signature
    # that this function never had; corrected above.
    assert (numfields > 0)
    placeholders = (numfields - 1) * "%s, " + "%s"
    # todo adjust according to your table structure
    query = ("insert into t_user_info(user_name,crt_id,gender)"
             + " values (%s)" % placeholders)
    return query
def get_shape(area, color):
    """Identify the detected shape.

    Args:
        area: the area of the shape
        color: the numerical value associated with the color this image
            was determined to be
    Returns:
        The name of the shape; 'empty' when the area is too small to be
        a real object rather than noise from an empty image.
    """
    # If the area is too small it probably was just random noise.
    if area <= 1000:
        return "empty"
    names = {0: "coral", 1: "star", 2: "coral fragment"}
    return names.get(color, "sponge")
def sep(x):
    """Convert stringified lists like "['a', 'b']" back into real
    lists; anything else is returned unchanged."""
    if isinstance(x, str) and "[" in x:
        return x.replace("'", "").strip("][").split(", ")
    return x
def _ensure_non_negative_delta_time(delta_time):
"""make sure the delta_time - used for computation of interests - is mostly
positive.
"""
return max(delta_time, 0) |
def cnum2(s):
    """
    x[y] -> x[y]
    z -> z[z]
    """
    if '[' in s:
        return s
    return '{0}[{0}]'.format(s)
def listify(l, no_elements=1, check_if_list=True):
    """
    Return a list of no_elements copies of l, unless l already is a
    list and check_if_list is set (then l is returned unchanged).
    """
    already_list = isinstance(l, list) and check_if_list
    return l if already_list else [l] * no_elements
def validate_key_value_pairs(string):
    """ Validates key-value pairs in the following format: a=b;c=d

    Segments without an '=' are silently dropped; a falsy input yields
    None.
    """
    if not string:
        return None
    # Split on ';', keep only real pairs, then split each on the first '='.
    pairs = (segment.split('=', 1) for segment in string.split(';') if '=' in segment)
    return dict(pairs)
def uhex(num: int) -> str:
    """Format num as an uppercase hex string with a 0x prefix, zero-padded
    to at least two digits."""
    return f"0x{num:02X}"
def group_count(i):
    """Collapse consecutive duplicates into (item, run_length) tuples.

    group_count('aabbbc') -> [('a', 2), ('b', 3), ('c', 1)]
    """
    from itertools import groupby
    runs = []
    for item, run in groupby(i):
        runs.append((item, len(list(run))))
    return runs
def select_min(window):
    """Return the smallest hash value in a window of (index, hash) tuples.

    Only the hash value is returned, so when several entries tie for the
    minimum the particular occurrence chosen does not affect the result.

    :param window: A list of (index, hash) tuples.
    """
    smallest = min(window, key=lambda pair: pair[1])
    return smallest[1]
def _construct_GSE_url(accession):
"""Example URL:
ftp://ftp.ncbi.nlm.nih.gov/geo/platforms/GSE4nnn/GSE4999/matrix/GSE4999.txt.gz
"""
number_digits = len(accession) - 3 # 'GSE' is of length 3.
if number_digits < 4:
folder = accession[:3] + 'nnn' # e.g. GSEnnn.
elif 3 < number_digits < 5:
folder = accession[:4] + 'nnn' # e.g. GSE1nnn.
else:
folder = accession[:5] + 'nnn' # e.g. GSE39nnn.
url = '/'.join(['ftp://ftp.ncbi.nlm.nih.gov/geo/series',
folder,
accession,
'matrix',
accession + '_series_matrix.txt.gz'])
return url |
def get_region_data(data):
    """
    Extract only the allowed region fields from the input mapping.
    Fields missing from the input come back as None.
    """
    allowed_fields = (
        "region",
        "periodType",
        "timeToElapse",
        "reportedCases",
        "population",
        "totalHospitalBeds",
    )
    return {field: data.get(field) for field in allowed_fields}
def get_interface_id(instance_name: str, interface: str) -> str:
    """Return the interface id: the instance name and interface joined by '-'."""
    return "-".join((instance_name, interface))
def sdbm_hash(name):
    """Compute the 32-bit SDBM hash of a string, as a lowercase hex string."""
    acc = 0
    for code in map(ord, name):
        # Classic SDBM step, masked to keep the accumulator in 32 bits.
        acc = (acc * 65599 + code) & 0xFFFFFFFF
    return "0x%x" % acc
def pluralize(word):
    """Returns the pluralized form of `word` using simple English rules.

    Rules, applied in order:
    - ends in 'o'                     -> add 'es'  (hero -> heroes)
    - ends in consonant + 'y'         -> 'ies'     (city -> cities)
    - ends in 's', 'x', 'sh' or 'ch'  -> add 'es'  (box -> boxes)
    - otherwise                       -> add 's'   (cat -> cats)
    """
    if word.endswith('o'):
        return word + 'es'
    elif word.endswith('y'):
        # Fix: vowel + 'y' just takes 's' (boy -> boys, day -> days);
        # only consonant + 'y' becomes 'ies'.
        if len(word) > 1 and word[-2] in 'aeiou':
            return word + 's'
        return word[:-1] + 'ies'
    elif word.endswith(('s', 'x', 'sh', 'ch')):
        return word + 'es'
    else:
        return word + 's'
def bbcommon(bb, bbother):
    """Old Stars method for bounding box overlap testing.
    Also defined in ``pysal.weights._cont_binning``.
    Parameters
    ----------
    bb : list
        A bounding box.
    bbother : list
        The bounding box to test against.
    Returns
    -------
    chflag : int
        ``1`` if ``bb`` overlaps ``bbother``, otherwise ``0``.
    Examples
    --------
    >>> b0 = [0, 0, 10, 10]
    >>> b1 = [10, 0, 20, 10]
    >>> bbcommon(b0, b1)
    1
    """
    # Boxes overlap exactly when they are disjoint on neither axis.
    x_disjoint = bbother[2] < bb[0] or bbother[0] > bb[2]
    y_disjoint = bbother[3] < bb[1] or bbother[1] > bb[3]
    return 0 if (x_disjoint or y_disjoint) else 1
def solar_geometric_mean_longitude(julian_century):
    """Return the Sun's geometric mean longitude in degrees, normalized
    to [0, 360), for the given Julian century ``julian_century``."""
    # Horner-form quadratic kept as-is for bit-identical float results.
    linear_plus_quadratic = 36000.76983 + julian_century * 0.0003032
    return (280.46646 + julian_century * linear_plus_quadratic) % 360
def chunk_generator(l, batch_size):
    """
    Split a 1-D list into consecutive batches of at most batch_size
    elements each; the final batch may be shorter.
    -----------------------------------------------------------
    :param
        l: a 1-D list
        batch_size: Batch size of a chunk
    -----------------------------------------------------------
    :return: list of lists of batches
    """
    starts = range(0, len(l), batch_size)
    return [l[start:start + batch_size] for start in starts]
def _make_version(major, minor, micro, releaselevel, serial):
"""Create a readable version string from version_info tuple components."""
assert releaselevel in ["alpha", "beta", "candidate", "final"]
version = "%d.%d.%d" % (major, minor, micro)
if releaselevel != "final":
short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel]
version += "%s%d" % (short, serial)
return version |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.