content
stringlengths 42
6.51k
|
|---|
def _get_field_length(bit_config):
"""
Determine length of iso8583 style field
:param bit_config: dictionary of bit config data
:return: length of field
"""
length_size = 0
if bit_config['field_type'] == "LLVAR":
length_size = 2
elif bit_config['field_type'] == "LLLVAR":
length_size = 3
return length_size
|
def paginated_api_call(api_method, response_objects_name, **kwargs):
    """Call ``api_method`` repeatedly, following cursor pagination, and
    collect the names of every object returned.

    Args:
        api_method: api method to call
        response_objects_name: name of collection in response json
        kwargs: url params forwarded to every call; ``limit`` and ``cursor``
            are added automatically
    """
    collected = []
    cursor = None
    page_size = 1000
    while cursor != "":
        params = dict(kwargs, limit=page_size)
        if cursor is not None:
            params["cursor"] = cursor
        response = api_method(**params)
        objects = response.get(response_objects_name)
        if objects is not None:
            for obj in objects:
                # entries may be plain names or dicts carrying a "name" key
                collected.append(obj if isinstance(obj, str) else obj.get("name"))
        metadata = response.get("response_metadata")
        cursor = metadata["next_cursor"] if metadata is not None else ""
    return collected
|
def get_path_from_operation_id(paths_dict, operation_id):
    """Find the API path whose spec contains the given operation id, or None."""
    for path, methods in paths_dict.items():
        for spec in methods.values():
            if spec["operationId"] == operation_id:
                return path
    return None
|
def basic_listifier(item):
    """Normalize *item* into a tidy list of tokens with commas stripped.

    Accepts a string of space-separated tokens, or a tuple/list of tokens,
    with or without trailing commas; returns a list of the cleaned tokens
    (returns None for any other input type, as before).
    """
    # isinstance() replaces the type()== anti-pattern and also accepts subclasses
    if isinstance(item, (tuple, list)):
        return [x.replace(',', '') for x in item]
    if isinstance(item, str):
        # keep the original split(' ') semantics (single-space separator)
        return [x.replace(',', '') for x in item.split(' ')]
    return None
|
def kadane_max_subarray(A):
    """
    Max subarray sum via Kadane's algorithm (0 for all-negative/empty input).
    Used as a test bench against my own algorithms.
    """
    best = current = 0
    for value in A:
        current = max(0, current + value)
        best = max(best, current)
    return best
|
def word_tokenizer(string):
    """Split a string on runs of whitespace characters.

    Args:
        string (string): the string to tokenize.
    Returns:
        list: the words of the string (no empty tokens).
    """
    return string.split()
|
def print_role(roles):
    """Print the President / Prime minister lines for *roles*.

    input: the output from get_role (a pair of country lists)
    function prints answer (no output / returns None)
    """
    titles = ("President", "Prime minister")
    for title, countries in zip(titles, roles):
        if len(countries) > 0:
            print(title + " of " + ', '.join(countries))
    return None
|
def cleanVersion(version):
    """Strip a leading "CMSSW_" prefix from the version string, if present."""
    prefix = "CMSSW_"
    return version[len(prefix):] if version.startswith(prefix) else version
|
def task_read_files(file_paths):
    """Read each file and pair its path with its contents.

    :param file_paths: iterable of file paths to read
    :return: list of (file_path, content) tuples
    """
    contents = []
    for file_path in file_paths:
        # context manager guarantees the handle is closed (the old code leaked it)
        with open(file_path) as fh:
            contents.append((file_path, fh.read()))
    return contents
|
def try_divide(x, y, val=0.0):
    """Divide x by y, returning *val* when the divisor is zero.

    :param val: fallback value for a zero divisor (default 0.0)
    """
    if y == 0.0:
        return val
    return float(x) / y
|
def make_strain_id_lookup(object_list):
    """Build a nested id lookup keyed first by species, then raw strain value.

    Takes rows shaped like (id, species_abbrev, strain_code), e.g. from
    StrainRaw.objects.values_list("id", "species__abbrev", "strain__strain_code"):
        [(45, 'ATS', 'TUND'), (278, 'ATS', 'UNKN'), (46, 'ATS', 'WR-W'),
         (283, 'BKT', ''), (47, 'BKT', '*'), ...]
    and returns:
        {"ATS": {"TUND": 45, "UNKN": 278, ...}, "BKT": {"": 283, "*": 47}, ...}
    """
    lookup = {}
    for row_id, species, strain_code in object_list:
        # setdefault replaces the manual get/insert dance
        lookup.setdefault(species, {})[strain_code] = row_id
    return lookup
|
def is_empty(s):
    """Return True when the item description *s* is too short (2 chars or
    fewer), else False."""
    return not (len(s) > 2)
|
def replace_colon(s, replacewith="__"):
    """Return *s* with every colon swapped for *replacewith*."""
    return replacewith.join(s.split(":"))
|
def hue(value):
    """Validate a hue of the light: a wrapping value between 0 and 65535."""
    value = int(value)
    if not 0 <= value <= 65535:
        raise ValueError('Hue is a value between 0 and 65535')
    return value
|
def label_vertices(ast, vi, vertices, var_v):
    """Label each dict node in the AST with a unique "vertex_id".

    vi : vertex id counter (starting value)
    vertices : list of vertex tags, appended to in place
    var_v : accepted for interface compatibility; not used here
    """
    def visit(node):
        nonlocal vi
        # only dict nodes are labelled; lists and scalars pass through untouched
        if type(node) is not dict:
            return node
        node["vertex_id"] = vi
        vertices.append(node["tag"])
        vi += 1
        for key, child in node.items():
            if key != "tag":
                visit(child)
        return node
    return visit(ast)
|
def between_markers(text: str, begin: str, end: str) -> str:
    """Return the substring of *text* between the markers *begin* and *end*.

    The previous implementation skipped only one character past the begin
    marker, which was wrong for markers longer than one character.
    """
    start = text.find(begin)
    stop = text.find(end)
    # skip the whole begin marker, not just its first character
    return text[start + len(begin):stop]
|
def convert_value(value):
    """Convert a value into its string form, passing None through untouched.

    :param value: value
    :return: string conversion of the value (None stays None)
    :rtype: str
    """
    return None if value is None else str(value)
|
def get_property(name: str) -> str:
    """Provide lookup support for module properties."""
    return 'property_' + name
|
def none_check(value):
    """Map strings containing 'none' (case-insensitive) to a NoneType object.

    Parameters
    ----------
    value : str or NoneType

    Returns
    -------
    new_value : the original value, or None when it is a 'none'-ish string
    """
    if isinstance(value, str) and 'none' in value.lower():
        return None
    return value
|
def find_median_depth(mask_area, num_median, histg):
    """Walk the depth histogram until the cumulative count reaches the
    median; that bin holds the median depth of the mask.

    ``mask_area`` is unused but kept for interface compatibility.
    Bins are converted to depth by dividing the index by 50 — presumably
    the histogram bin width is 1/50 of a unit (TODO confirm with caller).
    """
    cumulative = 0
    for bin_idx, bin_counts in enumerate(histg):
        cumulative += bin_counts[0]
        if cumulative >= num_median:
            # half of the histogram has been iterated through, so this
            # bin contains the median
            return float(bin_idx / 50)
    return 0.0
|
def remove_extension(filename: str) -> str:
    """Remove a trailing .wma/.mp3/.wav extension (any letter case).

    The previous version used str.replace, which also deleted the
    extension when it occurred in the *middle* of the name
    (e.g. "song.mp3.old" lost its ".mp3").  Only a true suffix is
    stripped now, and mixed-case extensions are handled too.

    :param filename: Filename to parse.
    :return: Filename but without the extension.
    """
    for ext in (".wma", ".mp3", ".wav"):
        if filename.lower().endswith(ext):
            return filename[:-len(ext)]
    return filename
|
def significant_round(x, precision):
    """Round value to the specified number of significant figures.

    Parameters
    ----------
    x: float
        Value to be rounded
    precision: int
        Number of significant figures

    Returns
    -------
    float
        Rounded value (values of 10 or more come back as integers)

    Examples
    --------
    >>> significant_round(5.6789, 1)
    6.0
    >>> significant_round(5.6789, 2)
    5.7
    >>> significant_round(5.6789, 3)
    5.68
    """
    # scientific-notation formatting keeps exactly `precision` significant digits
    rounded = float(("%." + str(precision - 1) + "e") % x)
    if rounded < 10.0:
        return rounded
    return round(rounded)
|
def is_palindrome(number):
    """Return True if `number` reads the same forwards and backwards,
    False otherwise.
    """
    num_str = str(number)
    # slice reversal replaces the manual index-pair loop
    return num_str == num_str[::-1]
|
def generate_status_details(id, version, message=None):
    """Generate Status Details as defined in TAXII 2.1 section (4.3.1) <link here>`__."""
    details = {"id": id, "version": version}
    # a falsy message (None/empty) is simply omitted
    if message:
        details["message"] = message
    return details
|
def find_tag(tag_wrapper, tag_name):
    """Search a collection of tags for the tag with the given name.

    :param tag_wrapper: A collection of tags in the Google Tag Manager
        List Response format
    :type tag_wrapper: dict
    :param tag_name: The name of a tag to find
    :type tag_name: str
    :return: A Google Tag Manager tag, or None when absent
    :rtype: dict
    """
    for tag in tag_wrapper.get("tag", []):
        if tag["name"] == tag_name:
            return tag
    return None
|
def z(x: float, m_x: float, s_x: float) -> float:
    """Compute a normalized (z) score: (x - mean) / stdev.

    >>> d = [ 2, 4, 4, 4, 5, 5, 7, 9 ]
    >>> list( z( x, mean(d), stdev(d) ) for x in d )
    [-1.5, -0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 2.0]

    The above example recomputed mean and standard deviation.
    Not a best practice.
    """
    deviation = x - m_x
    return deviation / s_x
|
def _make_text_bold(bold_msg) -> str:
"""
Return bold_msg text with added decoration using ansi code
:param msg:
:return: u"\u001b[1m"+ bold_msg + u"\u001b[0m"
"""
return u"\u001b[1m"+ bold_msg + u"\u001b[0m"
|
def strip_math(s):
    """Remove latex formatting commands from mathtext (after dropping the
    surrounding delimiter characters)."""
    tokens = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
    stripped = s[1:-1]
    for token in tokens:
        stripped = stripped.replace(token, '')
    return stripped
|
def de_list(lst):
    """Collapse a length-1 list to its sole element; None for empty/None input,
    the list itself otherwise."""
    if not lst:
        return None
    if len(lst) == 1:
        return lst[0]
    return lst
|
def durationToPoints(duration):
    """Score a duration: less than 15 is not considered 'away' (0 points),
    anything else scores the duration itself."""
    return 0 if duration < 15 else duration
|
def _remove_duplicates(list):
"""
Returns list of unique dummies.
"""
result = []
while list:
i = list.pop()
if i in result:
pass
else:
result.append(i)
result.reverse()
return result
|
def extra_same_elem(list1: list, list2: list) -> list:
    """Extract the elements common to both lists (order unspecified).

    Args:
        list1: list 1
        list2: list 2
    Returns:
        list of the shared elements
    """
    return list(set(list1) & set(list2))
|
def compare_loginhashblock(a, b, DEBUG=False):
    """Compare two login hash blocks for equality.

    :param a: compare login hash block (str or bytes)
    :param b: compare login hash block (str or bytes)
    :param DEBUG: print both operands when True
    :return: True when identical, False otherwise

    Uses hmac.compare_digest so the comparison takes constant time; the
    old manual loop returned early on the first mismatch, leaking timing
    information about secret hash values.
    """
    import hmac
    if DEBUG:
        print("[info:compare_loginhashblock] a: ", a)
        print("[info:compare_loginhashblock] b: ", b)
    if isinstance(a, str):
        a = a.encode("utf-8")
    if isinstance(b, str):
        b = b.encode("utf-8")
    # compare_digest also handles unequal lengths (returns False)
    return hmac.compare_digest(a, b)
|
def series_sum(n):
    """Create and return the nth number in the series
    1 + 1/4 + 1/7 + 1/10 + ... formatted to two decimals ("0.00" for n == 0)."""
    if n == 0:
        return "0.00"
    total = 1
    denominator = 4
    for _ in range(n - 1):
        total += 1 / denominator
        denominator += 3
    return format(total, '.2f')
|
def usage(myname="xia2ccp4i.py"):
    """Return the command-line usage/help text, with *myname* substituted
    as the program name."""
    msg = """%s [options]
    Options:
    -h, --help show this help message and exit
    -p FOO, --project=FOO override the project name taken from xia2, and call
    the CCP4 project FOO
    -x BAR, --xia2dir=BAR specify the location of the top-level xia2 processing
    directory (omission implies BAR=".")
    Extract project name and data from a xia2 processing run, create a standalone
    ccp4i project and start ccp4i with that project.
    Please note that this standalone ccp4i project will not be in the global
    projects database of ccp4i. However it may be added later using the
    'Directories&ProjectDir' button in ccp4i. The list of jobs performed in the
    standalone project will remain intact after the import.
    """
    return msg % myname
|
def ordered(obj):
    """Recursively sort dicts (as (key, value) pairs) and lists so that two
    JSON-like structures can be compared for equality.
    https://stackoverflow.com/questions/25851183/
    """
    if isinstance(obj, dict):
        return sorted((key, ordered(val)) for key, val in obj.items())
    if isinstance(obj, list):
        return sorted(ordered(item) for item in obj)
    return obj
|
def snake_to_camel_case(data):
    """Recursively convert every dict key inside *data* from snake_case to
    camelCase.
    Inspired by: https://stackoverflow.com/a/19053800/1073222
    """
    if isinstance(data, dict):
        converted = {}
        for key, value in data.items():
            head, *tail = key.split('_')
            new_key = head + ''.join(part.title() for part in tail)
            converted[new_key] = snake_to_camel_case(value)
        return converted
    if isinstance(data, list):
        return [snake_to_camel_case(item) for item in data]
    return data
|
def convert_labels(data_in, label_map):
    """Map labels in data=[(txt, lbl)] through *label_map*.

    Items whose label is absent from the map are dropped; their labels are
    collected separately.  Returns (converted_data, missing_labels).
    """
    converted = []
    missing = []
    for txt, lbl in data_in:
        try:
            converted.append((txt, label_map[lbl]))
        except KeyError:
            missing.append(lbl)
    return converted, missing
|
def wordToCamelCase(w: str) -> str:
    """
    Transform a word w to its CamelCase version.
    This word may already be a CamelCase.
    Cannot correctly handle two successive UPPERWORDS. Will output: Upperwords
    """
    res = ""
    for i, c in enumerate(w):
        # Keep an uppercase letter only when it plausibly starts a word:
        # - it is the first character, or
        # - the previous character is not uppercase (start of a new hump), or
        # - it is a non-final letter followed by a non-uppercase one (so a
        #   run of capitals like "ABC" collapses to "Abc", keeping only the
        #   capital that begins the run or a following word).
        if c.isupper() and (
            i == 0 or not w[i - 1].isupper() or
            i != len(w) - 1 and not w[i + 1].isupper()
        ):
            res += c
        else:
            res += c.lower()
    return res
|
def redact_after_symbol(text: str, symbol: str) -> str:
    """Replace *symbol* and every character after its first occurrence
    with 'X' characters.

    Returns *text* unchanged when the symbol does not occur; the previous
    version mis-redacted in that case because str.find returned -1.
    """
    start = text.find(symbol)
    if start == -1:
        return text
    # slice-based rebuild instead of str.replace, which matches substrings
    return text[:start] + "X" * (len(text) - start)
|
def is_dict_like(value):
    """Check if value is dict-like (exposes keys() and item access)."""
    return all(hasattr(value, attr) for attr in ('keys', '__getitem__'))
|
def get_maximum_order_unary_only(per_layer_orders_and_multiplicities):
    """
    Determine the maximum spherical-harmonic order J we need to pre-compute
    when only the unary term is present, by comparing all adjacent layers.
    The harmonics depend purely on J (irrep order), which is defined by
    order_irreps = list(range(abs(order_in - order_out), order_in + order_out + 1));
    we only care about the maximum (in some circumstances that means we
    calculate a few lower-order spherical harmonics which we won't need).
    :param per_layer_orders_and_multiplicities: nested list of lists of 2-tuples
    :return: integer indicating maximum order J
    """
    # maximum order per layer, dropping multiplicities
    max_per_layer = [
        max(order for (multiplicity, order) in layer)
        for layer in per_layer_orders_and_multiplicities
    ]
    track_max = 0
    # compare two (adjacent) layers at a time
    for prev, nxt in zip(max_per_layer, max_per_layer[1:]):
        track_max = max(track_max, prev + nxt)
    return track_max
|
def extractData(line):
    """Split a "key: value" line at the first colon.

    The value may itself contain colons, so everything after the first
    separator belongs to the value.  Known numeric columns are converted
    to float.
    """
    key, _, raw_value = line.strip().partition(':')
    value = raw_value.strip()
    numeric_columns = ("CI type", "Volume", "Range", "Discrimination")
    if key in numeric_columns and value != "":
        value = float(value)
    return key, value
|
def trunc(str, max_size=140):
    """Basic string truncation: cap *str* at *max_size* characters, ending
    with "..." when shortened."""
    if len(str) <= max_size:
        return str
    return str[:max_size - 3] + "..."
|
def is_public(data, action):
    """Check if the record is fully public.

    In practice this means the record has no ``_access`` key, or the
    action's entry inside ``_access`` is missing or empty.
    """
    if "_access" not in data:
        return True
    return not data.get("_access", {}).get(action)
|
def guess_newline(value: str, unit: str) -> float:
    """
    Tries to guess where a newline in the given value with the given unit could
    be, splits there and returns the first value. The unit is expected to be
    casefolded.
    This is done because pandas has some trouble reading in table values with a
    newline. pandas interprets this:
    1234
    5678
    not as two values "1234" and "5678", but instead as "12345678"! This causes
    some GPUs having an unrealistic score.
    """
    # first, strip away everything which is not a digit AND in front of the
    # string
    number_begin = 0
    for (i, char) in enumerate(value):
        if char.isdigit():
            number_begin = i
            break
    value = value[number_begin:]
    # second, split up values like "2-4" to only contain the first number
    for i, char in enumerate(value):
        if not char.isdigit():
            value = value[:i]
            break
    # NOTE(review): if value is empty at this point, value[-1] raises
    # IndexError — presumably inputs always contain at least one digit;
    # confirm against the caller.
    if value[-1] == "7":
        # evil footnote delegator, don't ask
        value = value[:-1]
    if unit == "gb" or unit == "gib" or unit == "ghz":
        # pretend that the maximum realistic value is 64, so 2 characters
        if len(value) % 2:
            # example:
            # 32
            # 64
            return float(value[:2])
        else:
            return float(value)
    elif unit == "mb" or unit == "mib" or unit == "mhz":
        # *begins to cry*
        if len(value) == 7:
            return float(value[:3])
        # the realistic character limit here is 4:
        # 1234
        # 5678
        elif len(value) % 4 == 0:
            return float(value[:4])
        # or not, a lot of amd GPUs got their memory listed like this
        # 123
        # 456
        # 7890
        # so... welp, just search for length 10 and take the first 3 ones I guess
        elif len(value) == 10:
            return float(value[:3])
        # or it can be even worse and this
        # 123
        # 4567
        # 8901
        elif len(value) == 11:
            return float(value[:3])
        # here it is 3, many amd cards are displayed as normal clock first, and
        # boost clock second
        # 123
        # 456
        # we only care about the core clock
        # sometimes also there are *three* values, so I'll just use modulo here
        # see GeForce GT 650M
        # 123
        # 456
        # 789
        elif len(value) % 3 == 0:
            return float(value[:3])
    # fall-through: unknown unit or no heuristic matched — take the value as-is
    return float(value)
|
def build_http_request(method: str, path: str, host: str, extra_headers=None, body: str = "") -> str:
    """
    Returns a valid HTTP request from the given parameters.
    Parameters:
    - `method` - valid HTTP methods (e.g. "POST" or "GET")
    - `path` - the path part of a URL (e.g. "/" or "/index.html")
    - `host` - the host of the endpoint (e.g. "google.com" or "ualberta.ca")
    - `extra_headers` - an optional list of header strings to include
    - `body` - the optional body of the request (if any)
    Returns:
        A string representation of a valid HTTP request
    """
    # None default avoids the shared mutable-default-argument pitfall
    if extra_headers is None:
        extra_headers = []
    status_line = f"{method} {path} HTTP/1.1"
    headers = [
        f"Host: {host}",
        "Connection: close",
        "User-Agent: sumitro-client/1.0"
    ]
    headers.extend(extra_headers)
    payload = "\r\n"
    if len(body) > 0 or method == "POST":
        payload += body
        # Content-Length is required whenever a body is (potentially) sent
        headers.append(f"Content-Length: {len(body)}")
    return "\r\n".join([status_line, "\r\n".join(headers), payload])
|
def idf(duration, A, B, C):
    """Calculate rainfall intensity (mm/hr) for the given duration from IDF
    parameters via i = A / (duration + B)**C.

    Args:
        duration: duration to calculate intensity for. Unit: minutes
        A, B, C: constants in the equation i = A / (duration + B)**C.
    Returns:
        Intensity in mm/hr.
    """
    denominator = (duration + B) ** C
    return A / denominator
|
def sensor_data(date, quantity, value, unit,
                temp, temp_unit,
                sensor_info, sensor_name, sensor_id,
                instrument_name, instrument_id):
    """
    Single exit point of data to ensure uniform dict structure.
    """
    record = dict(
        date=date,
        quantity=quantity, value=value, unit=unit,
        temp=temp, temp_unit=temp_unit,
        sensor_info=sensor_info, sensor_name=sensor_name, sensor_id=sensor_id,
        instrument_name=instrument_name, instrument_id=instrument_id,
    )
    return record
|
def rivers_with_station(stations):
    """Return a sorted list of the deduplicated river names of *stations*.

    Note: although duplicates are removed through a set, the return value
    is a sorted list — the previous docstring incorrectly claimed a set.
    """
    return sorted({station.river for station in stations})
|
def parse_int(string):
    """
    Like int(), but just returns None if it doesn't parse properly.

    Also catches TypeError so non-stringy inputs (e.g. None) return None
    instead of raising, matching the documented contract.
    """
    try:
        return int(string)
    except (ValueError, TypeError):
        return None
|
def access_issued(issued):
    """Access function for the issued field.

    Returns issued['date-parts'][0][0] (typically the year), or None when
    the structure is missing or malformed.
    """
    try:
        return issued['date-parts'][0][0]
    except (KeyError, IndexError, TypeError):
        # narrowed from a bare except so unrelated errors still surface
        return None
|
def common(string1, string2):
    """ Return the set of words appearing in both strings (case-insensitive) """
    words1 = set(string1.lower().split())
    words2 = set(string2.lower().split())
    return words1 & words2
|
def daily_et(alpha_pt, delta_pt, ghamma_pt, rnet, g0):
    """
    Diurnal evapotranspiration after Prestley and Taylor (1972) in mm/day.
    alpha_pt = 1.26 is the recommended Prestley-Taylor coefficient.
    PT_daily_et( alpha_pt, delta_pt, ghamma_pt, rnet, g0 )
    """
    slope_fraction = delta_pt / (delta_pt + ghamma_pt)
    available_energy = rnet - g0
    # 28.588 converts the energy term to mm/day of evaporation
    return (alpha_pt / 28.588) * slope_fraction * available_energy
|
def set_font_size(locDbl_harmonic_order):
    """
    .. _set_font_size :

    Decrease the font size with increased harmonic order, with a deadband
    clamping the result between 4 pt and 10 pt.

    Equation for calculation:

    .. code:: python

        locDbl_font_size = -0.5 * abs(locDbl_harmonic_order) + 11

    If the input harmonic order is bigger than 15, the font size is 1e-6.

    Parameters
    ----------
    locDbl_harmonic_order : float
        The input harmonic order

    Returns
    -------
    locDbl_font_size : float
        The calculated font size.

    Examples
    --------
    >>> set_font_size(2)
    10.0
    >>> set_font_size(15)
    4.0
    >>> set_font_size(16)
    1e-06
    """
    magnitude = abs(locDbl_harmonic_order)
    if magnitude > 15.0:
        return 1.0e-6
    # clamp the linear mapping into the [4, 10] deadband
    return min(10.0, max(4.0, -0.5 * magnitude + 11))
|
def format_all_sides(value):
    """Convert a single value (padding or border) to a dict with keys
    'top', 'bottom', 'left' and 'right', each mapped to *value*."""
    # dict comprehension; also stops shadowing the builtin all()
    return {pos: value for pos in ('top', 'bottom', 'left', 'right')}
|
def parse_git_version(git):
    """Parse the version number out of git's version output.

    Keyword arguments:
    git - The result of querying the version from git.
    """
    tokens = git.split()
    return tokens[2]
|
def get_index(fields, keys):
    """ Get indices of *keys*, each of which is in list *fields* """
    assert isinstance(keys, list)
    assert isinstance(fields, list)
    return list(map(fields.index, keys))
|
def britishize_americanize(string, final_dict):
    """
    Replace each word of *string* found in *final_dict* with its mapped
    american/british spelling; unknown words pass through unchanged.

    Parameters:
        string(str): original string
        final_dict(dict): mapping of american and british english words
    Returns:
        str: String after replacing the words
    """
    translated = (final_dict.get(word, word) for word in string.split())
    return " ".join(translated)
|
def get_num_label_vols(md):
    """
    Get the number of volumes used for labelling - e.g. 2 for tag-control
    pair data.  Returns None for an unrecognised "iaf" value.
    """
    iaf = md.get("iaf", "tc")
    if iaf == "tc" or iaf == "ct":
        return 2
    if iaf == "diff":
        return 1
    if iaf == "mp":
        return md.get("nphases", 8)
    if iaf == "ve":
        return md.get("nenc", 8)
    return None
|
def fetch_page_details(resp):
    """
    Parse total elements, page number and total pages from the page.
    :param resp: json response.
    :return: (page_number, total_element, total_pages)
    """
    # hoist the repeated resp.get('page', {}) lookup
    page = resp.get('page', {})
    return (
        page.get('number', 0),
        page.get('totalElements', 0),
        page.get('totalPages', 0),
    )
|
def at_index(string, element_index):
    """ Get the string at the specified index of a list seperated by ','

    Scans character by character, tracking quote and backslash-escape state
    so that commas inside quoted runs or escaped commas do not count as
    element separators.  Raises RuntimeError when fewer than
    element_index separators exist.
    """
    len_string = len(string)
    current_index = 0   # number of separators passed so far
    current_ptr = 0
    is_in_quote = False  # inside a ' or " delimited run
    is_escaped = False   # previous char was an unescaped backslash
    # Phase 1: advance past element_index unquoted, unescaped commas.
    while current_ptr < len_string and current_index < element_index:
        if not (is_in_quote or is_escaped) and string[current_ptr] == ",":
            current_index += 1
        if not is_escaped and string[current_ptr] in {r"'", r'"'}:
            is_in_quote = not is_in_quote
        if not is_escaped and string[current_ptr] == "\\":
            is_escaped = True
        elif is_escaped:
            is_escaped = False
        current_ptr += 1
    if current_ptr == len_string:
        raise RuntimeError("Bad string.")
    element_start = current_ptr
    # Phase 2: consume characters until the next top-level separator
    # (or end of string), maintaining the same quote/escape state.
    while current_ptr < len_string and (is_in_quote or is_escaped or
                                        string[current_ptr] != ","):
        if not is_escaped and string[current_ptr] in {r"'", r'"'}:
            is_in_quote = not is_in_quote
        if not is_escaped and string[current_ptr] == "\\":
            is_escaped = True
        elif is_escaped:
            is_escaped = False
        current_ptr += 1
    return string[element_start:current_ptr]
|
def sanitize_word(s):
    """Ensure that a string is in fact a single word with alphanumeric characters.
    Useful for avoiding code injection.
    Args:
        s (str): string to be inserted into code.
    Returns:
        (str): the same string, stripped of whitespace.
    Raises:
        ValueError: the string contained a character that is not alphanumeric.
    """
    s = s.strip()
    if not s.isalnum():
        # the old message contained "{}" but never interpolated the string
        raise ValueError('unexpected string "{}" received when a single word was expected'.format(s))
    return s
|
def first(iterable, predicate=lambda x: True):
    """
    Return the first item in `iterable` satisfying `predicate` (the very
    first item when no predicate is given).
    Raises `StopIteration` if no item satisfies the condition.
    """
    return next(filter(predicate, iterable))
|
def parse_list(string, convert_fn):
    """Parse a (possibly empty) colon-separated list of values."""
    stripped = string.strip()
    if not stripped:
        return []
    return [convert_fn(piece) for piece in stripped.split(':')]
|
def should_show_entry_state(entry, current_api_id):
    """Returns whether or not entry state should be shown.

    :param entry: Contentful entry.
    :param current_api_id: Current API selected.
    :return: True/False
    """
    if current_api_id != 'cpa':
        return False
    attrs = entry.__dict__
    return attrs.get('draft', False) or attrs.get('pending_changes', False)
|
def twoSum(nums, target):
    """
    Return indices [i, j] (i < j) of the two numbers summing to *target*,
    or None when no such pair exists.
    :type nums: List[int]
    :type target: int
    :rtype: List[int]
    """
    seen = {}  # value -> index of its most recent occurrence
    for indx, num in enumerate(nums):  # enumerate replaces range(len(...))
        complement = target - num
        if complement in seen:
            return [seen[complement], indx]
        seen[num] = indx
    return None
|
def best_match(names, i):
    """
    Find matches starting from index i.
    Matches have to be at least 3 letters, otherwise it's a mess for big sets.
    Go with largest set of matches instead of longer matches.
    Return (number of matches, matching chars)

    Assumes *names* is sorted so that shared prefixes are adjacent
    (presumably guaranteed by the caller — TODO confirm).
    """
    matchlen = 3
    nmatches = 1
    # count consecutive entries sharing the minimum 3-char prefix
    while (i+nmatches < len(names) and names[i+nmatches][:matchlen] ==
           names[i][:matchlen]):
        nmatches += 1
    # then extend the prefix (up to 8 chars) while ALL counted entries
    # still share it — preferring more matches over a longer prefix
    while matchlen < 8 and all([names[i+j][:matchlen+1] ==
                                names[i][:matchlen+1] for j in range(1,nmatches)]):
        matchlen += 1
    return nmatches, names[i][:matchlen]
|
def parse_time(timestr):
    """Parse a human-writable time string into a number of seconds"""
    if not timestr:
        return 0
    if ":" not in timestr:
        return int(timestr)
    # "-5:30" means -330 seconds
    negative = timestr.startswith("-")
    minutes, seconds = timestr.strip("-").split(":")
    total = int(minutes) * 60 + int(seconds)
    return -total if negative else total
|
def resolveGeneration(dependencies):
    """Resolve *dependencies* ({node: [deps...]}) into a list where every
    node appears after everything it depends on.

    A set mirrors the output list so the membership test is O(1) instead
    of scanning the list on every visit.
    NOTE: assumes the dependency graph is acyclic — a cycle would recurse
    forever, exactly as in the previous implementation.
    """
    generated = []
    resolved = set()

    def _append(node):
        if node not in resolved:
            resolved.add(node)
            generated.append(node)

    def _resolve(node, node_deps):
        # resolve those this node depends on before the node itself
        for dep in node_deps:
            _resolve(dep, dependencies[dep])
        _append(node)

    for node, node_deps in dependencies.items():
        _resolve(node, node_deps)
    return generated
|
def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,
                    artifact_checksum, artifact_label, artifact_openchain,
                    prev, cur, timestamp, artifact_list=None, uri_list=None):
    """
    Constructs the payload to be stored in the state storage.
    Args:
        artifact_id (str): The uuid of the artifact
        artifact_alias (str): The alias of the artifact
        artifact_name (str): The name of the artifact
        artifact_type (str): The type of the artifact
        artifact_checksum (str): The checksum of the artifact
        artifact_label (str): The label of the artifact
        artifact_openchain (str): The openchain of the artifact
        prev (str): The previous block id of the transaction (default "0")
        cur (str): the current block id of the transaction
        timestamp (str): The UTC time for when the transaction was submitted
        artifact_list (list of dict):
            The list of the artifact uuid associated with the artifact
            (default: fresh empty list)
        uri_list (list of dict):
            The list of the uri associated with the artifact
            (default: fresh empty list)
    Returns:
        type: dict
        The dictionary pertaining all the params is created and returned to
        be stored on the state storage.
    """
    # None defaults avoid the shared mutable-default-argument pitfall
    return {
        "uuid"          : artifact_id,
        "alias"         : artifact_alias,
        "name"          : artifact_name,
        "content_type"  : artifact_type,
        "checksum"      : artifact_checksum,
        "label"         : artifact_label,
        "openchain"     : artifact_openchain,
        "prev_block"    : prev,
        "cur_block"     : cur,
        "timestamp"     : timestamp,
        "artifact_list" : artifact_list if artifact_list is not None else [],
        "uri_list"      : uri_list if uri_list is not None else []
    }
|
def CRP_next(lambdas, topic):
    """
    Chinese Restaurant Process step.

    Parameters
    ----------
    lambdas : concentration parameter
    topic : the existing tables (list of per-table customer lists)

    Returns
    -------
    p : probabilities for where a new customer sits; the first entry is
        the probability of opening a new table, followed by one entry per
        existing table.
    """
    import numpy as np
    total_customers = sum(len(table) for table in topic)
    occupancy = np.array([len(table) for table in topic])
    denom = lambdas + total_customers
    p_existing = occupancy / denom   # sit at an occupied table
    p_new = lambdas / denom          # open a new table
    return [p_new] + list(p_existing)
|
def float_repr(value: float) -> str:
    """ Pretty print float: fixed-point (trailing zeros trimmed) for small
    exponents, scientific notation otherwise """
    # split e.g. "1.234568e-05" into mantissa and exponent parts
    spl = '{:e}'.format(value).split('e')
    exp = 'e' + spl[1]
    # spl[1] looks like "+05"/"-05"; the last two chars are the digits.
    # NOTE(review): a 3-digit exponent (1e+100) would be misread here —
    # presumably such magnitudes never occur; confirm with callers.
    if abs(int(spl[1][-2:])) <= 3:
        # small exponent: render in fixed-point instead
        spl[0] = '{:.8f}'.format(value)
        exp = ''
    # trim trailing zeros and a dangling decimal point
    return spl[0].rstrip('0').rstrip('.') + exp
|
def make_GeoJson(geoms, attrs):
    """
    Create a GeoJson FeatureCollection from parallel geometry/attribute lists;
    throws AssertionError if the lists have different lengths.
    :param geoms: list of geometries (needs to be encoded as geojson features)
    :param attrs: list of attribute dicts (a 1-based 'gid' is added when missing)
    :return: dict in GeoJson structure
    """
    assert len(geoms) == len(attrs), "lengths of geoms and attrs are different (%d/%d)" % (len(geoms), len(attrs))
    features = []
    # enumerate replaces the manual counter; also avoids shadowing builtin type()
    for counter, (g, a) in enumerate(zip(geoms, attrs), start=1):
        if "gid" not in a:
            a['gid'] = counter
        features.append({"type": 'Feature', "properties": a, "geometry": g})
    return {
        'type': 'FeatureCollection',
        'features': features,
        "crs": {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}},
    }
|
def concatonate_my_arguments(one, two, three):
    """Concatenate all the (string) arguments together and return the result."""
    return "".join((one, two, three))
|
def get_largest_item_size(iterable):
    """
    Given an iterable, get the size/length of its largest item (0 when empty).
    """
    # max() with default replaces the manual tracking loop
    return max(map(len, iterable), default=0)
|
def is_relative_url(url):
    """Return true if url is relative to the server (contains no "://")."""
    return url.find("://") == -1
|
def _get_final_result(x):
"""Apply function for providing a final status of the application"""
result_code, attending, waitlisted, deferred, stage, app_type = x
if result_code == "denied":
return "Denied"
elif result_code in ["accepted", "cond. accept", "summer admit"]:
if attending == "yes":
return "CHOICE!"
else:
return "Accepted!"
elif result_code == "guar. transfer":
return "Guar. Xfer"
elif (waitlisted == 1) | (waitlisted == "1"):
return "Waitlist"
elif (deferred == 1) | (deferred == "1"):
return "Deferred"
elif stage == "pending":
return "Pending"
elif stage in [
"initial materials submitted",
"mid-year submitted",
"final submitted",
]:
return "Submitted"
elif app_type == "interest":
return "Interest"
else:
return "?"
|
def to_list(outputs):
    """Converts layer outputs to a nested list, for easier equality testing.

    Args:
        outputs: A tensor or tuple/list of tensors coming from the forward
            application of a layer; each tensor is NumPy ndarray-like,
            which complicates simple equality testing (e.g., via
            `assertEquals`).
    Returns:
        A nested list structure containing all the output values, directly
        testable using `assertEquals`.
    """
    if not isinstance(outputs, (list, tuple)):
        return outputs.tolist()
    return [tensor.tolist() for tensor in outputs]
|
def _time_to_minutes(time_dhms):
""" Converting time from 'd-hh:mm:ss' to total minutes """
x = time_dhms.split('-')
if len(x) > 1:
days = int(x.pop(0))
else:
days = 0
x = x[0].split(':')
hours = int(x[0])
minutes = int(x[1])
# return number of minutes
return days * (24 * 60) + hours * 60 + minutes
|
def r_to_depth(x, interval):
    """Computes rainfall depth (mm) from rainfall intensity (mm/h)

    Parameters
    ----------
    x : float
        float or array of float, rainfall intensity in mm/h
    interval : number
        time interval (s) the values of `x` represent

    Returns
    -------
    output : float
        float or array of float, rainfall depth (mm)
    """
    depth = x * interval / 3600.
    return depth
|
def format_list_of_string_2_postgres_array(list_of_string):
    """Render a list of strings as a Postgres array literal.

    Spaces inside each element are removed (the documented contract),
    every element is wrapped in double quotes, and the whole list is
    enclosed in braces, e.g. ['a b', 'c'] -> '{"ab","c"}'.

    :param list_of_string: List of String
    :return: String
    """
    # Build each quoted element explicitly instead of slicing str(list)
    # and replacing quote characters, which corrupted elements that
    # contained apostrophes (repr switches their quoting style).
    items = ('"%s"' % s.replace(" ", "") for s in list_of_string)
    return "{" + ",".join(items) + "}"
|
def fib(n):
    """Return the n-th Fibonacci number (fib(1) == fib(2) == 1).

    Computed iteratively; `n <= 0` returns 0, which also prevents
    negative counting.

    Args:
        n: 1-based index of the Fibonacci number to compute.

    Returns:
        The n-th Fibonacci number as an int.
    """
    current, following = 0, 1
    remaining = n - 1
    # The `>= 0` bound reproduces the classic loop's off-by-one so the
    # sequence starts 1, 1, 2, 3, 5, ... at n = 1.
    # (The original had an unreachable string statement after `return`;
    # that dead code is removed here.)
    while remaining >= 0:
        current, following = following, current + following
        remaining -= 1
    return current
|
def encode_http_response(
        status, reason, version="HTTP/1.1",
        headers=None, entity=None, **kwargs):
    """Return the full text of an HTTP response message.

    :param status: numeric status code (e.g. 200)
    :param reason: reason phrase (e.g. "OK")
    :param version: HTTP version string for the status line
    :param headers: mapping or iterable of (name, value) header pairs
    :param entity: optional body; adds Content-Length and a default
        text/plain Content-Type when truthy
    :param kwargs: accepted for interface compatibility; unused
    """
    # Normalize headers into a dict (accepts None, a dict, or pairs).
    header_map = dict(headers or [])
    if entity:
        # Describe the body; only default Content-Type if the caller
        # did not already provide one.
        header_map["Content-Length"] = len(entity)
        header_map.setdefault("Content-Type", "text/plain")
    parts = ["%s %i %s\r\n" % (version, status, reason)]
    parts.extend(
        "%s: %s\r\n" % (name.title(), value)
        for name, value in header_map.items())
    # Blank line terminates the header section.
    parts.append("\r\n")
    if entity:
        parts.append(entity)
    return "".join(parts)
|
def translate_special_params(func_params, attributes_map):
    """Rename selected keys of a parameter dict.

    :param func_params: dict of parameter name -> value
    :param attributes_map: dict mapping original key names to replacements
    :return: new dict where mapped keys are renamed and all others kept
    """
    # `.get(key, key)` falls back to the original name when no mapping exists.
    return {attributes_map.get(key, key): value
            for key, value in func_params.items()}
|
def rasterizeSegment(start_x, start_y, end_x, end_y):
    """Implementation of Bresenham's line rasterization routine.

    Slightly modified from the Python version on Rosetta Code:
    https://rosettacode.org/wiki/Bitmap/Bresenham%27s_line_algorithm#Python

    Args:
        start_x: the x-coordinate of the start point of the segment
        start_y: the y-coordinate of the start point of the segment
        end_x: the x-coordinate of the end point of the segment
        end_y: the y-coordinate of the end point of the segment

    Returns:
        A list [(x, y)] of the image pixel coordinates along the line,
        ordered from start to end inclusive.
    """
    points = []
    delta_x = abs(end_x - start_x)
    delta_y = abs(end_y - start_y)
    cur_x, cur_y = start_x, start_y
    step_x = 1 if start_x <= end_x else -1
    step_y = 1 if start_y <= end_y else -1
    if delta_x > delta_y:
        # x is the driving axis: advance x every step, y when error flips.
        error = delta_x / 2.0
        while cur_x != end_x:
            points.append((cur_x, cur_y))
            error -= delta_y
            if error < 0:
                cur_y += step_y
                error += delta_x
            cur_x += step_x
    else:
        # y is the driving axis.
        error = delta_y / 2.0
        while cur_y != end_y:
            points.append((cur_x, cur_y))
            error -= delta_x
            if error < 0:
                cur_x += step_x
                error += delta_y
            cur_y += step_y
    # Include the final endpoint.
    points.append((cur_x, cur_y))
    return points
|
def build_eval_subdir(list_of_label_value):
    """Create evaluation subdir name from a list of evaluation parameters.

    Pairs whose value is None are skipped; the rest are joined as
    'label(value)' tokens with underscores. Falls back to 'eval' when
    nothing remains, and escapes '/' so the name is path-safe.
    """
    tokens = ['%s(%s)' % (label, value)
              for label, value in list_of_label_value
              if value is not None]
    name = "_".join(tokens)
    if not name:
        name = 'eval'
    return name.replace('/', '::')
|
def ordinal(n):
    """
    Convert a positive integer into its ordinal representation
    (1 -> '1st', 2 -> '2nd', 11 -> '11th', ...).
    """
    # 11th, 12th, 13th are irregular: the teens always take 'th'.
    if 11 <= (n % 100) <= 13:
        return str(n) + "th"
    # Otherwise the last digit picks the suffix (4-9 and 0 all take 'th').
    return str(n) + ["th", "st", "nd", "rd", "th"][min(n % 10, 4)]
|
def approvals_percentage(
    approvals, decimal_digits=2, default_percentage_value=0
):
    """Calculates supervisor's approvals rate percentage value.

    :param approvals: A dict with 'forms_approved' (forms processed by
        supervisor) and 'forms_sent_for_review' keys.
    :param decimal_digits: Number of digits to round the percentage to,
        default two.
    :param default_percentage_value: Value returned when no forms were
        sent for review.
    :returns: Rounded percentage float when forms sent for review is
        non-zero, otherwise the default value.
    """
    approved = approvals['forms_approved']
    sent = approvals['forms_sent_for_review']
    # Guard against division by zero / missing reviews.
    if not sent:
        return default_percentage_value
    return round(100 * approved / sent, decimal_digits)
|
def binary_to_decimal( binary_string ):
    """
    Takes a binary number (as a string) and returns its decimal equivalent
    """
    # Horner's method: walk most-significant bit first, doubling the
    # accumulator each step; mathematically identical to summing 2**i
    # times each digit from the right. Empty string yields 0.
    total = 0
    for digit in binary_string:
        total = total * 2 + int(digit)
    return total
|
def filter_datastores_by_hubs(hubs, datastores):
    """Get filtered subset of datastores corresponding to the given hub list.

    :param hubs: list of PbmPlacementHub morefs
    :param datastores: all candidate datastores
    :returns: subset of datastores (preserving input order) whose moref
        value matches some hub's hubId
    """
    # Use a set for O(1) membership tests instead of scanning a list of
    # hub ids once per datastore.
    hub_ids = {hub.hubId for hub in hubs}
    return [ds for ds in datastores if ds.value in hub_ids]
|
def rna_id(entry):
    """
    Get the UPI for the entry, or fail if there is none.
    """
    # Only RNAcentral-sourced entries carry a usable object id.
    if entry["DB"] != "RNAcentral":
        raise ValueError("All entries are expected to come from RNAcentral")
    return entry["DB_Object_ID"]
|
def _mangle_user(name):
"""Mangle user variable name
"""
return "__user_{}".format(name)
|
def stateful(o):
    """Mark an object as stateful

    In:
      - ``o`` -- the object
    Return:
      - ``o`` (the same object, mutated in place)
    """
    # Dropping any cached persistent id forces the object to be treated
    # as stateful again by the persistence machinery.
    if hasattr(o, '_persistent_id'):
        del o._persistent_id
    return o
|
def calculate_distance(point1, point2):
    """Calculate the distance (in miles) between point1 and point2.

    point1 and point2 must have the format [latitude, longitude].
    The return value is a float.

    Haversine formula, modified and converted to Python from:
    http://www.movable-type.co.uk/scripts/latlong.html
    """
    import math

    def deg_to_rad(degrees):
        # Same arithmetic order as the original (deg * pi / 180) so
        # results are bit-for-bit identical.
        return degrees * math.pi / 180

    earth_radius = 6.371E3  # km
    lat1 = deg_to_rad(point1[0])
    lat2 = deg_to_rad(point2[0])
    dlat = deg_to_rad(point1[0] - point2[0])
    dlon = deg_to_rad(point1[1] - point2[1])
    a = (math.sin(0.5 * dlat)**2
         + math.cos(lat1) * math.cos(lat2) * math.sin(0.5 * dlon)**2)
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    # Convert km -> miles.
    return earth_radius * central_angle / 1.60934
|
def suma_listas (l_1, l_2):
    """
    list, list --> list
    Purpose: element-wise sum of l_1 and l_2, paired by index of l_1.
    """
    # Index into l_2 explicitly so that a shorter l_2 still raises
    # IndexError, matching the original range-based loop.
    return [value + l_2[idx] for idx, value in enumerate(l_1)]
|
def when_did_it_die(P0, days):
    """
    Given a P0 and an array of days in which it was alive, censored or dead,
    figure out its lifespan and the number of days it was alive.

    The lifespan is considered to last up to the last observation; a worm
    observed alive for 3 days that died on day 4 has a lifespan of 4 days.
    A worm never observed to die is treated as censored.

    P0= a unique identifier, typically a number, always a scalar or a string
    days= an array or array-like of floats or ints consisting of -1, 0, or 1
        (1 = alive, 0 = died, -1 = censored; other values are ignored)

    returns:
        [Worm Identifier, Lifespan, Death Observed(1) or Censored (0)]
    """
    alive_days = 0
    for observation in days:
        if observation == 0:
            # Death observed: stop counting here.
            return [P0, alive_days, 1]
        if observation == -1:
            # Censored: stop counting here.
            return [P0, alive_days, 0]
        if observation == 1:
            alive_days += 1
    # Loop exhausted without a death/censor mark: treat as censored.
    return [P0, alive_days, 0]
|
def sub2ind(range, x, y):
    """Convert (x, y) subscripts to a row-major linear index.

    NOTE(review): the parameter name ``range`` shadows the builtin; it is
    kept unchanged for backward compatibility with keyword callers.
    """
    return x + y * range
|
def kid_friendly_validation(children):
    """ Decide if kid_friendly input valid.
    Parameters:
        children(str): A user's input to the kid-friendly choice.
    Return:
        (str): A single valid string, such as "1" or "2".
    """
    # Keep re-prompting until one of the two valid menu choices is given.
    while children not in ("1", "2"):
        print("\nI'm sorry, but " + children + " is not a valid choice. Please try again.")
        children = input("\nWill you be travelling with children?"
                         + "\n 1) Yes"
                         + "\n 2) No"
                         + "\n> ")
    return children
|
def find_rank_type(ranks):
    """Find and return the rank type of the 3 ranks given.

    Rank type results:
        1: no particularly interesting rank order, i.e. High Card
        2: pair rank
        4: straight
        5: three of a kind

    Args:
        ranks: list of three int card ranks (2-14, ace high stored as 14).
            The list is NOT modified (the previous version sorted it
            in place as a side effect).

    Returns:
        int rank-type code as described above.
    """
    # Sort a copy so the caller's list is left untouched.
    low, mid, high = sorted(ranks)
    if low == mid == high:
        return 5
    # After sorting, any pair must be adjacent (low==mid or mid==high);
    # low==high would imply three of a kind, already handled above.
    if low == mid or mid == high:
        return 2
    if low + 1 == mid and mid + 1 == high:
        return 4
    # Ace-low straight A-2-3 (ace stored as 14).
    if 14 in ranks and 2 in ranks and 3 in ranks:
        return 4
    return 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.