content stringlengths 42 6.51k |
|---|
def __get_results_by_code(code, results) -> list:
"""
Get a list of results filtered from results dict, using code.
"""
return [i for i in results if i["status"] == code] |
def indent(string):
    """Return str(string) prefixed with four spaces."""
    return '    ' + str(string)
def _trim_msg(msg):
"""
msg : bytearray
Somewhere in msg, 32 elements are 0x29. Returns the msg before that
"""
i = msg.find(b'\x29'*32)
if i == -1:
raise Exception('trim_msg error; end of msg not found')
return msg[:i] |
def find_number(compare):
    """Binary-search for a number in [0, 100] using a comparator.

    `compare(guess)` returns 0 for the correct number, -1 if `guess` is
    smaller than the target, and 1 if it is greater.

    Fixes: the original set `start = guess` / `end = guess`, which never
    narrows a two-element range and loops forever; it also called
    `round(int, 5)`, a no-op. We narrow exclusively and return None if the
    target is not in range.
    """
    low, high = 0, 100
    while low <= high:
        guess = (low + high) // 2
        verdict = compare(guess)
        if verdict == 0:
            return guess
        if verdict == 1:
            high = guess - 1  # guessed too high
        else:
            low = guess + 1   # guessed too low
    return None
def getKeysFromString(word, delimiter='|'):
    """Get a list of CCPN data model keys from an object identifier string.

    Splits `word` on `delimiter`; purely numeric parts become ints, the
    rest stay strings.

    Fixes: the original used a bare `except:`, which would also swallow
    KeyboardInterrupt/SystemExit; only ValueError (non-numeric) is expected.
    """
    keys = []
    for item in word.split(delimiter):
        try:
            keys.append(int(item))
        except ValueError:  # non-numeric key stays a string
            keys.append(item)
    return keys
def upper(input_ch):
    """Return the upper-case form of a lower-case character, else unchanged.

    :param input_ch: a single character supplied by the user.
    :return: the upper-cased character.
    """
    if input_ch.islower():
        return input_ch.upper()
    return input_ch
def triangle_type(a, b, c):
    """Classify a triangle from its side lengths.

    Returns 0 if no triangle can be made, 1 for acute, 2 for right,
    3 for obtuse.
    """
    s1, s2, s3 = sorted([a, b, c])  # s3 is the longest side
    if s1 + s2 <= s3:
        return 0
    lhs = s1 * s1 + s2 * s2
    rhs = s3 * s3
    if lhs == rhs:
        return 2
    return 3 if lhs < rhs else 1
def file_path_from_request(request_obj):
    """Extract the file path from a request object, decoding %20 and
    stripping trailing slashes; None for a falsy request."""
    if not request_obj:
        return None
    raw_path = request_obj["file_path"]
    return raw_path.replace("%20", " ").rstrip("/")
def jwt_get_user_id_from_payload_handler(payload):
    """Return the user id stored under 'id' in the JWT payload (None if absent).

    Override this function if user_id is formatted differently in payload.
    """
    user_id = payload.get("id")
    return user_id
def encode(o):
    """UTF-8-encode the string representation of `o`."""
    text = str(o)
    return text.encode('UTF-8')
def default_str(str_, default_str):
    """Return `str_` unless it is None or the empty string, in which case
    return `default_str`.

    Args:
        str_: A string (or None).
        default_str: The fallback string.
    Returns:
        Either `str_` or `default_str`.
    """
    has_content = str_ is not None and str_ != ""
    return str_ if has_content else default_str
def int_to_dtype(x, n, signed):
    """Wrap the Python integer `x` into an n-bit signed or unsigned value."""
    mask = (1 << n) - 1
    value = x & mask
    # For signed types, a set sign bit means the value is the two's
    # complement negative.
    if signed and value & (1 << (n - 1)):
        value = -((~value & mask) + 1)
    return value
def recursive_dict_of_lists(d, helper=None, prev_key=None):
    """
    Builds dictionary of lists by recursively traversing a JSON-like
    structure.
    Arguments:
        d (dict): JSON-like dictionary.
        prev_key (str): Prefix used to create dictionary keys like: prefix_key.
            Passed by recursive step, not intended to be used.
        helper (dict): In case d contains nested dictionaries, you can specify
            a helper dictionary with 'key' and 'value' keys to specify where to
            look for keys and values instead of recursive step. It helps with
            cases like: {'action': {'type': 'step', 'amount': 1}}, by passing
            {'key': 'type', 'value': 'amount'} as a helper you'd get
            {'action_step': [1]} as a result.
    """
    d_o_l = {}
    # Helper short-circuit: when this level contains both the helper's key
    # and value fields, emit a single "<prev_key>_<helper key>" entry and
    # stop recursing into this subtree.
    if helper is not None and helper['key'] in d.keys() and helper['value'] in d.keys():
        if prev_key is not None:
            key = f"{prev_key}_{helper['key']}"
        else:
            key = helper['key']
        if key not in d_o_l.keys():
            d_o_l[key] = []
        d_o_l[key].append(d[helper['value']])
        return d_o_l
    for k, v in d.items():
        if isinstance(v, dict):
            # Recurse into nested dicts; the child's keys get prefixed with
            # this key. NOTE(review): siblings that produce the same flattened
            # key overwrite each other via dict.update — confirm intended.
            d_o_l.update(recursive_dict_of_lists(v, helper=helper, prev_key=k))
        else:
            if prev_key is not None:
                key = f'{prev_key}_{k}'
            else:
                key = k
            if key not in d_o_l.keys():
                d_o_l[key] = []
            # Lists are flattened into the accumulator; scalars are appended.
            if isinstance(v, list):
                d_o_l[key].extend(v)
            else:
                d_o_l[key].append(v)
    return d_o_l
def lin(i1, i2):
    """Return lo + hi*(hi-1)/2 for the ordered pair (hi, lo).

    NOTE(review): despite the name/docstring ("linear interpolation"),
    this computes a symmetric pair index — confirm against callers.
    """
    hi = max(i1, i2)
    lo = min(i1, i2)
    return lo + hi * (hi - 1) / 2
def convert_to_target(panopto_content, config):
    """Stub converter to the target format: copies only the 'Id' field.

    `config` is accepted for interface compatibility but unused here.
    """
    return {'id': panopto_content['Id']}
def _flag_single_gifti(img_files):
"""Test if the paired input files are giftis."""
# Possibly two gifti; if file is not correct, will be caught
if isinstance(img_files[0], list):
return False
flag_single_gifti = [] # gifti in pairs
for img in img_files:
ext = ".".join(img.split(".")[-2:])
flag_single_gifti.append((ext == "func.gii"))
return all(flag_single_gifti) |
def eastPlaceCheck(shipSize, xIndexStart, yStart, xAxis):
    """Check whether a ship laid eastward fits on the board.

    Args:
        shipSize (int): ship length from the Ship class.
        xIndexStart (int): start index on the x axis.
        yStart (str): the row letter on the Y axis.
        xAxis (list[int]): the x axis coordinate values.
    Returns:
        list[str]: the 'row:column' cells the ship occupies if it fits
        (end index within 0..10), otherwise False.
    """
    last_index = xIndexStart + shipSize
    if not (0 <= last_index <= 10):
        return False
    return [f'{yStart}:{xAxis[i]}' for i in range(xIndexStart, last_index)]
def kind(n, ranks):
    """Return the first rank that this hand has exactly n of.

    Return None if there is no n-of-a-kind in the hand.

    Improvement: the original called ranks.count(r) inside the loop (O(n^2));
    a single Counter pass preserves the "first in ranks order" semantics.
    """
    counts = Counter(ranks)
    for r in ranks:
        if counts[r] == n:
            return r
    return None
def _check_type(value, expected_type):
"""Perform type checking on the provided value
This is a helper that will raise ``TypeError`` if the provided value is
not an instance of the provided type. This method should be used sparingly
but can be good for preventing problems earlier when you want to restrict
duck typing to make the types of fields more obvious.
If the value passed the type check it will be returned from the call.
"""
if not isinstance(value, expected_type):
raise TypeError("Value {value!r} has unexpected type {actual_type!r}, expected {expected_type!r}".format(
value=value,
expected_type=expected_type,
actual_type=type(value),
))
return value |
def make_reg_05h_byte(channel: int) -> int:
    """
    Make the byte for REG2 (channel).
    Channel control (CH) 0-83, 84 channels in total.
    18 default for SX1262, 23 default for SX1268.
    850.125 + CH * 1MHz, default 868.125MHz (SX1262);
    410.125 + CH * 1MHz, default 433.125MHz (SX1268).
    :param channel: The channel.
    :return: The channel / value for REG2.
    """
    if not 0 <= channel <= 83:
        raise RuntimeError(
            f"Invalid channel, channel must be between 0-83, but was {channel}."
        )
    return channel
def pal_draw_condition_2(slope):
    """
    Second draw condition of polygonal audience lines: the slope must be
    zero or positive.

    Parameters
    ----------
    slope : float
        Slope of the polygonal audience line.

    Returns
    -------
    bool
        True if the condition passes, otherwise False.
    """
    return not slope < 0
def minOperations(n):
    """Minimum copy/paste operations to reach `n` characters from 1.

    The answer is the sum of n's prime factors (with multiplicity).

    Args:
        n: the target count; non-int or n <= 1 yields 0.
    Returns:
        int: number of operations.

    Fixes: the original used true division (`n = n / p`), turning n into a
    float — exact for small n but lossy for large inputs; integer division
    keeps the factorization exact, and the final int() cast is unneeded.
    """
    if type(n) != int or n <= 1:
        return 0
    total = 0
    factor = 2
    while n != 1:
        if n % factor == 0:
            n //= factor
            total += factor
        else:
            factor += 1
    return total
def update_dict(d1, d2):
    """Copy into `d1` any entries of `d2` whose keys `d1` lacks; return `d1`."""
    for key, value in d2.items():
        if key not in d1:
            d1[key] = value
    return d1
def knapsack_0_1_dp(w, wt, vt, n):
    """
    A Dynamic Programming based solution for the 0-1 Knapsack problem,
    built bottom-up in a (n+1) x (w+1) table.

    Time Complexity: O(n*w) where n is the number of items and w is the
    capacity of the knapsack.

    :param w: total capacity
    :type w: int
    :param wt: weight of each element
    :type wt: list[int]
    :param vt: value of each element
    :type vt: list[int]
    :param n: number of elements
    :type n: int
    :return: the maximum value that can be put in a knapsack of capacity w
    :rtype: int

    Bug fix: the "item too heavy" test compared the item's weight against
    the TOTAL capacity `w` instead of the current sub-capacity `w_idx`,
    which indexes dp with a negative offset and corrupts the answer.
    """
    dp = [[0 for _ in range(w + 1)] for _ in range(n + 1)]
    # build table dp[][] in bottom up manner
    for n_idx in range(n + 1):
        for w_idx in range(w + 1):
            if n_idx == 0 or w_idx == 0:
                dp[n_idx][w_idx] = 0
            elif wt[n_idx - 1] > w_idx:  # item doesn't fit this sub-capacity
                dp[n_idx][w_idx] = dp[n_idx - 1][w_idx]
            else:
                dp[n_idx][w_idx] = max(
                    vt[n_idx - 1] + dp[n_idx - 1][w_idx - wt[n_idx - 1]],
                    dp[n_idx - 1][w_idx])
    return dp[n][w]
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
    """Single-line user warning formatter (warnings.formatwarning signature)."""
    # pylint: disable=unused-argument
    return f"{filename}:{lineno}: {category.__name__}: {message}\n"
def moffat(x, amplitude=1, center=0., sigma=1, beta=1.):
    """1-D Moffat profile: amplitude / (((x - center)/sigma)**2 + 1)**beta."""
    z = (x - center) / sigma
    return amplitude / (z ** 2 + 1) ** beta
def apply_first(seq):
    """Call the first item of `seq` with the remaining items as positional
    arguments."""
    func, *call_args = seq
    return func(*call_args)
def sql_bound(max_or_min, *fields):
    """
    Produce SQL returning the maximum/minimum across several fields.

    Parameters
    ----------
    max_or_min : str
        One of MAX or MIN (case-insensitive), depending on behaviour desired.
    fields : str
        Field names of the inputs to the max/min function.

    Returns
    -------
    str
        String like 'SELECT [MAX or MIN](n) FROM (VALUES ...) as value(n)'

    Bug fix: the original validated the upper-cased command but then
    interpolated the raw argument, so sql_bound("max", ...) emitted
    lowercase 'max(n)'; we now interpolate the validated `cmd`.
    """
    cmd = max_or_min.upper()
    if cmd != "MAX" and cmd != "MIN":
        raise ValueError("Invalid command: must be MAX or MIN")
    field_str = ",".join([f"({field})" for field in fields])
    return f"SELECT {cmd}(n) FROM (VALUES {field_str}) as value(n)"
def pad(blocksize, data):
    """Add PKCS#7 padding to `data` (bytes) so its length is a multiple of
    `blocksize`.

    :param blocksize: int, the CBC block size.
    :param data: bytes to be padded.
    :return: the padded bytes.
    """
    pad_len = blocksize - (len(data) % blocksize)
    return data + bytes([pad_len]) * pad_len
def mod_mapping(codon, productions):
    """Default GE mapping: pick a production index by codon modulo count."""
    production_count = len(productions)
    return codon % production_count
def redact_pii(text, matches):
    """Redact each match (as produced by detect_pii) from `text`, returning a
    (redacted_text, metadata) tuple where metadata preserves the original
    match information."""
    redacted = text
    metadata = []
    for match in matches:
        # match[0] is the matched text, match[3] its PII tag.
        redacted = redacted.replace(match[0], "PI:" + match[3])
        metadata.append(match)
    return (redacted, metadata)
def f(t, p):
    """
    Disturbance force input to the system: a 0.1 s pulse of amplitude
    F_amp beginning at t = DistStart. (A plain step option existed in the
    original source but was commented out.)
    """
    m, k, c, Distance, StartTime, Amax, Vmax, DistStart, F_amp = p
    pulse_active = (t >= DistStart) * (t <= DistStart + 0.1)
    return F_amp * pulse_active
def fuel_requirement(mass: int) -> int:
    """Base fuel requirement for a single module: floor(mass / 3) - 2."""
    return mass // 3 - 2
def exchange(pgms, a, b):
    """Return a copy of `pgms` with positions `a` and `b` swapped.

    >>> exchange(['e', 'a', 'b', 'c', 'd'], 3, 4)
    ['e', 'a', 'b', 'd', 'c']
    """
    swapped = list(pgms)
    swapped[a], swapped[b] = swapped[b], swapped[a]
    return swapped
def parsedefaultmarker(text):
    """Given bytes b'abc (DEFAULT: def.ghi)', return (b'abc', [b'def', b'ghi']);
    otherwise return None."""
    if text[-1:] != b')':
        return None
    marker = b' (DEFAULT: '
    pos = text.find(marker)
    if pos < 0:
        return None
    inner = text[pos + len(marker):-1]
    return text[:pos], inner.split(b'.', 2)
def normalize_yaml(yaml):
    """Normalize YAML data from project and role lookups into a list of
    (name, repo_url, version/sha) tuples.

    A list input is role data; a dict input is project data keyed by
    '<project>_git_repo' / '<project>_git_install_branch'.
    """
    if isinstance(yaml, list):
        # Role data: take name/src, defaulting version to HEAD.
        return [(role['name'], role['src'], role.get('version', 'HEAD'))
                for role in yaml]
    # Project data: derive names by stripping the '_git_repo' suffix.
    result = []
    for key in yaml.keys():
        if not key.endswith('git_repo'):
            continue
        project = key[:-9]
        result.append((project,
                       yaml['{0}_git_repo'.format(project)],
                       yaml['{0}_git_install_branch'.format(project)]))
    return result
def find_tree(user, input_tree_name):
    """Find a tree in user["trees"] by case-insensitive name; None if absent.

    Improvements: returns directly from the loop instead of tracking a
    flag variable, and drops the non-idiomatic `== None` comparison.
    """
    target = input_tree_name.lower()
    for tree in user["trees"]:
        if tree["name"].lower() == target:
            return tree
    return None
def lower(text):
    """Convert `text` to lower case.

    :param str text: the text to process.
    """
    return text.lower()
def shorten_line(text, maxlen=75):
    """Truncate `text` to fewer than `maxlen` characters at a word boundary,
    appending ' ...'. Falls back to a hard cut when no whitespace is found
    before `maxlen`. None/empty input is returned unchanged."""
    if len(text or '') < maxlen:
        return text
    # Prefer the last space or newline before the limit.
    boundary = max(text.rfind(' ', 0, maxlen), text.rfind('\n', 0, maxlen))
    if boundary < 0:
        boundary = maxlen
    return text[:boundary] + ' ...'
def stalinsort(iterable, key=None, ascending=False):
    """Sorts iterable according to the single pass O(n) StalinSort algorithm.
    Parameters
    ----------
    iterable: iterable object
    key: function
        A function of one argument that is used to extract a comparison key
        from each element. Default is None.
    Returns
    -------
    survivors: list
        List of surviving elements of iterable.
    Example
    -------
    >>>from stalinsort import stalinsort
    >>>a = [3, 2, 5, 7, 1, 3]
    >>>stalinsort(a)
    [3, 2, 1]
    """
    # The `ascending` parameter is deliberately ignored (the joke below);
    # elements are kept only while they do not exceed the last survivor.
    ascending = False  # There is only descent under communism.
    if key is not None:
        # NOTE(review): `.apply` is not a list method — this path presumably
        # expects a pandas Series; confirm intended input type.
        keys = iterable.apply(key)
    else:
        keys = list(iterable)
    survivors = iterable[:1]  # I prefer to think in terms of survivors.
    # NOTE(review): the comparison mixes raw survivors with extracted keys
    # (survivors[-1] vs keys[index + 1]) — correct only when key is None
    # or key is the identity; flagged for confirmation.
    for index, victim in enumerate(iterable[1:]):
        if survivors[-1] >= keys[index + 1]:
            survivors.append(victim)
    return survivors
def _get_cores_and_type(numcores, paralleltype, scheduler):
"""Return core and parallelization approach from command line providing sane defaults.
"""
if scheduler is not None:
paralleltype = "ipython"
if paralleltype is None:
paralleltype = "local"
if not numcores or int(numcores) < 1:
numcores = 1
return paralleltype, int(numcores) |
def get_all_keys_in_dataset(raw_dataset):
    """Collect every distinct key appearing across the dataset's items,
    preserving first-seen order.

    Improvement: the original tested membership in a list for each key
    (O(n^2)); a dict gives O(1) dedup while keeping insertion order.
    """
    seen = {}
    for item in raw_dataset:
        for key in item.keys():
            seen[key] = None
    return list(seen)
def threeInARow(m1, m2, m3):
    """True when the three marks are identical and none is a blank space."""
    if " " in (m1, m2, m3):
        return False
    return m1 == m2 == m3
def get_measure_worker_instance_template_name(experiment: str):
    """Return the instance template name for measurer workers of
    |experiment|."""
    return f'worker-{experiment}'
def test_for(mod):
"""Test to see if mod is importable."""
try:
__import__(mod)
except ImportError:
return False
else:
return True |
def replace_dict_value_inplace(d, bad_val, good_val):
    """IN PLACE replace every occurrence of `bad_val` in `d`'s values with
    `good_val`; returns the same dict for convenience."""
    for key in d:
        if d[key] == bad_val:
            d[key] = good_val
    return d
def union_keys(*pos_args):
    """Return the union of the keys of all given dicts.

    Args:
        pos_args: dicts (or any iterables of keys).
    """
    result = set()
    for mapping in pos_args:
        result |= set(mapping)
    return result
def isDominated(wvalues1, wvalues2):
    """Return whether `wvalues2` dominates `wvalues1`.

    :param wvalues1: weighted fitness values that would be dominated.
    :param wvalues2: weighted fitness values of the dominant.
    :returns: True if wvalues2 dominates wvalues1 (no component better,
        at least one strictly worse), False otherwise.
    """
    strictly_less = False
    for w1, w2 in zip(wvalues1, wvalues2):
        if w1 > w2:
            return False
        if w1 < w2:
            strictly_less = True
    return strictly_less
def inurl(needles, haystack, position='any'):
    """Case-insensitively test whether any needle occurs in `haystack`.

    :param needles: iterable of substrings to look for.
    :param haystack: string searched.
    :param position: 'any' (substring), 'begin' (prefix) or 'end' (suffix).
    :return: True on the first hit, False otherwise.

    Improvement: the original counted every needle and then thresholded;
    returning on the first match short-circuits with identical results.
    """
    hay = haystack.lower()
    for needle in needles:
        sub = needle.lower()
        if position == 'any':
            if sub in hay:
                return True
        elif position == 'end':
            if hay.endswith(sub):
                return True
        elif position == 'begin':
            if hay.startswith(sub):
                return True
    return False
def with_uppercase(wrapped, *args, uppercase=False, **kwargs):
    """Call `wrapped(*args, **kwargs)`; with uppercase=True, return its
    string form upper-cased.

    Formatting options:
    :param uppercase: print output in capitals.
    """
    result = wrapped(*args, **kwargs)
    return str(result).upper() if uppercase else result
def intersection(l1, l2):
    """Return the set of elements present in both `l1` and `l2`."""
    return set(l1).intersection(l2)
def _moog_par_format_fluxlimits (fluxlimits):
"""
fluxlimits = [start, stop, step]
```
fluxlimits
5555.0 5600.0 10.0
```
"""
if fluxlimits is None:
return ""
lines = [\
"fluxlimits",
(" {:10}"*3).format(*list(map(float,fluxlimits)))
]
return "\n".join(lines) |
def format_stringv2(value):
    """Return a vCard v2 string: backslashes, commas and semi-colons removed."""
    cleaned = value
    for ch in ("\\", ",", ";"):
        cleaned = cleaned.replace(ch, "")
    return cleaned
def from_decimal(num, base):
    """Convert a non-negative decimal number into a list of base digits
    (most significant first), convertible with ``convert_base_list``.

    Fixes: the original returned [] for 0; [0] matches the companion
    base88_encode in this module and is the conventional representation.
    """
    if num == 0:
        return [0]
    digits = []
    while num:
        num, rem = divmod(num, base)
        digits.append(rem)
    return digits[::-1]
def js_splice(arr, start, delete_count=None, *items):
    """Implementation of javascript's splice function.

    :param list arr: array to splice (modified in place).
    :param int start: index at which to start changing the array.
    :param int delete_count: number of elements to delete; None deletes
        through the end of the array.
    :param items: items to insert at `start`.
    :returns: the list of deleted elements.

    Reference: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/splice # noqa:E501

    Fixes vs original: a negative start now counts back from the end
    (len + start, clamped to 0 — the original computed len - start, pointing
    past the array), and an explicit delete_count of 0 deletes nothing
    instead of being treated like None.
    """
    try:
        if start > len(arr):
            start = len(arr)
        if start < 0:
            # JS semantics: offset from the end, clamped to the start.
            start = max(len(arr) + start, 0)
    except TypeError:
        # Non-integer start values are treated as 0 in js.
        start = 0
    remaining = len(arr) - start
    if delete_count is None or delete_count > remaining:
        delete_count = remaining
    elif delete_count < 0:
        delete_count = 0
    deleted_elements = arr[start:start + delete_count]
    # Rebuild and copy back so the caller's list object is updated in place.
    arr[:] = arr[:start] + list(items) + arr[start + delete_count:]
    return deleted_elements
def baseline_candidates(test_query_rel, edges, obj_dist, rel_obj_dist):
    """
    Simple baseline: answer candidates from the object distribution.

    Parameters:
        test_query_rel (int): test query relation.
        edges (dict): edges from the data on which the rules are learned.
        obj_dist (dict): overall object distribution.
        rel_obj_dist (dict): per-relation object distribution.
    Returns:
        dict: candidates with their distribution values — the relation's
        own distribution when the relation is known, else the overall one.
    """
    return rel_obj_dist[test_query_rel] if test_query_rel in edges else obj_dist
def modsplit(name):
    """Split an importable path into (module, attribute).

    Accepts 'pkg.mod:attr' (explicit separator) or 'pkg.mod.attr'
    (last dot separates the attribute).

    Fixes: the original error messages were plain strings containing the
    literal '{s}' — the f-prefix was missing, so the offending name was
    never shown.
    """
    if ':' in name:
        parts = name.split(':')
        if len(parts) != 2:
            raise ValueError(f"Syntax error: {name}")
        return parts[0], parts[1]
    parts = name.split('.')
    if len(parts) < 2:
        raise ValueError(f"Syntax error: {name}")
    return '.'.join(parts[:-1]), parts[-1]
def sort_schedules_by_day(schedule_in_tuples):
    """Group (day, period_ind, course_name) tuples into a weekday-keyed dict."""
    weekdays = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
    schedule_dict = {day: [] for day in weekdays}
    for entry in schedule_in_tuples:
        schedule_dict[entry[0]].append(entry)
    return schedule_dict
def read_hhmmss_no_colon(field: str) -> int:
    """Read an HHMMSS field (no colons) and return microseconds since
    midnight; an empty field yields 0."""
    if field == "":
        return 0
    hour, minute, second = int(field[0:2]), int(field[2:4]), int(field[4:6])
    total_seconds = 3600 * hour + 60 * minute + second
    return 1000000 * total_seconds
def get_host_details_hr(host_dict):
    """
    Prepare a human-readable host-detail list for the
    'risksense-get-host-detail' command.

    :param host_dict: dictionary containing host details.
    :return: [details_dict, {}] — trailing empty dict preserved for the
        existing output contract.
    """
    field_map = (
        ('Name', 'hostName'),
        ('IP', 'ipAddress'),
        ('RS3', 'rs3'),
        ('Discovered On', 'discoveredOn'),
        ('Last Found On', 'lastFoundOn'),
    )
    details = {label: host_dict.get(key, '') for label, key in field_map}
    return [details, {}]
def escape_char_data7(src, quote=False):
    """Escapes reserved and non-ASCII characters for XML character data.

    src
        A character string.
    quote (defaults to False)
        When True, surrounds the output in single or double quotes
        (double preferred) depending on the contents of src.

    Characters outside the ASCII range are replaced with numeric
    character references.

    Reconstruction note: the original text of this function had its XML
    entities HTML-unescaped (e.g. '&lt;' collapsed to '<', the quote
    entity collapsed into an unterminated string literal), which made it
    syntactically invalid; the entity strings below restore the evident
    intent.
    """
    dst = []
    if quote:
        if "'" in src:
            q = '"'
            qstr = '&quot;'
        elif '"' in src:
            q = "'"
            qstr = '&#x27;'
        else:
            q = '"'
            qstr = '&quot;'
        dst.append(q)
    else:
        q = None
        qstr = ''
    for c in src:
        cp = ord(c)
        if cp > 0x7F:
            # Width of the hex reference grows with the code point.
            if cp > 0xFFFFFF:
                dst.append("&#x%08X;" % cp)
            elif cp > 0xFFFF:
                dst.append("&#x%06X;" % cp)
            elif cp > 0xFF:
                dst.append("&#x%04X;" % cp)
            else:
                dst.append("&#x%02X;" % cp)
        elif c == '<':
            dst.append("&lt;")
        elif c == '&':
            dst.append("&amp;")
        elif c == '>':
            dst.append("&gt;")
        elif c == '\r':
            dst.append("&#13;")
        elif c == q:
            dst.append(qstr)
        else:
            dst.append(c)
    if quote:
        dst.append(q)
    return ''.join(dst)
def is_maple_invoke_bp_plt_disabled(buf):
    """
    Determine whether the Maple breakpoint plt is disabled.

    params:
        buf: string output of m_util.gdb_exec_to_str("info b")
    returns:
        False when the matching breakpoint line shows 'y' (enabled),
        True otherwise (disabled or not present).

    Fixes: the original compared with `on_off is 'y'` — an identity check
    on a sliced string, which is not guaranteed to hit the interned 'y'
    object; equality (`==`) is the correct comparison.
    """
    match_pattern = "<maple::maple_invoke_method(maple::method_header_t const*, maple::MFunction const*)@plt>"
    for line in buf.split('\n'):
        if match_pattern in line:
            # Second whitespace-separated column of "info b" is Enb (y/n).
            on_off = line.split()[1]
            return on_off != 'y'
    return True
def _get_oshape_of_gather_nd_op(dshape, ishape):
"""Given data and index shapes, get the output `NDArray` shape.
This basically implements the infer shape logic of op gather_nd."""
assert len(dshape) > 0 and len(ishape) > 0
oshape = list(ishape[1:])
if ishape[0] < len(dshape):
oshape.extend(dshape[ishape[0]:])
return tuple(oshape) |
def has_path(d, path):
    """Check whether a dot-separated path exists in a nested dict/list
    structure (numeric components index lists)."""
    node = d
    for part in path.split('.'):
        if part.isdigit():
            index = int(part)
            if index >= len(node):
                return False
            node = node[index]
        else:
            if part not in node:
                return False
            node = node[part]
    return True
def base88_encode(num):
    """Convert a non-negative number into its base-88 digit list,
    most significant digit first.

    Modified from http://stackoverflow.com/a/1119769
    """
    if num == 0:
        return [0]
    digits = []
    while num:
        num, rem = divmod(num, 88)
        digits.append(rem)
    digits.reverse()
    return digits
def count_resolve(siblings):
    """Collapse sibling steps into one by summing their 'step' counts.

    Pops the last sibling (mutating the input list) and accumulates the
    remaining siblings' steps into it.
    """
    merged = siblings.pop()
    for sibling in siblings:
        if sibling is not None:  # defensively skip missing entries
            # all siblings ought to have the same 'last'
            merged['step'] += sibling['step']
    return merged
def get_file_type(file_name):
    """Determine a file's type from its extension; 'nc' and 'prmtop' map
    to their canonical names. None input yields None."""
    if file_name is None:
        return None
    suffix = file_name.split(".")[-1].strip()
    aliases = {"nc": "netcdf", "prmtop": "parm7"}
    return aliases.get(suffix, suffix)
def _dict_for_query(device_data: dict) -> dict:
"""
GET requests send device data as a nested object.
To avoid storing the device data block in two
formats, we are just going to compute the flat
dictionary.
"""
return {f"deviceData[{key}]": value for key, value in device_data.items()} |
def percent_rain(pc):
    """Return the rain percentage with sundries (time, legend) subtracted."""
    SUNDRIES = 1.87
    return pc - SUNDRIES
def create_schedule(list):
    """Create a round-robin schedule for the teams in `list` and return it.

    An odd team count gets a 'BYE' filler. Sides are switched each round
    so no team is always listed first.

    Fix: under Python 3 the original returned lazy `zip` objects; these
    are materialized into lists of (team, team) tuples so each round can
    be iterated more than once (the py2-era behavior).
    """
    # NOTE: the parameter name shadows the builtin `list`; kept for
    # interface compatibility.
    s = []
    if len(list) % 2 == 1:
        list = list + ["BYE"]
    for i in range(len(list) - 1):
        mid = len(list) // 2
        l1 = list[:mid]
        l2 = list[mid:]
        l2.reverse()
        # Switch sides after each round
        if i % 2 == 1:
            s.append([pair for pair in zip(l1, l2)])
        else:
            s.append([pair for pair in zip(l2, l1)])
        # Rotate: everyone but the first team shifts one place.
        list.insert(1, list.pop())
    return s
def str_to_index(axis):
    """
    Convert an axis letter to its index.

    Parameters
    ----------
    axis: str
        Axis value ('x', 'y' or 'z', any case).

    Returns
    -------
    int or None
        0/1/2 for x/y/z, None for anything else.
    """
    return {'x': 0, 'y': 1, 'z': 2}.get(axis.lower())
def _label_string(label):
"""Convert the given (optional) Label to a string."""
if not label:
return "None"
else:
return '"%s"' % label |
def sarcastic(str):
    """ReTuRnS tHe SaRcAsTiC vErSiOn Of A sTrInG — alternating case on
    letters only; other characters pass through without toggling."""
    # NOTE: the parameter name shadows the builtin `str`; kept for
    # interface compatibility.
    chars = []
    upper_next = True
    for ch in str:
        if ch.isalpha():
            chars.append(ch.upper() if upper_next else ch.lower())
            upper_next = not upper_next
        else:
            chars.append(ch)
    return ''.join(chars)
def is_isogram(string):
    """Test whether `string` is an isogram (no repeated letters; hyphens
    and spaces are ignored, case-insensitive)."""
    seen = set()
    for ch in string.lower():
        if ch in "- ":
            continue
        if ch in seen:
            return False
        seen.add(ch)
    return True
def _out_of_str(n1: int, n2: int) -> str:
"""
:return A string in the format [n1 / n2], where "n1" and "n2" are the passed integers padded to the same length
"""
width = len(str(max(n1, n2)))
return '[%s / %s]' % (str(n1).rjust(width), str(n2).rjust(width)) |
def parse_search_results(search_results):
    """
    Join raw indicator results into a single comma-separated line.

    :param search_results: iterable of result strings.
    :return: the joined string.
    """
    separator = ", "
    return separator.join(search_results)
def spark_points_flat(lst, key):
    """
    Flatten one key's values across a list of dicts into a ','-joined string.

    :param lst: list of dictionaries.
    :param key: the key whose values to flatten.
    :return: comma-separated string of the values.
    """
    values = [str(entry[key]) for entry in lst]
    return ','.join(values)
def error_email_body_content(doi, jats_content, error_messages):
    """Assemble the body content of an error email: optional error
    messages, a pointer to worker.log, the optional DOI and the JATS
    content."""
    parts = []
    if error_messages:
        parts.append(str(error_messages))
    parts.append(
        "\n\nMore details about the error may be found in the worker.log file\n\n"
    )
    if doi:
        parts.append("Article DOI: %s\n\n" % doi)
    parts.append("JATS content: %s\n\n" % jats_content)
    return "".join(parts)
def indentation_lv(s):
    """Return the number of leading tab characters (tab indentation level)."""
    level = 0
    for ch in s:
        if ch != '\t':
            break
        level += 1
    return level
def _get_only_relevant_data(video_data):
"""
Method to build ES document with only the relevant information
"""
return {
"kind": video_data["kind"],
"id": video_data["id"],
"published_at": video_data["snippet"]["publishedAt"],
"title": video_data["snippet"]["title"],
"description": video_data["snippet"]["description"],
"thumbnail_url": video_data["snippet"]["thumbnails"]["default"]["url"],
"channel_title": video_data["snippet"]["channelTitle"],
} |
def break_into_lines(s, max_len=40):
    """Wrap `s` at whitespace into lines of at most `max_len` characters;
    a single word longer than `max_len` stays on its own (longer) line."""
    lines = []
    current = ""
    for token in s.split():
        if len(current) + 1 + len(token) > max_len:
            if current:
                lines.append(current)
                current = token
            else:
                # Over-long single word: emit it as-is.
                lines.append(token)
        else:
            current = current + " " + token if current else token
    if current:
        lines.append(current)
    return '\n'.join(lines)
def annual_health_related_expenses(responses, derived):
    """Return the annual health-related expenses from `responses` as a
    float; 0 when the value is missing, None or not numeric.

    Fixes: float(None) raises TypeError, which the original only caught
    as ValueError — a stored None would crash; both are handled now.
    (`derived` is unused but kept for interface compatibility.)
    """
    try:
        return float(responses.get('annual_health_related_expenses', 0))
    except (TypeError, ValueError):
        return 0
def float_to_string(f):
    """Encode a float compactly: values within 1e-6 of 0 or 1 clamp to
    "0"/"1"; otherwise use %g-style formatting."""
    if abs(f) < 1e-6:
        return "0"
    if abs(1 - f) < 1e-6:
        return "1"
    return format(f, "g")
def limit_data(data_dict: dict, limit: int = -1) -> dict:
    """Truncate a tokenized dataset to at most `limit` examples.

    Args:
        data_dict (dict): set with 'source' and 'target' entries, each
            holding 'input_ids' and 'attention_mask' sequences.
        limit (int, optional): number of examples to keep; -1 (default)
            returns the input unchanged.
    Returns:
        dict: dict with limited source and target.
    """
    if limit == -1:
        return data_dict
    return {
        side: {
            "input_ids": data_dict[side]["input_ids"][:limit],
            "attention_mask": data_dict[side]["attention_mask"][:limit],
        }
        for side in ("source", "target")
    }
def _bold(text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text) |
def remap_value(value, old_min, old_max, new_min, new_max):
    """
    Linearly remap `value` from the range [old_min, old_max] onto
    [new_min, new_max].

    :param value: float
    :param old_min: float
    :param old_max: float
    :param new_min: float
    :param new_max: float
    :return: float
    """
    old_range = old_max - old_min
    new_range = new_max - new_min
    return new_min + (value - old_min) * new_range / old_range
def fix_filename(s):
    """
    Clean a file name: drop characters that are neither alphanumeric nor
    one of ' ._~', then convert the remaining spaces to underscores.

    Parameters
    ----------
    s : str
        File name.

    Returns
    -------
    str
        Cleaned file name.
    """
    allowed_punct = " ._~"
    cleaned = "".join(ch for ch in s if ch.isalnum() or ch in allowed_punct)
    return cleaned.replace(" ", "_")
def get_simult_lines(tests, results, test_line_num=0):
    """Pop one line from each of `tests` and `results` (mutating both) and
    return it, raising if the two streams have lost sync."""
    expected = tests.pop(0)
    actual = results.pop(0)
    test_line_num += 1
    if expected != actual:
        raise Exception("Lost sync between files at input line %d.\n  INPUT: %s\n  OUTPUT: %s" % (test_line_num, expected, actual))
    return expected
def get_model_family_color(model_family):
    """Return the canonical color for a model family.

    Raises ValueError for unknown families.

    Fix: the original validated with `assert`, which is silently stripped
    when Python runs with -O; an explicit exception always fires.
    """
    # Derived from sns.color_palette("colorblind").
    canonical_colors = {
        "vit": "#0173B2",
        "bit": "#DE8F05",
        "simclr": "#029E73",
        "efficientnet-noisy-student": "#555555",
        "wsl": "#CC78BC",
        "clip": "#CA9161",
        "vgg": "#949494",
        "alexnet": "#949494",
        "mixer": "#D55E00",
        "guo": "#000000",
    }
    try:
        return canonical_colors[model_family]
    except KeyError:
        raise ValueError(f"Specify color for {model_family}.") from None
def _constant_time_compare(val_a, val_b):
    """ Compares the two input values in a way that prevents timing analysis.
    Args:
        val_a: A string.
        val_b: A string.
    Returns:
        A boolean indicating whether or not the given strings are equal.

    NOTE(review): the stdlib offers `hmac.compare_digest` for this purpose
    (C-level, covers more cases); consider it if inputs are bytes/ASCII.
    The early length check does leak the length — acceptable for most uses.
    """
    if len(val_a) != len(val_b):
        return False
    values_equal = True
    for char_a, char_b in zip(val_a, val_b):
        if char_a != char_b:
            # Do not break early here in order to keep the compare constant time.
            values_equal = False
    return values_equal
def default_batch_norm_params(is_training=False):
    """Return default batch-normalization parameters for DSNs.

    Args:
        is_training: whether or not the model is training.
    Returns:
        dict mapping batch-norm parameter names to values.
    """
    params = {
        'decay': 0.5,      # decay for the moving averages
        'epsilon': 0.001,  # prevents 0s in variance
    }
    params['is_training'] = is_training
    return params
def objectToDictionary(objectToWalk):
    """
    Collect into a dict the attributes that are present on the instance
    but not on its class (i.e. instance-specific attributes).

    Args:
        objectToWalk: any object.
    Returns:
        dict mapping attribute names to their values.
    """
    class_attrs = dir(objectToWalk.__class__)
    return {name: getattr(objectToWalk, name)
            for name in dir(objectToWalk) if name not in class_attrs}
def get_author_citation_string(desc_MD):
    """Build author citation string up to six names.

    Person ('per') creators are joined with '; ' (last joined with ' & ');
    more than six names yields the first six plus ' et al.'. If the first
    creator is an organization (or untyped) before any person is seen, its
    name alone is used.
    """
    author_citation_string = ""
    creator_list = desc_MD.get('creator', [])
    names = []
    for creator_item in creator_list:
        creator_content = creator_item.get('content', {})
        # verify creator_item has a content dictionary
        if isinstance(creator_content, dict):
            creator_type = creator_content.get('type', '')
            if creator_type == 'per':
                names.append(creator_content.get('name', '').strip())
            elif not names and creator_type in ('org', ''):
                # first given creator is 'org' or '', that's enough for us
                author_citation_string = creator_content.get(
                    'name', '').strip()
                break
        # we only want six names at most
        if len(names) > 6:
            # build author_citation_string now, since we know the length
            author_citation_string = '; '.join(names[:6]) + ' et al.'
            # names is cleared so the post-loop block below is skipped
            names = None
            break
    # if there were 'per' types less than seven, build author string
    if names:
        if len(names) == 1:
            author_citation_string = names[0]
        else:
            author_citation_string = '; '.join(
                names[:-1]) + ' & ' + names[-1]
    return author_citation_string
def parse_organization_name(organization_name, separator='-'):
    """Split an organization name into its full name and acronym.

    Parameters
    ----------
    organization_name: str, required
        The organization name.
    separator: str, optional
        Separator splitting name from acronym; default '-'.

    Returns
    -------
    (name, acronym): tuple of str
        The name and (possibly empty) acronym. All-uppercase segments are
        treated as acronym parts; rejoining always uses '-'.
    """
    names, acronyms = [], []
    for segment in organization_name.split(separator):
        segment = segment.strip()
        target = acronyms if segment.isupper() else names
        target.append(segment)
    return '-'.join(names).strip(), '-'.join(acronyms).strip()
def InitMutator(current, value):
    """Return `value` when `current` is None (initialization), else `current`."""
    return value if current is None else current
def set_value(data, value):
    """{{ request|select_value:'foobar'|set_value:42 }} """
    target = data['object']
    target[data['key']] = value
    return target
def _std_error_message(details, payload):
"""Internal helper to standardize error message. This allows for simpler splunk alerts."""
return f'zendesk_proxy action required\n{details}\nNo ticket created for payload {payload}' |
def all(p, xs):
    """``all :: (a -> Bool) -> [a] -> Bool``

    Applied to a predicate and a list, determines whether every element
    satisfies the predicate. True requires a finite list; False results as
    soon as the predicate yields False at any finite index.
    """
    for result in (p(x) for x in xs):
        # Mirror `False not in ...` membership semantics (== against False).
        if result is False or result == False:
            return False
    return True
def is_number(value):
    """Check whether a string is a floating point number.

    Commas are removed first, then a float cast is attempted; a failed
    cast means False.

    :param value: a string to evaluate.
    :returns: True if the string can be cast to a float.
    """
    try:
        float(value.replace(',', ''))
        return True
    except ValueError:
        return False
def save(pairs, path):
    """Save scored pairs to a text file, one 'a b score' line per pair.

    args:
        pairs -- list of ((str, str), float) entries
        path -- str: path to the output file ('.txt' appended if missing)
    returns:
        str -- the path of the output file

    Fixes: the original tested `"".endswith(".txt")` — always False — so
    '.txt' was appended even to paths that already had it; the garbled
    docstring is repaired too.
    """
    if not path.endswith(".txt"):
        path += ".txt"
    with open(path, mode='w') as f:
        for pair, score in pairs:
            f.write("{} {} {}\n".format(*pair, score))
    print("Successfully saved to {}".format(path))
    return path
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.