content stringlengths 42 6.51k |
|---|
def standard_date_format(input_date):
    """Normalize a date-like input to a 'YYYY-MM-DD' string.

    Accepts anything exposing .year/.month/.day (datetime, date, pandas
    Timestamp) or a string in one of: 'YYYY-MM-DD', 'YYYY/MM/DD',
    'YYYY MM DD', 'YYYYMMDD', or 'DDMMYYYY' (assumed when an 8-digit
    string does not begin with '19' or '20').
    """
    if type(input_date) != str:
        # datetime / pandas timestamp: render its components directly
        input_date = "{0}-{1:02}-{2:02}".format(
            input_date.year, input_date.month, input_date.day
        )
    # Normalize alternate separators ('/' or ' ') to '-'
    input_date = input_date.replace("/", "-").replace(" ", "-")
    # Undelimited 8-digit strings: YYYYMMDD or DDMMYYYY
    if len(input_date) == 8 and "-" not in input_date:
        if input_date.startswith(("20", "19")):
            input_date = "-".join((input_date[:4], input_date[4:6], input_date[6:]))
        else:
            input_date = "-".join((input_date[4:], input_date[2:4], input_date[:2]))
    return input_date
def merge(old, new, update):
    """
    Merge `new` data into `old` data.
    Entries are matched by their "ID" field.  For IDs present in both
    lists, values from `new` are injected into the old entry; existing
    old values are only overwritten when `update` is true or when the
    old value is empty (blank/whitespace-only strings count as empty).
    Entries only present in `new` are appended unchanged.
    Note: matched entries from `old` are updated in place.
    :param list(dict) old: old data
    :param list(dict) new: new data
    :param bool update: when true, new values always override old ones
    :return: the new data merged into the old data
    :rtype: list(dict)
    """
    old_dict = {entry["ID"]: entry for entry in old}
    new_dict = {entry["ID"]: entry for entry in new}
    final_dict = {}
    for entry_id, old_entry in old_dict.items():
        new_entry = new_dict.pop(entry_id, {})
        # Inject new values into the old entry.
        # Existing values will not be overridden unless they are empty
        # (or `update` is set).
        for key, value in new_entry.items():
            old_value = old_entry.get(key)
            if isinstance(old_value, str):
                old_value = old_value.strip()
            if update or not old_value:
                old_entry[key] = value
        final_dict[entry_id] = old_entry
    # Anything left in new_dict had no counterpart in old: keep as-is.
    final_dict.update(new_dict)
    return list(final_dict.values())
def _fetch_ret_config(ret):
"""
Fetches 'ret_config' if available.
@see :func:`get_returner_options`
"""
if not ret:
return None
if "ret_config" not in ret:
return ""
return str(ret["ret_config"]) |
def BoringCallers(mangled, use_re_wildcards):
    """Return a list of 'boring' function names (optionally mangled)
    with */? wildcards (optionally converted to .*/. regex form).
    Boring = we drop off the bottom of stack traces below such functions.
    """
    need_mangling = [
        # Don't show our testing framework:
        ("testing::Test::Run", "_ZN7testing4Test3RunEv"),
        ("testing::TestInfo::Run", "_ZN7testing8TestInfo3RunEv"),
        ("testing::internal::Handle*ExceptionsInMethodIfSupported*",
         "_ZN7testing8internal3?Handle*ExceptionsInMethodIfSupported*"),
        # Depend on scheduling:
        ("MessageLoop::Run", "_ZN11MessageLoop3RunEv"),
        ("MessageLoop::RunTask", "_ZN11MessageLoop7RunTask*"),
        ("RunnableMethod*", "_ZN14RunnableMethod*"),
        ("DispatchToMethod*", "_Z*16DispatchToMethod*"),
        ("base::internal::Invoker*::DoInvoke*",
         "_ZN4base8internal8Invoker*DoInvoke*"),  # Invoker{1,2,3}
        ("base::internal::RunnableAdapter*::Run*",
         "_ZN4base8internal15RunnableAdapter*Run*"),
    ]
    column = 1 if mangled else 0
    ret = [pair[column] for pair in need_mangling]
    ret.extend([
        # Also don't show the internals of libc/pthread.
        "start_thread",
        "main",
        "BaseThreadInitThunk",
    ])
    if use_re_wildcards:
        ret = [name.replace('*', '.*').replace('?', '.') for name in ret]
    return ret
def sort(data, delimiter=","):
    """
    Sort each CSV data row (string or list), keeping its first field fixed.
    Args:
        data (list): input list of string rows or row-value lists to sort.
        delimiter (str): CSV delimiter used when splitting string rows.
    Returns:
        list of rows, each a list with the identifier first and the
        remaining values sorted.
    """
    data_sorted = []
    for row in data:
        # Accept either a raw CSV string or an already-split list of values.
        fields = row.split(delimiter) if type(row) is str else row
        # The leading identifier stays put; only the payload is sorted.
        data_sorted.append([fields[0]] + sorted(fields[1:]))
    return data_sorted
def _extract(content: str, name: str):
"""Extracts information from document content.
:param content: document text content
:type content: str
:param name: item to extract name
:type name: str
:return: parsed number
"""
try:
splitted_content = content.split(name)
content_behind = splitted_content[1]
trimmed_number = content_behind.split('|')[1]
parsed = float(trimmed_number.replace(',', '.'))
except IndexError:
parsed = None
return parsed |
def validateUser(username, password, authProfile="", timeout=60000):
    """Test credentials against an authentication profile.

    Stub implementation: echoes the arguments and always accepts.
    When authProfile is omitted, the project's default authentication
    profile is used.

    Args:
        username (str): The username to validate.
        password (str): The password for the user.
        authProfile (str): Name of the authentication profile to run
            against. Optional; defaults to the project's profile.
        timeout (int): Client-to-gateway timeout in ms (default 60,000).

    Returns:
        bool: True when the username/password combination is accepted.
    """
    print(username, password, authProfile, timeout)
    return True
def removeDuplicatesFromListOfDicts(list_of_dicts, key):
    """
    Remove duplicates from a list of dicts based on ``key``; the list is
    returned in the format (and order) it arrived.  Only ``key`` is
    compared -- nothing else.  Rows with an empty value for ``key`` are
    always kept and never treated as duplicates of each other.
    """
    seen_values = []
    result = []
    for row in list_of_dicts:
        if row[key] in seen_values:
            continue  # duplicate -- drop it
        if row[key]:
            # empty values are written through but not tracked
            seen_values.append(row[key])
        result.append(row)
    return result
def get_vgci_warnings(tc):
    """
    Extract CI warnings from the parsed test case stderr.
    A line counts when it contains both "WARNING" and "vgci".
    """
    stderr = tc['stderr']
    if not stderr:
        return []
    return [line.rstrip()
            for line in stderr.split('\n')
            if "WARNING" in line and "vgci" in line]
def generate_caption(data):
    """
    Stubbed captioning endpoint.
    return
        raw: raw picture in jpeg format (the input, unchanged)
        caption: generated image caption in string format (placeholder)
        attention: attention picture in jpeg format (the input, unchanged)
    """
    caption = 'helloworld'  # placeholder until the model is wired in
    return data, caption, data
def dot_vectors(u, v):
    """Compute the dot product of two vectors.
    Parameters
    ----------
    u : tuple, list, Vector
        XYZ components of the first vector.
    v : tuple, list, Vector
        XYZ components of the second vector.
    Returns
    -------
    dot : float
        The dot product of the two vectors.
    Examples
    --------
    >>> dot_vectors([1.0, 0, 0], [2.0, 0, 0])
    2.0
    """
    result = 0
    for a, b in zip(u, v):
        result += a * b
    return result
def frontiers_from_time_to_frame_idx(seq, hop_length_seconds):
    """
    Convert a sequence of frontiers expressed in seconds to frame indexes.
    Parameters
    ----------
    seq : list of float
        The list of times to convert.
    hop_length_seconds : float
        Time between two consecutive frames, in seconds.
    Returns
    -------
    list of int
        The sequence in frame indexes (rounded to nearest).
    """
    return [int(round(time_value / hop_length_seconds)) for time_value in seq]
def log_stability(x):
    """
    Nudge x away from exactly 0 or 1 so a subsequent log() is finite;
    all other values pass through unchanged.
    """
    eps = 10e-9
    if x == 0:
        return eps
    if x == 1:
        return 1.0 - eps
    return x
def validate_list(option, value):
    """Validate that `value` is a list and return it; raise TypeError
    (naming `option`) otherwise."""
    if isinstance(value, list):
        return value
    raise TypeError("%s must be a list" % (option,))
def nvidiaDevice(devices):
    """
    Return the /dev paths of any Nvidia devices ('nv'-prefixed names).
    """
    return ['/dev/{}'.format(device)
            for device in devices
            if device.startswith('nv')]
def is_matrix(A):
    """Tell whether an input is actually a matrix or not.
    Args
    ----
    A (compulsory)
        A matrix: rows of equal length holding int/float/complex values.
    Returns
    -------
    bool
        True if the input is a matrix, False otherwise.
    """
    for row in A:
        # All rows must match the first row's length.
        if len(row) != len(A[0]):
            return False
        # Elements must be plain numeric types (bool is excluded on purpose).
        if any(type(element) not in (int, float, complex) for element in row):
            return False
    return True
def _start_time_from_groupdict(groupdict):
"""Convert the argument hour/minute/seconds minute into a millisecond value.
"""
if groupdict['hours'] is None:
groupdict['hours'] = 0
return (int(groupdict['hours']) * 3600 +
int(groupdict['minutes']) * 60 +
int(groupdict['seconds'])) * 1000 |
def is_subset(remote_dic, local_dic):
    """Return True when every key of remote_dic also appears in local_dic."""
    return set(local_dic).issuperset(remote_dic)
def equiangular_dimension_unpack(nodes, ratio):
    """Split a combined node count into its two underlying dimensions.
    Args:
        nodes (int): combined dimensions
        ratio (float): ratio between the two dimensions
    Returns:
        int, int: separated dimensions
    """
    d1 = int((nodes / ratio) ** 0.5)
    d2 = int((nodes * ratio) ** 0.5)
    if d1 * d2 != nodes:
        # int() truncation can break the product; snap to an exact divisor.
        if nodes % d1 == 0:
            d2 = nodes // d1
        if nodes % d2 == 0:
            d1 = nodes // d2
    assert d1 * d2 == nodes, f'Unable to unpack nodes: {nodes}, ratio: {ratio}'
    return d1, d2
def insert(serverName, itemId, value, date, quality):
    """Insert values on the OPC-HDA server if the given item ID does not
    exist.

    Stub implementation: echoes the arguments and reports quality 192.

    Args:
        serverName (str): The name of the defined OPC-HDA server.
        itemId (str): The item ID to perform the operation on.
        value (object): The value to insert.
        date (object): The date to insert.
        quality (int): The quality to insert.

    Returns:
        int: The item's quality from the operation.
    """
    print(serverName, itemId, value, date, quality)
    return 192
def _expand_total_pairs(total_pairs):
"""Expands total_pairs so that it has one key per direction.
e.g., 'breitbart-foxnews' and 'foxnews-breitbart' exist as keys.
If expansion is skipped, then total_pairs only has one key per pair of (unordered) elements.
e.g., 'breitbart-foxnews' exists as a key but 'foxnews-breitbart' does not.
The total per element-pair is the same value for both directions.
e.g., 'breitbart-foxnews' and 'foxnews-breitbart' are assigned the same total count.
"""
total_pairs_expanded = {}
for k, v in total_pairs.items():
total_pairs_expanded[(k[0], k[1])] = v
total_pairs_expanded[(k[1], k[0])] = v
total_pairs_expanded = dict(sorted(total_pairs_expanded.items()))
return total_pairs_expanded |
def list_dropdownScatter(dic_df):
    """
    Build dropdown choices as '<Y name>-<X name>' strings for every
    combination of dic_df['var_Y'] and dic_df['var_X'], with a leading '-'.
    """
    choices = ['-']
    for y_entry in dic_df['var_Y'].values():
        for x_entry in dic_df['var_X'].values():
            choices.append(y_entry['name'] + '-' + x_entry['name'])
    return choices
def len_column(table):
    """
    In place, prepend a row-number column to each row and append a length
    column holding the length of the original entry (now at index 1).
    """
    for row_number, row in enumerate(table):
        row.insert(0, row_number)
        row.append(len(row[1]))
    return table
def calculate_change(total: float) -> float:
    """Determine how much change is needed (complement of the fractional
    part of `total`, rounded to cents)."""
    fractional = total - int(total)
    return round(1 - fractional, 2)
def _get_location_end(element: str) -> str:
"""Translates element suffix to end of location code"""
if "_Sat" in element:
return "1"
if "_Dist" in element:
return "D"
if "_SQ" in element:
return "Q"
if "_SV" in element:
return "V"
return "0" |
def is_args(x):
    """Is x a traditional iterable (exactly tuple, list or set)?"""
    return type(x) in {tuple, list, set}
def calculate_temperature_rise_power_loss_surface(
    power_operating: float,
    area: float,
) -> float:
    """Calculate the temperature rise based on the power loss and surface area.

    :param power_operating: the power loss in W.
    :param area: the radiating surface area of the case in sq. inches.
    :return: the calculated temperature rise in C.
    :rtype: float
    :raise: ZeroDivisionError if passed an area=0.0.
    """
    scaled_power = 125.0 * power_operating
    return scaled_power / area
def some_function(var_one, var_two,
                  var_three):
    """This is a function that do things"""
    if var_one <= var_two:
        return "fish"
    if var_two * var_three > var_one:
        return "blab"  # this happens
    return "blob"
def get_name(name: str, incl_ext: bool=False) -> str:
    """ Get the name of a directory or file, specified in path.
    Path can either be relative or absolute.
    :param name: the name of a directory or a file, specified in path
    :param incl_ext: flag to include the extension if it is a file
    :return: the name of the directory or the file, excluding path
    """
    import os
    # Take the last path component; a directory is assumed not to end in "/".
    base = os.path.split(name)[-1]
    if "." not in base:
        # No extension delimiter -> treat as a directory name.
        return base
    # A file: keep or drop the extension as requested.
    return base if incl_ext else base.split(".")[0]
def takemod(nvols):
    """
    Return 0 when the input is odd and 1 when it is even.
    Parameters
    ----------
    nvols : int (or int-convertible)
    Returns
    -------
    decisions : int
    """
    return 0 if int(nvols) % 2 == 1 else 1
def names2dnsrepr(x):
    """
    Take as input a list of DNS names or a single DNS name
    and encode it in DNS format (with possible compression).
    If a string that is already a DNS name in DNS format
    is passed, it is returned unmodified. Result is a string.
    !!! At the moment, compression is not implemented !!!
    """
    if type(x) is str:
        if x and x[-1] == '\x00':  # stupid heuristic
            return x
        x = [x]
    encoded = []
    for name in x:
        # Single-component names get a double terminator.
        terminator = "\x00" * (2 if name.count('.') == 0 else 1)
        labels = "".join(chr(len(label)) + label for label in name.split("."))
        encoded.append(labels + terminator)
    return "".join(encoded)
def _policy_dict_at_state(callable_policy, state):
"""Turns a policy function into a dictionary at a specific state.
Args:
callable_policy: A function from `state` -> lis of (action, prob),
state: the specific state to extract the policy from.
Returns:
A dictionary of action -> prob at this state.
"""
infostate_policy_list = callable_policy(state)
infostate_policy = {}
for ap in infostate_policy_list:
infostate_policy[ap[0]] = ap[1]
return infostate_policy |
def _get_reason(cluster_or_step):
"""Get state change reason message."""
# StateChangeReason is {} before the first state change
return cluster_or_step['Status']['StateChangeReason'].get('Message', '') |
def oracle_add_escape(_value, _escape_character):
    """Append the Oracle ESCAPE clause when an escape character is set."""
    if _escape_character in ('', None):
        return _value
    return _value + ' ESCAPE \'' + _escape_character + '\''
def colorCycle(color, polyNumber):
    """Adjust the color list length to match polyNumber for plotting.

    Too few colors: the list is extended by cycling through its existing
    entries.  Too many: it is truncated to the first polyNumber entries.
    The list is modified in place and also returned.
    """
    if len(color) < polyNumber:
        for i in range(polyNumber - len(color)):
            color.append(color[i])
    else:
        # Bug fix: the old end-relative deletion loop removed the wrong
        # elements once the list started shrinking; truncate directly.
        del color[polyNumber:]
    return color
def _float_or_none(x, digits=3):
""" Helper function to format a variable that can be a float or None"""
if x is None:
return str(x)
fmtstr = "{0:.{digits}g}".format(x, digits=digits)
return fmtstr.format(x) |
def replace_value(template, pattern, value, count=1):
    """Replace `pattern` with `value` in `template`, after asserting the
    pattern occurs exactly `count` times."""
    assert isinstance(template, str)
    occurrences = template.count(pattern)
    assert occurrences == count
    return template.replace(pattern, value)
def kkt_get_nthreads(d):
    """Return the sorted list of #threads used in the data, plus the
    corresponding exespace tuples (thread count = es[1] * es[2])."""
    pairs = sorted({(es[1] * es[2], es) for es in d.keys()})
    return [nthreads for nthreads, _ in pairs], [es for _, es in pairs]
def split(alist, split):
    """Break alist into chunks of `split` items (last chunk may be short)."""
    chunks = []
    bucket = []
    last_index = len(alist) - 1
    for index, item in enumerate(alist):
        bucket.append(item)
        # Flush on a full bucket or at the end of the input.
        if len(bucket) == split or index == last_index:
            chunks.append(bucket)
            bucket = []
    return chunks
def solve_for_scale(a_scale, a_n_bits, b_scale, b_n_bits, y_n_bits):
    """Compute the dequantization scale factor for y = a * b.

    Given the scale factor and bit width of a and of b, and the bit width
    of y, this returns the resulting scale factor for y (usable for
    dequantization).

    Parameters:
    - a_scale: float
    - a_n_bits: int
    - b_scale: float
    - b_n_bits: int
    - y_n_bits: int
    Returns: float, resulting scale factor for y
    """
    def half_range(n_bits):
        # Signed range magnitude for an n-bit value: 2**(n-1).
        return 1 << (n_bits - 1)

    return (half_range(y_n_bits) * a_scale * b_scale) / (
        half_range(a_n_bits) * half_range(b_n_bits)
    )
def translate(x: str, d: dict) -> str:
    """
    Convert english digits to persian digits.
    :param x: string to translate
    :param d: dict for using on translate
    :return: translated string
    :raises TypeError: when x is not a str or d is not a dict
    """
    if not isinstance(x, str):
        raise TypeError("x is not string")
    if not isinstance(d, dict):
        raise TypeError("d is not dict")
    return x.translate(str.maketrans(d))
def absoadd(n, m):
    """(num, num) -> num
    Gives the absolute value of the difference of two numbers.
    >>> absoadd(5, 6)
    1
    """
    # Doctest fixed: abs(5 - 6) is 1, not 11 as previously documented.
    return abs(n - m)
def pairwiseCombinations(initialList):
    """
    Creates the minimum set of combinations that will make available
    every possible pair available.

    :param initialList: list of lists; each inner list holds the candidate
        values for one position of the generated tuples.
    :return: set of tuples (one value per inner list) intended to cover
        every cross-list value pair in at least one tuple.
    """
    # Per-position value counts, their min/max, and the number of positions.
    r = [len(x) for x in initialList]
    m = min(r)
    M = max(r)
    n = len(initialList)
    R = set()
    # "Diagonal" tuples: the i-th value from every position.
    for i in range(m):
        t = [initialList[x][i] for x in range(n)]
        R.add(tuple(t))
    # Beyond the shortest list, clamp each position's index to its last value.
    for i in range(m,M):
        t = [initialList[x][min(r[x]-1,i)] for x in range(n)]
        R.add(tuple(t))
    # Pair every remaining value k of position j with the i-th diagonal
    # values of all the other positions.
    for i in range(m):
        for j in range(n):
            for k in range(i+1,r[j]):
                prejth = [initialList[x][i] for x in range(j)]
                jth = initialList[j][k]
                postjth = [initialList[x][i] for x in range(j+1, n)]
                t = prejth + [jth] + postjth
                R.add(tuple(t))
    # Same pairing for the clamped diagonals past the shortest list.
    for i in range(m,M):
        for j in range(n):
            # NOTE(review): `r[i]` indexes the lengths list by the value
            # index i (not the position j); when M > n this can raise
            # IndexError. Confirm whether `r[j] < i` was intended.
            if r[i] < i:
                continue
            for k in range(i+1,r[j]):
                prejth = [initialList[x][min(r[x]-1,i)] for x in range(j)]
                jth = initialList[j][k]
                postjth = [initialList[x][min(r[x]-1,i)] for x in range(j+1, n)]
                t = prejth + [jth] + postjth
                R.add(tuple(t))
    return R
def parse_executable_str(exec_str):
    """
    Split the string describing the executable into a path part and an
    options part (the options keep their leading space; '' when absent).
    """
    cut = exec_str.find(" ")
    if cut < 0:
        cut = len(exec_str)
    return exec_str[:cut], exec_str[cut:]
def fetch_json_by_path(json_object, path):
    """Recursively fetch a string value from a JSON object given an xpath.

    The path mixes two step kinds: '#<id>' selects the element of a list
    whose 'id' field matches, and '/<key>' selects a dict entry.

    :param json_object: parsed JSON fragment (str, list or dict)
    :param path: xpath-like string, e.g. '#child/name'
    :return: the string value found, or None when the path cannot be
        resolved
    """
    # Locate up to the first two '#' and '/' separators so we can find
    # where the first path component ends.
    first_id = path.find("#")
    second_id = path.find("#", first_id + 1)
    first_key = path.find("/")
    second_key = path.find("/", first_key + 1)
    indices = [x for x in [first_id, second_id, first_key, second_key] if x > 0]
    indices.sort()
    # Sentinel so indices[0] always exists, even for a one-component path.
    indices.append(len(path) + 1)
    if isinstance(json_object, str):
        # Leaf reached: any remaining path is ignored.
        return str(json_object)
    elif path[0] == "#" and isinstance(json_object, list):
        # '#<id>' step: scan the list for an element whose 'id' matches.
        child_id = path[1:indices[0]]
        path_remainder = path[indices[0]:]
        for sub_object in json_object:
            try:
                if sub_object["id"] == child_id:
                    return fetch_json_by_path(sub_object, path_remainder)
            except KeyError:
                pass  # element without an 'id' field -- skip it
    elif path[0] == "/" and isinstance(json_object, dict):
        # '/<key>' step: descend into the dict entry.
        child_key = path[1:indices[0]]
        path_remainder = path[indices[0]:]
        try:
            sub_object = json_object[child_key]
            return fetch_json_by_path(sub_object, path_remainder)
        except KeyError:
            pass  # key not present -- fall through to None
    return None
def jim_orders(orders):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/jim-and-the-orders/problem

    Customers are numbered 1..n front-to-back.  Each order is a tuple of
    (order number, preparation time); its delivery time is their sum.
    Customers are served in ascending delivery time, ties broken by
    ascending customer number.

    Args:
        orders (list): list of (order number, prep time) tuples per customer
    Returns:
        list: customer numbers in the order they receive their burgers
    """
    by_delivery_time = {}
    for customer, order in enumerate(orders, start=1):
        # Group customers by total delivery time; insertion order keeps
        # equal-time customers in ascending customer number.
        by_delivery_time.setdefault(sum(order), []).append(customer)
    served = []
    for delivery_time in sorted(by_delivery_time):
        served.extend(by_delivery_time[delivery_time])
    return served
def get_clean_tree_str(tree_str):
    """Remove all blanks and return a very clean tree string.

    Strips spaces, newlines and tabs.  (Doctest fixed: the original
    example had broken quoting.)

    >>> get_clean_tree_str('((a ,((b, c), (d, e))), (f, g));')
    '((a,((b,c),(d,e))),(f,g));'
    """
    return tree_str.replace(' ', '').replace('\n', '').replace('\t', '')
def module_level_function(param1, param2=None, *args, **kwargs):
    """Example of a module level function (Google-style docstring demo).

    Function parameters are documented in the ``Args`` section; the type
    and description of each parameter are optional but recommended when
    not obvious.  ``*args`` and ``**kwargs`` are listed as such.

    Args:
        param1 (int): The first parameter.
        param2 (:obj:`str`, optional): The second parameter. Defaults to None.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Returns:
        bool: True if successful, False otherwise.

    Raises:
        ValueError: If `param2` is equal to `param1`.
    """
    if param1 != param2:
        return True
    raise ValueError('param1 may not be equal to param2')
def determine_issues(project):
    """
    Get the list of issues of a project, wrapping a single issue in a list.
    :rtype: list
    """
    issues = project["Issue"]
    return issues if isinstance(issues, list) else [issues]
def token_to_dirname(token: str) -> str:
    """Clean up a CLI-supplied folder name: drop double quotes and strip
    surrounding spaces.

    Args:
        token(str): 'dirty' folder name obtained from the user through CLI
    Returns:
        valid directory name
    """
    return token.replace('"', '').strip(' ')
def is_pangram(sentence):
    """Determines if a sentence is a pangram (contains every ASCII letter)."""
    from string import ascii_lowercase
    present = set(sentence.lower())
    return all(letter in present for letter in ascii_lowercase)
def _sort_pages(pages):
"""Sort given pages by date"""
return sorted(pages, key=lambda p: p['date'], reverse=True) |
def fileconcat(f):
    """Return the concatenation of all numbers in file f as one int."""
    digits = "".join(line.strip() for line in f)
    return int(digits)
def str_format(l):
    """
    Take a list l and return the concatenation of its elements
    converted to strings ('' for an empty list).

    :param l: list of arbitrary items
    :return: concatenated string
    """
    # str.join is linear; the old repeated += concatenation was quadratic.
    return "".join(str(e) for e in l)
def create_serialization_name(cls, serialization_name=None):
    """Helper for resolving a serialization name.

    A truthy callable is invoked with `cls`; any other value (including
    None) is passed through unchanged.
    """
    result = serialization_name
    if serialization_name and callable(serialization_name):
        result = serialization_name(cls)
    return result
def format_sqlite_value(in_value):
    """Render a Python value as a SQLite literal.

    Numbers and bools come back unquoted, None becomes NULL, and anything
    else is single-quoted with embedded quotes escaped.

    :param in_value: object to render with or without quotes; NULL if None
    """
    if in_value is None:
        return "NULL"
    if type(in_value) in (int, float, bool):
        return str(in_value)
    # escape strings with single-quotes
    escaped = in_value.replace("'", "''")
    return "'{}'".format(escaped)
def lowercase(string: str):
    """Safely recast a string to lowercase; non-strings pass through."""
    try:
        return string.lower()
    except AttributeError:
        # Input has no .lower() -- hand it back untouched.
        return string
def parent(name: str, *, level: int = 1) -> str:
    """Return the dotted-name parent, `level` components up."""
    parts = name.rsplit('.', maxsplit=level)
    return parts[0]
def _analyse_gdal_output(output):
"""
Analyse the output from gpt to find if it executes successfully.
Parameters
----------
output : str
Ouptut from gpt.
Returns
-------
flag : boolean
False if "Error" is found and True if not.
"""
# return false if "Error" is found.
if 'error' in output.lower():
return False
# return true if "100%" is found.
elif '100 - done' in output.lower():
return True
# otherwise return false.
else:
return False |
def validateTransferBody(data):
    """Validate an incoming transfer json body.

    Arguments:
        data {[dict]} -- [json body]
    Returns:
        [bool] -- [True when validation passes]
        [dict] -- [an {'error': ...} dict when validation fails]
    """
    allowed_keys = ["routingNumber", "accountNumber", "amount"]
    keys = list(data)
    if len(keys) != 3:
        return {"error": "three keys are required in transfer body"}
    for key in keys:
        if key not in allowed_keys:
            return {
                "error": "only the following keys are allowed in transfer body:"
                + ",".join(allowed_keys)
            }
    return True
def no_of_misplaced_tiles(node):
    """Count the misplaced tiles in an 8-puzzle configuration.
    Parameters
    ----------
    node : [list]
        [board configuration to score]
    Return
    ------
    [int]
        [number of positions differing from the goal state [0..8]]
    """
    goal_state = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    return sum(1 for idx in range(len(goal_state)) if node[idx] != goal_state[idx])
def usn_in_range(usn, range):
    """Check if the usn is in one of the range provided.
    To do so, the value is checked to be between the lower bound and
    higher bound of a range
    :param usn: A integer value corresponding to the usn that we want to update
    :param range: A list of integer representing ranges, lower bounds are in
        the even indices, higher in odd indices (bounds are inclusive)
    :return: True if the usn is in one of the range, False otherwise
    """
    # Walk the flat bounds list (low, high, low, high, ...) until the usn
    # falls at or below a bound, or the list runs out.
    idx = 0
    cont = True
    ok = False
    while cont:
        if idx == len(range):
            # Past the last bound: usn is above every range -> not found.
            cont = False
            continue
        if usn < int(range[idx]):
            # First bound strictly greater than usn: if it is a higher
            # bound (odd index) the usn lies inside that range.
            if idx %2 == 1:
                ok = True
            cont = False
        if usn == int(range[idx]):
            # Exactly on a bound counts as inside (inclusive bounds).
            cont = False
            ok = True
        idx = idx + 1
    return ok
def reduceDimensions(matrix):
    """
    Flatten the outermost dimension of a list/matrix by one level.
    Parameters
    ----------
    matrix: Multidimensional Array
    Returns
    -------
    list: The matrix after reducing its dimensions.  A 1-dimensional
    input (vector) is returned unchanged.
    """
    # Vectors (first element is not a list) are passed through untouched.
    if type(matrix[0]) is not list:
        return matrix
    flattened = []
    for row in matrix:
        flattened.extend(row)
    return flattened
def calc_bpe_bulk_electrolyte_resistance(characteristic_length, sigma):
    """
    The area specific charge transfer resistance through the bulk
    electrolyte, units: Ohm*m2.
    Notes:
        Adjari, 2006 - "(area specific) bulk electrolyte resistance"
        Squires, 2010 - does not explicitly define this but uses the
        same equation
    Inputs:
        characteristic_length: (m) length of BPE
        sigma: (S/m) conductivity of electrolyte/buffer
    Output:
        Resistance: Ohm*m^2
    """
    return characteristic_length / sigma
def resize(matrix, row, col=0):
    """Grow a square matrix so index (row, col) fits; never shrinks.
    New cells are filled with None; existing values are copied over."""
    needed = max(row, col) + 1
    if needed <= len(matrix):
        return matrix
    grown = [[None] * needed for _ in range(needed)]
    for r, old_row in enumerate(matrix):
        for c, value in enumerate(old_row):
            grown[r][c] = value
    return grown
def population_nucleotide_diversity(H, p, D):
    """
    Calculate the population nucleotide diversity.

    :param H: [INT] number of haplotypes
    :param p: [FLOAT list] (relative) frequencies
    :param D: [2D ARRAY] pair-wise genetic distance matrix of haplotypes
        (fraction of nt differences)
    :return: [FLOAT] the population nucleotide diversity
    """
    # Double sum over all haplotype pairs: p_i * D_ij * p_j.
    return sum(p[i] * D[i][j] * p[j] for i in range(H) for j in range(H))
def mml_namelist(namelist):
    """Quote each name, right-padded with spaces to the longest length."""
    width = max(len(name) for name in namelist)
    return ["'%s'" % name.ljust(width, ' ') for name in namelist]
def gf_rshift(f, n, K):
    """Efficiently divide `f` by `x**n`, returning (quotient, remainder)
    as coefficient slices. """
    if n:
        return f[:-n], f[-n:]
    return f, []
def countBins(sortedData, binBoundaries):
    """
    This method counts the number of data items in the sorted_data
    Returns an array with the number. ret[0] is the number of data
    points <= binBoundaries[0], ret[len(binBoundaries)] is the number
    of points > binBoundaries[len(binBoundaries)-1]
    @ In, sortedData, list or np.array,the data to be analyzed (assumed
      sorted ascending -- binIndex below never rewinds)
    @ In, binBoundaries, list or np.array, the bin boundaries
    @ Out, ret, list, the list containing the number of bins
    """
    binIndex = 0
    sortedIndex = 0
    # One counter per bin, plus a trailing overflow bin for values above
    # the last boundary.
    ret = [0]*(len(binBoundaries)+1)
    while sortedIndex < len(sortedData):
        # Advance to the first bin whose boundary is >= the current value.
        while not binIndex >= len(binBoundaries) and \
          sortedData[sortedIndex] > binBoundaries[binIndex]:
            binIndex += 1
        ret[binIndex] += 1
        sortedIndex += 1
    return ret
def convertRange(value, oldRange, newRange, rounded=True):
    """Linearly remap `value` from oldRange onto newRange.
    When `rounded` is True the result is rounded to the nearest integer;
    otherwise it is kept to one decimal place."""
    scaled = ((value - oldRange[0]) * (newRange[1] - newRange[0])
              ) / (oldRange[1] - oldRange[0]) + newRange[0]
    if rounded:
        return round(scaled)
    # One decimal place of precision.
    return round(scaled * 10) / 10
def slice_to_interval(slice_):
    """
    Convert slice object to (start, stop, direction).

    Only a step of 1 or -1 is accepted.  The returned interval always
    satisfies start_ >= 0 and (stop_ is None or stop_ >= start_); the
    `direction` value (1 or -1) records the original orientation.
    """
    start = slice_.start or 0  # Handles case where slice_.start is None.
    step = slice_.step or 1  # Handles case where slice_.step is None.
    if step == 1:
        # Forward slices must use non-negative positions.
        if start < 0:
            raise ValueError(
                "Tree sequence slices with start < 0 must have step=-1. "
                f"Use for example [{slice_.start}:{slice_.stop}:-1]"
                "(This is a limitation of slicing on Tree sequences "
                "that does not apply to Python sequences in general.)"
            )
        if (slice_.stop is not None) and (slice_.stop < start):
            raise ValueError(
                "Tree sequence slices with step=1 must have stop >= start. "
                "(This is a limitation of slicing on Tree sequences "
                "that does not apply to Python sequences in general.)"
            )
        start_ = start
        stop_ = slice_.stop
        direction = 1
    elif step == -1:
        # Reversed slices must use negative positions.
        if start >= 0:
            raise ValueError(
                "Tree sequence slices with start >= 0 must have step=1. "
                "(This is a limitation of slicing on Tree sequences "
                "that does not apply to Python sequences in general.)"
            )
        if slice_.stop is not None:
            if slice_.stop > start:
                raise ValueError(
                    "Tree sequence slices with step=-1 must have stop <= start."
                )
            # Map the negative bounds onto a non-negative interval via
            # 1 - bound.  NOTE(review): the exact position semantics
            # depend on the Tree sequence container consuming this
            # interval -- confirm there.
            stop_ = 1 - slice_.stop
        else:
            stop_ = slice_.stop
        start_ = 1 - start
        direction = -1
    else:
        raise ValueError(
            "Only step of 1 or -1 is supported in a Tree sequence slice. "
            f"Step {slice_.step} is disallowed."
        )
    assert start_ >= 0
    assert (stop_ is None) or (stop_ >= start_)
    return start_, stop_, direction
def TDict(val):
    """Return True when *val* is a dictionary (including dict subclasses)."""
    return isinstance(val, dict)
def linearProbe(hk, size, i):
    """Default linear probe using c= 101.

    :param hk: hashed key value
    :param size: hash table size (modulus)
    :param i: probe attempt number
    """
    # NOTE(review): `i` is unused, so every attempt returns the same slot
    # (hk + 101) % size -- confirm whether (hk + i*101) % size was intended.
    return (hk+101) % size
def task2(a):
    """
    Return the square of the argument.
    Input: one integer
    Output: one integer
    """
    return a ** 2
def _geo_sum(r: int, n: int):
"""Calculate the geometric sum for ratio, r, and number of terms, n."""
return (1 - r ** n) / (1 - r) |
def get_orders_list(orders_dict):
    """Return the 'orders' entry of the dict (None when absent)."""
    return orders_dict.get('orders')
def face_sort(faces_in):
    """
    Canonicalize faces for equality comparison between face lists:
    each face is rotated so its smallest vertex index comes first
    (cyclic order preserved), then the whole list of faces is sorted.
    """
    canonical = []
    for face in faces_in:
        pivot = face.index(min(face))
        # Rotate so the minimum index leads, keeping cyclic order.
        rotated = tuple(face[pivot:]) + tuple(face[:pivot])
        canonical.append(rotated)
    return sorted(canonical)
def is_magnet(magnet: str):
    """Return True iff *magnet* is a 64-character hexadecimal string.
    >>> is_magnet('13600b294191fc92924bb3ce4b969c1e7e2bab8f4c93c3fc6d0a51733df3c060')
    True
    >>> is_magnet('123')
    False
    >>> is_magnet(b'123')
    False
    >>> is_magnet('x3600b294191fc92924bb3ce4b969c1e7e2bab8f4c93c3fc6d0a51733df3c060')
    False
    """
    if isinstance(magnet, str) and len(magnet) == 64:
        try:
            bytes.fromhex(magnet)
        except ValueError:
            return False
        return True
    return False
def hamming_distance(dna1, dna2):
    """
    Compute the Hamming distance between two equal-length DNA strings.
    :param dna1: a string of DNA
    :param dna2: a string of DNA
    :return: the number of positions at which the two strings differ
    """
    # Index-based access (not zip) so a shorter dna2 still raises
    # IndexError, matching the original contract.
    return sum(1 for pos in range(len(dna1)) if dna1[pos] != dna2[pos])
def is_named_tuple(obj):
    """
    Check if the object is a named tuple.
    A named tuple is detected as a tuple subclass carrying the
    ``_fields`` attribute that ``collections.namedtuple`` (and
    ``typing.NamedTuple``) generate.
    Parameters
    ----------
    obj : The object to check.
    Returns
    -------
    is_named_tuple : bool
        Whether `obj` is a named tuple.
    """
    if not isinstance(obj, tuple):
        return False
    return hasattr(obj, '_fields')
def coeff(t, secret):
    """
    Build the coefficient list for a degree ``t - 1`` polynomial whose
    constant term is *secret*: the placeholder values 0..t-2 followed by
    the secret.

    For example, with a 3rd degree polynomial like
    3x^3 + 4x^2 + 18x + 554, 554 is the secret, and degree + 1 = 4
    points are needed to recover it.

    NOTE(review): the original docstring claimed the coefficients were
    "randomly generated", but they are fully deterministic.  For a real
    Shamir secret-sharing scheme the non-constant coefficients MUST be
    drawn from a cryptographically secure RNG (e.g. the ``secrets``
    module), otherwise the secret is trivially recoverable from a
    single share.  Behavior is preserved here; only the false claim is
    removed.
    """
    return list(range(t - 1)) + [secret]
def _partition(sequence, size, count):
"""
Partition sequence into C{count} subsequences of
length C{size}, and a remainder.
Return C{(partitions, remainder)}, where C{partitions} is a sequence of
C{count} subsequences of cardinality C{size}, and
C{apply(append, partitions) + remainder == sequence}.
"""
partitions = []
for index in range(0, size * count, size):
partitions.append(sequence[index:index + size])
return (partitions, sequence[size * count:]) |
def rsa_crt_dmq1(private_exponent: int, q: int) -> int:
    """
    Compute the RSA CRT exponent dmq1 = d mod (q - 1) from the private
    exponent (d) and the prime factor q.
    """
    dmq1 = private_exponent % (q - 1)
    return dmq1
def set_positional_arguments(positional_parameters, positional_arguments, command_line_options):
    """ Validate and set positional command line arguments.

    Returns an (error_flag, message) tuple on count mismatch, otherwise
    fills *command_line_options* in place and returns None.
    """
    n_params = len(positional_parameters)
    n_args = len(positional_arguments)
    if n_params < n_args:
        return (True, 'Too many parameters')
    if n_params > n_args:
        return (True, 'Not enough required parameters')
    # Counts match: pair each parameter name with its argument.
    command_line_options.update(zip(positional_parameters, positional_arguments))
    return None
def _rmcmd(s, cmd, left='', right=''):
    """
    Remove the LaTeX command ``cmd`` from the string ``s``. This
    function is used by ``detex``.
    INPUT:
    - ``s`` - (string) string from which to remove the command
    - ``cmd`` - (string) command to be removed. This should be a
    command which takes a single argument, like 'emph' or 'url'; the
    command is removed, but its argument is not.
    - ``left``, ``right`` - (string, optional, default '') add these
    strings at the left and right ends of the command. See the
    examples.
    EXAMPLES::
    sage: from sage.misc.sagedoc import _rmcmd
    sage: _rmcmd('Check out \\url{http://www.sagemath.org}.', 'url')
    'Check out http://www.sagemath.org.'
    sage: _rmcmd('Text in \\emph{italics} looks like this.', 'emph', '*', '*')
    'Text in *italics* looks like this.'
    sage: _rmcmd('This is a \\very{silly} example.', 'very', right='!?')
    'This is a silly!? example.'
    """
    # Pattern that opens the command's argument, e.g. '\emph{'.
    c = '\\%s{' % cmd
    while True:
        # Find the next occurrence; done when there are none left.
        i = s.find(c)
        if i == -1:
            return s
        # Scan forward, tracking brace nesting depth, to find the '}'
        # that closes this command's argument.
        nesting = 1
        # NOTE(review): scanning starts one character *past* the first
        # character of the argument, so an argument whose first char is
        # a brace (e.g. '\emph{{x}}') is miscounted — confirm such
        # inputs cannot occur in practice.
        j = i+len(c)+1
        while j < len(s) and nesting > 0:
            if s[j] == '{':
                nesting += 1
            elif s[j] == '}':
                nesting -= 1
            j += 1
        j -= 1  # j is position of closing '}'
        if j < len(s):
            # Splice out '\cmd{' and the closing '}', wrapping the kept
            # argument in `left`/`right`, then loop for more matches.
            s = s[:i] + left + s[i+len(c):j] + right + s[j+1:]
        else:
            # No matching closing brace was found: give up and return
            # the string as-is rather than loop forever.
            return s
def ensure_input(input, encoding="utf-8"):
    """Normalise *input* to encoded bytes and pull out the outfile path.

    For a list/tuple the second element is the outfile and the items
    are joined with newlines before encoding; for a string the second
    line is the outfile.
    """
    if isinstance(input, (list, tuple)):
        outfile = input[1]
        payload = "\n".join(input)
    else:
        outfile = input.split("\n")[1]
        payload = input
    return payload.encode(encoding), outfile
def remove_indices_from_dict(obj):
    """
    Strip "[i]" index suffixes from every key of *obj*, recursing into
    nested dicts and into dicts inside list values.  List values whose
    stripped keys collide are concatenated via ``extend``.
    """
    if not isinstance(obj, dict):
        raise ValueError(u"Expecting a dict, found: {}".format(type(obj)))
    cleaned = {}
    for raw_key, value in obj.items():
        bracket = raw_key.find('[')
        stripped = raw_key[:bracket] if bracket > -1 else raw_key
        if isinstance(value, dict):
            value = remove_indices_from_dict(value)
        elif isinstance(value, list):
            value = [
                remove_indices_from_dict(item) if isinstance(item, dict) else item
                for item in value
            ]
        if stripped in cleaned:
            # Merge colliding indexed keys, e.g. "a[0]" and "a[1]".
            cleaned[stripped].extend(value)
        else:
            cleaned[stripped] = value
    return cleaned
def expo_ma(data, period):
    """Exponential moving average of *data* smoothed over *period* points.
    :param data: the list of data points
    :type data: list
    :param period: number of points in the smoothing window
    :type period: int
    :return: list of EMA values; empty when len(data) < period
    """
    n_out = len(data) - period + 1
    if n_out < 1:
        # Not enough data points to seed the average.
        return []
    # Smoothing factor 2/(period+1); seed with the SMA of the first
    # `period` points.
    factor = float(2 / (period + 1))
    ema = [sum(data[:period]) / period]
    for k in range(1, n_out):
        # EMA(t) = factor * P(t) + (1 - factor) * EMA(t-1)
        ema.append(factor * data[period + k - 1] + (1 - factor) * ema[-1])
    return ema
def _job_contains_cert_data(data):
"""Boolean checks to ensure any received job message contains return cert data"""
if data is None:
return False
if 'cert' in data['data']:
return True
else:
return False |
def getKoreanColors(i, n):
    """
    Make a palette in the korean style.

    Maps position i of n onto a red -> yellow/green -> blue ramp and
    returns an (R, G, B) tuple of 0-255 ints.
    """
    t = float(i) / float(n) * 3.
    # Piecewise-linear channel ramps; boolean factors select segments.
    red = (t <= 1.) + (2. - t) * (t > 1.) * (t <= 2.)
    green = t * (t <= 1.) + (t > 1.) * (t <= 2.) + (3. - t) * (t > 2.)
    blue = (t - 1.) * (t >= 1.) * (t < 2.) + (t >= 2.)
    return int(red * 255), int(green * 255), int(blue * 255)
def ensure_alt_ids_are_only_in_one_nest(nest_spec, list_elements):
    """
    Ensures that the alternative id's in `nest_spec` are only associated with
    a single nest. Raises a helpful ValueError if they are not.
    Parameters
    ----------
    nest_spec : OrderedDict, or None, optional.
        Keys are strings that define the name of the nests. Values are lists of
        alternative ids, denoting which alternatives belong to which nests.
        Each alternative id must only be associated with a single nest!
        Default == None.
    list_elements : list of ints.
        Each element should correspond to one of the alternatives identified as
        belonging to a nest.
    Returns
    -------
    None.
    Raises
    ------
    ValueError
        If any alternative id appears more than once in `list_elements`.
    """
    # An id that belongs to two nests appears twice in list_elements,
    # so a duplicate check suffices.  An explicit `if` is used instead
    # of the original `assert` so validation still runs under
    # `python -O` (asserts are stripped by optimization).
    if len(set(list_elements)) != len(list_elements):
        raise ValueError("Each alternative id should only be in a single nest.")
    return None
def csv_driver_args(distribution):
    """
    Construct driver args for a CSV distribution.

    (Docstring fixed: it previously said "GeoJSON", but this function
    builds ``csv_kwargs`` for the CSV driver.)

    Prefers ``downloadURL`` over ``accessURL``.

    :raises KeyError: if the distribution has neither URL field.
    """
    url = distribution.get("downloadURL") or distribution.get("accessURL")
    if not url:
        raise KeyError(f"A download URL was not found for {str(distribution)}")
    return {"urlpath": url, "csv_kwargs": {"blocksize": None, "sample": False}}
def pxe_mac(mac):
    """
    Create a MAC address file name for PXE builds.
    Prefix with "01-" (ARP hardware type 1 for Ethernet), replace
    colons with hyphens, and lowercase.
    """
    return "01-{}".format(mac.replace(":", "-")).lower()
def dict_product(a, b):
    """Pointwise-multiply the values in two dicts with identical sets of
    keys.
    """
    # Dict key views compare as sets, so this matches the original
    # set-equality assertion (AssertionError on mismatch).
    assert a.keys() == b.keys()
    result = {}
    for key in a:
        result[key] = a[key] * b[key]
    return result
def ceil_shift(n, b):
    """Return ceil(n / 2**b) without performing any floating-point or division operations.

    Right-shifts n by b bits, then adds 1 if any '1' bits were shifted
    out (i.e. the division was not exact).
    """
    if not isinstance(n, int) or not isinstance(b, int):
        raise TypeError("unsupported operand type(s): %r and %r" % (type(n).__name__, type(b).__name__))
    assert n >= 0 and b >= 0  # I haven't tested or even thought about negative values
    shifted = n >> b
    lost_bits = n & ((1 << b) - 1)
    return shifted + 1 if lost_bits else shifted
def get_wikidata_id(wikidata_uri):
    """
    Returns Wikidata ID (e.g. "Q92212") given Wikidata entity URI, or None.
    """
    prefix = "http://www.wikidata.org/entity/"
    if not wikidata_uri.startswith(prefix):
        return None
    return wikidata_uri[len(prefix):]
def GetEnvCall(is_constructor, is_static, return_type):
    """Map a JNI return type to the env->Call<Type>Method name.

    Constructors map to 'NewObject'; unknown return types fall back to
    the Object variant; static methods get the 'Static' infix.
    """
    if is_constructor:
        return 'NewObject'
    type_names = {
        'boolean': 'Boolean',
        'byte': 'Byte',
        'char': 'Char',
        'short': 'Short',
        'int': 'Int',
        'long': 'Long',
        'float': 'Float',
        'void': 'Void',
        'double': 'Double',
        'Object': 'Object',
    }
    suffix = type_names.get(return_type, 'Object')
    prefix = 'Static' if is_static else ''
    return 'Call' + prefix + suffix + 'Method'
def stiff_b(v1v1, v0v1, v0v0, rold):
    """called from stiff_a().
    Decide if the iteration has degenerated because of a strongly dominant
    real eigenvalue. Have just computed the latest iterate. v1v1 is its dot
    product with itself, v0v1 is the dot product of the previous iterate with
    the current one, and v0v0 is the dot product of the previous iterate with
    itself. rold is a previous Rayleigh quotient approximating a dominant real
    eigenvalue. It must be computed directly the first time the subroutine is
    called. It is updated each call to stiff_b, hence is available for
    subsequent calls.
    If there is a strongly dominant real eigenvalue, rootre is set True,
    root1[:] returns the eigenvalue, rho returns the magnitude of the
    eigenvalue, and root2[:] is set to zero.
    Original source: RKSuite.f, https://www.netlib.org/ode/rksuite/
    """
    # real and imag parts of roots are returned in a list
    root1 = [0.0, 0.0]
    root2 = [0.0, 0.0]
    # Rayleigh quotient of the current iterate against the previous one.
    r = v0v1 / v0v0
    rho = abs(r)
    # Gram determinant of the two iterates: zero iff they are linearly
    # dependent, i.e. the power iteration has collapsed onto a single
    # (real) eigenvector.
    det = v0v0 * v1v1 - v0v1**2
    res = abs(det / v0v0)
    # Dominant real eigenvalue detected when the iterates are (nearly)
    # parallel (small residual) AND the Rayleigh quotient has settled
    # relative to the previous call's value.  Tolerances 1e-6 and 0.001
    # are inherited from RKSuite.
    rootre = det == 0.0 or (res <= 1e-6 * v1v1 and
                            abs(r - rold) <= 0.001 * rho)
    if rootre:
        # Report the eigenvalue and remember it for the next call;
        # root2 stays zero in the real-dominant case.
        root1[0] = r
        rold = r
    return rold, rho, root1, root2, rootre
def fun(s):
    """Determine if the passed in email address is valid based on the following rules:
    It must have the username@websitename.extension format type.
    The username can only contain letters, digits, dashes and underscores [a-z], [A-Z], [0-9], [_-].
    The website name can only have letters and digits [a-z][A-Z][0-9]
    The extension can only contain letters [a-z][A-Z].
    The maximum length of the extension is 3.
    Args:
        s (str): Email address to check
    Returns:
        (bool): Whether email is valid or not

    Fixes over the original:
    - extension was checked with isalnum(), admitting digits despite the
      "letters only" rule; it now uses isalpha().
    - an address whose single '.' sits in the username (e.g. "a.b@c")
      previously raised ValueError from tuple unpacking; partition() is
      used so such inputs simply return False.
    """
    if s.count("@") != 1 or s.count(".") != 1:
        return False
    user, _, domain = s.partition("@")
    website, _, extension = domain.partition(".")
    # Username: letters/digits after removing the allowed '-' and '_'
    # (empty username yields False via isalnum()).
    if not user.replace("-", "").replace("_", "").isalnum():
        return False
    if not website.isalnum():
        return False
    # Extension: letters only, at most 3 of them.
    return extension.isalpha() and len(extension) <= 3
def open_files(filenames):
    """Read every file in *filenames* and return a list of their contents.

    Fixes a resource leak in the original, which called ``open`` inside
    a ``map`` and never closed the file handles; each file is now read
    inside a ``with`` block so it is closed deterministically.
    """
    contents = []
    for name in filenames:
        with open(name, 'r') as handle:
            contents.append(handle.read())
    return contents
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.