content stringlengths 42 6.51k |
|---|
def EmailToAccountResourceName(email):
    """Build the IAM service-account resource name for the given email."""
    return f'projects/-/serviceAccounts/{email}'
def count_sort(arr, max_element):
    """ Counting sort: an O(n + k) sort that tallies element frequencies.

    :type arr: list of ints in [0, max_element]
    :type max_element: int
    :rtype: list, sorted ascending
    """
    # Tally how often each value in 0..max_element occurs.
    counts = [0] * (max_element + 1)
    for value in arr:
        counts[value] += 1
    # Emit each value as many times as it occurred, in ascending order.
    result = []
    for value, occurrences in enumerate(counts):
        result.extend([value] * occurrences)
    return result
def _fullname(obj):
"""Get the fully qualified class name of an object."""
if isinstance(obj, str):
return obj
cls = obj.__class__
module = cls.__module__
if module == "builtins": # no need for builtin prefix
return cls.__qualname__
return module + "." + cls.__qualname__ |
def binary_sort(x, start, end, sorted_list):
    """Binary search for x within sorted_list[start:end+1].

    Bug fixes versus the previous version:
      * returned middle+1 (off by one) instead of the index promised here;
      * the left recursion restarted at 0 instead of start, and could
        recurse with end < start, misindexing via int() truncation.

    Args:
        x: The search element
        start: The start index of the sub-array of sorted_list
        end: The end index of the sub-array of sorted_list
        sorted_list: The sorted list that we will be searching
    Returns:
        The index of x in sorted_list, or -1 if x is not present.
    """
    if start > end:
        # Search space exhausted without a match.
        return -1
    middle = (start + end) // 2
    if sorted_list[middle] == x:
        return middle
    if x < sorted_list[middle]:
        return binary_sort(x, start, middle - 1, sorted_list)
    return binary_sort(x, middle + 1, end, sorted_list)
def display_onnx(model_onnx, max_length=1000):
    """
    Render a model as a string, eliding the middle when too long.

    @param model_onnx onnx model
    @param max_length maximal string length (None disables truncation)
    @return possibly shortened string representation
    """
    text = str(model_onnx)
    if max_length is None or len(text) <= max_length:
        return text
    head = text[:max_length // 2]
    tail = text[-max_length // 2:]
    return "\n".join([head, '[...]', tail])
def convert_raid_state_to_int(state):
    """
    Map a RAID state string to a severity integer.

    :type state: str
    :return: 0 for optimal, 5 for degraded, 100 for anything else.
    """
    # Lookup keyed on the normalized lower-case state name; unknown
    # states fall back to the worst severity.
    return {"optimal": 0, "degraded": 5}.get(state.lower(), 100)
def convert_input_to_standrad_format(inp: str):
    """Strip spaces and dashes from a pre-validated identifier string.

    The input must already have passed validate_format(); the positional
    probes below rely on that fixed layout.

    Args:
        inp (str): String to convert to standrad format.
    Returns:
        str: The normalized version of the argument inp
    """
    if inp[6] == "-":
        return inp.replace("-", "")
    if inp[7] == "-" and inp[4] == "-" and inp[10] == " ":
        # Long form carries a two-character prefix that gets dropped.
        return inp.replace("-", "").replace(" ", "")[2:]
    if inp[2] == " " and inp[5] == " " and inp[8] == " ":
        return inp.replace(" ", "")
    return inp
def size_grow_function(curr_size, model_size):
    """
    Compute the next subproblem size from the current one.

    The fill ratio curr_size/model_size is damped with a 4/5 power,
    which pulls the size toward model_size sublinearly.

    :param curr_size: the current size of the model.
    :type curr_size: int
    :param model_size: the model size, the upper bound of curr_size.
    :type model_size: int
    :return: the new current size
    :rtype: int
    """
    damped_ratio = (curr_size / model_size) ** (4 / 5)
    return int(model_size * damped_ratio)
def pretty2int(string):
    """Parse an int that may contain thousands separators, e.g. '1,234'."""
    cleaned = string.replace(",", "")
    return int(cleaned)
def create_reduce_job(results, wuid=None, job_num=None):
    """Build the JSON payload for a parpython reduce job (test helper).

    :param results: iterable of dicts each carrying a 'payload_id'
    :raises RuntimeError: when wuid or job_num is missing
    """
    if wuid is None or job_num is None:
        raise RuntimeError("Need to specify workunit id and job num.")
    payload_ids = ' '.join(result['payload_id'] for result in results)
    payload = {
        # sciflo tracking info
        "_sciflo_wuid": wuid,
        "_sciflo_job_num": job_num,
        "_command": "/usr/bin/echo {}".format(payload_ids),
        # job params
        "results": payload_ids,
    }
    return {
        "job_type": "job:parpython_reduce_job",
        "job_queue": "factotum-job_worker-large",
        "payload": payload,
    }
def getAccuracy(l1, l2):
    """
    Return the fraction of positions at which l1 and l2 agree.

    Both lists must have the same length (asserted).
    """
    assert(len(l1) == len(l2))
    matches = sum(1 for a, b in zip(l1, l2) if a == b)
    return matches / float(len(l1))
def calculate_desired_noise_rms(clean_rms, snr):
    """
    Compute the noise RMS needed to hit a target SNR against a clean signal.

    Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
    :param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
    :param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
    :return: desired noise RMS
    """
    # SNR(dB) = 20 * log10(clean_rms / noise_rms), solved for noise_rms.
    exponent = float(snr) / 20
    return clean_rms / (10 ** exponent)
def is_gzip(name):
    """
    True if the final dot-separated component of *name* is 'gz'.
    """
    return name.rsplit('.', 1)[-1] == 'gz'
def union(lst1, lst2):
    """
    Purpose: Return the de-duplicated union of two lists.
    Parameters: two lists of hashable items
    Returns: list (element order not guaranteed)
    Raises:
    """
    return list(set(lst1) | set(lst2))
def _extract_comment(_comment):
"""
remove '#' at start of comment
"""
# if _comment is empty, do nothing
if not _comment:
return _comment
# str_ = _comment.lstrip(" ")
str_ = _comment.strip()
str_ = str_.lstrip("#")
return str_ |
def _unpack(param):
"""Unpack command-line option.
:param list param: List of isotopes.
:return: List of unpacked isotopes.
:rtype: :py:class:`list`
"""
options = []
for option_str in param:
options.extend(option_str.split(","))
return options |
def format_classification_dictionary(csv_dictionary):
    """
    Build {suite: {device_type, operating_system, application, browser}}
    from a loaded CSV classification table; empty cells become "Unknown".

    :param csv_dictionary: iterable of CSV row dicts
    :return: dict keyed by suite name
    """
    fields = ("device_type", "operating_system", "application", "browser")
    classificator = {}
    for row in csv_dictionary:
        classificator[row["suites"]] = {
            field: row[field] if row[field] != "" else "Unknown"
            for field in fields
        }
    return classificator
def _from_data_nist(raw_data):
"""Convert a NIST data format to an internal format."""
for point in raw_data:
point.pop('species_data')
return raw_data |
def are_version_compatible(ver_a, ver_b):
    """
    Check a two-part version 'a.b' (ver_a) for compatibility with ver_b.

    Incompatible when ver_a's major is lower, or when the majors differ
    and ver_b's minor is higher.
    """
    major_a, minor_a = (int(part) for part in ver_a.split('.'))
    major_b, minor_b = (int(part) for part in ver_b.split('.'))
    if major_a < major_b:
        return False
    if minor_b > minor_a and major_a != major_b:
        return False
    return True
def update_pos(s0, v0, delta_t_ms):
    """
    Advance position s0 at speed v0 (units/second) over delta_t_ms
    milliseconds and return the new position.
    """
    return s0 + (v0 * delta_t_ms / 1000)
def is_list(value):
    """Check whether `value` is a ``list`` instance.

    Args:
        value (mixed): Value to check.
    Returns:
        bool: Whether `value` is a list.
    Example:
        >>> is_list([1, 2])
        True
        >>> is_list((1, 2))
        False
        >>> is_list({"a": 1})
        False
    .. versionadded:: 1.0.0
    """
    return isinstance(value, list)
def make_iterable(x):
    """
    Normalize *x* so the result is always iterable.

    None becomes an empty list; lists/tuples/sets pass through
    unchanged; any other value is wrapped in a one-element list.

    :param x: item to normalize
    :return: x itself, or a list containing/representing x
    """
    if x is None:
        return []
    if isinstance(x, (list, tuple, set)):
        return x
    return [x]
def read_file(filename):
    """
    Read a file and return its whole content as bytes.

    @param filename : path to the file as a string.
    @return data as bytes
    """
    with open(filename, 'rb') as handle:
        return handle.read()
def pythonize_path(path):
    """ Convert a slash-separated path to python dotted notation.
    ex. foo/bar/baz -> foo.bar.baz
    :param str path: path to pythonize
    :return str: pythonized path
    """
    return ".".join(path.split("/"))
def add_to_dict(param_dict):
    """
    Hook for aggregating extra variables into the parameter dictionary.

    Currently a no-op that returns the dictionary unchanged; define any
    `extra` parameters for `param_dict` here.

    Parameters
    ----------
    param_dict: python dictionary
        dictionary with input parameters and values
    Returns
    ----------
    param_dict: python dictionary
        dictionary with old and new values added
    """
    return param_dict
def indentation(line):
    """Count the leading whitespace characters of *line*."""
    stripped = line.lstrip()
    return len(line) - len(stripped)
def split_number_and_unit(s):
    """Split a string into its leading integer and trailing unit.

    @param s: non-empty string starting with an int, optionally
        followed by letters
    @return (number, unit) pair: number as int, unit as stripped str
    @raises ValueError on empty input or when no leading digits exist
    """
    if not s:
        raise ValueError('empty value')
    s = s.strip()
    # Walk backwards past the non-digit suffix to the unit boundary.
    boundary = len(s)
    while boundary and not s[boundary - 1].isdigit():
        boundary -= 1
    return (int(s[:boundary]), s[boundary:].strip())
def calculate_z(x: int, m: int, sd: int) -> float:
    """Return the z-score of x for mean m and standard deviation sd,
    rounded to two decimal places."""
    z = (x - m) / sd
    return round(z, 2)
def natMult3and5(limit):
    """Find the sum of all the multiples of 3 or 5 below 'limit'"""
    return sum(i for i in range(limit) if i % 3 == 0 or i % 5 == 0)
def detect_multi_line_assignment(index, lines):
    """
    detects if a line is carriage returned across multiple lines and if so
    extracts the entire multiline string and how
    get multi line assignment e.g. if this string is
    split on multiple lines
    xs = np.array([[1, 2, 3],
        [5, 6, 8]])
    becomes in the line listing
    ["xs = np.array([[1, 2, 3],\n",[5, 6, 8]])]
    this function will return the entire continuous output e.g.
    "xs = np.array([[1, 2, 3],
        [5, 6, 8]])"
    @index: integer index of a list (line number)
    @lines: list of strings (lines from a file)

    Returns a dict with:
      'index'    -- line number at which the caller should resume scanning
      'continue' -- 1 when the line held no assignment (caller skips it),
                    0 when an assignment (possibly multi-line) was extracted
      'output'   -- the concatenated assignment text, or None when skipped
    """
    l = lines[index]
    # No '=' at all: not an assignment; tell the caller to move on.
    if l.count('=') == 0:
        return {'index':index + 1, 'continue':1, 'output':None}
    initial_index = index
    startline = ""
    l = lines[initial_index]
    # lines ending in comma are continuations
    # pd.DataFrame.from_dict({'x':[1, 2],
    #                         'y':[2, 3]})
    # NOTE(review): l.strip()[-1] raises IndexError on a blank line, and
    # `l` is re-read from lines[index] before index is advanced, so each
    # iteration appends the line just tested and the terminating
    # (non-comma) line is also appended -- confirm this off-by-one is the
    # intended behaviour at the call sites.
    while l.strip()[-1] == ',' and index < len(lines):
        l = lines[index]
        startline += l
        index += 1
        pass
    if index == initial_index:
        # First line did not end in a comma: plain single-line assignment.
        startline = lines[initial_index]
        index = index + 1
    return {'index':index, 'continue':0, 'output':startline}
def lb_mixing(a1, a2):
    """
    Apply Lorentz-Berthelot mixing rules to two atoms.

    Args:
        a1 (tuple):
            (epsilon, sigma) of the first atom
        a2 (tuple):
            (epsilon, sigma) of the second atom
    Returns:
        (epsilon, sigma) pair: geometric mean of the epsilons,
        arithmetic mean of the sigmas.
    """
    mixed_eps = (a1[0] * a2[0]) ** 0.5
    mixed_sigma = (a1[1] + a2[1]) / 2.0
    return mixed_eps, mixed_sigma
def AverageCluster(C):
    """
    Return the network's average local clustering coefficient.

    Input:
        C = array of clustering coefficient values
    Output:
        c = average clustering coefficient value
    """
    return sum(C) / len(C)
def format_keys(keys):
    """
    Rewrite each key's .text from camelCase to snake_case in place,
    e.g. findHighlightForeground -> find_highlight_foreground.
    """
    for key in keys:
        pieces = []
        for ch in key.text:
            pieces.append(f"_{ch.lower()}" if ch.isupper() else ch)
        key.text = ''.join(pieces)
    return keys
def rofi2list(datas, sep):
    """
    Split a rofi-formatted string into a python list.

    Parameters
    ----------
    datas : str
        string with elements separated by `sep`
    sep : str
        separator character
    Returns
    -------
    list
        elements of datas as a list of strings

    Examples
    --------
    >>> rofi2list("1\\n2\\n3", "\\n")
    ['1', '2', '3']
    """
    pieces = datas.split(sep)
    return pieces
def extent_of_ranges(range_from, range_to):
    """Helper: length in ms of the outer extent spanned by two ranges
    (end of range_to minus start of range_from)."""
    start = range_from[0]
    end = range_to[1]
    return end - start
def levenshtein(s1, s2):
    """Compute the Levenshtein edit distance between two strings.

    https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
    """
    # Keep the shorter string along the row dimension to minimize memory.
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    if not s2:
        return len(s1)
    prev = list(range(len(s2) + 1))
    for row, ch1 in enumerate(s1, start=1):
        curr = [row]
        for col, ch2 in enumerate(s2):
            # prev/curr are one entry longer than s2, hence col+1 offsets.
            cost_insert = prev[col + 1] + 1
            cost_delete = curr[col] + 1
            cost_subst = prev[col] + (ch1 != ch2)
            curr.append(min(cost_insert, cost_delete, cost_subst))
        prev = curr
    return prev[-1]
def _get_native_location(name):
# type: (str) -> str
"""
Fetches the location of a native MacOS library.
:param name: The name of the library to be loaded.
:return: The location of the library on a MacOS filesystem.
"""
return '/System/Library/Frameworks/{0}.framework/{0}'.format(name) |
def iterative_factorial(n: int) -> int:
    """Return n! computed with a plain loop.

    Kept as an iterative counterpart to the recursive implementation:
    factorial is a primitive recursive function and is therefore
    expressible with a bounded "for" loop.
    Proof: https://proofwiki.org/wiki/Factorial_is_Primitive_Recursive.
    See: http://mathworld.wolfram.com/PrimitiveRecursiveFunction.html."""
    assert n >= 0
    product = 1
    # For n in {0, 1} the range is empty and the product stays 1.
    for factor in range(2, n + 1):
        product *= factor
    return product
def chunks(lst, n):
    """Return a list of successive n-sized chunks from lst."""
    return [lst[start:start + n] for start in range(0, len(lst), n)]
def alignUp(value: int, size: int):
    """Round an address up to the next multiple of the given size.

    :param value: The address to align up.
    :type value: int
    :param size: The alignment granularity (positive).
    :type size: int
    :return The aligned address.
    :rtype int
    """
    remainder = value % size
    if remainder == 0:
        return value
    return value + size - remainder
def is_file_like(value):
    """Check if value is file-like, i.e. supports a read(0) call."""
    try:
        value.read(0)
    except (AttributeError, TypeError):
        # No .read attribute, or read() with an incompatible signature.
        return False
    else:
        return True
def integral_4(Nstrips):
    """
    Left-Riemann-sum approximation of the integral of x over [0, 1]
    using Nstrips equal strips.
    """
    width = 1 / Nstrips
    total = 0
    for i in range(Nstrips):
        total = total + width * (i / Nstrips)
    return total
def _next_page_state(page_state, took):
"""Move page state dictionary to the next page"""
page_state['page_num'] = page_state['page_num'] + 1
page_state['took'] = page_state['took'] + took
return page_state |
def cal_recom_result(sim_info, user_click):
    """
    Item-CF recommendation: for each user's most recent clicks, surface
    the top similar items with their similarity scores.

    Args:
        sim_info: dict item_id -> sorted list of (sim_item_id, sim_score)
        user_click: dict user_id -> list of clicked item_ids
    Return:
        dict, key:userid value dict, value_key itemid , value_value recom_score
    """
    RECENT_CLICK_NUM = 3   # only this many of the newest clicks count
    TOP_K = 5              # similar items taken per clicked item
    recom_info = {}
    for user, click_list in user_click.items():
        user_recs = recom_info.setdefault(user, {})
        for item_id in click_list[:RECENT_CLICK_NUM]:
            for pair in sim_info.get(item_id, [])[:TOP_K]:
                # Later clicks overwrite earlier scores for the same item.
                user_recs[pair[0]] = pair[1]
    return recom_info
def row_value(row):
    """Return the spread of a row: its maximum minus its minimum."""
    largest = max(row)
    smallest = min(row)
    return largest - smallest
def greedy_cow_transport(cows, limit=10):
    """
    Uses a greedy heuristic to determine an allocation of cows that attempts to
    minimize the number of spaceship trips needed to transport all the cows. The
    returned allocation of cows may or may not be optimal.
    The greedy heuristic should follow the following method:
    1. As long as the current trip can fit another cow, add the largest cow that will fit
        to the trip
    2. Once the trip is full, begin a new trip to transport the remaining cows
    Does not mutate the given dictionary of cows.
    Parameters:
    cows - a dictionary of name (string), weight (int) pairs
    limit - weight limit of the spaceship (an int)
    Returns:
    A list of lists, with each inner list containing the names of cows
    transported on a particular trip and the overall list containing all the
    trips
    """
    # MY_CODE
    # Heaviest-first greedy: sort cow names by weight, descending.
    remaining = sorted(cows.copy(), key=cows.get, reverse=True)
    # NOTE(review): assert disappears under -O; the heaviest cow must fit.
    assert cows[remaining[0]] <= limit, "Problem shouldn't be impossible!"
    boarding_list = []
    while remaining:
        cur_list = []
        occupancy = 0
        for cow in remaining:
            if occupancy + cows[cow] <= limit:
                # This cow fits on the current trip: board it.
                cur_list += [cow]
                occupancy += cows[cow]
            elif occupancy + cows[remaining[-1]] > limit:
                # Not even the lightest remaining cow fits: stop scanning.
                break
        # Remove the boarded cows from the pool before the next trip.
        for elem in cur_list:
            remaining.remove(elem)
        boarding_list += [cur_list]
    return boarding_list
def regenerated_configure(file_paths):
    """Report whether 'configure' was regenerated alongside 'configure.ac'.

    Returns "yes"/"no" when configure.ac is present, "not needed" otherwise.
    """
    if 'configure.ac' not in file_paths:
        return "not needed"
    return "yes" if 'configure' in file_paths else "no"
def check_elements(string):
    """True when the formula string stays within the CHNOPS element set.

    Any letter from the complement alphabet below marks a non-CHNOPS
    element. Note: does not cover Scandium :(
    """
    bad_elements = "ABDEFGIKLMRTUVWXYZsaroudlefgibtn"  # chem alphabet -CHNOPS
    for ch in string:
        if ch in bad_elements:
            return False
    return True
def valid_filename(proposed_file_name):
    """
    Convert a proposed file name into a valid, readable UNIX filename.

    Characters outside [a-z0-9] (case-insensitive) become underscores;
    letter case is preserved.

    :param str proposed_file_name: a proposed file name, unicode allowed.
    :return: a valid file name in string.
    """
    allowed = set("abcdefghijklmnopqrstuvwxyz0123456789")
    return "".join(ch if ch.lower() in allowed else "_"
                   for ch in proposed_file_name)
def annotation_layers(state):
    """List the names of all annotation-type layers in the state.

    Parameters
    ----------
    state : dict
        Neuroglancer state as a JSON dict
    Returns
    -------
    names : list
        Names of layers whose "type" is "annotation"
    """
    names = []
    for layer in state["layers"]:
        if layer["type"] == "annotation":
            names.append(layer["name"])
    return names
def bel_mul(d):
    """NI mul from Belaid et al.: floor(d^2 / 4) + d."""
    quarter_square = int(d ** 2 / 4)
    return quarter_square + d
def methods_of(obj):
    """
    Collect the public callable attributes of an object (names not
    starting with an underscore).

    :param obj: object to get callable attributes from
    :type obj: object
    :return result: a list of tuples of the form (method_name, method)
    :rtype: list
    """
    return [(name, getattr(obj, name))
            for name in dir(obj)
            if not name.startswith('_') and callable(getattr(obj, name))]
def rgb_to_hex(r, g, b):
    """ Convert RGB components (0-255 each) to a '0xRRGGBB' hex string.

    Replaces the previous float/str round-trip digit extraction
    (int(float("0." + str(r/16).split(".")[-1]) * 16)) with the hex
    format spec, which is exact, simpler, and produces identical
    lowercase two-digit output for every value in 0-255.
    """
    return f"0x{r:02x}{g:02x}{b:02x}"
def show_user_profile(username):
    """
    Render the profile line for the given user.
    """
    profile_text = 'User %s' % username
    return profile_text
def oddbox(funcname, boxsize, quiet=False, *args, **kwargs):
    """Ensure that the boxsize for smoothing is centered.
    Makes a smoothing box/window be 2M+1 long to put the result in the centre
    :param funcname: Function name, just for the message
    :type funcname: string
    :param boxsize: 2M|2M+1 that is corrected to be 2M+1
    :type boxsize: integer
    :param quiet: Determines if the warning is NOT printed, default is false.
        The correction itself always happens.
    :type quiet: logical
    :return: Odd number.
    :rtype: integer
    """
    if boxsize % 2 == 0:
        # Bug fix: the +1 correction used to be skipped when quiet=True,
        # so an even box size leaked through; now only the warning is
        # suppressed and the size is always made odd.
        if not quiet:
            print("""boxsize should be odd ({0} smoothing),
        currently {1}""".format(funcname, boxsize))
        boxsize += 1
    return boxsize
def split_arg(s, sep, default=''):
    """
    Split str s in two at the first sep.

    Returns (arg, val) with both parts stripped. val falls back to
    *default* when no separator is present, and the values 'true' /
    'false' (case-insensitive) are converted to booleans.
    """
    # Bug fix: maxsplit was 2, which split at the first TWO separators
    # and silently discarded the third piece; "split in two at first
    # sep" requires maxsplit=1 so the remainder stays intact.
    parts = [part.strip() for part in s.split(sep, 1)]
    arg = parts[0]
    val = parts[1] if len(parts) > 1 else default
    val = {'true': True, 'false': False}.get(val.lower(), val)
    return arg, val
def get_emoji(string):
    """Turn a hex codepoint string into its unicode character.

    Inputs that are empty, or already look like custom emoji tags
    (starting with '<'), are returned unchanged.
    """
    if not string:
        return string
    if string.startswith("<"):
        return string
    return chr(int(string, 16))
def hot_test(blue, red):
    """Haze Optimized Transformation (HOT) test.

    Equation 3 (Zhu and Woodcock, 2012); Zhang et al. (2002).
    Exploits the fact that visible bands correlate for most land
    surfaces, while haze/thin cloud responds differently in blue vs red.

    Parameters
    ----------
    blue: ndarray
    red: ndarray
    Output
    ------
    ndarray: boolean
    """
    HOT_THRESHOLD = 0.08
    return blue - (0.5 * red) - HOT_THRESHOLD > 0.0
def contrast(strength=1.0):
    """
    Contrast filter.
    Author: SolarLune
    Date Updated: 6/6/11
    strength = how strong the contrast filter is
        (multiplier baked into the shader; 0 = none, higher = more).

    Returns a GLSL fragment-shader source string that scales each
    pixel's distance from mid-luminance by `strength`.
    """
    # The shader source is assembled by splicing the (float-coerced)
    # strength value directly into the GLSL text.
    return (
        """
        // Name: Contrast Filter
        // Author: SolarLune
        // Date Updated: 6/6/11
        uniform sampler2D bgl_RenderedTexture;
        void main(void)
        {
            float contrast = """ + str(float(strength)) + """; // Multiplication value for contrast (high = more, 0 = none)
            vec4 color = texture2D(bgl_RenderedTexture, vec2(gl_TexCoord[0].st.x, gl_TexCoord[0].st.y));
            float avg = dot(color.rgb, vec3(0.299, 0.587, 0.114));
            float diff = 1.0 + ((avg - 0.5) * contrast);
            color *= diff;
            gl_FragColor = color;
        }
        """
    )
def check_for_reqd_cols(data, reqd_cols):
    """
    Return the required columns missing from the first record of *data*
    (PmagPy list of dicts).
    """
    first_record = data[0]
    return [col for col in reqd_cols if col not in first_record]
def versiontuple(v):
    """Convert a dotted version string to a tuple of ints for comparison.

    :param v: string version, e.g "2.3.1".
    :type v: str
    :return: e.g. (2, 3, 1); tuples compare element-wise, so
        versiontuple("2.3.1") < versiontuple("10.1.1").
    :rtype: tuple
    """
    return tuple(int(part) for part in v.split("."))
def cosine_similarity(a,b):
    """
    Calculate the cosine similarity between two vectors.

    NOTE(review): the returned value is 1.0 - cos(a, b), i.e. the cosine
    *distance*, not the similarity the name suggests -- confirm against
    callers before changing. Returns None when either vector is null.
    """
    import numpy
    # dtype=int truncates fractional components -- presumably the inputs
    # are binary/integer vectors; verify at the call sites.
    a = numpy.array(list(a), dtype=int)
    b = numpy.array(list(b), dtype=int)
    n = numpy.dot(a,b)
    d = numpy.linalg.norm(a,ord=2) * numpy.linalg.norm(b,ord=2)
    # If one of the vectors is the null vector, the dot product is going to
    # be 0 since it's defined as perpendicular.
    if d == 0:
        return None
    return 1.0 - n/d
def generate_sequential_ints(n):
    """
    Return the list [0, 1, ..., n-1].

    :param int n: number of ints to generate
    :return: list of sequential ints
    """
    return list(range(n))
def get_template_s3_url(bucket_name, resource_path):
    """
    Build the S3 HTTPS URL for a CloudFormation template.

    :param bucket_name: S3 bucket name
    :param resource_path: key/path of the object inside the bucket
    :return string: S3 URL of the cloudformation template
    """
    return f'https://{bucket_name}.s3.amazonaws.com/{resource_path}'
def gray2int(graystr):
    """Convert a Gray-code bit string to its binary integer value.

    Generalized: the previous fixed cascade (num ^= num>>8, >>4, >>2,
    >>1) only decoded values up to 16 bits; the mask loop below applies
    the same prefix-XOR for any width.

    :param graystr: bit string, e.g. "110"
    :return: decoded integer
    """
    num = int(graystr, 2)
    mask = num >> 1
    while mask:
        num ^= mask
        mask >>= 1
    return num
def from_perfect_answer(text: str) -> str:
    """
    Generate GIFT-ready text from a perfect answer.

    Parameters
    ----------
    text : str
        The phrasing of the answer.
    Returns
    -------
    out: str
        The answer prefixed with '=' as GIFT marks correct answers.
    """
    return "={0}".format(text)
def extract_key(key_shape, item):
    """Build an index key: the subset of *item* restricted to the
    fields listed in *key_shape*."""
    key = {}
    for field in key_shape:
        key[field] = item[field]
    return key
def solution(S, P, Q):
    """
    DINAKAR
    Order A,C,G,T => order as impact factor 1,2,3,4
    in loop A,C,G,T appends 1,2,3,4 due to order and value of impact factor
    Objective is to find the minimal impact in each P,Q
    Clue- below line has the hidden answer - [find the minimal impact factor of nucleotides contained in the DNA
    sequence between positions P[K] and Q[K]
    (inclusive).]
    - we know for each slice if we check only availability of characters in the order given above so it will always
    give the minimal impact.
    eg.
    for first slice P=> 2 Q=> 5
    GCC
    I just need to check which A,C,G or T is available first
    A - no
    C - yes -> for this slice minimal impact is 2

    Returns the list of minimal impact factors, one per (P[i], Q[i]) query.
    """
    # NOTE(review): the print calls throughout look like debug output
    # left in place -- confirm they are wanted before shipping.
    print(S)
    print(P)
    print(Q)
    query_answer = []
    # One pass per query; checking nucleotides in ascending impact order
    # means the first one present in the slice is the minimal impact.
    for i in range(len(P)):
        print()
        # ar[start:end] = produce the slice ie. part of array / sub set of array
        slice_ = S[P[i]:Q[i] + 1]
        print("Slice...for position " + str(i) + ", P=> " + str(P[i]) + " Q=> " + str(Q[i] + 1))
        print(slice_)
        if "A" in slice_:
            print("A is in slice...")
            query_answer.append(1)
        elif "C" in slice_:
            print("C is in slice...")
            query_answer.append(2)
        elif "G" in slice_:
            print("G is in slice...")
            query_answer.append(3)
        elif "T" in slice_:
            print("T is in slice...")
            query_answer.append(4)
    print("query_answer " + str(query_answer))
    return query_answer
def compare_arr_str(arr1, arr2):
    """
    Compare two values that may each be a str or a list of str.

    Returns True when arr1 (or any of its strings) occurs as a substring
    of arr2 (or any of its strings). For non-str/list inputs, falls back
    to an equality check.

    Bug fix: when both arguments were strings and arr1 was NOT a
    substring of arr2, the old code fell through its branch without a
    return and yielded None; it now returns False as documented.
    """
    # Both str
    if isinstance(arr1, str) and isinstance(arr2, str):
        return arr1 in arr2
    # First str, second list
    if isinstance(arr1, str) and isinstance(arr2, list):
        return any(arr1 in string2 for string2 in arr2)
    # First list, second str
    if isinstance(arr1, list) and isinstance(arr2, str):
        return any(string1 in arr2 for string1 in arr1)
    # Both lists: any pairwise substring match counts
    if isinstance(arr1, list) and isinstance(arr2, list):
        return any(string1 in string2
                   for string1 in arr1
                   for string2 in arr2)
    # Fallback for other types: plain equality
    return arr1 == arr2
def splitext_all(_filename):
    """split ALL extensions (everything after the first '.') from the filename
    similar to os.path.splitext, which only splits the last extension;
    the returned extension part always starts with '.'
    """
    _name, _sep, _extensions = _filename.partition('.')
    return (_name, "." + _extensions)
def get_n_grams(token, grams_count):
    """
    Return the n-grams of a token, padded with '$' boundary grams.

    :param token: ex. "results"
    :param grams_count: ex. 2
    :return: ['$r', 're', 'es', 'su', 'ul', 'lt', 'ts', 's$']
    """
    window = grams_count
    grams = [token[start:start + window]
             for start in range(len(token) - window + 1)]
    # Closing boundary: last gram minus its first char, plus '$'.
    grams.append(grams[-1][-window + 1:] + "$")
    # Opening boundary: '$' plus the first gram minus its last char.
    grams.insert(0, "$" + grams[0][:window - 1])
    return grams
def calc_weight(judge_i, pairing_i):
    """ Relative badness of this judge assignment.

    Small negative numbers are preferred to large negative ones: a
    highly ranked judge on a lower round costs only linearly (we can
    accept it if we absolutely have to), the reverse costs quadratically.
    """
    delta = judge_i - pairing_i
    if delta < 0:
        return delta
    return -(delta * delta)
def make_compact(creation_sequence):
    """
    Return the creation sequence in compact run-length form: the lengths
    of the alternating runs of 'i's and 'd's.

    Examples
    --------
    >>> from networkx.algorithms.threshold import make_compact
    >>> make_compact(['d', 'i', 'i', 'd', 'd', 'i', 'i', 'i'])
    [1, 2, 2, 3]
    >>> make_compact(['d', 'd', 'd', 'i', 'd', 'd'])
    [3, 1, 2]

    The first run is always of 'd's (the first construction vertex).
    Labeled creation sequences lose their labels; an already-compact
    (integer) sequence is returned unchanged.

    >>> make_compact([3, 1, 2])
    [3, 1, 2]
    """
    head = creation_sequence[0]
    if isinstance(head, str):        # plain creation sequence
        symbols = list(creation_sequence)
    elif isinstance(head, tuple):    # labeled creation sequence
        symbols = [pair[1] for pair in creation_sequence]
    elif isinstance(head, int):      # already compact
        return creation_sequence
    else:
        raise TypeError("Not a valid creation sequence type")
    runs = []
    run_length = 1
    for prev_sym, sym in zip(symbols, symbols[1:]):
        if sym == prev_sym:
            run_length += 1
        else:
            runs.append(run_length)
            run_length = 1
    runs.append(run_length)  # close out the final run
    return runs
def get_first_syl(w):
    """
    Split w into its first "syllable" (the maximal run of its first
    character) and the remaining suffix.

    Returns ((char, run_length), suffix).
    """
    assert len(w) > 0
    first_char = w[0]
    run = 0
    while run < len(w) and w[run] == first_char:
        run += 1
    return ((first_char, run), w[run:])
def split_by_sep(seq):
    """
    Split an HTTP response body into chunks at structural separators.

    The separators exploit how HTML/JSON are usually laid out -- line
    breaks and tabs (\\n, \\r, \\t), tag openers (<) and single/double
    quotes -- so that a local difference between two responses does not
    desynchronize the downstream SequenceMatcher the way fixed-length
    32-byte chunking did (see this file's history for details).

    Splitting on spaces was rejected: it would create many meaningless
    chunks and erase the performance win.

    :param seq: A string
    :return: A list of string chunks (separator chars are dropped; an
        empty chunk is emitted between adjacent separators and at the
        boundaries, matching str.split-style behaviour)
    """
    separators = frozenset('\n\t\r"\'<')
    chunks = []
    current = []
    for ch in seq:
        if ch in separators:
            chunks.append(''.join(current))
            current = []
        else:
            current.append(ch)
    # Flush the trailing chunk (possibly empty).
    chunks.append(''.join(current))
    return chunks
def extract_digit(str):
    """
    Extract the whitespace-separated tokens of the string that are
    pure digits, converted to ints.
    """
    tokens = str.split()
    return [int(token) for token in tokens if token.isdigit()]
def is_api_response_success(api_response: dict) -> bool:
    """Return True when the Connect API response reports success."""
    if "result" not in api_response:
        return False
    return api_response["result"].lower() == "success"
def search_results_dict(results, num_matches, limit, offset, sort):
    """Package search results into the standard response shape.

    The *sort* parameter is accepted for interface parity but is not
    included in the payload.
    """
    return {
        'matches': num_matches,
        'limit': limit,
        'offset': offset,
        'results': results,
    }
def update(tagid):
    """
    Endpoint stub: request an update of our tag database when a stored
    tag is believed to be inaccurate. Not implemented yet.
    """
    return {"Status": "Not finished"}
def Crc8(data):
    """Calculate the CRC8 of a string of characters.

    Generator polynomial: x^8 + x^2 + x + 1 (same implementation as
    used in the EC).

    Args:
        data: A string whose characters' ordinals are fed into the CRC.
    Returns:
        The CRC8 value as an integer.
    """
    crc = 0
    for char in data:
        crc ^= ord(char) << 8
        for _ in range(8):
            if crc & 0x8000:
                # XOR with the polynomial aligned to the top bit.
                crc ^= (0x1070 << 3)
            crc <<= 1
    return crc >> 8
def wrap_text(text: str, max_length: int = 16) -> str:
    """Return a new label with wrapped text.

    Words are packed greedily onto lines of at most *max_length*
    characters (a minimum of 12 is enforced). A wrapped result keeps
    the historical quirk of beginning with a space.

    Bug fix: a single word longer than max_length used to fall through
    every branch and return None despite the ``-> str`` annotation; it
    cannot be wrapped, so it is now returned unchanged.

    Parameters
    ----------
    text : str
        The current label text
    max_length : int
        The max length for the label (defaults to 16 characters)

    Returns
    -------
    str
        The new label text
    """
    if max_length < 12:
        max_length = 12
    if len(text) <= max_length:
        return text
    words = text.split()
    if len(words) <= 1:
        # Nothing to wrap on: return the over-long word as-is.
        return text
    new_text = ""
    temp_text = ""
    for word in words:
        test_text = temp_text + " " + word
        if len(test_text) < max_length:
            new_text = new_text + " " + word
            temp_text = test_text
        else:
            new_text = new_text + "\n" + word
            temp_text = word
    return new_text
def _find_linar_poly(p1_x, p1_y, p2_x, p2_y):
"""
Finding the linear polynomial y=kx+d from two points
Parameters
----------
p1_x : float
X value of first point.
p1_y : float
Y value of first point.
p2_x : float
X value of second point.
p2_y : float
Y value of second point.
Returns
-------
k : float
Slope of the linear polynomial.
d : float
Intersection with y-axis.
"""
k = (p2_y-p1_y)/(p2_x - p1_x)
d = p1_y - k * p1_x
return k,d |
def dockerhub_url(name, version):
    """Build the Docker Hub registry URL for a repository tag."""
    base = "https://registry.hub.docker.com/v2/repositories"
    return "{}/{}/tags/{}/".format(base, name, version)
def ratio_to_int(percentage, max_val):
    """Interpret *percentage* as a ratio (scaled by max_val) when in
    [0, 1), or as an absolute value when in [1, max_val]; return int.

    Raises ValueError when the value lies outside [0, max_val]."""
    if 1 <= percentage <= max_val:
        result = percentage
    elif 0 <= percentage < 1:
        result = percentage * max_val
    else:
        raise ValueError("percentage={} outside of [0,{}].".format(percentage, max_val))
    return int(result)
def sum_of_squares(x, y, fn):
    """
    Residual sum of squares between fn applied to x and the targets y:
    sum(|fn(x_i) - y_i|^2) over the training set.

    :return: float representing the error
    """
    return sum((predicted - actual) ** 2
               for predicted, actual in zip(map(fn, x), y))
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute. Examples are
    properties (defined in Python) and getsets and members (defined in C).
    Typically, data descriptors will also have __name__ and __doc__ attributes
    (properties, getsets, and members have both of these attributes), but this
    is not guaranteed."""
    has_set = hasattr(object, "__set__")
    has_get = hasattr(object, "__get__")
    return has_set and has_get
def calc_density_diff(cube1_value, cube2_value):
    """Computes formatted density difference.

    Parameters
    ----------
    cube1_value : str
        Single value of electron density.
    cube2_value : str
        Single value of electron density.

    Returns
    -------
    str
        cube1 - cube2 value, formatted in scientific notation with
        five decimal places.

    Examples
    --------
    >>> calc_density_diff('3.73097E-15', '2.43683E-15')
    '1.29414E-15'
    """
    difference = float(cube1_value) - float(cube2_value)
    # format() already yields a str; no extra conversion needed.
    return '{:0.5E}'.format(difference)
def to_lowercase(words):
    """Convert all characters to lowercase in a list of tokenized words."""
    lowered = []
    for token in words:
        lowered.append(token.lower())
    return lowered
def grouplist(obj):
    """Return the groups stored under the 'hostgroup' key of *obj*."""
    hostgroups = obj['hostgroup']
    return hostgroups
def form_columns(form):
    """
    :param form: Taken from requests.form
    :return: columns: list of slugified column names
             labels: dict mapping string labels of special column types
                     (observed_date, latitude, longitude, location)
                     to names of columns
    """
    prefix = 'col_name_'
    labels = {}
    columns = []
    for field, label in form.items():
        # Only fields of the shape 'col_name_<column>' describe columns.
        if not field.startswith(prefix):
            continue
        column = field.replace(prefix, "")
        columns.append(column)
        # e.g. labels['observed_date'] = 'date'
        labels[label] = column
    return columns, labels
def get_baji_for_icmrucc_spinfree(b, a, j, i, v_n, a_n, c_n, a2a):
    """
    Map the index quadruple (b, a, j, i) onto a packed linear index for
    spin-free ic-MRUCC amplitudes.

    Author(s): Yuhto Mori

    NOTE(review): semantics below are inferred from usage — confirm
    against callers:
      b, a  : upper orbital indices of the two excitations
      j, i  : lower orbital indices of the two excitations
      v_n   : unused in this function (kept for a uniform signature?)
      a_n   : presumably the number of active orbitals
      c_n   : presumably the number of core/closed orbitals
      a2a   : selects an alternative packing scheme (presumably the
              active-to-active excitation case)

    Returns:
        int: packed index; pairs (b, j) and (a, i) are flattened to
        composite indices bj/ai, triangularly packed, then shifted down
        by a correction 'redu' that counts excluded combinations.
    """
    if a2a:
        # Composite pair indices flattened over a range of size (a_n + c_n).
        bj = (b - c_n) * (a_n + c_n) + j
        ai = (a - c_n) * (a_n + c_n) + i
        if bj > ai:
            # 'redu' = number of pair combinations skipped before (bj, ai)
            # in the packed list; exact counting rule is intricate.
            if b > j:
                if b >= a_n + c_n:
                    redu = (a_n+1)*a_n//2
                else:
                    redu = (b-c_n+1)*(b-c_n)//2
            elif b < j:
                redu = (b-c_n+2)*(b-c_n+1)//2
            else:
                # b == j: diagonal case includes an offset in a,
                # plus one more exclusion when a < i.
                redu = (b-c_n+1)*(b-c_n)//2 + a - c_n
                if a < i:
                    redu = redu + 1
            # Triangular packing of the ordered pair (bj, ai).
            baji = bj * (bj + 1) // 2 + ai - redu
        else:
            # Mirror case ai >= bj: roles of (b, j) and (a, i) swap.
            if a > i:
                if a >= a_n + c_n:
                    redu = (a_n+1)*a_n//2
                else:
                    redu = (a-c_n+1)*(a-c_n)//2
            elif a < i:
                redu = (a-c_n+2)*(a-c_n+1)//2
            else:
                redu = (a-c_n+1)*(a-c_n)//2 + b - c_n
                if b < j:
                    redu = redu + 1
            baji = ai * (ai + 1) // 2 + bj - redu
        index = int(baji)
    else:
        # General (non-a2a) packing: same composite indices, different
        # exclusion count based on active-space boundaries.
        bj = (b - c_n) * (a_n + c_n) + j
        ai = (a - c_n) * (a_n + c_n) + i
        if bj > ai:
            if b >= a_n + c_n:
                redu = a_n*a_n * (a_n*a_n + 1) // 2
            else:
                tmp = a_n * (b - c_n)
                if j >= c_n:
                    tmp = tmp + j - c_n
                redu = tmp * (tmp + 1) // 2 + a_n * (a - c_n)
            baji = bj * (bj + 1) // 2 + ai - redu
        else:
            # Mirror case with a/i taking the leading role.
            if a >= a_n + c_n:
                redu = a_n*a_n * (a_n*a_n + 1) // 2
            else:
                tmp = a_n * (a - c_n)
                if i >= c_n:
                    tmp = tmp + i - c_n
                redu = tmp * (tmp + 1) // 2 + a_n * (b - c_n)
            baji = ai * (ai + 1) // 2 + bj - redu
        index = int(baji)
    return index
def _is_datetime_dtype(obj):
"""Returns True if the obj.dtype is datetime64 or timedelta64
"""
dtype = getattr(obj, 'dtype', None)
return dtype is not None and dtype.char in 'Mm' |
def green(s):
    """Wrap *s* in ANSI escape codes so it renders green in a terminal."""
    return "\033[1;32m{}\033[0m".format(s)
def _gf2mul(a,b):
"""
Computes ``a * b``.
Parameters
----------
a, b : integer
Polynomial coefficient bit vectors.
Returns
-------
c : integer
Polynomial coefficient bit vector of ``c = a * b``.
"""
c = 0
while a > 0:
if (a & 1) != 0:
c ^= b
b <<= 1
a >>= 1
return c |
def parse_row(row, cols, sheet):
    """
    parse a row into a dict
    :param int row: row index
    :param dict cols: dict of header, column index
    :param Sheet sheet: sheet to parse data from
    :return: dict of values key'ed by their column name
    :rtype: dict[str, str]
    """
    # One cell lookup per header; keyed by header name.
    return {header: sheet.cell(row, col).value for header, col in cols.items()}
def response_test(response: dict):
    """Quick test of the http request to make sure it has the data structure needed to analyze

    Args:
        response (dict): dictionary of http response

    Returns:
        boolean test
    """
    # Guard-clause walk down the expected nesting:
    # response['items'][0]['snippet']['channelTitle']
    if 'items' not in response:
        return False
    items = response['items']
    if len(items) == 0:
        return False
    first = items[0]
    if 'snippet' not in first:
        return False
    return 'channelTitle' in first['snippet']
def bubble_sort(list_to_sort):
    """ Sort list in input using bubble sorting.

    Sorts in place and also returns the same list object. Uses the
    standard early-exit optimization: if a full inner pass performs no
    swap, the list is already sorted and we stop (O(n) best case).

    :param list_to_sort: the list to sort
    :type list_to_sort: list
    :return: the list sorted (same object, mutated in place)
    :rtype: list
    """
    list_size = len(list_to_sort)
    for i in range(list_size):
        swapped = False
        # After pass i, the last i elements are in their final position.
        for j in range(0, list_size - i - 1):
            if list_to_sort[j] > list_to_sort[j+1]:
                # Swap current and next element
                list_to_sort[j], list_to_sort[j+1] = list_to_sort[j+1], list_to_sort[j]
                swapped = True
        if not swapped:
            # No swaps in a full pass: already sorted.
            break
    return list_to_sort
def get(args, attr):
    """
    Gets a command-line argument if it exists, otherwise returns None.

    Args:
        args: The command-line arguments.
        attr (str): The name of the command-line argument.

    Returns:
        The attribute's value, or None when *args* has no such attribute.
    """
    # getattr with a default replaces the hasattr/getattr double lookup;
    # both suppress only AttributeError, so behavior is unchanged.
    return getattr(args, attr, None)
def butlast(mylist):
    """Like butlast in Lisp; returns the sequence without the last element."""
    # Slicing never raises on short input: an empty or single-element
    # sequence yields an empty one.
    return mylist[:len(mylist) - 1]
def is_float(string):
    """
    Check if `string` can be converted
    into a float.

    Parameters
    ----------
    string : str
        String to check.

    Returns
    -------
    check : bool
        True if `string` can be converted.
    """
    try:
        float(string)
    except (TypeError, ValueError):
        # ValueError: non-numeric text (e.g. "abc").
        # TypeError: non-string/non-numeric objects (e.g. None, lists) —
        # previously escaped the handler and crashed the caller.
        return False
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.