content stringlengths 42 6.51k |
|---|
def claimant_2_share(responses, derived):
    """Return claimant 2's proportionate amount of child support.

    The share is the total section-seven expenses scaled by claimant 2's
    annual-income-based proportion (stored as a percentage in ``derived``).
    """
    fraction = derived['claimant_2_share_proportion'] / 100
    return fraction * derived['total_section_seven_expenses']
def get_coinbase_api_pagination_id(timestamp, last_data=None, data=None):
    """Return the Coinbase Pro pagination cursor (last trade_id) for a page.

    Pagination details: https://docs.pro.coinbase.com/#pagination

    Args:
        timestamp: Unused here; kept for interface compatibility.
        last_data: Unused here; kept for interface compatibility.
        data: List of trade dicts; the ``trade_id`` of the final entry is
            the cursor for the next page.

    Returns:
        The last entry's ``trade_id``, or None when ``data`` is empty/None.
    """
    # Mutable default arguments ([]) replaced with None — the old defaults
    # were shared across calls, a classic Python pitfall.
    if data:
        return data[-1]["trade_id"]
    return None
def get_num_bags(list_of_bags: list, colour: str) -> list:
    """
    Recursively collect the colours of all bags that can (transitively)
    contain a bag of the given colour.

    Start with the given colour (e.g. "shiny gold"), find the rule lines
    that mention it as contents, take each line's leading colour as a
    parent bag, and recurse on those parents.

    :param list_of_bags: rule lines, each like "<colour> bags contain ...".
    :param colour: colour whose (transitive) containers are sought.
    :return: A list of unique parent colours.
    :rtype: list
    """
    # Lines that mention `colour` somewhere other than position 0,
    # i.e. lines describing some other bag that contains this colour.
    lines = [
        line for line in list_of_bags if colour in line and line.index(colour) != 0
    ]
    # NOTE(review): all_colours is re-created empty on every call, so the
    # "not in all_colours" filter below never removes anything — confirm
    # whether it was meant to be shared across recursive calls.
    all_colours = []
    if len(lines) == 0:
        return []
    else:
        # The container's colour is everything before the first " bags".
        colours = [line[: line.index(" bags")] for line in lines]
        # all_colours tracks colours we have checked.
        # This gathers colours we haven't checked yet (vacuous here; see NOTE).
        colours = [colour for colour in colours if colour not in all_colours]
        # Recursively gather each parent's own containers.
        # (The loop variable deliberately shadows the `colour` parameter.)
        for colour in colours:
            all_colours.append(colour)
            bags = get_num_bags(list_of_bags, colour)
            all_colours += bags
        # Deduplicate while preserving discovery order.
        unique_colours = []
        for colour in all_colours:
            if colour not in unique_colours:
                unique_colours.append(colour)
        return unique_colours
def get_x_and_y_coords(array):
    """
    Split the first four (x, y) pairs of a square into coordinate lists.

    :param array: 2D sequence of square corner coordinates
    :return: [xs, ys] — parallel lists of the four x and four y values
    """
    xs = [point[0] for point in array[:4]]
    ys = [point[1] for point in array[:4]]
    return [xs, ys]
def ToText(value):
    """Convert any value (e.g. int or float) to its string form.

    :param any value: value to convert; strings pass through unchanged.
    :return: string representation of ``value`` (str).
    """
    if isinstance(value, str):
        return value
    return str(value)
def binary_search(items, desired_item, start=0, end=None,):
    """Binary search over `items` sorted by (.letters, .numbers).

    Parameters:
        items: a sorted list of Id objects (each with .letters and .numbers)
        desired_item: object whose .letters/.numbers we want to locate
        start: int index of the beginning of the search section
        end: exclusive end boundary of the search section; defaults to len(items)

    Returns:
        None if desired_item is not found in items,
        otherwise the index position of desired_item.
    """
    if end == None:
        end = len(items)
    if start == end:
        # Empty search window: not found.
        return None
        # raise ValueError("%s was not found in the list." % desired_item)
    pos = (end - start) // 2 + start
    if desired_item.letters == items[pos].letters:
        # Letters match; compare the numeric parts as strings.
        checkfirstSpill=str(desired_item.numbers)
        checkfirstAddress=str(items[pos].numbers)
        # if checkfirstSpill[len(checkfirstSpill)//2:] in checkfirstAddress: # have to make sure that checkfirstspill shorter than checkfirst address
        if checkfirstSpill == checkfirstAddress:
            return pos
        else:
            # Same letters, different numbers: scan FORWARD through the run
            # of equal-letter entries looking for a numbers match.
            # NOTE(review): this compares a str against the raw .numbers
            # (no str() applied here), and pos+i can run past the end of
            # items (IndexError) when the run reaches the list end — confirm
            # both are acceptable for the data used. Entries BEFORE pos in
            # the same-letter run are never examined.
            i=1
            while desired_item.letters==items[pos+i].letters:
                checkfirstAddress=items[pos+i].numbers
                if checkfirstSpill== checkfirstAddress:
                    print("Special case for {} with {}".format(checkfirstSpill,checkfirstAddress))
                    return (pos+i)
                else:
                    i+=1
                    continue
            else:
                # Ran off the equal-letters run without a numbers match.
                return
        #if the next items dont match in numbers, and its been run thru to check for letter matches
        #return nothing
    elif desired_item.letters > items[pos].letters:
        return binary_search(items, desired_item, start=(pos + 1), end=end)
    else: # desired_item < items[pos]:
        return binary_search(items, desired_item, start=start, end=pos)
def indent(text, prefix):
    """Return *text* with *prefix* prepended to every line."""
    lines = text.splitlines(True)
    return ''.join([prefix + line for line in lines])
def slice_filter(value, arg):
    """
    Return a slice of the list.

    Uses the same ``start:stop:step`` syntax as Python's list slicing; see
    http://www.diveintopython3.net/native-datatypes.html#slicinglists
    for an introduction. Empty components mean "use the default"; on
    malformed input the value is returned unchanged.
    """
    try:
        parts = [int(piece) if piece else None for piece in arg.split(':')]
        return value[slice(*parts)]
    except (ValueError, TypeError):
        return value
def mergeOrderings(orderings, seen=None):
    """Merge multiple orderings so that within-ordering order is preserved.

    Orderings are constrained in such a way that if an object appears
    in two or more orderings, then the suffix that begins with the
    object must be in both orderings.

    For example:

    >>> mergeOrderings([
    ... ['x', 'y', 'z'],
    ... ['q', 'z'],
    ... [1, 3, 5],
    ... ['z']
    ... ])
    ['x', 'y', 'q', 1, 3, 5, 'z']

    Args:
        orderings: sequence of sequences to merge. Unlike the previous
            implementation, the caller's list is NOT modified.
        seen: optional dict tracking already-emitted items (mutated).

    Returns:
        A single merged list.
    """
    if seen is None:
        seen = {}
    result = []
    # Walk orderings last-to-first, and each ordering last-to-first, so
    # shared suffix items keep their latest position; reverse at the end.
    # (The original called orderings.reverse() — mutating the caller's
    # list in place — so iterate over reversed views/copies instead.)
    for ordering in reversed(orderings):
        for o in reversed(list(ordering)):
            if o not in seen:
                seen[o] = 1
                result.append(o)
    result.reverse()
    return result
def disttar_string(target, source, env):
    """Console message for the DistTar builder.

    The (typically very long) source list is elided; only the first target
    is shown. Uncomment the 'Adding to TAR file' line in the builder to see
    the full contents instead.
    """
    return 'DistTar({},...)'.format(target[0])
def extract_id_from_sheets_url(url) -> str:
    """Extract the spreadsheet ID from a Google Sheets URL.

    Assumes ``url`` is of the form
    ``https://docs.google.com/spreadsheets/d/<ID>/edit...`` and returns the
    ``<ID>`` portion. URLs without an ``/edit`` suffix are also handled:
    previously ``find`` returned -1 and the slice silently dropped the last
    character of the ID; now the ID runs to the end of the string.
    """
    start = url.find("/d/") + 3
    end = url.find("/edit", start)
    if end == -1:
        return url[start:]
    return url[start:end]
def convert_sec_to_min(seconds) -> str:
    """Convert seconds to a ``minute:second`` string.

    Examples
    --------
    >>> convert_sec_to_min(0)
    '0:00'
    >>> convert_sec_to_min(60)
    '1:00'
    >>> convert_sec_to_min(90)
    '1:30'
    >>> convert_sec_to_min(912)
    '15:12'

    Parameters
    ----------
    seconds: int
        the seconds to convert the data from

    Returns
    -------
    str: the formatted output
    """
    minutes, sec = divmod(seconds, 60)
    # "%d" (not "%02d") for the minutes so the output matches the documented
    # examples: the old format produced '00:00'/'01:30' instead of '0:00'/'1:30'.
    return "%d:%02d" % (minutes, sec)
def hyphen_range(s):
    """Expand a range string "a-b" into the sorted list of ints a..b inclusive.

    Comma separated pieces are accepted too: "a-b,c-d,f" yields every number
    from a to b, from c to d, plus f. Whitespace is ignored and duplicates
    are removed. Raises SyntaxError on malformed pieces.
    """
    compact = "".join(s.split())  # remove all whitespace
    values = set()
    for part in compact.split(','):
        bounds = part.split('-')
        if len(bounds) == 1:
            values.add(int(bounds[0]))
        elif len(bounds) == 2:
            values.update(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            raise SyntaxError("Range is not properly formatted: " + compact)
    return sorted(values)
def maxDepthTLCovers(tlcovs):
    """
    Prune top-level covers for maximum depth.

    Inputs:
      tlcovs: A list of top-level covers as returned by explain; each entry
        is a tuple (u, k, d_min, d_max, ts).
    Outputs:
      tlcovs_md: The covers whose max(d_max) equals the overall maximum.
      md: The maximum depth found.
    """
    md = max(max(cover[3]) for cover in tlcovs)
    tlcovs_md = [cover for cover in tlcovs if max(cover[3]) == md]
    return tlcovs_md, md
def take_user_input(userinputs):
    """
    Prompt the user once per key and collect the answers.

    :param userinputs: dict mapping each key to the description shown as
        its prompt:
        {
            "key": "Description to be shown for input"
        }
    :return: dict mapping each key to the user's answer:
        {
            "key": "[INPUT_FROM_USER]"
        }
    """
    return {
        key: input(prompt + "\n>> ")
        for key, prompt in userinputs.items()
    }
def evaluate_state(board, heurustic_weights):
    """
    Evaluate a board state as a weighted sum of five features.

    The board is a 2D grid (list of rows) where 0 marks an empty cell.
    Features, weighted by heurustic_weights[0..4] in order:
      max_height (height of the highest filled cell from the bottom),
      height_diff (max_height minus the height of the lowest filled cell),
      num_holes (empty cells with a filled cell directly above),
      burried_holes (empty cells with a filled cell anywhere above),
      row_clears (rows with no empty cells).

    :param board: 2D list of rows; nonzero entries are filled blocks.
    :param heurustic_weights: sequence of 5 numeric weights.
    :return: the weighted feature sum (a single number).
    """
    height = len(board)
    min_height = len(board)
    max_height = 0
    # Highest filled cell, measured from the bottom of the board.
    for row_num, row in enumerate(board):
        for block in row:
            if block != 0:
                if (height - row_num) > max_height:
                    max_height = height - row_num
    # Lowest filled cell, measured the same way.
    # NOTE(review): the assignment uses `height - row_num - 1` while the
    # comparison (and max_height) use `height - row_num` — this off-by-one
    # asymmetry looks unintended; confirm before relying on height_diff.
    for row_num, row in enumerate(board):
        for block in row:
            if block != 0:
                if (height - row_num) < min_height:
                    min_height = height - row_num - 1
    height_diff = max_height - min_height
    # Count completely filled rows.
    row_clears = 0
    for row in board:
        row_clear = True
        for block in row:
            if block == 0:
                row_clear = False
                break
        if row_clear:
            row_clears += 1
    # Count holes of both kinds in a single pass over the cells.
    num_holes = 0
    burried_holes = 0
    for row_num, row in enumerate(board):
        for block_num, block in enumerate(row):
            # block is empty
            if block == 0:
                burried = False
                # NOTE(review): range(row_num - 1) stops one row short of the
                # row directly above this cell — confirm whether range(row_num)
                # was intended.
                for i in range(row_num -1):
                    if board[i][block_num] != 0:
                        burried = True
                        break
                if burried:
                    burried_holes += 1
                # A filled cell directly above an empty one is a "hole".
                if row_num > 0 and board[row_num - 1][block_num] != 0:
                    num_holes += 1
    return (heurustic_weights[0] * max_height) + (heurustic_weights[1] * height_diff) + (heurustic_weights[2] * num_holes) + (heurustic_weights[3] * burried_holes) + (heurustic_weights[4] * row_clears)
def ConvertToBinaryList(x):
    """
    Convert an arbitrary-length list of numbers into a binary-valued list:
    1 wherever the original value is > 0, otherwise 0.

    :param x: sequence of numbers
    :return: list of 0/1 ints, same length as x
    """
    return [1 if value > 0 else 0 for value in x]
def _determine_header_and_data_format(file_size, num_channels, num_records):
"""
Determine the column metadata header and data format of the file.
Look files have been through several generations as data and systems have expanded.
This helper determines if the file was written to hold up to 16 or 32 columns and
if the data are stored as 4 byte floats or 8 byte doubles.
Parameters
----------
file_size : int
Total file size in bytes
num_channels : int
Number of data channels written to the file
num_records : int
Number of records (rows) written to each channel
Returns
-------
number_of_header_channels : int
Number of channels in the header, 16 or 32.
bytes_per_data : int
Nubmer of bytes per data point, 4 or 8.
"""
# Calculate the size if this were a 16 channel file - these files used all 4 byte floats
sixteen_ch_float_file_size = 36 + 84 * 16 + 4 * num_records * num_channels
# Calculate the size if this were a 32 channel file of 4 byte floats
thirty_two_ch_float_file_size = 36 + 84 * 32 + 4 * num_records * num_channels
# Calculate the size if this were a 32 channel file of 8 byte doubles
thirty_two_ch_double_file_size = 36 + 84 * 32 + 8 * num_records * num_channels
if file_size == sixteen_ch_float_file_size:
return 16, 4
elif file_size == thirty_two_ch_float_file_size:
return 32, 4
elif file_size == thirty_two_ch_double_file_size:
return 32, 8
else:
IOError(f'Cannot determine format of look file with size {file_size}') |
def endNamespace(moduleConfig):
    """Build the closing-brace lines that end the header's nested namespaces."""
    closers = ('} //' + name + '\n' for name in reversed(moduleConfig['Namespace']))
    return ''.join(closers)
def flipBit ( bitVal ):
    """
    Flip a single binary digit.

    Parameters
    ----------
    bitVal:
        An integer value of 1 or 0

    Returns
    -------
    flipBitVal:
        The flipped bit: 0 for 1, 1 for 0
    """
    return 1 - bitVal
def hurdle_race(k, height):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/the-hurdle-race/problem

    Number of doses needed so a runner with natural jump height k can clear
    every hurdle: the tallest hurdle minus k, floored at 0 (a non-positive
    difference means the runner already clears everything).

    Args:
        k (int): the natural jump height of the runner
        height (list): list of integer hurdle heights the runner must clear

    Returns:
        int: number of doses needed to clear the highest hurdle
    """
    shortfall = max(height) - k
    return shortfall if shortfall > 0 else 0
def check_luhn_modulo10(key):
    """Check a key against Luhn's modulo 10 algorithm.

    Luhn modulo10 http://en.wikipedia.org/wiki/Luhn_algorithm

    @type key: string
    @param key: string of numbers (non-digit input yields False).
    @rtype: boolean
    @return: Is key in luhn's modulo 10
    """
    key = str(key)
    try:
        digits = [int(ch) for ch in key]
    except ValueError:
        return False
    # Undoubled digits (last, third-from-last, ...) count as-is.
    total = sum(digits[-1::-2])
    # Every second digit from the right is doubled; digit-sum via divmod.
    for d in digits[-2::-2]:
        doubled = d * 2
        total += doubled // 10 + doubled % 10
    return total % 10 == 0
def tree_label(tree):
    """Return the label on the root node of tree (the tree itself for leaves)."""
    return tree[0] if isinstance(tree, list) else tree
def search_error_for_adams(adams_results: list, adams_results_less: list, m: int) -> list:
    """
    Calculate Runge-style error estimates for the Adams method.

    :param adams_results: results from the method at the base step
    :param adams_results_less: results from the method with the step halved
        (so every 2nd entry aligns with a base-step entry)
    :param m: value for the fault; denominator is 2**m - 1
    :return: list with errors, one per base-step result
    """
    denominator = 2 ** m - 1
    return [
        (coarse[1] - adams_results_less[2 * idx][1]) / denominator
        for idx, coarse in enumerate(adams_results)
    ]
def _lowercase(dictin):
"""
Make all keys from a dictionary lowercase.
Args:
dictin (dict): dictionary
Returns:
modified dictionary dictin
Ok for nested dictionaries, but will not work for dictionaries within lists.
"""
if isinstance(dictin, dict):
return {k.lower(): _lowercase(v) for k, v in dictin.items()}
else:
return dictin |
def row_end_index(index):
    """Return the index of the last byte of the 0x10-byte row containing *index*."""
    return (index // 0x10) * 0x10 + 0xF
def filter_dict(table, allowed_fields):
    """
    Take an input list of dicts (table) and reduce each row to only the
    keys listed in ``allowed_fields``.
    """
    return [
        {field: row[field] for field in allowed_fields}
        for row in table
    ]
def is_implied_by_something_else(
    current_label,
    reversed_normalizer,
    all_labels_for_protein,
):
    """Returns whether the current label is implied by other labels for protein.

    Args:
        current_label: label about which we're asking "is this implied by some
            other label for this protein?"
        reversed_normalizer: output of reverse_map(label_normalizer). Helps this
            function run fast.
        all_labels_for_protein: set of all labels given to protein.

    Returns:
        bool
    """
    other_labels = all_labels_for_protein - frozenset([current_label])
    # Most labels imply themselves; drop self before intersecting.
    children = reversed_normalizer[current_label] - frozenset([current_label])
    return bool(children & other_labels)
def is_estimator(obj):
    """
    Check whether ``obj`` has both 'fit' and 'transform' methods
    (the sklearn-style estimator interface).

    Parameters
    ----------
    obj : object to inspect

    Returns
    -------
    bool
    """
    return all(hasattr(obj, attr) for attr in ('fit', 'transform'))
def split_wrap(sql):
    r"""Split ``sql`` on '\n', drop empty first/last lines, and strip each line.

    Previously an empty input string raised IndexError (the first `del`
    emptied the list before `sql_list[-1]` was checked); it now returns [].

    :param sql: SQL text, possibly with leading/trailing blank lines.
    :return: list of stripped, non-edge-blank lines.
    """
    lines = sql.split('\n')
    if lines and lines[0] == '':
        del lines[0]
    if lines and lines[-1] == '':
        del lines[-1]
    return [line.strip() for line in lines]
def normalize_es_result(es_result):
    """Turn a single ES result (relationship doc) into a normalized format.

    Returns a tuple of (field-name, value) pairs with the nested
    Source/Target IDs flattened.
    """
    top_level = [
        (name, es_result[name])
        for name in ('RelationshipType', 'Grouping', 'ID')
    ]
    nested = [
        ('SourceID', es_result['Source']['ID']),
        ('TargetID', es_result['Target']['ID']),
    ]
    return tuple(top_level + nested)
def isFull(dict1):
    """dict -> bool

    Return True when every value in the dictionary is "filled in", i.e.
    neither the empty string nor the placeholder '2018'; False otherwise.
    An empty dictionary counts as full.
    """
    if not dict1:
        return True
    return all(value not in ('', '2018') for value in dict1.values())
def exterior_algebra_basis(n, degrees):
    """
    Basis of an exterior algebra in degree ``n``, where the
    generators are in degrees ``degrees``.

    INPUT:

    - ``n`` - integer
    - ``degrees`` - iterable of integers

    Return list of lists, each list representing exponents for the
    corresponding generators. (So each list consists of 0's and 1's.)

    EXAMPLES::

        sage: from sage.algebras.commutative_dga import exterior_algebra_basis
        sage: exterior_algebra_basis(1, (1,3,1))
        [[0, 0, 1], [1, 0, 0]]
        sage: exterior_algebra_basis(4, (1,3,1))
        [[0, 1, 1], [1, 1, 0]]
        sage: exterior_algebra_basis(10, (1,5,1,1))
        []
    """
    all_zero = [0] * len(degrees)
    # No generators: only the empty monomial, which has degree 0.
    if not degrees:
        return [all_zero] if n == 0 else []
    # Single generator: either use it (degree must match) or skip it (n == 0).
    if len(degrees) == 1:
        if n == degrees[0]:
            return [[1]]
        if n == 0:
            return [all_zero]
        return []
    # Recurse on the tail: first generator absent (exponent 0) ...
    basis = [[0] + tail for tail in exterior_algebra_basis(n, degrees[1:])]
    if n == 0 and all_zero not in basis:
        basis.append(all_zero)
    # ... or present (exponent 1), contributing its degree.
    head_degree = degrees[0]
    basis.extend([1] + tail for tail in exterior_algebra_basis(n - head_degree, degrees[1:]))
    return basis
def index(err):
    """Index of knex — return the error payload together with a 404 status."""
    # if request.path.startswith("/api/"):
    not_found = 404
    return err, not_found
def config_class(environment: str):
"""Link given environment to a config class."""
return f"{__package__}.config.{environment.capitalize()}Config" |
def FormatBytes(bytes):
    """Pretty-print a number of bytes (e.g. '1.5m', '2.0k', or the raw value)."""
    for threshold, suffix in ((1e6, 'm'), (1e3, 'k')):
        if bytes > threshold:
            return '%.1f%s' % (bytes / threshold, suffix)
    return str(bytes)
def fix_teen(n):
    """Zero out "awkward" teen values.

    Args:
        n (int): number to check.

    Returns:
        0 when n is one of 13, 14, 17, 18 or 19; otherwise n unchanged
        (15 and 16 pass through).
    """
    awkward_teens = {13, 14, 17, 18, 19}
    return 0 if n in awkward_teens else n
def compact_float(n, max_decimals=None):
    """Reduce a float to a more compact value.

    Args:
        n: Floating point number.
        max_decimals: Maximum decimals to keep; defaults to None.

    Returns:
        An integer if `n` is essentially an integer, or a string
        representation of `n` reduced to `max_decimals` numbers after
        the decimal point. Otherwise, simply returns `n`.
    """
    if float(n).is_integer():
        return int(n)
    if max_decimals is not None:
        return "{0:.{1}f}".format(n, max_decimals)
    return n
def row_sum_odd_numbers2(n):
    """Sum of row ``n`` (1-based) of the odd-number triangle:

        1
        3 5
        7 9 11
        ...

    Row n holds n consecutive odd numbers, and their sum is exactly n**3
    (a classical identity: the cubes partition the odd numbers). The
    previous implementation enumerated the odds in O(n^2) time and O(n)
    space; the closed form is O(1) for both.
    """
    return n ** 3
def has_check_failure(properties, message):
    """
    Return True when any property's description contains ``message`` and
    its status is "FAILURE".
    """
    return any(
        message in prop["description"] and prop["status"] == "FAILURE"
        for prop in properties
    )
def compute_interval_id(season, day, period):
    """
    Map a (season, day, period) triple to a 1-based interval id.
    Offsets: 168 intervals per season (= 7 * 24) and 24 per day.

    Arguments
    ---------
    season : int (1-based)
    day : int (1-based)
    period : int (1-based)

    Returns
    -------
    int
    """
    season_offset = 168 * (season - 1)
    day_offset = 24 * (day - 1)
    return season_offset + day_offset + period
def compute_ks_for_conv2d(w_in: int, w_out: int, padding: int=1) -> int:
    """Compute the conv2d kernel size that shrinks width w_in to w_out
    (w_out < w_in), assuming the filter has stride=1.

    From w_out = floor(w_in - k + 2p) + 1 we get k = w_in - w_out + 2p + 1;
    a positive integer k is only possible when w_out - w_in < 2p - 1
    (asserted below).
    """
    assert w_out - w_in < 2*padding-1, "No valid kernel size is possible"
    return w_in - w_out + 2 * padding + 1
def get_points_from_box(bb_xyxy):
    """
    Get the centroid of a bounding box and its point "on the ground".

    @param bb_xyxy: box as (x1, y1, x2, y2) corner coordinates
    @return: centroid (cx, cy) and bottom-center ground point (cx, y2)
    """
    cx = int((bb_xyxy[0] + bb_xyxy[2]) / 2)
    cy = int((bb_xyxy[1] + bb_xyxy[3]) / 2)
    # The "ground" point sits on the bottom edge, under the centroid.
    ground_y = int(bb_xyxy[3])
    return (cx, cy), (cx, ground_y)
def parse_genome_length_string(gen_len_param):
    """
    Parse a genome-length string, optionally suffixed with K/M/G
    (case-insensitive), into an integer number of bases.

    :param gen_len_param: e.g. "5000", "5K", "2M", "1G"
    :return: genome length as an int
    """
    multipliers = {'G': 1e9, 'M': 1e6, 'K': 1e3}
    try:
        return int(gen_len_param)
    except ValueError:
        digits, suffix = gen_len_param[:-1], gen_len_param[-1]
        return int(int(digits) * multipliers[suffix.upper()])
def GetPatchMetadata(patch_dict):
    """Gets the patch's metadata.

    Args:
        patch_dict: A dictionary that has the patch metadata.

    Returns:
        A (start_version, end_version, is_critical) tuple, defaulting to
        (0, None, False) for missing keys.
    """
    defaults = (('start_version', 0), ('end_version', None), ('is_critical', False))
    return tuple(patch_dict.get(key, fallback) for key, fallback in defaults)
def sample_config_file(fname="sample_config_file.ini"):
    """Create a sample config file, to be modified by hand.

    Writes an annotated INI template to ``fname`` (overwriting any existing
    file) and returns the file name.
    """
    string = """
    [local]
    ; the directory where the analysis will be executed.
    workdir : .
    ; the root directory of the data repository.
    datadir : .
    ; the root directory of the data repository.
    productdir : None
    [analysis]
    projection : ARC
    interpolation : spline
    prefix : test_
    list_of_directories :
    ;;Two options: either a list of directories:
    ; dir1
    ; dir2
    ;; or a star symbol for all directories
    ; *
    calibrator_directories :
    ; if left empty, calibrator scans are taken from list_of_directories when
    ; calculating light curves, and ignored when calculating images
    skydip_directories :
    ; if left empty, calibrator scans are taken from list_of_directories when
    ; calculating light curves, and ignored when calculating images
    noise_threshold : 5
    ;; For spectral rms smoothing, in percentage of then number of spectral bins.
    smooth_window : 0.05
    ;; Coordinates have to be specified in decimal degrees. ONLY use if different
    ;; from target coordinates!
    ; reference_ra : 10.5
    ; reference_dec : 5.3
    ;; Pixel size in arcminutes
    pixel_size : 1
    ;; Channels to save from RFI filtering. It might indicate known strong spectral
    ;; lines
    goodchans :
    [debugging]
    debug_file_format : jpg
    """
    # Write the template verbatim; print() adds the trailing newline.
    with open(fname, "w") as fobj:
        print(string, file=fobj)
    return fname
def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
    """Returns True if x and y are sufficiently close, elementwise.

    Parameters
    ----------
    rtol : float
        The relative error tolerance.
    atol : float
        The absolute error tolerance.
    """
    # Assume finite values; mirrors numpy.allclose()'s tolerance formula.
    return all(abs(xi - yi) <= atol + rtol * abs(yi) for xi, yi in zip(x, y))
def int16(string):
    """Parser type converter: interpret *string* as a base-16 integer."""
    return int(string, base=16)
def parse_slack_output(slack_rtm_output, bot):
    """
    The Slack Real Time Messaging API is an events firehose. Scan a batch
    of RTM events and return the first message that @-mentions the bot
    (based on its ID).

    Returns (text_after_mention, channel), or (None, None) when no event
    mentions the bot.
    """
    if not slack_rtm_output:
        return None, None
    mention = "<@%s>" % bot['bot_id']
    for event in slack_rtm_output:
        if event and 'text' in event and 'channel' in event and mention in event['text']:
            # Text after the @-mention, whitespace removed.
            return event['text'].split(mention)[1].strip(), event['channel']
    return None, None
def get_colorbrewer2_colors(family='Set2'):
    """Helper function that returns a list of color combinations
    extracted from colorbrewer2.org.

    Args:
        family: the color family from colorbrewer2.org to use
            ('Set2', 'Set3', 'Dark2' or 'Pastel'); any other value
            yields None.
    """
    palettes = {
        'Set2': [
            '#e41a1c', '#377eb8', '#4daf4a', '#984ea3',
            '#ff7f00', '#ffff33', '#a65628', '#b3de69',
        ],
        'Set3': [
            '#8dd3c7', '#ffffb3', '#bebada', '#fb8072',
            '#80b1d3', '#fdb462',
            # NOTE(review): empty entry kept verbatim from the original —
            # looks accidental (a truncated palette); confirm before fixing.
            '',
        ],
        'Dark2': [
            '#1b9e77', '#d95f02', '#7570b3', '#e7298a',
            '#66a61e', '#e6ab02', '#a6761d',
        ],
        'Pastel': [
            '#fbb4ae', '#b3cde3', '#ccebc5', '#decbe4',
            '#fed9a6', '#ffffcc', '#e5d8bd',
        ],
    }
    return palettes.get(family)
def clip(val, lower=0.0, upper=1.0):
    """
    Clips val between lower and upper.

    >>> clip(1, 0, 2)
    1
    >>> clip(2, 3, 6)
    3
    >>> clip(5, 1, 2)
    2

    Works recursively on lists.

    >>> clip([-0.2, 0.5, 1.4, 0.7])
    [0.0, 0.5, 1.0, 0.7]

    :param val: value to be clipped
    :param lower: lower bound
    :param upper: upper bound
    :return: val clipped between lower and upper
    """
    if isinstance(val, list):
        return [clip(item, lower, upper) for item in val]
    capped = min(upper, val)
    return max(lower, capped)
def is_rhyme(word1, word2, k):
    """
    Returns True if the last k letters of the two words are the same
    (case sensitive). Automatically returns False when k is 0 or either
    word contains fewer than k letters.
    """
    if k == 0 or len(word1) < k or len(word2) < k:
        return False
    # Comparing the last k characters directly is equivalent to the
    # reverse-then-compare-prefix approach.
    return word1[-k:] == word2[-k:]
def complement(s):
    """
    Return the complementary DNA sequence string (A<->T, C<->G).
    Raises KeyError for any other character.
    """
    pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join(pairs[base] for base in s)
def machineCostPerH(fixed, operating):
    """
    Combine machine costs.

    fixed = fixed costs
    operating = operating costs

    Returns {'Machine cost per H': [total]} with the summed cost.
    """
    total = fixed + operating
    return {'Machine cost per H': [total]}
def _get_finished_states(entity):
"""
Member name is either:
operationState' (NS, NSI)
'_admin.'operationalState' (VIM, WIM, SDN)
For NS and NSI, 'operationState' may be one of:
PROCESSING, COMPLETED,PARTIALLY_COMPLETED, FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
For VIM, WIM, SDN: '_admin.operationalState' may be one of:
ENABLED, DISABLED, ERROR, PROCESSING
:param entity: can be NS, NSI, or other
:return: two tuples with status completed strings, status failed string
"""
if entity == 'NS' or entity == 'NSI':
return ('COMPLETED', 'PARTIALLY_COMPLETED'), ('FAILED_TEMP', 'FAILED')
else:
return ('ENABLED', ), ('ERROR', ) |
def compat_is_coresight(dcompat):
    """
    Check if a device tree node claims compatibility with CoreSight.

    We don't check for "arm" here because an architecture-licensee core
    might be ETM-compatible but not be an Arm device. We also expect to
    see "primecell".

    Returns the suffix after "coresight-" of the first matching entry,
    or None when nothing matches.
    """
    prefix = "coresight-"
    matches = (entry[len(prefix):] for entry in dcompat if entry.startswith(prefix))
    return next(matches, None)
def content_hash(o):
    """
    Make a hash from a dictionary, list, tuple or set to any level, that
    contains only other hashable types (including any lists, tuples, sets,
    and dictionaries).

    - Sequences/sets become a tuple of element hashes (iteration-order
      dependent, matching the original behavior).
    - Non-dict objects with a __dict__ are hashed via their attribute dict.
    - Unhashable leaves fall back to hashing their str() form.
    """
    if isinstance(o, (set, tuple, list)):
        return tuple(content_hash(item) for item in o)
    elif not isinstance(o, dict):
        # The old `isinstance(o, object)` guard was always True; the
        # meaningful test is the presence of __dict__.
        if hasattr(o, '__dict__'):
            return content_hash(o.__dict__)
        try:
            return hash(o)
        except TypeError:
            # Narrowed from a bare `except:` — hash() raises TypeError for
            # unhashable values; anything else should propagate.
            return hash(str(o))
    hashes = {k: content_hash(v) for k, v in o.items()}
    return hash(tuple(frozenset(sorted(hashes.items()))))
def get_phenotype_curie(phenotype_uri):
    """Extract a phenotype CURIE (MEDDRA or HP) from the URI.

    Returns None for URIs of any other vocabulary (e.g. GO, currently
    not handled).
    """
    for marker, prefix in (("/MEDDRA/", "MEDDRA:"), ("/HP:", "HP:")):
        if marker in phenotype_uri:
            local_id = phenotype_uri.split(marker, 1)[1]
            return prefix + local_id
    return None
def area_is_at_diagonal(i, j):
    """
    Find whether an extracted area is from the diagonal, i.e. both
    interval indices are equal.

    Parameters:
        i -- First interval index.
        j -- Second interval index.

    Output:
        Whether the area is at the diagonal.
    """
    return i == j
def transcribe(seq: str) -> str:
    """
    Transcribe DNA to RNA: generate the complement sequence with the
    T -> U replacement (A->U, T->A, C->G, G->C).
    """
    mapping = {"A": "U", "C": "G", "T": "A", "G": "C"}
    rna = [mapping[base] for base in seq]
    return "".join(rna)
def parse(input_string):
    """
    Parse (minimally validate) a Smart Game Format tree string.

    Raises ValueError for an empty string, for a string not wrapped in
    parentheses, or when the first node marker ';' is missing. Returns the
    string with the surrounding parentheses stripped.
    """
    # check for null string
    if not input_string:
        raise ValueError('Length of input string should be greater than zero!')
    # check if tree exists
    if input_string[0] != '(' and input_string[-1] != ')':
        raise ValueError('No tree exists.')
    stripped = input_string.strip('(').strip(')')
    if stripped[0] != ';':
        raise ValueError('A tree should have a node inside it!')
    return stripped
def get_editdistance(a, b):
    """Simple unweighted Levenshtein distance between sequences a and b.

    Classic two-row dynamic programming: O(len(a)*len(b)) time,
    O(len(b)) space.
    """
    prev = list(range(len(b) + 1))
    curr = [0] * (len(b) + 1)
    for i, item_a in enumerate(a):
        curr[0] = i + 1
        for j, item_b in enumerate(b):
            # Bug fix: use equality, not `is` — identity comparison on
            # characters only worked by accident of CPython interning.
            cost = 0 if item_a == item_b else 1
            curr[j + 1] = min(curr[j] + 1, prev[j + 1] + 1, prev[j] + cost)
        prev = curr[:]
    # Bug fix: return from `prev` (initialized to 0..len(b)) so an empty
    # `a` yields len(b) instead of 0.
    return prev[len(b)]
def peak2range(peak):
    """
    Transform a peak from string form ("chrom:start-end") into its
    chromosome name and a Python range object (end inclusive).
    Prints a format warning and returns None on malformed input.
    """
    try:
        chrom, span = peak.split(':')
        bounds = span.split('-')
        return chrom, range(int(bounds[0]), int(bounds[1]) + 1)
    except ValueError:
        print(f'Cannot process peak {peak}. Please ensure ',
              'that all peaks have the correct format.\nRun ',
              'ensure_peak_format() on all anndata objects.')
def url_creator(date):
    """
    Build the BSE bhavcopy URL for a (dd, mm, yy) date triple,
    e.g. [13, 11, 17]; the month is zero-padded to two digits.
    """
    day, month, year = date
    base = 'http://www.bseindia.com/download/BhavCopy/Equity/EQ_ISINCODE_'
    return base + '{0:}{1:02d}{2:}.zip'.format(day, month, year)
def is_heartbeat_enabled(outgoing_heartbeat: int, incoming_heartbeat: int):
    """
    Determine if STOMP heartbeat is enabled or not. Per the specification,
    it is only enabled when both established times are greater than zero
    (equivalently, when the smaller of the two is positive).

    More on: https://stomp.github.io/stomp-specification-1.1.html#Heart-beating
    """
    return min(outgoing_heartbeat, incoming_heartbeat) > 0
def _flipFrame(parentFrame, objFrame):
"""
Translate a Cocoa frame to vanilla coordinates.
"""
(pL, pB), (pW, pH) = parentFrame
(oL, oB), (oW, oH) = objFrame
oT = pH - oB - oH
return oL, oT, oW, oH |
def get_block_index(blocks, key):
    """Retrieves the index of the first block with the given key, or -1 if not found."""
    for position, block in enumerate(blocks):
        if block['key'] == key:
            return position
    return -1
def divide_lista_objs(objList, resList, numThreads):
    """Divide the object list into (up to) numThreads sublists, in place.

    Splits objList into numSubSet near-equal sublists and reshapes resList
    to match, so each worker gets one object sublist and one result sublist.
    Both lists are MUTATED in place and also returned.

    :param objList: flat list of objects; becomes a list of sublists.
    :param resList: list of previous results; becomes a list of sublists.
    :param numThreads: desired number of threads (coerced to int).
    :return: (objList, resList) after reshaping.
    """
    numThreads = int(numThreads)
    numObj = len(objList)
    # Use at most one sublist per object.
    numSubSet = 1
    if int(numThreads) > int(numObj):
        numSubSet = numObj
    else:
        numSubSet = numThreads
    # markerList[j] = how many objects sublist j will receive (round-robin).
    markerList = []
    for i in range(numSubSet):
        markerList.append(0)
    i = numObj
    while i > 0:
        for j in range(numSubSet):
            if i > 0:
                markerList[j] = markerList[j] + 1
                i = i - 1
    # Prepend numSubSet empty sublists, then move the original (flat)
    # elements into them according to markerList.
    for i in range(numSubSet):
        objList.insert(i, [])
        for j in range(markerList.pop(0)):
            objList[i].append(objList.pop(i + 1))
    if numThreads <= numObj:
        # More objects than threads: collapse all previous results into
        # resList[0], then pad with empty sublists until it matches objList.
        resList.insert(0, [])
        remaider = len(resList[1:])
        for j in range(remaider):
            resList[0].append(resList.pop(1))
        while len(resList) < len(objList):
            resList.insert(0, [])
    else:
        # Fewer objects than threads: wrap each previous result in its own
        # sublist, then merge/drop sublists until the combined count fits.
        # NOTE(review): this rebalancing is intricate (pops into resList[0],
        # then pads/extends both lists) — confirm the intended distribution
        # before reusing this branch.
        lenResList = len(resList)
        for i in range(lenResList):
            resList.insert(i, [])
            resList[i].append(resList.pop(i + 1))
        while (len(objList) + len(resList)) > numThreads:
            resList[0].append(resList[1].pop())
            resList.pop(1)
        for i in range(len(objList)):
            resList.insert(0, [])
        while len(resList) > len(objList):
            objList.append([])
    return (objList, resList)
def _create_permissions_string(permission_list):
"""
Creates an list of individual facebook permissions
and makes a comma seperated string of permissions.
"""
return ','.join(permission_list).replace(" ", "") |
def containsUnwanted(str, set):
    """Check whether str contains ANY character not in set.

    (Parameter names shadow the builtins but are kept for interface
    compatibility with keyword callers.)
    """
    # The original's "0 in [c in set for c in str]" means "some membership
    # test was False", i.e. some character is outside the set.
    return any(ch not in set for ch in str)
def change1(x, y):
    """Mimic ByRef by rebinding after the fact.

    Both locals are incremented, but only the new ``y`` is returned;
    the caller's bindings are untouched.
    """
    x += 1
    y += 1
    return y
def toint(x):
    """Round ``x`` to the nearest integer without raising exceptions.

    Uses numpy's rint (round-half-to-even). NaN, infinity, and
    unconvertible inputs all yield 0 instead of raising.
    """
    from numpy import rint
    try:
        # rint moved inside the try so non-numeric input (TypeError from
        # rint itself) is also caught; the old bare `except:` is narrowed
        # to the conversion errors that can actually occur.
        return int(rint(x))
    except (ValueError, TypeError, OverflowError):
        # NaN -> ValueError, inf -> OverflowError, non-numeric -> TypeError.
        return 0
def _read_bytes_as_string(keytab: str, index: int, bytes_to_read: int) -> str:
""" Given hex-encoded keytab data, the index we're starting from, the number of
bytes in the keytab we want to read, and the keytab format version, this function
will read and interpret the bytes requested starting at the index.
The resultant hex is them decoded to a UTF-8 string and returned.
A hex number is 4 bits, so our "bytes to read" value gets doubled to determine
actual offsets in our hex string.
"""
offset = bytes_to_read * 2
end_index = index + offset
if end_index > len(keytab):
return '0' # this is the same as get_bytes_number above. when we can't read, return 0
return bytearray.fromhex(keytab[index:end_index]).decode('UTF-8') |
def kullanicidanDegerAta(_kapasite, _agirliklar, _valueInt):
    """
    Pass through the user-supplied values the program needs to run
    (knapsack capacity, item weights, item values) unchanged.
    """
    return _kapasite, _agirliklar, _valueInt
def hms(d, delim=':', output_string=False):
    """Convert hours, minutes, seconds to decimal degrees, and back.

    EXAMPLES:
      hms('15:15:32.8')
      hms('18h14m23s')
      hms([7, 49])
      hms(18.235097)
      hms(18.235097, output_string=True)

    Also works for negative values.

    SEE ALSO: :func:`dms`
    """
    # 2008-12-22 00:40 IJC: Created
    # 2009-02-16 14:07 IJC: Works with spaced or colon-ed delimiters
    # 2015-03-19 21:29 IJMC: Copied from phot.py. Added output_string.
    from numpy import sign
    if d.__class__==str or hasattr(d, '__iter__'): # must be HMS
        if d.__class__==str:
            d = d.split(delim)
            if len(d)==1:
                d = d[0].split(' ')
            if (len(d)==1) and (d[0].find('h')>-1):
                # "18h14m23s"-style input. Bug fix: the original called
                # .replace on the LIST and discarded the returned strings,
                # so this branch always raised AttributeError; operate on
                # the string and keep the results instead.
                d = d[0].replace('h', delim).replace('m', delim).replace('s', '')
                d = d.split(delim)
        s = sign(float(d[0]))
        if s==0: s=1
        degval = float(d[0])*15.0
        if len(d)>=2:
            degval = degval + s*float(d[1])/4.0
        if len(d)==3:
            degval = degval + s*float(d[2])/240.0
        return degval
    else:  # must be decimal degrees
        hour = int(d/15.0)
        d = abs(d)
        min = int((d-hour*15.0)*4.0)
        sec = (d-hour*15.0-min/4.0)*240.0
        ret = (hour, min, sec)
        if output_string:
            ret = '%02i:%02i:%04.2f' % ret
        return ret
def print_adjacent_bases(bases, sequence):
    """
    Print a summary of the bases preceding removed adapter sequences.
    Print a warning if one of the real bases (A/C/G/T) is overrepresented
    and there are at least 20 preceding bases available.

    bases maps each base ('A'/'C'/'G'/'T' and '' for "none/other") to its
    count; sequence is unused here and kept for interface compatibility.

    Return whether a warning was printed.
    """
    total = sum(bases.values())
    if total == 0:
        return False
    print('Bases preceding removed adapters:')
    warnbase = None
    for base in ['A', 'C', 'G', 'T', '']:
        b = base if base != '' else 'none/other'
        fraction = 1.0 * bases[base] / total
        print('  {0}: {1:.1%}'.format(b, fraction))
        # BUG FIX: the original tested `b != ''`, which is always true
        # (b is 'none/other' for the empty key), so a dominant
        # "none/other" count triggered a nonsensical warning telling the
        # user to prepend "none/other" to the adapter sequence.
        if fraction > 0.8 and base != '':
            warnbase = b
    if total >= 20 and warnbase is not None:
        print('WARNING:')
        print('    The adapter is preceded by "{}" extremely often.'.format(warnbase))
        print('    The provided adapter sequence may be incomplete.')
        print('    To fix the problem, add "{}" to the beginning of the adapter sequence.'.format(warnbase))
        print()
        return True
    print()
    return False
def number_of_tests_for_comparison(ns):
    """
    Get the number of pairwise t-tests required to compare `ns` samples.

    Parameter
    ---------
    > ns: number of samples

    Returns
    -------
    The number of t-tests required, as an ``int``: C(ns, 2) = ns*(ns-1)/2
    """
    # ns * (ns - 1) is always even, so floor division is exact and keeps
    # the count an int instead of the float true division produced.
    return (ns * (ns - 1)) // 2
def list_slit(lst, size=None):
    """
    Split a sequence into consecutive sub-sequences of at most ``size``
    elements (the last chunk may be shorter).

    :param lst: sequence to split
    :param size: maximum size of a sub-range; defaults to ``len(lst)``
                 (i.e. a single chunk)
    """
    if not lst:
        # BUG FIX: an empty lst with the default size produced
        # range(0, 0, 0), raising "range() arg 3 must not be zero".
        return []
    if not size:
        size = len(lst)
    return [lst[i:i + size] for i in range(0, len(lst), size)]
def to_ic50(x, max_ic50=50000.0):
    """
    Map regression targets in [0.0, 1.0] onto the IC50 scale via
    ``max_ic50 ** (1 - x)``: x=0 gives max_ic50, x=1 gives 1.0.

    Parameters
    ----------
    x : numpy.array of float

    Returns
    -------
    numpy.array of float
    """
    exponent = 1.0 - x
    return max_ic50 ** exponent
def SecureBytesEqual( a, b ):
    """Returns the equivalent of 'a == b', but avoids content based short
    circuiting to reduce the vulnerability to timing attacks."""
    # Consistent timing matters more here than data type flexibility
    # We do NOT want to support py2's str type because iterating over them
    # produces different results, so the bytes-only check stays.
    if type( a ) != bytes or type( b ) != bytes:
        raise TypeError( "inputs must be bytes instances" )
    # Delegate to the stdlib's C-level constant-time comparison rather
    # than the hand-rolled XOR loop; it is vetted against timing attacks
    # and handles the unequal-length case the same way (returns False).
    import hmac
    return hmac.compare_digest( a, b )
def convert_lon(lon):
    """
    Return 0 <= *lon* < 360 converted to -180 <= *lon* < 180.
    """
    # BUG FIX: use >= so lon == 180 maps to -180, keeping the documented
    # half-open range [-180, 180); the original '>' left 180 unchanged.
    return lon - 360 if lon >= 180 else lon
def check_piece_collision(board, piece, i, j):
    """
    Check whether placing *piece* with its top-left cell at board
    position (i, j) overlaps any occupied board cell.
    """
    width = len(piece[0])
    for dx, row in enumerate(piece):
        for dy in range(width):
            if row[dy] and board[i + dx][j + dy]:
                return True
    return False
def get_results(process_data):
    """Collect the 'result' entries of every process into one flat list."""
    return [item for process in process_data for item in process['result']]
def start_section(title, html):
    """Layout for start of a new section, if html this will make a new table."""
    if not html:
        return title + '\n\n'
    return '<h1>' + title + '</h1>\n<table border="0" cellpadding="3" width="600">'
def mb_to_human(num):
    """Translate a float number of megabytes into a human readable string."""
    if num == 0:
        return '0 B'
    suffixes = ['M', 'G', 'T', 'P']
    idx = 0
    # Climb units while the value is still >= 1024 and a bigger suffix exists.
    while num >= 1024 and idx < len(suffixes) - 1:
        num /= 1024
        idx += 1
    return "{:.2f} {}".format(num, suffixes[idx])
def is_odd(num: int) -> bool:
    """Is num odd?

    :param num: number to check.
    :type num: int
    :returns: True if num is odd.
    :rtype: bool
    :raises: ``TypeError`` if num is not an int.
    """
    if isinstance(num, int):
        # NOTE(review): bool is an int subclass, so is_odd(True) returns
        # True rather than raising — confirm callers never pass bools.
        return num % 2 != 0
    raise TypeError("{} is not an int".format(num))
def filter_invalidable_urls(workflow, urls):
    """ Returns a list of url that can be invalidated and warns about illegal urls"""
    to_invalidate = []
    for url in urls:
        resource = workflow.find_resource(url)
        # Guard clauses: report each reason a url is skipped, then move on.
        if not resource:
            print("Ignoring {} : this resource does not belong to the workflow.".format(url))
            continue
        if not workflow.resource_available(url):
            print("Ignoring {} : this resource has not been produced yet.".format(url))
            continue
        if resource.is_primary():
            print("Ignoring {} : primary resources can't be invalidated.".format(url))
            continue
        to_invalidate.append(url)
    return to_invalidate
def add_more_place(context: list, new: list):
    """Append places to context

    Args:
        context: total nearby place data
        new: new data by next page tokens

    Returns:
        context: the same list, extended in place with the places from
        ``new`` that were not already present (deduplicated by 'place_id')
    """
    # Use a set for O(1) membership tests, and — BUG FIX — keep it updated
    # as we append so duplicates *within* ``new`` are also skipped; the
    # original snapshot list let them through.
    seen = {place['place_id'] for place in context}
    for place in new:
        if place['place_id'] in seen:
            continue
        seen.add(place['place_id'])
        context.append(place)
    return context
def quote(text):
    """Return *text* wrapped in double quotes."""
    # Trivial, but it makes calling code self-documenting.
    return f'"{text}"'
def _is_valid_debug(debug):
"""
"""
return isinstance(debug, bool) |
def _toIPv4AddrInteger(strIPv4Addr):
"""Convert the IPv4 address string to the IPv4 address integer.
:param str strIPv4Addr: IPv4 address string.
:return: IPv4 address integer.
:rtype: int
Example::
strIPv4Addr Return
-------------------------
'192.0.2.1' -> 3221225985
Test:
>>> print(_toIPv4AddrInteger('192.0.2.1'))
3221225985
"""
listIPv4Octet = strIPv4Addr.split('.')
return (
(int(listIPv4Octet[0]) << 24) +
(int(listIPv4Octet[1]) << 16) +
(int(listIPv4Octet[2]) << 8) +
int(listIPv4Octet[3])) |
def normalize(grid):
    """
    Normalize a grid of unnormalized probabilities in place so that all
    cells sum to 1, and return the grid.
    """
    total = float(sum(sum(row) for row in grid))
    for row in grid:
        for j in range(len(row)):
            row[j] = row[j] / total
    return grid
def organize_alignments(design: list):
    """
    Build one CSV-style line per sample row and join them with newlines.
    Each row contributes: col0, col1_rep<col2>, col3, col4.
    """
    rows = [
        f'{row[0]},{row[1]}_rep{row[2]},{row[3]},{row[4]}'
        for row in design
    ]
    return '\n'.join(rows)
def split(arr, splits=2):
    """Split given array into `splits` smaller, similar sized arrays"""
    if len(arr) < splits:
        raise ValueError("Can't find more splits than array has elements")
    chunk = int(len(arr) / splits)
    # First splits-1 chunks are exactly `chunk` long; the last one absorbs
    # any remainder.
    head = [arr[k * chunk:(k + 1) * chunk] for k in range(splits - 1)]
    tail = arr[(splits - 1) * chunk:]
    return head + [tail]
def _to_id(name):
"""Convert a given name to a valid html id, replacing
dots with hyphens."""
return name.replace('.', '-') |
def compare_predictions_and_gold_labels(predictions_set, golds_set):
    """ Calculate True Positives (tp), False Positives (fp), and False Negatives (fn) """
    tp = sum(1 for pred in predictions_set if pred in golds_set)
    fp = sum(1 for pred in predictions_set if pred not in golds_set)
    fn = sum(1 for gold in golds_set if gold not in predictions_set)
    return tp, fp, fn
def camelcase(var):  # someVariable
    """
    Camel case convention: uppercase the first character of every element
    except the first, then concatenate.

    :param var: Variable to transform
    :type var: :py:class:`list`
    :returns: **transformed**: (:py:class:`str`) - Transformed input in ``camelCase`` convention.
    """
    pieces = []
    for idx, element in enumerate(var):
        if idx > 0:
            element = element[0].upper() + element[1:]
        pieces.append(element)
    return "".join(pieces)
def unbool(element, true=object(), false=object()):
    """A hack to make True and 1 and False and 0 unique for ``uniq``."""
    # Identity checks (not ==) keep 1/0 distinct from True/False; the
    # module-level default sentinels are deliberately shared across calls.
    if element is True:
        return true
    return false if element is False else element
def swip_swap(source, c1, c2):
    """
    Take a string *source* and characters *c1* and *c2*, and return
    *source* with all occurrences of c1 and c2 swapped.
    """
    # BUG FIX: the original compared characters with ``is``, which tests
    # object identity and only "works" because CPython happens to intern
    # short strings; ``==`` is the correct, portable comparison.  Also
    # collect into a list and join instead of quadratic ``+=`` concat.
    out = []
    for ch in source:
        if ch == c1:
            out.append(c2)
        elif ch == c2:
            out.append(c1)
        else:
            out.append(ch)
    return "".join(out)
def stdDev(X):
    """Assumes that X is a list of numbers.
    Returns the (population) standard deviation of X"""
    mean = float(sum(X)) / len(X)
    variance = sum((x - mean) ** 2 for x in X) / len(X)
    return variance ** 0.5
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.