content stringlengths 42 6.51k |
|---|
def get_matching(content, match):
    """Return only the lines of *content* containing *match* (all lines if match is empty)."""
    if not match:
        return content
    return "\n".join(line for line in content.split("\n") if match in line)
def ms_payload_2(payload):
    """Map the menu choice given by the user (from create_payloadS.py) to a payload path."""
    payload_map = {
        '1': "shellcode/pyinject",
        '2': "shellcode/multipyinject",
        '3': "set/reverse_shell",
        '4': "set/reverse_shell",
        '5': "set/reverse_shell",
        '6': "shellcode/alphanum",
        # '7': "7",
        '8': "cmd/multi",
    }
    return payload_map.get(payload, "ERROR")
def largestPermutation(k:int, arr:list):
    """
    Make *arr* lexicographically largest using at most *k* swaps, in place.
    Greedily places the largest remaining value at each position.
    Assumes distinct values (index_dict maps value -> index) — TODO confirm.
    NOTE(review): the k==0 branch prints the array as a side effect.
    Returns the (mutated) list.
    """
    sorted_arr = sorted(arr, reverse = True)
    # value -> current index in arr; kept in sync after each swap
    index_dict = {v:i for i, v in enumerate(arr)}
    counter = 0
    if k==0:
        print(arr)
    else:
        for i,v in enumerate(arr):
            largest_value = sorted_arr[i]
            lg_idx = index_dict[largest_value] # index for largest value in unsorted arr
            if (largest_value!=arr[i]) and (counter<k):
                arr[lg_idx], arr[i] = arr[i], arr[lg_idx]
                # update the index map so later lookups see the swap
                index_dict[v] = lg_idx
                index_dict[largest_value] = i
                counter +=1
    return arr
def get_dims(board) -> tuple:
    """Return the (y, x) dimensions of a matrix, taking x from the first row."""
    rows = len(board)
    cols = len(board[0])
    return rows, cols
def searchRange(nums, target):
    """Find first and last position of *target* in sorted *nums*.

    Returns [first, last], or [-1, -1] when absent.  First pass is a
    right-biased binary search that pins the last occurrence (and records
    a candidate minimum); second pass searches [0, _min] left-biased to
    pin the first occurrence.
    """
    def midpoint(x, y):
        """Find mid point (overflow-safe form)."""
        return x + (y - x) // 2
    lo, hi = 0, len(nums)-1
    _max = -1
    _min = float('inf')
    while lo <= hi:
        mid = midpoint(lo, hi)
        if nums[mid] == target:
            _max = max(_max, mid)
            _min = min(_min, mid)
        # bias right: even on a hit, keep searching the upper half
        if nums[mid] <= target:
            lo = mid+1
        else:
            hi = mid-1
    if _max == -1:
        # target never seen -> [-1, -1]
        return [-1, _max]
    lo, hi = 0, _min
    while lo <= hi:
        mid = midpoint(lo, hi)
        if nums[mid] == target:
            _min = min(_min, mid)
        # bias left: even on a hit, keep searching the lower half
        if nums[mid] >= target:
            hi = mid-1
        else:
            lo = mid+1
    return [_min, _max]
def dice_coefficient2(a, b, case_insens=True):
    """Inverted Dice distance over character bigrams: 0.0 = identical, 1.0 = no overlap.

    :type a: str
    :type b: str
    :type case_insens: bool

    Duplicate bigrams are counted distinctly (otherwise 'AA' and 'AAAA'
    would score as a perfect match).  Based on:
    https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Dice%27s_coefficient#Python
    """
    if case_insens:
        a, b = a.lower(), b.lower()
    if not a or not b:
        return 1.0
    # quick case for true duplicates
    if a == b:
        return 0.0
    # single characters yield no bigrams, hence no possible overlap
    if len(a) == 1 or len(b) == 1:
        return 1.0
    bigrams_a = sorted(a[i:i + 2] for i in range(len(a) - 1))
    bigrams_b = sorted(b[i:i + 2] for i in range(len(b) - 1))
    total = len(bigrams_a) + len(bigrams_b)
    # two-pointer merge over the sorted bigram lists counts shared bigrams
    hits = ia = ib = 0
    while ia < len(bigrams_a) and ib < len(bigrams_b):
        if bigrams_a[ia] == bigrams_b[ib]:
            hits += 2
            ia += 1
            ib += 1
        elif bigrams_a[ia] < bigrams_b[ib]:
            ia += 1
        else:
            ib += 1
    return round(1 - hits / total, 6)
def is_palindrome_permutation(value):
    """
    195. Palindrome Permutation: True iff *value* (lowercase a-z) is a
    permutation of some palindrome — i.e. the number of characters with an
    odd count is 0 (even length) or 1 (odd length).
    """
    # parity bit per letter; XOR toggles 0 -> 1 -> 0 on each occurrence
    parity = [0] * 26
    for ch in value:
        parity[ord(ch) - 97] ^= 1
    odd_letters = sum(parity)
    if len(value) % 2 == 0:
        return odd_letters == 0
    return odd_letters == 1
def is_odd(n):
    """
    Check if number n is odd.
    :param n: integer to test
    :return: True if n is odd, otherwise False
    """
    # Bug fix: the previous branches were inverted — `if n % 2` is truthy
    # exactly for ODD numbers, yet the code returned False there.
    return bool(n % 2)
def init_result_info(doi, path, defaults=None):
    """Initialise a result-info dict with analysis metadata.

    A truthy *defaults* dict is extended in place; otherwise a new dict
    is created.
    """
    info = defaults or {}
    info.update(analysis_complete=True, analysis_doi=doi, analysis_path=path)
    return info
def _split_dict(in_dict, key_names, nullable_fields=frozenset()):
"""
Split a dict into two dicts. Keys in key_names go into the new dict if
their value is present and not None, allowing for None values if the key
name is present in the nullable_fields set.
return (updated original dict, new dict)
"""
new_dict = {}
for key_name in key_names:
if key_name in in_dict:
val = in_dict.pop(key_name, None)
if val is not None or (val is None and key_name in nullable_fields):
new_dict[key_name] = val
return in_dict, new_dict |
def sumOfAbs(array, cutoff):
    """Return the sum of absolute values strictly above a cutoff.
    :param array:
    :type array: :class:`collections.abc.Iterable`
    :param cutoff:
    :type cutoff: :py:class:`float`
    :return: value
    :rtype: :py:class:`float`
    """
    magnitudes = (abs(value) for value in array)
    return sum(m for m in magnitudes if m > cutoff)
def add_fmt(nfields, doc):
    """
    Build the table's column-format string depending on the config.bench
    file; *doc* selects the detailed layout, otherwise a plain centred one.
    """
    if doc:
        return '|lrcr|r|rrrr|rrr|rr|' + 'rrr|rrrr|' * nfields
    return '|c|c|' + 'c|' * nfields
def basename(path):
    """
    Return the last component of a path, accepting both / and \\ separators.
    Arguments:
    path -- File path.
    """
    normalized = path.replace("\\", "/")
    return normalized.rsplit("/", 1)[-1]
def get_nestted_dict_value(nested_dict, path_list):
    """
    Fetch a value from a nested dict/list structure, following *path_list*
    (a sequence of keys and/or indices) one step at a time.

    Parameters
    ----------
    nested_dict : dict
    path_list : list
        keys and/or indices describing the path to the value

    Returns
    -------
    value : any type
    """
    current = nested_dict
    for step in path_list:
        current = current[step]
    return current
def _lcs(string, sub):
"""
Computes longest common subsequence (LCS) for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (list of int): length of the LCS between the two strings
"""
if len(string) < len(sub):
sub, string = string, sub
str_len, sub_len = len(string), len(sub)
lengths = [[0 for _ in range(sub_len + 1)] for _ in range(str_len + 1)]
for j in range(1, sub_len + 1):
for i in range(1, str_len + 1):
if string[i - 1] == sub[j - 1]:
lengths[i][j] = lengths[i - 1][j - 1] + 1
else:
lengths[i][j] = max(lengths[i - 1][j], lengths[i][j - 1])
return lengths[str_len][sub_len] |
def first_line(text):
    """Return only the first line of a potential multi-line text."""
    return text.partition("\n")[0].partition("\r")[0]
def rec_replace(l, d):
    """Recursively replace elements of (possibly nested) list *l* in place.
    ARGUMENTS:
    l - list or list of lists
    d - dictionary; any non-list element that matches a key of d is
        replaced by the corresponding value (unmatched elements kept)
    OUTPUT:
    l - the same list, mutated
    """
    for idx, item in enumerate(l):
        if isinstance(item, list):
            rec_replace(item, d)
        else:
            l[idx] = d.get(item, item)
    return l
def clean_data(lst):
    """
    Replace artificial NaNs and noisy cells with "".
    A noisy cell contains only special (non-alphanumeric) characters.
    """
    cleaned = []
    for cell in lst:
        text = str(cell)
        if text in ('nan', 'NaN'):
            cleaned.append("")
        elif not any(ch.isalnum() for ch in text):
            cleaned.append("")
        else:
            cleaned.append(cell)
    return cleaned
def to_usd(my_price):
    """Convert a numeric value to a usd-formatted string (e.g. '$1,234.50')."""
    return f"${my_price:,.2f}"
def fine_dining_validation(fine_dining):
    """ Decide if the cuisine input is valid.
    Parameters:
        (str): A user's input to the cuisine factor.
    Return:
        (str): A single valid string, such as "1", "0" or "-5" and so on.
    """
    valid_choices = {str(i) for i in range(-5, 6)}
    while fine_dining not in valid_choices:
        print("\nI'm sorry, but " + fine_dining + " is not a valid choice. Please try again.")
        fine_dining = input("\nHow much do you like fine dining? (-5 to 5)"
                            + "\n> ")
    return fine_dining
def get_nested_expression(shifts):
    """Return an SMT-LIB string of *shifts* nested bvshl additions.
    Args:
        shifts: An integer, the number of nested shift operations (>= 1).
    """
    expressions = []
    for depth in range(shifts):
        inner = "x_0" if depth == 0 else expressions[-1]
        expressions.append(f"(bvshl x_{depth + 1} {inner})")
    return expressions[-1]
def print_hi(name):
    """Build a greeting.
    :param name: the name to say hi to
    :return: the greeting string
    """
    greeting = f"Hi, {name}"
    return greeting
def sense(location, states, moves):
    """
    Sense the environment: for each move, 0 when applying it to *location*
    lands inside *states*, else 1.
    :param location: current location
    :param states: possible states
    :param moves: possible moves (callables taking a location)
    :return: list of 0/1 sensor readings, one per move
    """
    return [0 if move(location) in states else 1 for move in moves]
def callback(indata, outdata, frames, time, status):
    """
    Audio-stream callback: report any non-empty status, then pass the
    input-channel buffer through unchanged.
    """
    if status:
        print(status)
    return indata
def set_dict_indices(my_array):
    """Create a dictionary linking each value in *my_array* to its index.
    Parameters
    ----------
    my_array:
        An array (e.g. [a,b,c])
    Returns
    -------
    my_dict:
        A dictionary (e.g. {a:0, b:1, c:2}); for duplicate values the
        last index wins.
    """
    return {value: index for index, value in enumerate(my_array)}
def format_error(msg, row=None, col=None, line=None):
    """
    Format the error for human consumption; when full position info is
    given, include the offending line and a caret under the column.
    """
    if row is None or col is None or line is None:
        return f'error: {msg}'
    pointer = ' ' * (col + 1) + '^'
    return f'error: {msg} at column {col} on line {row}:\n  {line}\n{pointer}'
def is_vlan_bitmap_empty(bitmap):
    """Return True when the VLAN bitmap is missing/empty or all '0' characters."""
    if not bitmap:
        return True
    return all(bit == '0' for bit in bitmap)
def lcm(x, y):
    """Return the least common multiple of two positive integers.

    Uses the identity lcm(x, y) = x * y // gcd(x, y); the previous
    implementation linearly searched upward from max(x, y), taking
    O(lcm(x, y)) iterations.
    """
    from math import gcd  # local import keeps the module's import block untouched
    return x * y // gcd(x, y)
def MD5_f1(b, c, d):
    """First MD5 ternary function F(b, c, d) = (b AND c) OR (NOT b AND d), masked to 32 bits."""
    selected = (b & c) | (~b & d)
    return selected & 0xFFFFFFFF
def strip_space(x):
    """Collapse whitespace runs in *x* to single spaces and trim the ends."""
    # str.split() with no argument already drops empty tokens and strips ends
    return " ".join(x.split())
def _standardize_county_name(zipcounty):
"""
Standardize county name to match with our 'San Francisco' like formatting.
Takes a zipcounty dict and updates 'countyName' key if exists.
"""
if 'countyName' in zipcounty.keys():
countyname = zipcounty['countyName'].lower()
county_list = [word[0].upper() + word[1:] for word in countyname.split()]
zipcounty['countyName'] = ' '.join(county_list)
return zipcounty |
def _write(fp, lines):
"""
Write a collection of lines to given file.
:param str fp: path to file to write
:param Iterable[str] lines: collection of lines to write
:return str: path to file written
"""
with open(fp, 'w') as f:
for l in lines:
f.write(l)
return fp |
def floor(x):
    """Implementation of `floor`.

    Delegates to the argument's ``__floor__`` protocol method; objects
    without it raise AttributeError (note: plain floats only grew
    ``__floor__`` in Python 3.9 — TODO confirm target version).
    """
    return x.__floor__()
def get_H_O_index_list(atoms):
    """Return two lists with the indices of hydrogen and oxygen atoms.

    Each atom is matched on its first element ('H' or 'O').
    """
    # Bug fix: `H_list = O_list = []` bound both names to the SAME list,
    # so every matching index ended up in both results.
    H_list = []
    O_list = []
    for index, atom in enumerate(atoms):
        if atom[0] == 'H':
            H_list.append(index)
        if atom[0] == 'O':
            O_list.append(index)
    return H_list, O_list
def normalize_text(text, lower=True):
    """
    Normalizes a string.
    The string is lowercased and all non-alphanumeric characters are removed.
    >>> normalize_text("already normalized")
    'already normalized'
    >>> normalize_text("This is a fancy title / with subtitle ")
    'this is a fancy title with subtitle'
    >>> normalize_text("#@$~(@ $*This has fancy \\n symbols in it \\n")
    'this has fancy symbols in it'
    >>> normalize_text("Oh no a ton of special symbols: $*#@(@()!")
    'oh no a ton of special symbols'
    >>> normalize_text("A (2009) +B (2008)")
    'a 2009 b 2008'
    >>> normalize_text("1238912839")
    '1238912839'
    >>> normalize_text("#$@(*$(@#$*(")
    ''
    >>> normalize_text("Now$ this$ =is= a $*#(ing crazy string !!@)# check")
    'now this is a ing crazy string check'
    >>> normalize_text("Also commata, and other punctuation... is not alpha-numeric")
    'also commata and other punctuation is not alphanumeric'
    >>> normalize_text(("This goes over\\n" "Two Lines"))
    'this goes over two lines'
    >>> normalize_text('')
    ''
    """
    if lower:
        text = text.lower()
    cleaned_words = []
    for word in text.split():
        alnum = ''.join(ch for ch in word if ch.isalnum())
        # words that were pure punctuation vanish entirely
        if alnum:
            cleaned_words.append(alnum)
    return ' '.join(cleaned_words)
def try_get_value(obj, name, default):
    """
    Try to get a value that may not exist.
    If `obj.name` doesn't have a value, `default` is returned.
    """
    # Bug fix: a missing attribute raises AttributeError, which the old
    # `except LookupError` never caught, so the default was unreachable for
    # absent attributes.  LookupError is kept for properties whose getter
    # raises KeyError/IndexError while computing the value.
    try:
        return getattr(obj, name)
    except (AttributeError, LookupError):
        return default
def _get_query_parameters(module_params):
"""Builds query parameter.
:return: dict
:example: {"$filter": Name eq 'template name'}
"""
system_query_param = module_params.get("system_query_options")
query_param = {}
if system_query_param:
query_param = dict([("$" + k, v) for k, v in system_query_param.items() if v is not None])
return query_param |
def get_top_k_from_counts(n, counts):
    """
    Return the top *n* (key, normalized frequency) pairs from a frequency
    map, highest first.  Frequencies are normalized by the total of ALL
    values; a literal 'total' key is excluded from the ranking (but its
    value still contributes to the normalizer).
    :param n: The number of keys to return
    :param counts: A map from key to frequency
    :return: list of (key, normalized frequency) pairs
    """
    denominator = sum(counts.values())
    normalized = [(key, value / denominator)
                  for key, value in counts.items() if key != 'total']
    normalized.sort(key=lambda pair: pair[1], reverse=True)
    return normalized[:n]
def calculate_recursive_fuel(fuel_mass):
    """Total fuel needed to launch a mass of fuel, computed iteratively:
    each step needs mass // 3 - 2 more fuel, until that drops to zero."""
    total = 0
    step = fuel_mass // 3 - 2
    while step > 0:
        total += step
        step = step // 3 - 2
    return total
def serialize_classifier_output(analysis, type):
    """Wrap an analysis result in a JSON:API-style envelope with the given type."""
    attributes = {'url': analysis.get('url', None)}
    return {'id': None, 'type': type, 'attributes': attributes}
def strip_str_arr(str_arr):
    """
    Strip each string in *str_arr* (removing \n and surrounding spaces)
    and drop the strings that end up empty.
    """
    stripped = (s.rstrip().strip() for s in str_arr)
    return [s for s in stripped if s]
def params(css, encoding, use_bom=False, expect_error=False, **kwargs):
    """Nicer syntax to make a test-parameter tuple."""
    return (css, encoding, use_bom, expect_error, kwargs)
def start_quote(text):
    """Check whether *text* starts with a single or double quote."""
    return text.startswith(("'", '"'))
def get_name(header, splitchar = "_", items = 2):
    """Extract a name from a '>'-prefixed header, keeping the first *items*
    splitchar-separated fields (own function vs. import from
    match_contigs_to_probes - we don't want lowercase)."""
    if not splitchar:
        return header.lstrip('>')
    return "_".join(header.split(splitchar)[:items]).lstrip('>')
def tag2ts(ts_tag_sequence):
    """
    transform ts tag sequence to targeted sentiment
    :param ts_tag_sequence: tag sequence for ts task; tags look like
        'S-POS' / 'B-NEG' / 'E-NEU' / 'O' (position-sentiment)
    :return: list of (begin, end, sentiment) index triples
    """
    n_tags = len(ts_tag_sequence)
    ts_sequence, sentiments = [], []
    beg, end = -1, -1
    for i in range(n_tags):
        ts_tag = ts_tag_sequence[i]
        # current position and sentiment
        eles = ts_tag.split('-')
        if len(eles) == 2:
            pos, sentiment = eles
        else:
            # tags without a '-' (e.g. plain 'O') carry no sentiment
            pos, sentiment = 'O', 'O'
        if sentiment != 'O':
            # current word is a subjective word
            sentiments.append(sentiment)
        if pos == 'S':
            # singleton
            ts_sequence.append((i, i, sentiments[0]))
            sentiments = []
        elif pos == 'B':
            beg = i
        elif pos == 'E':
            end = i
            # schema1: only the consistent sentiment tags are accepted
            # that is, all of the sentiment tags are the same
            if end > beg > -1 and len(set(sentiments)) == 1:
                # NOTE: `sentiment` is the one parsed from this 'E' tag
                ts_sequence.append((beg, end, sentiment))
                sentiments = []
                beg, end = -1, -1
    return ts_sequence
def find_first(dictionary, condition):
    """Return the first (key, value) pair for which condition(key, value)
    is truthy, or None when no pair passes."""
    matches = ((k, v) for k, v in dictionary.items() if condition(k, v))
    return next(matches, None)
def dicts_are_consistent(d1: dict, d2: dict) -> bool:
    """
    True iff every key present in both d1 and d2 maps to equal values.
    """
    shared_keys = d1.keys() & d2.keys()
    return all(d1[key] == d2[key] for key in shared_keys)
def build_class(names):
    """ Format ZLabels class file.
    >>> names = ['Label.AColleague', 'Label.ZipPostalCode']
    >>> output = build_class(names)
    >>> print output
    @isTest
    private class ZLabels {
        private static List<String> labels = new List<String> {
            Label.AColleague,
            Label.ZipPostalCode
        };
    }
    <BLANKLINE>
    """
    header = '@isTest\nprivate class ZLabels {\n    private static List<String> labels = new List<String> {\n'
    footer = '    };\n}\n'
    # comma-join so only the last entry lacks a trailing comma
    body = ',\n'.join('        ' + name for name in names) + '\n'
    return header + body + footer
def fibonacci(n):
    """Return the nth fibonacci number.
    >>> fibonacci(11)
    89
    >>> fibonacci(5)
    5
    >>> fibonacci(0)
    0
    >>> fibonacci(1)
    1
    """
    # Bug fix: compare ints with == rather than `is` — identity tests on
    # ints rely on CPython's small-int cache and raise SyntaxWarning on
    # modern Pythons.  The placeholder "YOUR CODE HERE" string is removed.
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
def grid_garden_hatch_time(grid_garden):
    """
    Returns the minimum time in seconds, for a grid garden to
    to have all its larvae hatched into butterflies.
    Parameters:
        grid_garden (list): A 2d list (MUTATED in place: larvae become 2s)
    Returns:
        seconds (int): Time in seconds
    Convention: '0' denotes empty space, '1' a larva and '2' a butterfly.\n
    Note: Larvae only hatch if they come into contact with a butterfly.
    No butterfly, no hatching! Thats the motto of this grid garden.
    Note: the hatch wave spreads through EMPTY cells as well (see the
    third worked example below) — distance is over the whole grid.
    >>> grid_garden_hatch_time([]) # empty list
    -1
    >>> grid_garden_hatch_time([[]]) # empty 2d list
    -1
    >>> grid_garden_hatch_time([[], [], []]) # empty garden
    -1
    >>> grid_garden_hatch_time([[2, 1, 1], [1, 1, 0], [0, 1, 1]])
    4
    >>> grid_garden_hatch_time([[1, 1, 1], [1, 1, 0], [0, 1, 1]]) # no butterfly
    -1
    >>> grid_garden_hatch_time([[0, 1, 0], [1, 2, 0], [0, 0, 1]])
    2
    >>> grid_garden_hatch_time([[0, 0, 0, 1], [1, 1, 0, 0], [0, 2, 0, 1], [1, 0, 1, 0]])
    4
    """
    from copy import deepcopy
    butterflies = []
    larvae = []
    # find the locations of each larva and butterfly
    for i in range(len(grid_garden)):
        for j in range(len(grid_garden[i])):
            if grid_garden[i][j] == 2:
                butterflies.append((i, j))
            elif grid_garden[i][j] == 1:
                larvae.append((i, j))
    # If there are no butterflies in the garden, it won't hatch new ones.
    if len(butterflies) == 0:
        return -1
    # build a garden similar to this one except this one has all its
    # larvae hatched into butterflies. This will be used to determine
    # when to stop looking for new butterflies.
    end_result = deepcopy(grid_garden)
    for larvai, larvaj in larvae:
        end_result[larvai][larvaj] = 2
    # avoid looking at the same block over and over again
    nodes = set(butterflies)
    seconds = 0
    # BFS frontier expansion: each while-iteration is one second of spread
    while grid_garden != end_result:
        # increment timer for each loop.
        seconds += 1
        # initially starting at the known butterfly locations,
        # search all the four neighboring blocks, located vertically
        # and horizontally but not diagonally.
        # (neighbours are enqueued even when empty, so the wave crosses 0s)
        for x, y in nodes.copy():
            if x-1 >= 0:
                nodes.add((x-1, y))
                if grid_garden[x-1][y] == 1:
                    grid_garden[x-1][y] = 2
            if y-1 >= 0:
                nodes.add((x, y-1))
                if grid_garden[x][y-1] == 1:
                    grid_garden[x][y-1] = 2
            if x+1 < len(grid_garden):
                nodes.add((x+1, y))
                if grid_garden[x+1][y] == 1:
                    grid_garden[x+1][y] = 2
            if y+1 < len(grid_garden[x]):
                nodes.add((x, y+1))
                if grid_garden[x][y+1] == 1:
                    grid_garden[x][y+1] = 2
            # discard a block after searching it.
            nodes.discard((x, y))
    return seconds
def process_properties(content, sep=': ', comment_char='#'):
    """
    Parse *content* as a properties file: one `key<sep>value` pair per
    line, skipping blank lines and lines starting with *comment_char*.
    Values are stripped of whitespace and surrounding double quotes.
    """
    props = {}
    for raw_line in content.split("\n"):
        line = raw_line.strip()
        if not line or line.startswith(comment_char):
            continue
        key, _, rest = line.partition(sep)
        props[key.strip()] = rest.strip().strip('"')
    return props
def strip_biosphere_exc_locations(db):
    """Biosphere flows don't have locations - drop any 'location' key on
    biosphere exchanges so it can't confuse linking. Mutates and returns *db*."""
    for dataset in db:
        for exchange in dataset.get('exchanges', []):
            if exchange.get('type') == 'biosphere':
                exchange.pop('location', None)
    return db
def testif(b, testname, msgOK="", msgFailed=""):
"""Function used for testing.
param b: boolean, normally a tested condition: true if test passed, false otherwise
param testname: the test name
param msgOK: string to be printed if param b==True ( test condition true)
param msgFailed: string to be printed if param b==False
returns b
"""
if b:
print("Success: " + testname + "; " + msgOK)
else:
print("Failed: " + testname + "; " + msgFailed)
return b |
def a2idx(j, n=None):
    """Return integer after making positive and validating against n.

    Non-int inputs are coerced through their __index__ protocol; when *n*
    is given, a negative index wraps once and the result must land in [0, n).
    :raises IndexError: for non-indexable j, or j out of range for n.
    """
    if type(j) is not int:
        index_method = getattr(j, '__index__', None)
        if index_method is None:
            raise IndexError("Invalid index a[%r]" % (j,))
        j = index_method()
    if n is not None:
        if j < 0:
            j += n
        if j < 0 or j >= n:
            raise IndexError("Index out of range: a[%s]" % (j,))
    return int(j)
def is_next_east_cell_empty(i, j, field):
    """
    Check whether the cell east of (i, j) is empty ('.'), wrapping around
    to column 0 when j is the last column (assumes a rectangular grid).
    :param i: row index
    :param j: column index
    :param field: 2-D grid of characters
    :return: True if the east neighbour is '.', False otherwise
    """
    east_j = (j + 1) % len(field[0])
    return field[i][east_j] == '.'
def tuple_map(f, a, b):
    """Zip + Map for tuples.

    Args:
        f (function): applied to each zipped PAIR — note the pair is passed
            as a single tuple argument, i.e. ``f((a_i, b_i))``, NOT
            unpacked as ``f(a_i, b_i)``.  NOTE(review): confirm callers
            expect this; a classic tuple_map would unpack.
        a (tuple): The first tuple
        b (tuple): The second tuple

    Returns:
        tuple: ``(f((a_0, b_0)), f((a_1, b_1)), ...)``, truncated to the
        shorter input.
    """
    return tuple([f(x) for x in zip(a, b)])
def create_new_tarball_name(platform, program, version):
    """ Converts the name of a platform as specified to the prepare_release
    framework to an archive name according to BLAST release naming conventions.
    Note: the platform names come from the prepare_release script conventions,
    more information can be found in http://mini.ncbi.nih.gov/3oo
    """
    name = "ncbi-" + program + "-" + version
    if program == "blast":
        name += "+"
    # prefix matches, checked in order
    prefix_suffixes = (
        ("Win", "-x64-win64"),
        ("Linux32", "-ia32-linux"),
        ("Linux64", "-x64-linux"),
        ("IntelMAC", "-x64-macosx"),
    )
    exact_suffixes = {
        "SunOSSparc": "-sparc64-solaris",
        "SunOSx86": "-x64-solaris",
    }
    for prefix, suffix in prefix_suffixes:
        if platform.startswith(prefix):
            return name + suffix
    if platform in exact_suffixes:
        return name + exact_suffixes[platform]
    raise RuntimeError("Unknown platform: " + platform)
def istask(x):
    """ Is x a runnable task?
    A task is a tuple with a callable first argument
    Examples
    --------
    >>> inc = lambda x: x + 1
    >>> istask((inc, 1))
    True
    >>> istask(1)
    False
    """
    # Fix: the old chained `and` returned the empty tuple () itself for
    # istask(()), not False; bool(x) normalizes the result to a real bool.
    return type(x) is tuple and bool(x) and callable(x[0])
def find_ori(dna: list, ori: str) -> int:
    """
    A circular DNA, find the origin of replication.
    Given:
        dna: a list of nucleotides ATCG (e.g.: ['A', 'T', 'C', 'C', 'G'])
        ori: a string of nucleotides (e.g.: "CGA")
    Return:
        start index of dna where ori starts (e.g.: 3), or -1 if absent.
        (any of them)
    """
    if len(ori) > len(dna):
        print("ori longer than dna, no need to find.")
        return -1
    # Bug fix: the old code did `circular_dna = dna` and then appended to
    # it, mutating the CALLER's list on every call.  Build the wrap-around
    # copy instead (first len(ori)-1 elements repeated at the end).
    circular_dna = dna + [dna[i] for i in range(len(ori) - 1)]
    for start in range(len(circular_dna) - len(ori) + 1):
        window = ''.join(circular_dna[start:start + len(ori)])
        if window == ori:
            # map a wrap-around hit back into the original index range
            return start % len(dna)
    return -1
def parse_share_url(share_url):
    """Return the (group_id, share_token) encoded in a group's share url —
    its last two path components, ignoring a trailing slash.
    :param str share_url: the share url of a group
    """
    *_, group_id, share_token = share_url.rstrip('/').split('/')
    return group_id, share_token
def polyarea(poly):
    """Returns the signed area of the given polygon.
    The polygon is given as a list of ``(x, y)`` pairs.
    Counter-clockwise polys have positive area, and vice-versa.
    """
    pts = list(poly)
    # close the ring if the polygon isn't already closed
    if pts[0] != pts[-1]:
        pts.append(pts[0])
    # shoelace formula over consecutive vertex pairs
    twice_area = sum(x1 * y2 - y1 * x2
                     for (x1, y1), (x2, y2) in zip(pts, pts[1:]))
    return twice_area / 2.0
def evaluate_matches(predictions, gold_standard_dict):
    """
    Given predictions set and the gold standard, evaluate Precision, Recall and F1-Score.

    :param predictions: iterable of predicted match pairs
    :param gold_standard_dict: map pair -> 1 (annotated "Yes") or 0 ("No")
    :return: (recall, precision, f1_score) — note the ORDER.  recall and
        precision are floats in [0, 1]; f1_score is a STRING percentage
        (e.g. '66.67') when computable, otherwise the float 0.0.
    """
    # annotated as "Yes" and algorithm outputs "Match"
    num_true_positives = 0
    # annotated as "No" but algorithm outputs "Match"
    num_false_positives = 0
    # annotated as "Yes" and algorithm outputs "No Match"
    num_false_negatives = 0
    # annotated as "No" and algorithm outputs "No Match"
    num_true_negatives = 0
    for match_pair in predictions:
        if gold_standard_dict.get(match_pair, None) == 1:
            num_true_positives += 1
        elif gold_standard_dict.get(match_pair, None) == 0:
            num_false_positives += 1
    # gold pairs the algorithm never predicted
    not_found_matches = set(gold_standard_dict).difference(
        set(predictions))
    for id_pair in not_found_matches:
        if gold_standard_dict.get(id_pair, None) == 1:
            num_false_negatives += 1
        elif gold_standard_dict.get(id_pair, None) == 0:
            num_true_negatives += 1
    if num_true_positives + num_false_positives:
        precision_ = num_true_positives / (
            num_true_positives + num_false_positives)
    else:
        precision_ = 0.0
    if num_true_positives + num_false_negatives:
        recall_ = num_true_positives / (
            num_true_positives + num_false_negatives)
    else:
        recall_ = 0.0
    if precision_+recall_ >0.0:
        # harmonic mean, rounded then scaled to a percent STRING
        f1_score = "{0:.2f}".format(
            round(2 * ((precision_ * recall_) / (precision_ + recall_)),
                  4) * 100)
    else:
        f1_score = 0.0
    return recall_, precision_, f1_score
def f_(x):
    """
    Derivative of f(x) = x^3 - x - 2, i.e. f'(x) = 3x^2 - 1.
    Needed for Newton-Raphson.
    """
    return 3 * x ** 2 - 1
def get_approval_status(payload):
    """
    Extract approval data from a Slack interactive-message payload.

    :raises Exception: when the payload has no 'approve' action.
    """
    # Bug fix: next() without a default raises StopIteration when nothing
    # matches, so the `is None` guard below could never fire.  Supplying
    # None as the default makes the intended Exception reachable.
    approve_action = next(
        (a for a in payload["actions"] if a["name"] == "approve"), None)
    if approve_action is None:
        raise Exception("Request must contain 'approve' action")
    # value is a pipe-separated quadruple: approved|buildUrl|jenkinsUrl|version
    action_data = approve_action["value"].split("|")
    return {
        "approved": action_data[0].lower() == "true",
        "buildUrl": action_data[1],
        "jenkinsUrl": action_data[2],
        "buildVersion": action_data[3]
    }
def eat_quoted(i, string):
    """
    :param i: Index of the opening quote mark
    :param string:
    :return: Index of the closing quote mark
    :raises Exception: if the string ends before a closing quote is found
    """
    assert string[i] == '"'
    i += 1
    # Bug fix: check the bound BEFORE indexing.  The old code evaluated
    # `string[i]` in the while condition first, so an input like '"' died
    # with IndexError instead of raising the intended parse Exception.
    while True:
        if i >= len(string):
            raise Exception(f'Could not parse {string}.')
        if string[i] == '"':
            return i
        if string[i:i+2] == r'\"':
            i += 2  # skip over an escaped quote
        else:
            i += 1
def filter_word(word, wordpattern):
    """Check whether a word fits the wordpattern.

    Special character mapping:
        "-" -> matches a space " "
        "*" -> wildcard, matches anything
    The pattern may be a string or an iterable of character codes (ints).
    """
    if len(word) != len(wordpattern):
        return False
    for pat, actual in zip(wordpattern, word):
        # support both str patterns and char-code arrays
        pat_char = pat if isinstance(pat, str) else chr(pat)
        if pat_char == "-":
            pat_char = " "
        if pat_char != "*" and pat_char != actual:
            return False
    return True
def _parse_prop(search, proplist):
"""Extract property value from record using the given urn search filter."""
props = [i for i in proplist if all(item in i['urn'].items() for item in search.items())]
if len(props) > 0:
return props[0]['value'][list(props[0]['value'].keys())[0]] |
def merge(*dicts):
    """Merges N cloudformation definition objects together.

    Later dicts win on conflicts; dict-valued keys are merged one level deep.
    """
    merged = {}
    for source in dicts:
        for key, value in source.items():
            if isinstance(value, dict):
                merged[key] = {**merged.get(key, {}), **value}
            else:
                merged[key] = value
    return merged
def annual_post_secondary_expenses(responses, derived):
    """Return the annual post-secondary expense from *responses* as a float.

    Falls back to 0 when the stored value is missing, non-numeric, or None.
    (float(None) raises TypeError, which the previous version did not catch.)
    """
    try:
        return float(responses.get('annual_post_secondary_expenses', 0))
    except (TypeError, ValueError):
        return 0
def get_crop_center_and_size_from_bbox(bbox):
    """Return (center, size) of a crop from a (ymin, xmin, ymax, xmax) bbox.
    Note that center and size are in (x, y) order.
    Args:
        bbox: quadruple (ymin, xmin, ymax, xmax)
    Returns:
        ([center_x, center_y], [width, height])
    """
    ymin, xmin, ymax, xmax = bbox
    center_x = int((xmin + xmax) / 2)
    center_y = int((ymin + ymax) / 2)
    return [center_x, center_y], [xmax - xmin, ymax - ymin]
def _isCpuOnly(log):
"""check for CPU-Only mode"""
for l in log:
if "cpu" in l.lower():
return True
return False |
def shake_padding(used_bytes, align_bytes):
    """
    The SHAKE padding function: 0x1F domain bits, zero fill, final 0x80
    (merged into a single 0x9F when only one pad byte fits).
    """
    padlen = align_bytes - used_bytes % align_bytes
    if padlen == 1:
        return [0x9f]
    if padlen == 2:
        return [0x1f, 0x80]
    return [0x1f] + [0x00] * (padlen - 2) + [0x80]
def _should_create_policy_engine_core(policy_configuration):
"""Examine the policy_configuration and decide to start a riemann core
"""
return any(group.get('policies')
for group in policy_configuration['groups'].values()) |
def output_units(un=None):
    """Enable or disable the output of units when printing.
    By default output of units is enabled. Do nothing if un is None.
    When disabled (un is False) print of Magnitudes will produce only
    numbers.
    Return: True if output of units enabled, False otherwise.
    >>> print(mg(2, 'day'))
    2.0000 day
    >>> output_units(False)
    False
    >>> print(mg(2, 'day').ounit('s'))
    172800.0000
    """
    # _prn_units is a module-level flag read by the Magnitude printing code;
    # reading it with un=None assumes it was initialised elsewhere in the
    # module — TODO confirm.
    global _prn_units
    if un is not None:
        _prn_units = un
    return _prn_units
def insertion_sort(inputArray):
    """In-place insertion sort; returns the (mutated) array.

    features: in-place, stable, adaptive, online
    efficiency: O(n^2) worst/average, O(n) best; O(1) extra space
    method: shift each larger predecessor one slot right, then drop the
    current value into the hole that opens up.
    """
    for current_index, current_value in enumerate(inputArray):
        slot = current_index
        while slot > 0 and inputArray[slot - 1] > current_value:
            inputArray[slot] = inputArray[slot - 1]
            slot -= 1
        inputArray[slot] = current_value
    return inputArray
def round_float(value):
    """Rounds a float to the nearest integer value.

    NOTE(review): implemented as int(value + 0.5), which rounds half-up
    only for non-negative inputs; int() truncates toward zero, so e.g.
    round_float(-1.2) == 0, not -1.  Confirm callers only pass
    non-negative values before relying on this for negatives.
    """
    return int(value + 0.5)
def _older_than(number: int, unit: str) -> str:
"""
Returns a query term matching messages older than a time period.
Args:
number: The number of units of time of the period.
unit: The unit of time: "day", "month", or "year".
Returns:
The query string.
"""
return f'older_than:{number}{unit[0]}' |
def prefixed(strlist, prefix):
    """
    Filter a list to values starting with the prefix string.
    :param strlist: a list of values (each coerced to str for the check)
    :param prefix: str
    :return: subset of the original list whose str() begins with prefix
    """
    return [item for item in strlist if str(item).startswith(prefix)]
def _basis_bitstring(i, num_qubits):
"""Create vector corresponding to i-th basis vector of num_qubits system."""
return [int(char) for char in bin(i)[2:].zfill(num_qubits)] |
def join_list(lst, string=', '):
    """
    :param lst: List to be joined (items are str()-coerced)
    :param string: Separator placed between the items
    :return: the list converted into a single string
    """
    separator = str(string)
    return separator.join(str(item) for item in lst)
def _pretty_annotation_val(val, cpool):
"""
a pretty display of a tag and data pair annotation value
"""
tag, data = val
if tag in 'BCDFIJSZs':
data = "%s#%i" % (tag, data)
elif tag == 'e':
data = "e#%i.#%i" % data
elif tag == 'c':
data = "c#%i" % data
elif tag == '@':
data = "@" + data.pretty_annotation()
elif tag == '[':
combine = list()
for val in data:
combine.append(_pretty_annotation_val(val, cpool))
data = "[%s]" % ", ".join(combine)
return data |
def cal_pivot(n_losses, network_block_num):
    """
    Calculate the block indices at which the additional losses are inserted:
    split network_block_num blocks into (n_losses + 1) segments, capping
    each pivot at the last valid block index.
    """
    num_segments = n_losses + 1
    per_segment = network_block_num // num_segments + 1
    return [min(per_segment * (seg + 1), network_block_num - 1)
            for seg in range(num_segments - 1)]
def par(valores_acumulados):
    """
    Return 1 if a pair is found (an accumulated count equal to 2) in the
    hand's accumulated values, otherwise return 0.
    """
    return 1 if any(count == 2 for count in valores_acumulados) else 0
def get_fraction(file_name):
    """
    Return the fraction number encoded in the file name.
    :param file_name: file name with format .*_fraction[.mgf]
    :return: fraction number (int)
    """
    underscore_at = file_name.rfind('_')
    assert underscore_at != -1
    dot_at = file_name.rfind(".")
    end = len(file_name) if dot_at == -1 else dot_at
    return int(file_name[underscore_at + 1:end])
def vtune(scale, acc_rate):
    """
    This is a vectorized version of the pymc3 tune function.
    Tunes the scaling parameter for the proposal distribution
    according to the acceptance rate over the last tune_interval:
    Rate            Variance adaptation
    ----            -------------------
    <0.001          x 0.1
    [0.001, 0.05)   x 0.5
    [0.05, 0.24)    x 0.9
    [0.24, 0.5]     unchanged
    (0.5, 0.75]     x 1.1
    (0.75, 0.95]    x 2
    >0.95           x 10
    NOTE(review): pymc3's original table uses 0.2 as the boundary; this
    code uses 0.24 — kept as-is, presumably intentional; confirm.
    """
    # Each term selects one mutually-exclusive acceptance-rate band; the
    # boolean masks multiply elementwise, so this also works on arrays.
    scale_ = (acc_rate < 0.001)*scale*0.1 +\
        ((acc_rate >= 0.001) & (acc_rate < 0.05))*scale*.5 +\
        ((acc_rate >= 0.05) & (acc_rate < 0.24))*scale * 0.9 +\
        (acc_rate > 0.95)*scale * 10.0 +\
        ((acc_rate <= 0.95) & (acc_rate > 0.75))*scale * 2.0 +\
        ((acc_rate <= 0.75) & (acc_rate > 0.5))*scale*1.1 +\
        ((acc_rate>=.24) & (acc_rate<=.5))*scale
    return scale_
def daily_cost(lam=0.88, intercept=160):
    """
    Return the expected daily cost for a machine with the given Poisson
    rate `lam` and fixed `intercept` cost.
    """
    # For a Poisson distribution E(X) = Var(X) = lam, and since
    # E(X^2) = Var(X) + (E X)^2, we get E(X^2) = lam + lam**2.
    expected_square = lam + lam ** 2
    return intercept + 40 * expected_square
def set_ctrl(ctrl=False, comp="unexp"):
    """
    set_ctrl()

    Set the control value; can only clear it to False, never set it.

    Optional args:
        - ctrl (bool): whether the run is a control
                       default: False
        - comp (str) : comparison type
                       default: "unexp"

    Returns:
        - ctrl (bool): modified control value

    Raises:
        - ValueError: if comp is "all"
    """
    # "ori" comparisons that also involve "U" are treated like the
    # unexpected comparisons below.
    is_ori_with_U = ("U" in comp) and ("ori" in comp)
    if is_ori_with_U or comp in ("unexp", "DvU", "dir_unexp"):
        ctrl = False
    if comp == "all":
        raise ValueError("Should not be used if comp is 'all'.")
    return ctrl
def common_letters_in_IDs_differing_with_one_letter(boxes):
    """Find common letters in two IDs, which are differing with one letter.

    Compares every pair of box IDs position-by-position; returns the
    shared letters of the first pair that differs in exactly one
    position, or None when no such pair exists.
    """
    total = len(boxes)
    for i in range(total - 1):
        current = boxes[i]
        for candidate in boxes[i + 1:]:
            shared = ''.join(a for a, b in zip(current, candidate) if a == b)
            if len(shared) == len(current) - 1:
                return shared
    return None
def _getvars(expression, user_dict):
"""Get the variables in `expression`."""
cexpr = compile(expression, '<string>', 'eval')
exprvars = [var for var in cexpr.co_names
if var not in ['None', 'False', 'True']]
reqvars = {}
for var in exprvars:
# Get the value
if var in user_dict:
val = user_dict[var]
else:
val = None
# Check the value.
if val is not None:
reqvars[var] = val
return reqvars |
def first(item):
    """
    Return the first element of a list, or an empty dict.

    :param item: any value; only lists are inspected
    :return: item[0] for a non-empty list, otherwise {}
    """
    if not isinstance(item, list):
        return {}
    return item[0] if item else {}
def ade_fn2index(fn):
    """
    Split an ADE filename to get the index
    of the file in the lists of the index file.

    The filename stem ends in "_<number>" where the number is 1-based;
    the returned index is 0-based.
    """
    stem = fn.partition(".")[0]
    number_text = stem.rsplit("_", 1)[-1]
    return int(number_text) - 1
def quadratic_sum(n: int) -> int:
    """Return the sum of squares 1**2 + 2**2 + ... + n**2 (0 when n < 1)."""
    # Loop variable renamed: the original generator shadowed the
    # parameter `n` inside the comprehension.
    return sum(i ** 2 for i in range(1, n + 1))
def binarySearch(array, target):
    """Return the index of `target` in the ascending-sorted `array`,
    or -1 when it is not present."""
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = array[mid]
        if value == target:
            return mid
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
def select_from_list(
    master_list,
    first=None,
    last=None,
    skip=None,
    only=None,
    loose=True,
):
    """
    Select only part of a list.

    The list is sorted case-insensitively, then filtered:
        - first: drop elements before this one. In loose mode the cut is
          by case-insensitive ordering (>= first); in strict mode
          elements are dropped until one matches first (case-insensitive).
        - last:  drop elements after this one. In loose mode elements
          ordered after `last` are dropped; in strict mode everything up
          to and including the first match of `last` is kept.
        - skip:  elements to exclude (case-insensitive in loose mode,
          exact membership otherwise).
        - only:  when given, keep only these elements (same matching
          rules as `skip`).
        - loose: toggles the matching behavior described above.

    Returns the filtered, sorted sub-list.
    """
    # NOTE: skip/only defaults changed from mutable [] to None; the
    # falsiness checks below preserve the original behavior.
    sorted_list = sorted(master_list, key=lambda s: s.lower())
    sub_list = []
    before_first = first is not None
    after_last = False
    for element in sorted_list:
        if first is not None:
            if loose:
                if element.lower() >= first.lower():
                    before_first = False
            elif element.lower() == first.lower():
                before_first = False
        if last is not None and loose and element.lower() > last.lower():
            after_last = True
        if before_first or after_last:
            continue
        if skip:
            if loose:
                if any(element.lower() == s.lower() for s in skip):
                    continue
            elif element in skip:
                continue
        if only:
            if loose:
                if not any(element.lower() == o.lower() for o in only):
                    continue
            elif element not in only:
                continue
        sub_list.append(element)
        # strict `last`: include the matching element, then stop.
        if last is not None and not loose and element.lower() == last.lower():
            after_last = True
    return sub_list
def _extract_dict(input_dict, output_dict, input_keys):
"""
Recursively extract values from a defaults dictionary.
A defaults dictionary consists of:
- an optional "all" key
- zero or more other keys, each of whose values is a defaults dictionary
The goal is to add any matching values to an output dictionary, with more specific
matching values overriding less specific matching values. As such, given an input
dictionary and a list of keywords,
- Add all key/value pairs from the "all" dictionary (if present) to the output
dictionary.
- For each keyword in the list, if that keyword is in the dictionary, call this
function recursively on the value of that key, which is (see above) a dictionary.
- Don't check on whether a value already exists in the output dictionary, because
more-specific overrides less-specific (if you need a default for a specific value to
definitely override a more general default, nest that value as a keyword inside the
more general dictionary).
Parameters
----------
input_dict : dict
The dictionary to search
output_dict : dict
The dictionary to build from
input_keys : list
A list of keys to search for
Returns
-------
output_dict : dict
The edited output dictionary
"""
if "all" in input_dict:
for keyword in input_dict["all"].keys():
output_dict[keyword] = input_dict["all"][keyword]
for keyword in input_keys:
if keyword in input_dict:
output_dict = _extract_dict(input_dict[keyword], output_dict, input_keys)
return output_dict |
def scale(value):
    """Scale the light sensor values from 0-65535 (AnalogIn range)
    to 0-50 (arbitrarily chosen to plot well with temperature)."""
    # Same operation order as before (divide, then multiply) so float
    # results are bit-identical.
    fraction = value / 65535
    return fraction * 50
def adjust_learning_rate(initial_lr, optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 10 epochs.

    Writes the decayed rate into every parameter group of `optimizer`
    (the original version computed the rate but never applied it) and
    returns the new learning rate.
    """
    lr = initial_lr * (0.1 ** (epoch // 10))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
def counting_sort(A, max_val):
    """
    Stable counting sort.

    Only works for an array of integers in [0, max_val]; runs in
    O(n + k) time where k = max_val + 1.

    :param A: list of non-negative integers, each <= max_val
    :param max_val: largest value that may appear in A
    :return: new sorted list (A is left unmodified)
    """
    # Debug print statements removed: a sorting routine should not
    # write to stdout.
    k = max_val + 1
    count = [0] * k
    result = [None] * len(A)
    # Histogram: count[v] = occurrences of v in A.
    for value in A:
        count[value] += 1
    # Prefix sums: count[v] = number of elements <= v, i.e. one past the
    # last slot for value v in the output.
    for i in range(1, k):
        count[i] += count[i - 1]
    # Place elements right-to-left so equal keys keep their relative
    # order (stability).
    for i in range(len(A) - 1, -1, -1):
        count[A[i]] -= 1
        result[count[A[i]]] = A[i]
    return result
def _create_item(target_columns, rows):
"""Creates the 'item' field for a deid or inspect request."""
table = {'headers': [], 'rows': []}
for _ in rows:
table['rows'].append({'values': []})
for col in target_columns:
table['headers'].append({'name': col['name']})
for i in range(len(rows)):
if col['name'] not in rows[i]:
raise Exception('Expected column "{}" not found in row: "{}"'.format(
col['name'], rows[i]))
table['rows'][i]['values'].append({col['type']: rows[i][col['name']]})
return {'table': table} |
def validate_ecl(field):
    """
    ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
    """
    valid_colors = frozenset({'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'})
    return field in valid_colors
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.