content stringlengths 42 6.51k |
|---|
def ra_to_sex(ra, delimiter=':'):
    """Convert a right ascension in decimal degrees to a sexagesimal string."""
    hours = ra / 15
    hh = int(hours)
    mm = int((hours - hh) * 60)
    ss = int(((hours - hh) * 60 - mm) * 60)
    ff = int((((hours - hh) * 60 - mm) * 60 - ss) * 100)
    return '%02d%s%02d%s%02d.%02d' % (hh, delimiter, mm, delimiter, ss, ff)
def solution(strs):  # O(M * N)
    """
    Return the longest common prefix of a list of strings ('' when none).
    >>> solution(['flower', 'flow', 'flight'])
    'fl'
    >>> solution(['dog', 'racecar', 'car'])
    ''
    >>> solution([])
    ''
    """
    if not strs:
        return ''
    if len(strs) == 1:
        return strs[0]
    prefix_chars = []
    # zip stops at the shortest string; a column of identical characters
    # extends the common prefix, the first mismatching column ends it.
    for column in zip(*strs):
        if len(set(column)) != 1:
            break
        prefix_chars.append(column[0])
    return ''.join(prefix_chars)
def IsPosInt( x ):
    """Test whether x converts to a positive integer.

    Returns False (rather than raising) for values int() rejects:
    non-numeric strings raise ValueError, while None/lists/dicts raise
    TypeError — the original only caught ValueError, so IsPosInt(None)
    crashed.
    """
    try:
        val = int(x)
        return val > 0
    except (ValueError, TypeError):
        return False
def capitalize(line):
    """Capitalize the first char in a string.

    string.capitalize messes up deliberate upper cases in the strings so
    here's a dedicated function.  Unlike the original, an empty string is
    returned unchanged instead of raising IndexError on line[0].

    Args:
        line (str): The string to capitalize
    Returns:
        str: The capitalized string
    """
    if not line:
        return line
    return line[0].upper() + line[1:]
def prepend_text(a, b=None):
    """Return a alone when b is falsy, else a and b joined by a blank line."""
    if b:
        return u"{0}\n\n{1}".format(a, b)
    return a
def escape_name(name):
    """Escape sensor and request names to be valid Python identifiers."""
    # Map both '.' and '-' to '_' in a single pass.
    return name.translate(str.maketrans({'.': '_', '-': '_'}))
def name(primitive):
    """
    Return the `__name__` of a `Primitive`, calling it when it is callable.
    """
    attr = primitive.__name__
    try:
        return attr()
    except TypeError:
        return attr
def T9ToText(text):
    """
    Decode space-separated T9 key groups: each group is one key pressed
    repeatedly (e.g. '7777' -> 4th letter on key 7 -> 's').
    """
    keypad = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
              '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
    letters = []
    for group in text.split():
        letters.append(keypad[group[0]][len(group) - 1])
    return "".join(letters)
def linear_search(arr, value):
    """
    Linear search: return the first index holding value, or -1 if absent.
    Time O(n), space O(1).
    """
    for idx, element in enumerate(arr):
        if element == value:
            return idx
    return -1
def url_replace(value, field_name, params=None):
    """
    Build a query string '?field=value', preserving every other existing
    parameter found in params (an '&'-joined query string).
    """
    url = "?{}=${}".format(field_name, value).replace("=$", "=")
    if params:
        kept = [pair for pair in params.split("&")
                if pair.split("=")[0] != field_name]
        url = "{}&{}".format(url, "&".join(kept))
    return url
def name_to_pollination(name: str) -> str:
    """Add a pollination- in front of the name."""
    if name.startswith('pollination.'):
        # Dotted form: swap the dot for a dash.
        return name.replace('pollination.', 'pollination-')
    normalized = name.replace('_', '-')
    if normalized.startswith('pollination-'):
        # Already prefixed (dash or underscore form): return unchanged.
        return name
    return f'pollination-{name}'
def func(x, a, b, c):
    """
    Evaluate the second-degree polynomial f(x) = a*x^2 + b*x + c.

    :param x: input variable
    :param a: second deg coeff
    :param b: first deg coeff
    :param c: zeroth deg coeff
    :return: f(x)
    """
    quadratic = a * x ** 2
    linear = b * x
    return quadratic + linear + c
def get_query_range(count: int, page: int):
    """Build a 'from=..&to=..' range query for the given page.

    :param count: Max amount of the search results (must be >= 1)
    :param page: Current page, 1-based (must be >= 1)
    :return: A query range string
    :raises ValueError: when either argument is below 1
    """
    if page < 1:
        raise ValueError('page value can\'t be less than 1')
    if count < 1:
        raise ValueError('max results value can\'t be less than 1')
    first = (page - 1) * count + 1
    last = page * count
    return f'from={first}&to={last}'
def securitygroup_rules_preview(context, request, leftcol_width=3, rightcol_width=9):
    """ Security group rules preview, used in Launch Instance and Create Launch Configuration wizards.
    """
    # context/request are part of the view-callable signature; only the
    # column widths feed the template.
    return {
        'leftcol_width': leftcol_width,
        'rightcol_width': rightcol_width,
    }
def get_only_idf_lit_containing_patterns(all_changes):
    """
    Keep only the change patterns that can be used to seed bugs: a pattern
    is kept when its 'fix' or 'buggy' token sequence contains at least one
    identifier ('Idf_') or literal ('Lit_') placeholder token; every other
    pattern is discarded.

    (Removed two large blocks of commented-out alternative filters — token
    count and frequency — that were dead code.)

    :param all_changes: iterable of dicts with 'fix' and 'buggy' token lists
    :return: list of the patterns containing at least one Idf_/Lit_ token
    """
    filtered_change_patterns = []
    for pattern in all_changes:
        # Join once per side instead of four times per pattern.
        fix_text = ' '.join(pattern['fix'])
        buggy_text = ' '.join(pattern['buggy'])
        if ('Idf_' in fix_text or 'Lit_' in fix_text
                or 'Idf_' in buggy_text or 'Lit_' in buggy_text):
            filtered_change_patterns.append(pattern)
    return filtered_change_patterns
def interpolate_indices(x0, y0, x1, y1, x):
    """Linearly interpolate an int-valued function through (x0, y0) and
    (x1, y1), evaluated at x with floor division."""
    numerator = y0 * (x1 - x) + y1 * (x - x0)
    return numerator // (x1 - x0)
def indent(text, prefix):
    """
    Adds `prefix` to the beginning of non-empty lines in `text`.
    """
    # Based on Python 3's textwrap.indent; splitlines(True) keeps newlines.
    return u"".join(
        prefix + line if line.strip() else line
        for line in text.splitlines(True)
    )
def fix_url(url):
    """Prefix url with http:// unless it already carries a scheme.

    :param url: the url to fix
    """
    if "://" in url:
        return url
    return "http://" + url
def get_network(properties):
    """ Gets configuration that connects an instance to an existing network
    and assigns to it an ephemeral public IP.
    """
    network_name = properties.get('network')
    # A value containing '/' or '.' is treated as an existing self-link/URL.
    if '/' in network_name or '.' in network_name:
        network_url = network_name
    else:
        network_url = 'global/networks/{}'.format(network_name)
    network_interfaces = {'network': network_url}
    if properties['hasExternalIp']:
        access_config = {
            'name': 'External NAT',
            'type': 'ONE_TO_ONE_NAT'
        }
        if 'natIP' in properties:
            access_config['natIP'] = properties['natIP']
        network_interfaces['accessConfigs'] = [access_config]
    # Copy optional per-interface settings straight through when present.
    for prop in ('subnetwork', 'networkIP'):
        if prop in properties:
            network_interfaces[prop] = properties[prop]
    return network_interfaces
def human_readable_size(byte_count, unit="B", binary_marker='i'):
    """ Converts a number of bytes into a human-readable size string.

    Returns 0 (an int, not a string) for None input, preserving the
    historical contract.  The scaling loop now stops at the largest known
    suffix so inputs >= 1024**6 no longer raise KeyError.
    """
    SUFFIXES = {
        0: "",
        1: "k" + binary_marker,
        2: "M" + binary_marker,
        3: "G" + binary_marker,
        4: "T" + binary_marker,
        5: "P" + binary_marker
    }
    if byte_count is None:
        return 0
    suffix_order = 0
    max_order = max(SUFFIXES)
    while byte_count >= 1024 and suffix_order < max_order:
        suffix_order += 1
        byte_count /= 1024
    return "{} {}{}".format(byte_count, SUFFIXES[suffix_order], unit)
def pg2dtypes(pgtype):
    """Returns equivalent dtype for input `pgtype` ('object' for unknowns)."""
    float_types = ('bigint', 'double precision', 'integer', 'number',
                   'numeric', 'real', 'smallint')
    datetime_types = ('timestamp', 'timestampz', 'timestamp with time zone',
                      'timestamp without time zone')
    key = str(pgtype)
    if key in float_types:
        return 'float64'
    if key == 'boolean':
        return 'bool'
    if key == 'date':
        return 'datetime64[D]'
    if key == 'int':
        return 'int64'
    if key in datetime_types:
        return 'datetime64[ns]'
    # 'geometry', 'string', 'USER-DEFINED' and anything unrecognized.
    return 'object'
def _is_namedtuple_like(x):
"""Helper which returns `True` if input is `collections.namedtuple`-like."""
try:
for fn in getattr(x, '_fields'):
_ = getattr(x, fn)
return True
except AttributeError:
return False |
def remove_nonascii(text):
    """Replace every non-ASCII character in text with a single space."""
    return ''.join(ch if ord(ch) < 128 else ' ' for ch in text)
def minX(arr):
    """
    given an arr, get the min number that can keep the running sum greater than or equal to 1

    Returns the smallest starting value X such that X plus every prefix sum
    of arr stays >= 1.
    """
    m = min(arr)
    if m > 0:
        # All elements positive: the formula (-min + 1) is returned directly.
        # NOTE(review): for all-positive arrays this yields a non-positive
        # start value — confirm this matches the intended contract.
        return (m * -1) + 1
    else:
        # Start from -min + 1 and simulate the running sum, raising the
        # starting value whenever a prefix would fall below 1.
        m = (m * -1) + 1
        running_sum = m
        for val in arr:
            if running_sum + val >= 1:
                running_sum += val
            else:
                # Bump the start by the shortfall and clamp the running sum
                # to the minimum allowed value of 1.
                m += 1 - (running_sum + val)
                running_sum = 1
        return m
def get_fibonacci(number: int) -> int:
    """Get the nth Fibonacci number (1-indexed: 1, 2, 3, 5, 8, 13, ...).

    Bug fix: the original returned 2 for both n=2 and n=3 because its
    iterative loop used a (0, 1) seed inconsistent with the explicit
    base cases f(1)=1, f(2)=2; the loop now continues that sequence.
    """
    if number == 1:
        return 1
    last, current = 1, 2
    for _ in range(2, number):
        last, current = current, last + current
    return current
def err_func(p, x, y, yerr, function):
    """
    Weighted difference between data and a model.

    :param p: model parameter vector, passed through to `function`
    :param x: independent variable(s)
    :param y: observed data
    :param yerr: per-point uncertainty on y
    :param function: model callable, invoked as function(x, p)
    :return: (y - function(x, p)) / yerr**2

    Fix: the original computed `fit = function(x, p)` and then ignored it,
    evaluating the model a second time in the return expression; the model
    is now evaluated exactly once.  Dead debug prints removed.
    """
    fit = function(x, p)
    return (y - fit) / yerr**2
def linear_growth(
    start: float, end: float, start_time: int, end_time: int, trade_time: int
) -> float:
    """
    Simple linear growth function. Grows from start to end after end_time minutes (starts after start_time minutes)
    """
    elapsed = max(0, trade_time - start_time)
    slope = (end - start) / (end_time - start_time)
    return min(end, start + slope * elapsed)
def problem25(nr_of_digits):
    """Problem 25 - index of the first Fibonacci number with nr_of_digits digits.

    Fix: the digit test now uses the exact integer comparison
    n >= 10**(nr_of_digits-1) instead of float division, which for large
    digit counts can round a number just below the threshold up to 1.0 and
    terminate one term early.
    """
    threshold = 10 ** (nr_of_digits - 1)
    n_1 = 1
    n_2 = 1
    seq = 3
    while True:
        n = n_1 + n_2
        if n >= threshold:
            break
        n_2 = n_1
        n_1 = n
        seq += 1
    return seq
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:
    1) Arguments are delimited by white space, which is either a
       space or a tab.
    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       or pipe characters contained within.  A quoted string can be
       embedded in an argument.
    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.
    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.
    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """
    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        # Pending run of backslashes whose meaning depends on what follows.
        bs_buf = []
        # Add a space to separate this argument from the others
        if result:
            result.append(' ')
        # Quote when the arg contains whitespace or '|', or is empty.
        needquote = (" " in arg) or ("\t" in arg) or ("|" in arg) or not arg
        if needquote:
            result.append('"')
        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes (rule 5), then emit an escaped quote.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char: any buffered backslashes were literal.
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)
        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)
        if needquote:
            # bs_buf is deliberately NOT cleared above: trailing backslashes
            # are doubled so the closing quote is not escaped (rule 5).
            result.extend(bs_buf)
            result.append('"')
    return ''.join(result)
def name_or_id(model, name):
    """Return an _id attribute if one exists."""
    # Prefer the foreign-key style '<name>_id' attribute over '<name>'.
    for attr in (name + "_id", name):
        if hasattr(model, attr):
            return getattr(model, attr)
    return None
def luma(p):
    """
    Relative luminance (ITU-R BT.709) of an (R, G, B) pixel, range [0, 255].
    Human eyes are most sensitive to green light, hence its large weight.

    :param p: A tuple of (R,G,B) values
    :return: The relative luminance of the pixel.
    """
    red, green, blue = p[0], p[1], p[2]
    return 0.2126*red + 0.7152*green + 0.0722*blue
def linear_fit(ab, x):
    """Evaluate the line a + b * x.

    Args:
        ab: [a, b] — constant and slope
        x: array or float input data
    Returns:
        a + b * x
    """
    intercept, slope = ab[0], ab[1]
    return intercept + slope * x
def allin(inp, reflist):
    """Return True when every string in inp is also present in reflist."""
    return all(item in reflist for item in inp)
def rotmap(start):
    """
    dict[char,char]: Map chars (from start to start+26) to rotated characters.
    """
    # chr(start + j) maps to chr(start + (j + 13) % 26) — i.e. ROT13 within
    # the 26-character window beginning at code point `start`.
    source = [chr(start + offset) for offset in range(26)]
    target = [chr(start + (offset + 13) % 26) for offset in range(26)]
    return dict(zip(source, target))
def get_multiple_values_from_header(header):
    """
    Split a comma-separated header into its individual values, trimming
    the whitespace around each one.
    """
    raw_values = header.split(',')
    return [raw.strip() for raw in raw_values]
def get_spaces(depth):
    """Return the indentation string for the given depth: depth*4 - 2 spaces."""
    width = depth * 4 - 2
    return ' ' * width
def station_archives(archive_definitions, station):
    """
    Returns archive definitions list for provided station
    :param archive_definitions: list - list of archive definitions
    :param station: string - station name (matched against element 0)
    :return: list - matching archive definitions
    """
    return [definition for definition in archive_definitions
            if definition[0] == station]
def extract_prediction(cmd_output):
    """Return the stripped text between <s> and </s> in cmd_output.

    Args:
        cmd_output: The output of the analyzer subprocess
    Returns:
        The prediction text, or "" when no <s> marker is present.
    """
    if "<s>" not in cmd_output:
        return ""
    start = cmd_output.index('<s>') + 3
    end = cmd_output.index('</s>')
    return cmd_output[start:end].strip()
def get_orientation(o, a, b):
    """
    Cross product of vectors oa and ab for 2D points o, a, b.
    :return: positive if o->a->b turns counter-clockwise; negative if
    clockwise; zero if the three points are colinear
    """
    dx1 = a[0] - o[0]
    dy1 = a[1] - o[1]
    dx2 = b[0] - a[0]
    dy2 = b[1] - a[1]
    return dx1 * dy2 - dy1 * dx2
def make_weights_for_balanced_classes(images, targets):
    """Adapted from https://discuss.pytorch.org/t/balanced-sampling-between-classes-with-torchvision-dataloader/2703

    Returns one weight per datapoint: N / count(class of that datapoint),
    the reciprocal of the empirical class probability.

    Fix: classes are now counted by label value rather than positional
    index, so non-contiguous or non-zero-based label sets no longer raise
    IndexError (the original sized its count list with len(set(targets))).
    """
    # Number of occurrences of each class, keyed by the label itself.
    count_per_class = {}
    for t in targets:
        count_per_class[t] = count_per_class.get(t, 0) + 1
    # Weight (reciprocal of prob) per class, expanded to the target list.
    N = float(len(images))
    return [N / float(count_per_class[t]) for t in targets]
def connected(fragment, prim_dict, max_ind):
    """ Find fragments that are connected to current fragment.

    Parameters
    ----------
    fragment : list
        Current fragment (iterable of prims)
    prim_dict : dict
        Where prims know which fragments they are in
    max_ind : int
        max value of indices used to make fragment

    Returns
    -------
    keylist : list
        Indices (> max_ind) of fragments that share a prim with
        `fragment`, de-duplicated, in first-seen order.

    Cleanup: removed two large blocks of commented-out alternative
    implementations ('Numpy array method' and 'List method') that were
    dead code; the surviving dict-based method is unchanged in behavior.
    """
    # A dict is used as an ordered set: keys record the fragment indices seen.
    seen = {}
    for prim in fragment:
        for frag_index in prim_dict[prim]:
            if frag_index > max_ind:
                seen[frag_index] = 1
    return list(seen.keys())
def chooseMonth(num):
    """
    Return the three-letter month name for a zero-padded month string
    ("01".."12"), or "UNKNOWN" for anything else.
    """
    months = {
        "01": "JAN", "02": "FEB", "03": "MAR", "04": "APR",
        "05": "MAY", "06": "JUN", "07": "JUL", "08": "AUG",
        "09": "SEP", "10": "OCT", "11": "NOV", "12": "DEC",
    }
    return months.get(num, "UNKNOWN")
def convert_c_to_f(temp_c):
    """Converts temp (C) to temp (F), rounded to 2 decimals; returns False
    for inputs that do not support arithmetic."""
    try:
        return round(temp_c * 1.8 + 32, 2)
    except TypeError:
        return False
def is_local_host(location: str) -> bool:
    """Returns True if ip represents localhost or offline else return False.

    :param location: Location string in the format ip[/slot[/port]].
    """
    markers = ("localhost", "127.0.0.1", "offline", "null")
    lowered = location.lower()
    return any(marker in lowered for marker in markers)
def get_word_id(word, word2id, lower):
    """
    Get a word ID.
    If the model does not use lowercase and the evaluation file is lowercased,
    we might be able to find an associated word via its capitalized or
    title-cased variant.
    """
    assert type(lower) is bool
    word_id = word2id.get(word)
    if word_id is None and not lower:
        # Try the case variants in order; stop at the first hit.
        for variant in (word.capitalize(), word.title()):
            word_id = word2id.get(variant)
            if word_id is not None:
                break
    return word_id
def make_list(arg):
    """Return a list with arg as its member or arg if arg is already a list. Returns an empty list if arg is None"""
    if isinstance(arg, list):
        return arg
    if arg is None:
        return []
    return [arg]
def batching(function, inp):
    """Apply a function along the batch axis"""
    results = []
    for element in inp:
        results.append(function(element))
    return results
def create_constraints(reac_list, lb=0, up=0):
    """
    Create a dictionary of FBA constraints.

    :param reac_list: list of str, reaction ids to be constrained
    :param lb: int or float, value of the lower bound
    :param up: int or float, value of the upper bound
    :return: dict mapping reaction id -> (lower bound, upper bound)
    :raises Exception: when lb > up
    """
    if lb > up:
        raise Exception("Lower bound must be lower than upper bound")
    return {reac: (lb, up) for reac in reac_list}
def check_for_take(string: str) -> bool:
    """
    Decide whether a clapperboard element is a "take": all-digit text whose
    numeric value is below 30.

    Parameters
    ----------
    string: the string that will be checked
    Returns
    -------
    bool: True when the rules hold, False otherwise.
    """
    return string.isdigit() and int(string) < 30
def parse_barcode_renamer(barcodes, barcode_renamer):
    """
    Return a dictionary mapping each cell barcode to its renamed barcode.

    barcodes : list
        List of cellular barcode strings
    barcode_renamer : str or None
        Path to a whitespace-separated file mapping barcodes to their new
        name; '|' in the new name is replaced by '_'.  When None, each
        barcode maps to itself.

    Returns
    -------
    dict : (str, str) mapping of original barcode to new name
    """
    if barcode_renamer is None:
        # No rename file: identity mapping.
        return dict(zip(barcodes, barcodes))
    renamer = {}
    with open(barcode_renamer) as handle:
        for line in handle.readlines():
            barcode, renamed = line.split()
            renamer[barcode] = renamed.replace("|", "_")
    return renamer
def split_on_attributes(keys, rows):
    """
    Given a tuple of column names 'keys', and a collection of dict-like
    rows, returns a dictionary where every unique value as defined by keys
    is a key in the dictionary, and the value under that key is a list
    containing the corresponding rows.
    """
    grouped = {}
    for row in rows:
        group_key = tuple(row[k] for k in keys)
        grouped.setdefault(group_key, []).append(row)
    return grouped
def listStatuses(jobs):
    """
    Given a dictionary of job statuses
    (like the one got from parseSubmitLogFastRaw)
    will return a dictionary of jobs in each status
    For example: {'009': ["1.003","2.001"], '012': ["418.001"], '005': ["1503.001","1555.002"]}
    @param jobs: Dictionary of job statuses
    @return: Dictionary of jobs in each status category

    Fix: replaced the bare `except:` fallback (which silently caught every
    exception, including KeyboardInterrupt and TypeError on unhashable
    statuses) with dict.setdefault.
    """
    status = {}
    for job_id, job_status in jobs.items():
        status.setdefault(job_status, []).append(job_id)
    return status
def addLists(list1, list2):
    """Add lists together by value. i.e. addLists([1,1], [2,2]) == [3,3].

    The result has the length of the longer list; the shorter list's
    values are added element-wise onto a copy of the longer one.
    """
    longer, shorter = list(list2), list(list1)
    if len(shorter) > len(longer):
        longer, shorter = shorter, longer
    for idx, val in enumerate(shorter):
        longer[idx] += val
    return longer
def is_palindrome(string):
    """
    Checks the string for palindrome.
    :param string: string to check
    :return: True if string equals its reverse, False otherwise
    """
    return string == string[::-1]
def pageHeader(
        headline="",
        tagline=""):
    """
    *Generate a pageHeader - TBS style*

    **Key Arguments:**
        - ``headline`` -- the headline text
        - ``tagline`` -- the tagline text for below the headline

    **Return:**
        - ``pageHeader`` -- the pageHeader HTML snippet
    """
    # The template is filled via `% locals()`, so the %(...)s placeholder
    # names must match the parameter names above exactly.
    pageHeader = """
    <div class="page-header" id=" ">
        <h1>%(headline)s<br><small>%(tagline)s</small></h1>
    </div>""" % locals()
    return pageHeader
def string_is_null_or_blank(s):
    """Check if a string is null or consists entirely of whitespace."""
    if not s:
        # Covers None and the empty string.
        return True
    return s.isspace()
def get_verbosity_level(verbosity_count: int) -> int:
    """Get verbosity level by how many --vvv were passed.

    Arguments:
        verbosity_count {int} -- how many v's were passed
    Mapping: >=5 -> 10 (debug), 4 -> 20 (info), 3 -> 30 (warning),
    2 -> 40 (error), anything else -> 50 (critical).
    """
    levels = {5: 10, 4: 20, 3: 30, 2: 40}
    # Clamp to 5 so any larger count maps to debug; misses fall to critical.
    return levels.get(min(verbosity_count, 5), 50)
def process_row(row, value_col):
    """
    Extract (date, hh:mm time, value) from a processed CSV row whose
    fourth field holds an ISO 'dateTtime' string.
    """
    parts = row[3].split('T')
    # [:5] keeps just hh:mm from the time portion.
    hh_mm = parts[1][:5]
    return (parts[0], hh_mm, row[value_col])
def _get_ancestors_by_one_uuid(
rp_uuid, parent_uuid_by_rp_uuid, ancestors=None):
"""Returns a set of uuids of ancestors for a given rp uuid"""
if ancestors is None:
ancestors = set([rp_uuid])
parent_uuid = parent_uuid_by_rp_uuid[rp_uuid]
if parent_uuid is None:
return ancestors
ancestors.add(parent_uuid)
return _get_ancestors_by_one_uuid(
parent_uuid, parent_uuid_by_rp_uuid, ancestors=ancestors) |
def cubicip(ipparams, position, etc = []):
    """
    Fit the intra-pixel sensitivity effect with a 2D cubic.

    Parameters
    ----------
    ipparams : sequence of 10 coefficients [a..j]:
        a,b cubic in y,x; c,d cross terms x*y^2, y*x^2; e,f quadratic in
        y,x; g cross term x*y; h,i linear in y,x; j constant.
    position : (y, x, q) pixel position (q unused here)

    Returns
    -------
    Flux value of the intra-pixel model at (y, x).

    Revisions
    ---------
    2008-07-08  Kevin Stevenson, UCF  Original version
    """
    a, b, c, d, e, f, g, h, i, j = ipparams[:10]
    y, x, q = position
    return (a*y**3 + b*x**3 + c*y**2*x + d*y*x**2 + e*y**2 + f*x**2
            + g*y*x + h*y + i*x + j)
def reverse_complement(sequence):
    """return the reverse complement of a nucleotide (including IUPAC ambiguous nuceotide codes)"""
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', 'M': 'K', 'R': 'Y', 'W': 'W',
                  'S': 'S', 'Y': 'R', 'K': 'M', 'V': 'B', 'H': 'D', 'D': 'H', 'B': 'V'}
    complemented = [complement[base] for base in sequence]
    return "".join(reversed(complemented))
def reverse(str):
    """Return the string reversed (parameter name shadows the builtin)."""
    return ''.join(reversed(str))
def membername(class_, val):
    """Convert a member variable/constant into a member name string."""
    for attr_name, attr_val in class_.__dict__.items():
        if attr_val == val:
            return attr_name
    # No member matches: fall back to the value's string form.
    return str(val)
def get_multiples_desc(number, count):
    """
    return the first count multiples of number in desc order in a list.
    e.g call with input (3,2) returns [6,3]
    call with input (5,3) returns [15,10,5]
    """
    ascending = range(number, number * count + 1, number)
    return sorted(ascending, reverse=True)
def isValidWord(word, hand, wordList):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.
    Does not mutate hand or wordList.
    word: string
    hand: dictionary (string -> int)
    wordList: list of lowercase strings
    """
    if word not in wordList:
        return False
    # Work on a copy so the caller's hand is never mutated.
    available = hand.copy()
    for letter in word:
        if available.get(letter, 0) <= 0:
            # Letter missing from the hand, or already used up.
            return False
        available[letter] -= 1
    return True
def gte(value, arg):
    """Returns a boolean of whether the value is greater than or equal to the
    argument (arg is coerced to int before comparing).
    """
    threshold = int(arg)
    return value >= threshold
def listify(data):
    """Check if input is a list. If not, make it a single item list.

    Used for functions designed to operate on multiple objects,
    when only performing operations on a single object.

    Fix: uses isinstance instead of `type(data) == list`, so list
    subclasses are passed through unchanged rather than being wrapped.

    Params
    ------
    data: list or single object
    Returns
    -------
    data: list
        Same list as passed in, or single item list.
    """
    if isinstance(data, list):
        return data
    return [data]
def find_unique(a, b):
    """
    Return the symmetric difference of two iterables as a list: every
    element present in exactly one of ``a`` and ``b``.

    :param a: Iterable number 1.
    :param b: Iterable number 2.
    :return: List of objects unique to either input.
    :rtype: list

    Example:
    --------------------------
    .. code-block:: python
        >>> sorted(find_unique([1, 2, 3], [1, 5, 2]))
        [3, 5]
    """
    return list(set(a) ^ set(b))
def _getWordCategory(word, words_category_index):
"""
Retrieves the category of a word
:param word:
:param words_category_index:
:return:
"""
if word in words_category_index:
return words_category_index[word]
else:
return None |
def longest_increasing_array(arr):
    """
    longest sub array in increasing order
    :param arr: list of comparable items
    :return: maximum length of a strictly increasing contiguous run
    """
    if not arr:
        return 0
    if len(arr) == 1:
        return 1
    # `start` marks the beginning of the current increasing run,
    # `end` is the element being examined.
    end =1
    start = 0
    max_len = 1
    while end < len(arr):
        if arr[end] > arr[end - 1]:
            # Run continues; record it if it beats the best so far.
            if end - start + 1 > max_len:
                max_len = end - start + 1
        else:
            # Run broken; the next run starts at the current index.
            start = end
        end += 1
    return max_len
def calculate_issue_importance(num_trackers, user, light_user):
    """Calculates issue's importance, based on the changelog's popularity
    and if it was reported by human or a robot.

    NOTE(review): `light_user` is currently unused — confirm intent.
    """
    base = 1 + num_trackers
    # Human-reported issues (truthy user) weigh 10x more.
    return base * 10 if user else base
def apply_function(f, *args, **kwargs):
    """ Apply a function or staticmethod/classmethod to the given arguments.

    Fix: raises TypeError for unsupported inputs instead of `assert False`,
    which is silently stripped when Python runs with -O.
    """
    if callable(f):
        return f(*args, **kwargs)
    if len(args) and hasattr(f, '__get__'):
        # support staticmethod/classmethod
        return f.__get__(None, args[0])(*args, **kwargs)
    raise TypeError("expected a function or staticmethod/classmethod")
def names_csv(items, direction = None, initial = None, extra = None):
    """Create a comma-separated list of the names of all items.

    Arguments:
        items -- a list of interface properties, method arguments or signal
                 arguments (objects with .name and .direction attributes)
    Keyword arguments:
        direction -- item direction filter; 'in', 'out' or None (default None)
        initial -- initial CSV string to be prepended (default None)
        extra -- extra CSV string to be appended (default None)
    Returns:
        the CSV string"""
    names = [item.name for item in items
             if not direction or direction == item.direction]
    if initial:
        names.insert(0, initial)
    csv = ','.join(names)
    if extra:
        csv = csv + ',' + extra if csv else extra
    return csv
def is_skipped_node(node_entry):
    """Whether a node is not counted.

    Parameters
    ----------
    node_entry : dict
        Node entry (must carry an "op" key).

    Returns
    -------
    out : bool
        whether node is skipped.
    """
    # Operators not counted in graph tuner.
    return node_entry["op"] in ("Tuple",)
def speed2slowness(value):
    """Map a speed in [0, 1.0] onto an integer slowness in [1, 31]
    (31 at speed 0, down to 1 at speed 1.0)."""
    return int(31 - 30 * value)
def to_chunks(lst, n):
    """Split lst into consecutive sublists of length n (last may be shorter).

    :param lst: List
    :param n: Integer chunk size
    :returns: List of sublists"""
    return [lst[i:i + n] for i in range(0, len(lst), n)]
def parse_rating(line):
    """
    Parse a user record line into a list of three string fields.

    NOTE(review): the original docstring claimed tab-separated fields
    (userId\tgender\tage\t...), but the code splits on "::" and returns
    fields 1, 2 and 0 — presumably gender, age and userId; confirm the
    actual record format against the data source.

    Parameters
    ----------
    line : str
        The line that contains user information, "::"-delimited.

    Returns
    -------
    list : list
        [fields[1], fields[2], fields[0]] as strings.
    """
    fields = line.strip().split("::")[:]
    return [str(fields[1]), str(fields[2]), str(fields[0])]
def is_numpy(value):
    """Check whether 'value' is a numpy array (without importing numpy).

    Args:
        value (any, required): The value to check.
    Returns:
        bool: True if 'value' is a numpy.ndarray, False otherwise.
    """
    type_path = f"{type(value).__module__}.{type(value).__name__}"
    return type_path == 'numpy.ndarray'
def signed_shift(val, shift):
    """Bit shift val: left for positive shift, right for negative.

    Args:
        val (int): Value to be shifted
        shift (int): Number of bits to shift by
    Returns:
        int: Shifted result
    """
    if shift >= 0:
        return val << shift
    return val >> -shift
def command_available(cmd):
    """Is this command available on the system.

    Fix: uses shutil.which — distutils.spawn.find_executable was deprecated
    (PEP 632) and distutils was removed in Python 3.12.
    """
    from shutil import which
    return which(cmd) is not None
def hexadecimal(number, size=1):
    """Improved hex function that can return two's complement numbers.

    NOTE(review): for negative inputs the complement is taken modulo
    2**size (so `size` acts as a bit count there), while zfill(size) pads
    to `size` hex digits — these two uses of `size` disagree; confirm
    which unit callers expect.
    """
    if number < 0:
        number = 2**size + number  # two's complement relative to 2**size
    # .replace('L', '') strips the Python 2 long-integer suffix; harmless on 3.
    return '0x' + hex(number)[2:].zfill(size).replace('L', '')
def detection_length(detection):
    """Calculates total duration of detection based on it's occurrences.

    Sums end - start ("se" - "ss") over every entry under "occs";
    returns 0.0 when there are none.
    """
    return sum((occ["se"] - occ["ss"] for occ in detection.get("occs", [])), 0.0)
def smallvar_description(entry):
    """Return small variant description from query result.

    Works for both dict-like entries (key access) and objects
    (attribute access); fields are '-'-joined in a fixed order.
    """
    keys = (
        "release",
        "chromosome",
        "start",
        "end",
        "bin",
        "reference",
        "alternative",
        "ensembl_gene_id",
    )
    if isinstance(entry, dict):
        values = (entry[key] for key in keys)
    else:
        values = (getattr(entry, key) for key in keys)
    return "-".join(str(value) for value in values)
def custom_formatwarning(msg, *a):
    """Display a custom message and ignore every other warning field
    (category, filename, lineno, ...) passed via *a.
    """
    text = str(msg)
    return text + '\n'
def get_write_to_map_from_permutation(original, permuted):
    """Compute write-to indices relating two orderings of the same items.

    Returns a tuple ``wtm`` such that ``permuted[wtm[i]] == original[i]``
    for every index ``i``. Requires that both sequences hold the same
    distinct, hashable items so the permutation can be inferred.
    """
    assert len(original) == len(permuted)
    position_of = {item: idx for idx, item in enumerate(permuted)}
    # If items repeated, the dict would collapse entries — guard against it.
    assert len(position_of) == len(permuted)
    return tuple(position_of[item] for item in original)
def is_partial_link_text_selector(selector):
    """
    A basic method to determine if a selector is a partial link text selector.
    """
    # str.startswith accepts a tuple of prefixes — one call replaces the
    # previous chain of six separate startswith() checks and the
    # if/return True/return False boilerplate.
    return selector.startswith((
        "partial_link=",
        "partial_link_text=",
        "partial_text=",
        "p_link=",
        "p_link_text=",
        "p_text=",
    ))
def compute_sea_level(altitude: float, atmospheric: float) -> float:
    """
    Calculates the pressure at sea level (in hPa) from the specified altitude
    (in meters), and atmospheric pressure (in hPa).
    # Equation taken from BMP180 datasheet (page 17):
    # http://www.adafruit.com/datasheets/BST-BMP180-DS000-09.pdf
    Args:
        altitude : Altitude in meters
        atmospheric : Atmospheric pressure in hPa
    Return:
        float The approximate pressure
    """
    pressure_ratio = pow(1.0 - altitude / 44330.0, 5.255)
    return atmospheric / pressure_ratio
def differenclists(list1, list2):
    """Return list1 minus list2 as a list (set semantics).

    Elements of list1 that do not appear in list2; duplicates are
    collapsed and the ordering of the result is arbitrary.
    """
    return list(set(list1) - set(list2))
def substitute_var(text, values):
    """Replace every ``${NAME}`` placeholder in *text* with its value.

    :param text: Text in which to subsitute.
    :param values: Dictionary mapping placeholder names to replacement strings.
    """
    for key, replacement in values.items():
        assert isinstance(key, str), "%r is not a string" % key
        assert isinstance(replacement, str), "Value %r for %s is not a string" % (replacement, key)
        text = text.replace("${%s}" % key, replacement)
    return text
def _strip_prefix(s, prefix):
    """A helper to strip the prefix from the string if present"""
    if s and s.startswith(prefix):
        return s[len(prefix):]
    return s
def update_running_average(new_obs, running_average, n):
    """Updates a running average while avoiding large values
    and provoke overflow.
    Parameters
    ----------
    running_average: value of the cumulative average so far
    n: Number of samples including new observation
    new_obs: New observation"""
    weight = 1 / n
    return weight * new_obs + (1 - weight) * running_average
def pythonComments(text):
    """
    Converts comments delimited by # or ## and on a new line into a markdown cell.
    For python files only
    >>> pythonComments('''## This is a
    ... ## multiline comment
    ... def function()''')
    '# <markdowncell>\\n## This is a\\n## multiline comment\\n# <codecell>\\ndef function()\\n'
    >>> pythonComments('''def function():
    ...     variable = 5 # Comment not in cell
    ...     # Comment also not in cell''')
    'def function():\\n    variable = 5 # Comment not in cell\\n    # Comment also not in cell\\n'
    """
    # Fix: the enumerate() index was never used; iterate lines directly.
    lines = text.splitlines()
    newtext = ''
    inComment = False
    for line in lines:
        if line.startswith("#") and not inComment:  # first line of a comment block
            inComment = True
            newtext += "# <markdowncell>\n"
            newtext += (line + "\n")
        elif inComment and not line.startswith("#"):  # first line after a comment block
            inComment = False
            newtext += "# <codecell>\n"
            newtext += (line + "\n")
        else:
            newtext += (line + "\n")
    return newtext
def first_non_consecutive(arr):
    """
    Finds the first element within an array that is not consecutive.
    :param arr: An array of ints.
    :return: the first element not consecutive, otherwise None.
    """
    if not arr:
        return None
    start = arr[0]
    for offset, value in enumerate(arr):
        if value != start + offset:
            return value
    return None
def _validate_inputs_outputs_var_format(value: str) -> str:
    """Validate an inputs/outputs variable reference.

    Arguments:
        value {str} -- A '.' separated string to be checked for
            inputs/outputs variable formatting

    Returns:
        str -- A validation error message, or '' when the value is valid
    """
    # str.split always yields at least one element, so parts[0] is safe.
    parts = value.split('.')
    if parts[0] != 'inputs':
        return f'Inputs and outputs variables can only refer to an input value not: {parts[0]}'
    if len(parts) > 1 and parts[1] != 'parameters':
        return f'Inputs and outputs variables can only refer to an input parameter not: {parts[1]}'
    if len(parts) != 3:
        return 'Inputs and outputs variables must have 3 segments.'
    return ''
def bucket_from_url(url):
    """ Extracts a bucket name from an S3 URL.

        url: an S3 URL, or what should be one

        Return value: bucket name from S3 URL
    """
    if url.startswith('s3n://'):
        start_index = 6
    elif url.startswith('s3://'):
        start_index = 5
    elif url[0] == '/':
        start_index = 1
    else:
        start_index = 0
    # Skip any extra leading slashes before the bucket name.
    while url[start_index] == '/':
        start_index += 1
    remainder = url[start_index:]
    # Fix: previously this raised ValueError ('.index' on a missing '/')
    # when the URL had no key part (e.g. 's3://bucket'); now the whole
    # remainder is treated as the bucket name.
    slash = remainder.find('/')
    return remainder if slash == -1 else remainder[:slash]
def poisson_map(n, alpha=1, beta=0):
    """
    finds the poisson MAP point given a Gamma distribution prior with parameters alpha and beta
    """
    map_estimate = (n + alpha - 1) / (1 + beta)
    if map_estimate < 0:
        raise ValueError("kMAP < 0")
    return map_estimate
def __single_random_event(event, possibilities):
    """Calculates the probability for a single
    event to happen in a list of possibilities.

    Args:
        event: the outcome to look for
        possibilities (list): all possible outcomes

    Returns:
        float: The probability (0 when possibilities is empty)"""
    if not possibilities:
        return 0
    # list.count does the matching at C speed instead of a manual loop.
    return possibilities.count(event) / len(possibilities)
def indToXY(id, width, height):
    """Index to (x,y) location"""
    # Observation map is y-major coordinate; divmod gives (row, col).
    row, col = divmod(id, width)
    return [col, row]
def clean_attrib(value):
    """Cleans up value string.

    Removes a trailing '_0' suffix that randomly shows up.

    Args:
        value (str): attrib value to clean

    Returns:
        str: cleaned attribute value
    """
    # Fix: str.strip('_0') removes ALL leading/trailing '_' and '0'
    # characters (e.g. 'name10_0' -> 'name1'), not just the '_0' suffix.
    # Slicing off the last two characters removes exactly the suffix.
    if value.endswith("_0"):
        return value[:-2]
    return value
def power_of_two(number):
    """number == (2^x)*b, returns x (count of trailing zero bits)"""
    digits = bin(number)
    return len(digits) - len(digits.rstrip('0'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.