content
stringlengths 42
6.51k
|
|---|
def addMissingRes(data, plist):
    """Insert placeholder entries into ``data`` for peaks missing from it.

    ``data`` is expected to line up with ``plist``; whenever the entry at
    position ``i`` does not carry the expected name, a stub peak with
    ``dataexist`` set to ``False`` is inserted so the two lists stay aligned.

    :param data: list of dicts, each carrying at least a "name" key
    :param plist: list of expected peak names, in order
    :return: the (mutated) ``data`` list with one entry per name in ``plist``
    """
    for i in range(len(plist)):
        # Bug fix: also insert when ``data`` is exhausted — the original
        # indexed ``data[i]`` unconditionally and raised IndexError when
        # peaks were missing at the end of the list.
        if i >= len(data) or data[i]["name"] != plist[i]:
            data.insert(i, {"name": plist[i], "dataexist": False})
    return data
|
def join_list(lst, string):
    """Join the items of *lst* into a single string separated by *string*.

    :param lst: iterable of items to join (items are stringified first)
    :param string: separator placed between items
    :return: the joined string
    """
    separator = str(string)
    return separator.join(str(item) for item in lst)
|
def permutation_from_block_permutations(permutations):
    """Concatenate block permutations into one permutation tuple.

    Reverse operation to :py:func:`permutation_to_block_permutations`,
    e.g. ``(1,2,0) [+] (0,2,1) --> (1,2,0,3,5,4)``.

    :param permutations: list of permutation tuples
    :type permutations: list of tuples
    :return: the combined permutation image tuple
    :rtype: tuple
    """
    combined = []
    offset = 0
    for perm in permutations:
        # Shift each block's images past everything already placed.
        combined.extend(image + offset for image in perm)
        offset += len(perm)
    return tuple(combined)
|
def nonAlphaChars(tok):
    """Strip alphabetic characters from *tok*, replacing digits with 'D'.

    The result may be the empty string.
    """
    kept = []
    for ch in tok:
        if ch.isdigit():
            kept.append("D")
        elif not ch.isalpha():
            kept.append(ch)
    return "".join(kept)
|
def cve_is_about_system(cpe_type):
    """Return True when the CVE is about a system (OS or hardware) only.

    :param cpe_type: string of CPE part letters drawn from {'a', 'o', 'h'}
        (application, operating system, hardware)
    :return: True if an 'o' or 'h' part is present and no 'a' part is.
    """
    has_system_part = ('o' in cpe_type) or ('h' in cpe_type)
    has_application_part = 'a' in cpe_type
    return has_system_part and not has_application_part
|
def get_emoji_url(emoji_id):
    """Return the Discord CDN image URL for the emoji with *emoji_id*."""
    return "https://cdn.discordapp.com/emojis/{}.png".format(emoji_id)
|
def _check_inputs(max_steps, inner, outer):
"""Checks whether max_steps, inner and outer are okay.
Can be run before loading matrix into memory."""
if max_steps < 1:
raise ValueError('maxsteps must be a positive integer')
if inner is None:
if outer is not None:
raise ValueError('If inner is None, outer must be None')
elif outer is None:
outer = inner
elif outer < inner:
raise ValueError('outer must exceed or be equal to inner')
return inner, outer
|
def interchanging_key_and_value(x):
    """Swap a (key, value) pair, returning (value, key)."""
    key, value = x[0], x[1]
    return (value, key)
|
def Score(low, high, n):
    """Score whether the actual value *n* falls in [low, high].

    Hitting an endpoint counts as 0.5; a missing value is invalid.

    low: low end of range
    high: high end of range
    n: actual value
    Returns: -1 (invalid), 0 (miss), 0.5 (endpoint) or 1 (inside)
    """
    if n is None:
        return -1
    if n == low or n == high:
        return 0.5
    return 1 if low < n < high else 0
|
def search_1(addresses: list, street_number: str, street_name: str) -> list:
    """
    Two-term address search implemented with set intersection.
    An empty term matches every address.
    :author: jrg94
    :param addresses: a list of addresses
    :param street_number: a street number
    :param street_name: a street name
    :return: street addresses matching both terms (order not guaranteed)
    """
    number_matches = {a for a in addresses if not street_number or street_number in a}
    name_matches = {a for a in addresses if not street_name or street_name in a}
    return list(number_matches & name_matches)
|
def mandel(x, y, max_iters):
    """Test Mandelbrot-set membership for the point x + y*i.

    Iterates z -> z*z + c at most *max_iters* times and returns the
    iteration index at which |z|^2 first reaches 4, or *max_iters* when
    the orbit never escapes (candidate for membership).
    """
    c = complex(x, y)
    z = complex(0.0, 0.0)
    for iteration in range(max_iters):
        z = z * z + c
        if z.real ** 2 + z.imag ** 2 >= 4:
            return iteration
    return max_iters
|
def acceptable_bearing_error(bearing1, bearing2, delta):
    """Check whether two bearings are within *delta* degrees of each other,
    correctly handling differences that wrap through zero (North).

    :param bearing1: the first bearing, degrees (number or pint-style Quantity)
    :param bearing2: the second bearing, degrees (number or pint-style Quantity)
    :param delta: the acceptable separation, degrees
    :return: True when the angular difference is at most delta
    """

    def _magnitude(bearing):
        # Quantities expose .magnitude; anything else is coerced to float.
        try:
            return bearing.magnitude
        except AttributeError:
            return float(bearing)

    b1 = _magnitude(bearing1)
    b2 = _magnitude(bearing2)
    # Compact wrap-around test, from:
    # https://gamedev.stackexchange.com/a/4472/8270
    difference = 180 - abs(abs(b1 - b2) - 180)
    return difference <= delta
|
def _extract_sub_bs(basis_state, qubits):
""" Extract state of qubits in specified order, given in computational basis
Since the input is in basis state, and the basis states of system only
containing the sublist of qubits are a subset of the full basis,
the state we look for is a basis state as well. This means we can
return an integer here, instead of a full state.
Example:
>>> _extract_sub_bs(7, [3, 0, 1]) # 7 = 0b0111 -> 0b[1][0][3] = 0b110
6
:param basis_state: int
:param qubits: Indices of qubits to extract from basis_state, in order of
ascending significance.
:return: Integer, representing state of qubits in the overall
state basis_state.
"""
return sum(1 << i
for i in range(len(qubits))
# if i-th apply qubit is set
if basis_state & (1 << qubits[i]) != 0)
|
def wordlist_to_worddict(wordlist):
    """Group *wordlist* into a dict keyed by each word's first letter.

    Used for acrostic pass phrase generation.  A plain dict (rather than a
    collections.defaultdict) keeps dependencies minimal.
    """
    worddict = {}
    for word in wordlist:
        worddict.setdefault(word[0], []).append(word)
    return worddict
|
def gen_input(k1, k2, k3, k4, rho_0, c_p1, c_p2, c_p3, eps, Y1_0,
              A1, A2, E1, E2, dh1, dh2):
    """Generate a gyp1d input file from a template and return the case name.

    Keyword arguments fill the &matl namelist:
    matl = [ k1, k2, k3, k4, rho_0, c_p1, c_p2, c_p3, eps, Y1_0,
             A1, A2, E1, E2, dh1, dh2 ]

    Side effect: writes the rendered template to '../case.inp'.
    :return: the case name ('case')
    """
    template = """
&matl
k_temps = 273.15, 448.15, 1088.15, 1473.15
k_vals = %(k1)s, %(k2)s, %(k3)s, %(k4)s
rho_0 = %(rho_0)s
c_p = %(c_p1)s, %(c_p2)s, %(c_p3)s
eps = %(eps)s
Y1_0 = %(Y1_0)s
A = %(A1)s, %(A2)s
E = %(E1)s, %(E2)s
dh = %(dh1)s, %(dh2)s /
&scen
L = 0.0159
L_a = 0.092
t_end = 3601
H = 3.048 /
&numr
N = 30
N_t = 160000
N_sol = 100 /
"""
    # ==================================================
    # = Generate gyp1d input file =
    # ==================================================
    outcase = template % {'k1': str(k1),
                          'k2': str(k2),
                          'k3': str(k3),
                          'k4': str(k4),
                          'rho_0': str(rho_0),
                          'c_p1': str(c_p1),
                          'c_p2': str(c_p2),
                          'c_p3': str(c_p3),
                          'eps': str(eps),
                          'Y1_0': str(Y1_0),
                          'A1': str(A1),
                          'A2': str(A2),
                          'E1': str(E1),
                          'E2': str(E2),
                          'dh1': str(dh1),
                          'dh2': str(dh2)}
    # =====================
    # = Write gyp1d files =
    # =====================
    casename = 'case'
    filename = '../' + casename + '.inp'
    # Bug fix: use a context manager so the file is closed even when
    # write() raises (the original leaked the handle on failure).
    with open(filename, 'w') as f:
        f.write(outcase)
    return casename
|
def fitarg_rename(fitarg, ren):
    """Rename parameter names in ``fitarg`` with a rename function.

    ``ren`` may also be a string, in which case it is used as a prefix
    (``x`` becomes ``ren_x``).  The special prefixes ``limit_``, ``fix_``
    and ``error_`` are kept in front of the renamed base name::

        fitarg_rename({'x': 1, 'limit_x': 1, 'fix_x': 1, 'error_x': 1},
                      lambda pname: 'y' if pname == 'x' else pname)
        # -> {'y': 1, 'limit_y': 1, 'fix_y': 1, 'error_y': 1}
    """
    if isinstance(ren, str):
        prefix_str = ren
        ren = lambda name, _p=prefix_str: _p + "_" + name  # noqa: E731
    known_prefixes = ("limit_", "fix_", "error_")
    renamed = {}
    for key, value in fitarg.items():
        base = key
        found_prefix = ""
        for prefix in known_prefixes:
            if key.startswith(prefix):
                base = key[len(prefix):]
                found_prefix = prefix
        renamed[found_prefix + ren(base)] = value
    return renamed
|
def remove_cdata_tags_from_every_node(content: str):
    """Strip the CDATA wrappers from every node in the document string."""
    without_close = content.replace(']]>', '>')
    return without_close.replace('<![CDATA[', '<')
|
def find_boyer_moore(T, P):
    """Return the lowest index of T at which substring P begins (or else -1).

    Boyer-Moore matching using only the bad-character heuristic: the 'last'
    table maps each pattern character to its final index in P, letting the
    search jump forward on a mismatch instead of shifting by one.

    :param T: text sequence to search in
    :param P: pattern sequence to look for
    :return: lowest start index of P within T, or -1 when absent
    """
    n, m = len(T), len(P)               # introduce convenient notations
    if m == 0: return 0                 # trivial search for empty string
    last = {}                           # build 'last' dictionary
    for k in range(m):
        last[P[k]] = k                  # later occurrence overwrites
    # align end of pattern at index m-1 of text
    i = m - 1                           # an index into T
    k = m - 1                           # an index into P
    while i < n:
        if T[i] == P[k]:                # a matching character
            if k == 0:
                return i                # pattern begins at index i of text
            else:
                i -= 1                  # examine previous character
                k -= 1                  # of both T and P
        else:
            j = last.get(T[i], -1)      # last(T[i]) is -1 if not found
            i += m - min(k, j + 1)      # case analysis for jump step
            k = m - 1                   # restart at end of pattern
    return -1
|
def levenshtein_distance(word1, word2):
    """Compute the case-insensitive Levenshtein (edit) distance.

    Credit: https://en.wikipedia.org/wiki/Levenshtein_distance
    :param word1: first word
    :param word2: second word
    :return: minimum number of single-character edits between the two
    """
    a = word1.lower()
    b = word2.lower()
    rows, cols = len(a) + 1, len(b) + 1
    dist = [[0] * cols for _ in range(rows)]
    # Base cases: transforming to/from the empty prefix.
    for r in range(rows):
        dist[r][0] = r
    for c in range(cols):
        dist[0][c] = c
    for r in range(1, rows):
        for c in range(1, cols):
            substitution_cost = 0 if a[r - 1] == b[c - 1] else 1
            dist[r][c] = min(
                dist[r - 1][c] + 1,                      # deletion
                dist[r - 1][c - 1] + substitution_cost,  # substitution/match
                dist[r][c - 1] + 1,                      # insertion
            )
    return dist[rows - 1][cols - 1]
|
def get_attr(instance, name):
    """Fetch the value of attribute *name* from *instance*.

    :param instance: The instance.
    :type instance: object
    :param name: The attribute name.
    :type name: str
    :return: the attribute's value (raises AttributeError when absent)
    """
    return getattr(instance, name)
|
def _lyn(w):
    """
    Returns the length of the longest prefix of ``w`` that is a Lyndon word.
    EXAMPLES::
        sage: import sage.combinat.necklace as necklace
        sage: necklace._lyn([0,1,1,0,0,1,2])
        3
        sage: necklace._lyn([0,0,0,1])
        4
        sage: necklace._lyn([2,1,0,0,2,2,1])
        1
    """
    # Duval-style scan: p is the length of the longest Lyndon prefix found.
    p = 1
    # Sentinel: one past the largest letter, used as an upper bound below.
    k = max(w)+1
    for i in range(1, len(w)):
        b = w[i]
        a = w[:i]
        # Compare w[i] to the letter one period back (w[i-p]):
        # a smaller or out-of-range letter ends the Lyndon prefix here.
        if b < a[i-p] or b > k-1:
            return p
        elif b == a[i-p]:
            # Letter repeats the period; prefix length is unchanged.
            pass
        else:
            # Strictly larger letter extends the Lyndon prefix to i+1.
            p = i+1
    return p
|
def add_vec(vec1, vec2):
    """Return the component-wise sum of two 2-D vectors as a tuple."""
    x_sum = vec1[0] + vec2[0]
    y_sum = vec1[1] + vec2[1]
    return x_sum, y_sum
|
def f(fstr):
    """Poor man's f-strings for Python versions before 3.6 (2.7, 3.5, ...).

    Formats *fstr* against this module's globals(), which is somewhat hacky.
    Non-string input yields None.
    """
    # Deliberately an exact-type check (str subclasses are rejected),
    # matching the original behavior.
    is_plain_str = type(fstr) is str
    return fstr.format(**globals()) if is_plain_str else None
|
def is_cjk_punctuation(char):
    """Return True if *char* lies in U+3000..U+300F (CJK punctuation marks)."""
    codepoint = ord(char)
    return 0x3000 <= codepoint <= 0x300F
|
def parse_cutoffs(s):
    """Parse a string "INT[,INT]" into a two-element list of integers.

    FUNCTION ADOPTED FROM CUTADAPT 2.7, TO CREATE INPUT PARAMETERS
    ACCORDING TO CUTADAPT.  Exits with an error message on bad input.
    >>> parse_cutoffs("5") => [0, 5]
    >>> parse_cutoffs("6,7") => [6, 7]
    """
    try:
        cutoffs = [int(part) for part in s.split(",")]
    except ValueError as e:
        exit("Quality cutoff value not recognized: {}".format(e))
    if len(cutoffs) == 1:
        return [0, cutoffs[0]]
    if len(cutoffs) != 2:
        exit("Expected one value or two values separated by comma for the quality cutoff")
    return cutoffs
|
def formatTime(seconds):
    """Format a number of elapsed seconds as an "h:mm" string."""
    total_minutes = seconds // 60
    hours, minutes = divmod(total_minutes, 60)
    return "%d:%02d" % (hours, minutes)
|
def similarTuple(left, right, max):
    """
    Compare the first *max* end return codes of two tree.endpoint tuples.
    False as soon as any pair of entries differs by more than 0.5.
    """
    for index in range(max):
        if abs(left[index] - right[index]) > 0.5:
            return False
    return True
|
def getcompanyrow(values):
    """
    :param values: mapping of form-field keys to their values
    :return: list of values representing a row in the company table
    """
    field_order = (
        '_COMPANYNUMBER_', '_COMPANYNAME_', '_WEBADDRESS_',
        '_STREETADDRESS1_', '_STREETADDRESS2_', '_CITY_', '_STATE_',
        '_ZIPCODE_', '_NOTES_', '_PHONE_',
    )
    return [values[field] for field in field_order]
|
def greet_documented(name, greeting="Hi,"):
    """Build a greeting for `name` using `greeting`.

    :param name: name of the person being greeted
    :param greeting: greeting phrase, defaults to "Hi,"
    :returns: the greeting and name joined by a space
    """
    return "{} {}".format(greeting, name)
|
def get_uniprot_evidence_level(header):
    """Return the UniProt protein-existence (PE) evidence for a FASTA header.

    Evidence levels are 1-5, but 5 - level is returned since sorting still
    demands that higher is better.  -1 when no PE field is present.
    """
    for token in header.split():
        parts = token.split('=')
        try:
            if parts[0] == 'PE':
                return 5 - int(parts[1])
        except IndexError:
            continue
    return -1
|
def linearlyInterpolate(v0, v1, blend=.5):
    """Get the vector interpolated between two vectors.

    Arguments:
        v0 (vector): vector A.
        v1 (vector): vector B.
        blend (float): blending weight; 0 yields v0, 1 yields v1.
    Returns:
        vector: The interpolated vector.
    """
    return (v1 - v0) * blend + v0
|
def fahrenheit_to_celsius(fahrenheit):
    """
    Convert degrees Fahrenheit to degrees Celsius.
    :param fahrenheit: temperature in degrees Fahrenheit (any numeric/str
        value accepted by float())
    :return: temperature in degrees Celsius as a float
    """
    # Docstring fix: the original labeled the parameter "celsius" and the
    # return "Fahrenheit", which inverted the actual conversion direction.
    return (float(fahrenheit) - 32) * 5.0 / 9.0
|
def power_law(x, amp, slope):
    """Evaluate the power-law model amp * x**slope.

    :param x: Dispersion
    :type x: np.ndarray
    :param amp: Amplitude of the power law
    :type amp: float
    :param slope: Slope (exponent) of the power law
    :type slope: float
    :return: Power law model evaluated at x
    :rtype: np.ndarray
    """
    return amp * x ** slope
|
def replaceInvalidStr(outputStr):
    """
    Sanitize *outputStr*: strip quote characters, defuse the word "echo"
    and flatten newlines into spaces.
    input : outputStr
    output : str
    """
    cleaned = outputStr
    for forbidden in ("'", '"', "`"):
        cleaned = cleaned.replace(forbidden, "")
    cleaned = cleaned.replace("echo", "e c h o")
    return cleaned.replace("\n", " ")
|
def split_config(raw):
    """Split configuration into overall and per-stage.

    Removes the first entry containing an "overall" key from *raw* (mutating
    the list) and returns (overall_value, remaining); ({}, raw) when absent.
    """
    for position, entry in enumerate(raw):
        if "overall" not in entry:
            continue
        del raw[position]
        return entry["overall"], raw
    return {}, raw
|
def calc_median(values_list):
    """Calculate the median of the list in O(n log n).

    Also returns the sorted list (for optional reuse) and its length.
    The median of an empty list is reported as 0.0.

    :param values_list: iterable of numeric values
    :return: (median, sorted_list, n)
    """
    sorted_list = sorted(values_list)
    n = len(sorted_list)
    if n == 0:
        return 0.0, sorted_list, n
    half = n >> 1
    if n % 2 == 1:
        median = sorted_list[half]
    else:
        # Bug fix: average the two MIDDLE elements (indices half-1 and half).
        # The original used half and half+1, which raised IndexError for
        # n == 2 and returned the wrong median for larger even n.
        median = 0.5 * (sorted_list[half - 1] + sorted_list[half])
    return median, sorted_list, n
|
def clip_string(s, limit=1000, sep=None):
    """
    Clip a string at a given character count and add "..." if it was clipped.
    If a separator is specified, the string is not clipped at the given limit
    but after the last occurrence of the separator below the limit.
    :param s: string to clip
    :type s: str
    :param limit: number of characters to retain (including "...")
    :type limit: int
    :param sep: separator
    :type sep: str
    :rtype: str
    """
    if len(s) < limit:
        return s
    clipped = s[: limit - 3]
    if sep is not None:
        cut = clipped.rfind(sep)
        if cut != -1:
            clipped = clipped[: cut + len(sep)]
    # Bug fix: the docstring promises "..." whenever the string is clipped,
    # but the original only appended it on the separator-found path.
    return clipped + "..."
|
def klGauss(x, y, sig2=1.):
    """Kullback-Leibler divergence between Gaussians with common variance sig2."""
    diff = x - y
    return diff * diff / (2 * sig2)
|
def precondition_operand_kinds(operand_kinds):
    """For operand kinds that have the same number, make sure they all have
    the same extension list.

    Pass 1 collects, per (kind, value) key, the sorted union of extensions
    across same-valued enumerants; pass 2 writes that union back onto every
    enumerant whose union is non-empty.
    """
    exts = {}
    # Pass 1: accumulate the union of extensions for each (kind, value).
    for kind_entry in operand_kinds:
        kind_name = kind_entry.get('kind')
        for enum_entry in kind_entry.get('enumerants', []):
            key = kind_name + '.' + str(enum_entry.get('value'))
            combined = exts.get(key, []) + enum_entry.get('extensions', [])
            exts[key] = sorted(set(combined))
    # Pass 2: make each same-valued enumerant carry the same list.
    for kind_entry in operand_kinds:
        kind_name = kind_entry.get('kind')
        for enum_entry in kind_entry.get('enumerants', []):
            key = kind_name + '.' + str(enum_entry.get('value'))
            if exts[key]:
                enum_entry['extensions'] = exts[key]
    return operand_kinds
|
def vwap(dollar_volume: list, volume: list) -> float:
    """
    Get the Volume Weighted Average Price (VWAP).
    :param dollar_volume: (list) of dollar volumes
    :param volume: (list) of trade sizes
    :return: (float) total dollar volume divided by total volume
    """
    total_dollars = sum(dollar_volume)
    total_volume = sum(volume)
    return total_dollars / total_volume
|
def get16(bytes_arr):
    """Assemble a little-endian 16-bit value from the first two bytes."""
    low_byte = bytes_arr[0]
    high_byte = bytes_arr[1]
    return low_byte + (high_byte << 8)
|
def get_name_from_email(email):
    """
    Extract first and last name from an email address.
    Expected format of the email address is `first_name.last_name@example.com`.
    Both return values are empty strings when the local part does not contain
    two dot-separated, non-empty name components.
    Parameters
    ----------
    email : str
        Email address to extract the first name and last name from
    Return
    ------
    str
        First name extracted from the email address
    str
        Last name extracted from the email address
    """
    local_part = email.split("@")[0]
    name_parts = local_part.split(".")
    if len(name_parts) > 1 and name_parts[0] and name_parts[1]:
        return name_parts[0], name_parts[1]
    return "", ""
|
def countAndSayA(n):
    """
    Return the n-th term of the count-and-say sequence.
    Each term "reads aloud" the previous one (e.g. "1211" -> one 1, one 2,
    two 1s -> "111221").  The first five terms are precomputed.
    :type n: int
    :rtype: str
    """
    sample=["1","11","21","1211","111221"]
    if n <= 5:
        return sample[n-1]
    for x in range(5,n):
        result=""
        string=sample[x-1]   # previous term, to be "read aloud"
        initial=string[0]    # character of the current run
        count=0              # length of the current run
        for i in range(len(string)):
            if string[i] == initial:
                count+=1
                if i == len(string)-1:
                    # the string ends inside the current run; flush it
                    result+=str(count)+str(initial)
            else:
                # run ended: emit it, then start a new run at string[i]
                result+=str(count)+str(initial)
                initial=string[i]
                count=1
                if i == len(string)-1:
                    # the new single-character run is also the last one
                    result+=str(count)+str(initial)
        sample.append(result)
    return sample[n-1]
|
def _mg_eq(xt, xtau, a=0.2, b=0.1, n=10):
"""
Mackey-Glass time delay diffential equation, at values x(t) and x(t-tau).
"""
return -b*xt + a*xtau / (1+xtau**n)
|
def segment_range_to_fragment_range(segment_start, segment_end, segment_size,
                                    fragment_size):
    """
    Takes a byterange spanning some segments and converts that into a
    byterange spanning the corresponding fragments within their fragment
    archives.
    Handles prefix (start only), suffix (end only), and fully-specified
    byte ranges.
    :param segment_start: first byte of the first segment, or None for a
        suffix byte range
    :param segment_end: last byte of the last segment, or None for a
        prefix byte range
    :param segment_size: size of an EC segment, in bytes
    :param fragment_size: size of an EC fragment, in bytes
    :returns: a 2-tuple (frag_start, frag_end) where
    * frag_start is the first byte of the first fragment, or None if this
    is a suffix byte range
    * frag_end is the last byte of the last fragment, or None if this is a
    prefix byte range
    """
    # Note: segment_start and (segment_end + 1) are
    # multiples of segment_size, so we don't have to worry
    # about integer math giving us rounding troubles.
    #
    # There's a whole bunch of +1 and -1 in here; that's because HTTP wants
    # byteranges to be inclusive of the start and end, so e.g. bytes 200-300
    # is a range containing 101 bytes. Python has half-inclusive ranges, of
    # course, so we have to convert back and forth. We try to keep things in
    # HTTP-style byteranges for consistency.
    # the index of the first byte of the first fragment
    fragment_start = ((
        segment_start // segment_size * fragment_size)
        if segment_start is not None else None)
    # the index of the last byte of the last fragment
    # (a single conditional expression with three arms, selected by which
    # of segment_end / segment_start is None)
    fragment_end = (
        # range unbounded on the right
        None if segment_end is None else
        # range unbounded on the left; no -1 since we're
        # asking for the last N bytes, not to have a
        # particular byte be the last one
        ((segment_end + 1) // segment_size
         * fragment_size) if segment_start is None else
        # range bounded on both sides; the -1 is because the
        # rest of the expression computes the length of the
        # fragment, and a range of N bytes starts at index M
        # and ends at M + N - 1.
        ((segment_end + 1) // segment_size * fragment_size) - 1)
    return (fragment_start, fragment_end)
|
def remove_lower(free_text):
    """Remove lowercase words (assumed to not be associated with
    countries, races or ancestries) from a free-text string.

    Keyword arguments:
    free_text: the free text string prior to splitting

    Taken from the comms bio paper, possibly needs updating periodically.
    Words are kept when they are title-cased, numeric, "and"/"or", or
    contain a semicolon (which is normalized to a bare ';').
    """
    free_text = free_text.replace('up to', '')
    for word in free_text.split(' '):
        # A word whose title-case differs from itself is not capitalized.
        if (word.title() != word.strip()):
            try:
                # Numeric tokens are kept as-is.
                float(word)
            except ValueError:
                if ';' in word:
                    # Collapse a semicolon-bearing word to just the ';'.
                    free_text = free_text.replace(word, ';').strip()
                elif (';' not in word) and (word != "and") and (word != "or"):
                    # Remove the word together with one adjoining space,
                    # handling the leading-word case separately.
                    if free_text.find(word) == 0:
                        free_text = free_text.replace(word + ' ', ' ')
                    else:
                        free_text = free_text.replace(' ' + word, ' ')
    return free_text.strip()
|
def convert(data):
    """Convert lines of "<qty> <information>" into a list of dicts.

    Each newline-separated line is split on its first space; the leading
    field becomes the integer 'qty', the remainder 'information'.
    """
    return [
        {'qty': int(parts[0]), 'information': parts[1]}
        for parts in (line.split(' ', 1) for line in data.split('\n'))
    ]
|
def is_internet_file(url):
    """Return whether *url* starts with http://, https://, or ftp://.

    Args:
        url (str): URL of the link
    """
    # str.startswith accepts a tuple of candidate prefixes.
    return url.startswith(("http://", "https://", "ftp://"))
|
def subset(data: list, default_length: int):
    """
    Get a leading subset of a list, at most *default_length* items long.
    :param data: list
    :param default_length: default (maximum) length of the result
    :return: list
    """
    # Stop if nothing was found:
    if not data:
        return []
    # Slicing already caps at len(data), so no explicit min() is needed.
    return data[:default_length]
|
def generate_natural_language_explanation(bias_dict):
    """
    Given the output of the classifier, generate a natural language
    explanation like "This article likely has a conservative bias".
    Input:
        bias_dict, which maps biases to probabilities e.g.:
        { "left bias" : 0.2 , etc }
    Output:
        a natural language explanation as string
    """
    # Overall bias = the most probable category (first such key on ties).
    top_probability = max(bias_dict.values())
    bias_type = next(k for k, v in bias_dict.items() if v == top_probability)
    # Confidence wording: >= 75% -> "likely has", otherwise "may have".
    confidence = "likely has" if top_probability >= 0.75 else "may have"
    return "This article {} a {}".format(confidence, bias_type)
|
def add_one(value):
    """Simple add function for test purposes; sleeps briefly on purpose."""
    import time
    time.sleep(0.1)
    return 1 + value
|
def to_hass_level(level):
    """Convert a Lutron light level (0-100) to the Home Assistant scale (0-255)."""
    scaled = level * 255 // 100
    return int(scaled)
|
def Exclude_Confls(tributary: list, disexl: float):
    """Exclude tributaries or main-stem intervals shorter than the
    exclusion length (disexl).

    A cell is kept when its length field (index 3) is >= disexl.
    """
    return [cell for cell in tributary if cell[3] >= disexl]
|
def fix_escape_chars(text: str) -> str:
    """
    Replace escaped sequences such as "\\t" with the actual character ("\t").
    The errors= choices (ignore/replace) are kind of just guesses at what
    would be best; if there is a more logical reason to use something else LMK!
    """
    raw = text.encode(encoding="utf8", errors="ignore")
    return raw.decode(encoding="unicode_escape", errors="replace")
|
def share_diagonal(x0, y0, x1, y1):
    """Return True when (x0, y0) and (x1, y1) lie on a shared diagonal."""
    # Two points clash diagonally when |dx| equals |dy|.
    return abs(x1 - x0) == abs(y1 - y0)
|
def move(x, y, direction, multiplier):
    """Move point (x, y) by *multiplier* in *direction*; returns a pair.

    Unknown directions leave the point unchanged.
    """
    offsets = {
        "UP": (0, -1),
        "DOWN": (0, 1),
        "LEFT": (-1, 0),
        "RIGHT": (1, 0),
    }
    dx, dy = offsets.get(direction, (0, 0))
    return (x + dx * multiplier, y + dy * multiplier)
|
def test_average_intensity(intensities, intensity, zero_input):
    """Check whether the mean of non-empty channels reaches a threshold.

    Channels at or below *zero_input* are treated as "empty" and excluded.
    Returns False when fewer than 3 channels carry minimal intensity,
    otherwise True iff the average of the remaining channels is >= intensity.
    """
    usable = [value for value in intensities if value > zero_input]
    # Exclude the PSM when too few channels have minimal intensity.
    if len(usable) < 3:
        return False
    average = sum(usable) / float(len(usable))
    return average >= intensity
|
def findtype(s, makeintfloats=False):
    """Classify string *s* and convert it to its native type.

    args:
        s : mandatory, string
        makeintfloats : optional; when True, integers are reported as
            floats ('f4') instead of ints ('i8')
    returns:
        tuple: (typecode, converted) where typecode is 'i8', 'f4' or 'a20'
    example usage:
        t, v = findtype("3")  # -> ('i8', 3)
    status:
        seems to be working (R. Biswas, 2013; copied from ioutilst as is).
    """
    try:
        as_int = int(s)
    except ValueError:
        pass
    else:
        return ('f4', float(s)) if makeintfloats else ('i8', as_int)
    try:
        return 'f4', float(s)
    except ValueError:
        return "a20", s
|
def hexWithoutQuotes(l):
    """Return a string listing hex values without the single quotes.

    >>> hexWithoutQuotes(range(3))
    '[0x0, 0x1, 0x2]'
    """
    hex_strings = [hex(i) for i in l]
    return str(hex_strings).replace("'", "")
|
def reverse_bits(x):
    """
    Question 5.3: Reverse the bits of *x* within a 64-bit word.
    """
    WORD_SIZE = 64
    bits = '{:08b}'.format(x)               # binary digits of x, min 8 wide
    # Reverse and right-pad with zeros so the low bits land at the top.
    reversed_bits = bits[::-1].ljust(WORD_SIZE, '0')
    return int(reversed_bits, 2)
|
def percent_format(x: float) -> str:
    """Format *x* as a percent string with two decimals, e.g. '12.34%'."""
    return "{:.2f}%".format(x)
|
def sanitize_kwargs(param_kwargs):
    """Remove the 'data' and 'cmd' keys, if present.

    Note: the input mapping is modified in place and returned.
    """
    for reserved_key in ("data", "cmd"):
        param_kwargs.pop(reserved_key, None)
    return param_kwargs
|
def is_odd(x):
    """Return True when *x* is odd, False otherwise."""
    return bool(x % 2)
|
def _validate_bound_type(bound_type):
"""Valdiate the bound type input.
Warnings
--------
This method is intended for internal use only.
"""
# Validate bound type
if bound_type not in {"absolute", "abs", "deviation", "dev"}:
raise ValueError("`bound_type` must be 'absolute' or 'deviation'")
return bound_type
|
def list_2_dict(kv_list, fld_del="."):
    """Function: list_2_dict
    Description: Turn a delimited key.value list into a dictionary of
        lists.  Values sharing the same key are appended to that key's list.
    Arguments:
        (input) kv_list -> Key_Value list.
        (input) fld_del -> Field delimiter for the split.
        (output) dict_list -> Dictionary list.
    """
    dict_list = {}
    for item in list(kv_list):
        dbs, tbl = item.split(fld_del)
        dict_list.setdefault(dbs, []).append(tbl)
    return dict_list
|
def get_catalog_info(key):
    """
    Return a dict describing the catalog level, instrument, path and row.
    Keys are:
      - level (int): 0 is the deepest level, satellite/camera/path/row
      - satellite+mission, camera, path, row: components parsed from the
        key; components below the current level stay None
      - is_collection (bool): True when the catalog at this level is a
        collection (@todo not currently used, check for removal)
    Input:
        key (string): 'directory' key
    """
    parts = key.split("/")
    level = 4 - len(parts)
    assert 0 <= level <= 2, "Unsupported level " + str(level)
    component_names = ["satellite+mission", "camera", "path", "row"]
    info = {name: None for name in component_names}
    info["level"] = level
    info["is_collection"] = level == 2
    for name, component in zip(component_names, parts):
        info[name] = component
    return info
|
def get_device_mapping(idx):
    """Map an integer index to a unix device letter, starting at 'b'."""
    return chr(ord('b') + idx)
|
def get_filename(key):
    """Rename a TF tensor name to the corresponding Keras weight file name.

    # Arguments
        key: tensor name in TF (determined by tf.variable_scope)

    Returns the '.npy' file name, or None for Momentum slot variables.
    The order of replacements matters and is preserved.
    """
    name = str(key).replace('/', '_')
    name = name.replace('xception_65_', '')
    name = name.replace('decoder_', '', 1)
    name = name.replace('BatchNorm', 'BN')
    if 'Momentum' in name:
        return None
    if 'entry_flow' in name or 'exit_flow' in name:
        name = name.replace('_unit_1_xception_module', '')
    elif 'middle_flow' in name:
        name = name.replace('_block1', '')
        name = name.replace('_xception_module', '')
    # from TF to Keras naming
    name = name.replace('_weights', '_kernel')
    name = name.replace('_biases', '_bias')
    return name + '.npy'
|
def get_A1_const(alpha1, alpha2, lam_c):
"""Function to compute the constant A1.
Args:
alpha1 (float): The alpha1 parameter of the WHSCM.
alpha2 (float): The alpha2 parameter of the WHSCM.
lam_c (float): The switching point between the
two exponents of the double power-laws
in the WHSCM.
Returns:
A1 (float): The A1 constant of the WHSCM.
"""
A1 = (alpha1 - 1.) * (alpha2 - 1.) / ( (lam_c**(1.-alpha1))*(alpha1 - alpha2) + alpha2 - 1. )
return A1
|
def register_new_peak(background_index_left, background_index_right, keys):
    """Register a new peak between two background positions.

    Args:
        background_index_left (int): index in compressed depths of the
            background left of the peak
        background_index_right (int): index in compressed depths of the
            background right of the peak
        keys (List): keys of the compressed depths dictionary
    Returns:
        tuple: (start, end) of the registered peak
    """
    # Wide separations (>= 5 indices) shrink inward one key on each side.
    is_wide = background_index_right - background_index_left >= 5
    left = background_index_left + 1 if is_wide else background_index_left
    right = background_index_right - 1 if is_wide else background_index_right
    return keys[left][0], keys[right][1]
|
def is_int(kills, deaths, assists):
    """Determine whether a match with the given KDA counts as an "int".

    Args:
        kills (int): # of kills
        deaths (int): # of deaths
        assists (int): # of assists
    Returns:
        boolean: True if int, False if not
    """
    if deaths == 0:
        return False
    weighted_kda = ((kills * 2) + assists) / (deaths * 2)
    # Guard clauses (De Morgan of the original combined condition).
    if weighted_kda >= 1.3 or deaths - kills <= 2 or deaths <= 3:
        return False
    # Low-death games with some contribution are excused.
    if deaths < 6 and kills + assists > 3:
        return False
    if deaths < 10 and kills > 2 and kills + assists > 7:
        return False
    return True
|
def sum_all(node):
    """
    Recursively sum all elements of a linked list.
    :param node: head node of the list (or None for an empty list)
    :return: int: sum of the list elements
    """
    if node is None:
        return 0
    return node.value + sum_all(node.next_node)
|
def _mr_gi(gi,feh,dgi=False,dfeh=False):
    """Ivezic+08 photometric distance: absolute magnitude M_r(g-i, [Fe/H]).

    With dgi=True returns the derivative of the M_r0(g-i) polynomial with
    respect to g-i; with dfeh=True the derivative of the metallicity
    correction with respect to [Fe/H]; otherwise M_r itself.
    NOTE(review): coefficients presumably from Ivezic et al. (2008) —
    verify against the paper before editing.
    """
    if dgi:
        # d/d(g-i) of the fifth-order polynomial below
        return 14.32-2.*12.97*gi+3.*6.127*gi**2.-4.*1.267*gi**3.\
            +5.*0.0967*gi**4.
    elif dfeh:
        # d/d[Fe/H] of the quadratic metallicity correction below
        return -1.11-0.36*feh
    else:
        # color polynomial plus metallicity correction
        mro= -5.06+14.32*gi-12.97*gi**2.+6.127*gi**3.-1.267*gi**4.\
            +0.0967*gi**5.
        dmr= 4.5-1.11*feh-0.18*feh**2.
        mr= mro+dmr
        return mr
|
def findIndex(f, seq):
    """Return the index of the first item in *seq* where f(item) is truthy,
    or None when no item matches."""
    for position, element in enumerate(seq):
        if f(element):
            return position
    return None
|
def indexMultiple(x, value):
    """
    Return every index in *x* whose element equals *value*.
    """
    return [index for index, element in enumerate(x) if element == value]
|
def all_none(centre_list):
    """
    Check whether every position in *centre_list* is None.
    :param centre_list: iterable of values to inspect
    :return: True when all elements are None (vacuously True when empty)
    """
    # Replaces the original hand-rolled 0/1 accumulator with the built-in
    # all(); behavior is unchanged, including the vacuous True for [].
    return all(element is None for element in centre_list)
|
def executeScriptToGetData():
    """Simulate some actions for the text tile example."""
    roles = [
        "Leader: 5",
        "Product Owner: 0",
        "Scrum Master: 3",
        "Developer: 0",
    ]
    return {"items": roles}
|
def leafnames(deep_dict):
    """
    Find keys of leaf items in a nested dict, using recursion.
    Parameters:
        deep_dict (dict): nested dictionary
    Returns:
        list: keys mapping to non-dict (leaf) values, in traversal order
    """
    keylist = []

    def walk(subtree):
        if not isinstance(subtree, dict):
            return
        for key, value in subtree.items():
            if not isinstance(value, dict):
                keylist.append(key)  # accumulate by side effect :(
            walk(value)

    walk(deep_dict)
    return keylist
|
def percent_uppercase(text):
    """Return (as a string) the fraction of alphabetical characters that are
    uppercase; "0" when the text contains no alphabetical characters.

    Based on findings from spam.csv that spam texts have higher uppercase
    alphabetical character counts (see: avg_uppercase_letters())."""
    letters = [ch for ch in text if ch.isalpha()]
    if not letters:
        # no alphabetical characters - avoid dividing by zero
        return "0"
    uppercase_total = sum(1 for ch in letters if ch.isupper())
    return str(float(uppercase_total) / float(len(letters)))
|
def choose_init(module):
    """
    Select an init system.

    Returns the name of an init system (upstart, sysvinit ...).
    Clearlinux currently ships only systemd, so that is always returned
    regardless of *module*.
    """
    return 'systemd'
|
def rec_pts2(rec=0, yds=0, tds=0, ppr=1):
    """
    Compute fantasy points from receiving stats.

    rec/yds/tds are the receptions, receiving yards, and touchdowns;
    ppr is the points awarded per reception.
    """
    points = yds * 0.1          # one point per ten receiving yards
    points += rec * ppr         # per-reception bonus
    points += tds * 6           # six points per touchdown
    return points
|
def is_in_top_level_ui(modname):
    """Return True for modules importable as 'from pyjamas.ui import modname';
    everything else must be imported as
    'from pyjamas.ui.modname import classname', where modname happens
    to be the same as classname.

    Fixes a typo in the listener list: 'KeboardListener' could never match
    because the pyjamas ui package ships 'KeyboardListener'.
    """
    top_level = {
        'Focus',
        'Event',
        'MouseListener',
        'KeyboardListener',  # was misspelled 'KeboardListener'
        'FocusListener',
    }
    if modname in top_level:
        return True
    # HasHorizontalAlignment / HasVerticalAlignment etc.
    return modname.startswith("Has") and modname.endswith("Alignment")
|
def format_verification_record(verification_record):
    """Project the given verification record onto its canonical fields
    and return the result as a new dictionary."""
    wanted = ("origin_id", "transfer_to", "verification_id", "sent")
    return {field: verification_record[field] for field in wanted}
|
def neighbour_squares(x, y, num_rows, num_cols):
    """
    Return all valid neighbour coordinates of the 0-based pair (x, y).

    num_rows, num_cols bound the board; neighbours are produced in the
    fixed offset order below.
    """
    deltas = ((-1, -1), (-1, 0), (-1, 1),
              ( 0, -1),          ( 0, 1),
              ( 1, -1), ( 1, 0), ( 1, 1))
    # NOTE(review): x is bounded by num_cols and y by num_rows here; the
    # old commented-out variant swapped them — verify against callers.
    return [(x + dx, y + dy)
            for dx, dy in deltas
            if 0 <= x + dx < num_cols and 0 <= y + dy < num_rows]
|
def create_get_settings_payload(context: str):
    """Create and return "getSettings" dictionary to send to the Plugin Manager.

    Bug fix: the event was "setSettings", which writes settings; the event
    that requests the persisted settings is "getSettings" (matching the
    function name and its documented purpose).

    Args:
        context (str): An opaque value identifying the instance's action you want to query.
    Returns:
        dict: Dictionary with payload to request saved data for an instance's context.
    """
    return {
        "event": "getSettings",
        "context": context,
    }
|
def reschedule(completed, remaining):
    """
    Determine which components in 'remaining' can be run based on the
    'completed' components.

    A component is runnable when either (a) it has an explicit
    ``node.after`` list and every id in it has already been visited, or
    (b) all layers in ``node.ins`` are already available. Components
    scheduled earlier in this same call extend the visited ids and the
    available layers, so later entries of 'remaining' may become runnable
    because of them — the order of 'remaining' matters.
    """
    # ids of everything already finished
    visited = [m.node.id for m in completed]
    available_layers = set()
    for v in completed:
        # layers produced by the finished components
        available_layers.update(set(v.node.out))
    to_run = []
    for m in remaining:
        flag = False
        if m.node.after:
            # explicit ordering constraint: all named predecessors done?
            flag = all(p in visited for p in m.node.after)
        else:
            # data constraint: every input layer must already exist
            flag = all(x in available_layers for x in m.node.ins)
        if flag:
            # mark as scheduled so later items in this pass can chain off it
            visited.append(m.node.id)
            available_layers.update(m.node.out)
            to_run.append(m)
    return to_run
|
def validate_same_rank(cards):
    """
    Check for three of a kind or four of a kind.

    :param cards: list of Card objects
    :return: True only when the hand has exactly 3 or 4 cards and all
        share the first card's rank
    """
    if len(cards) in (3, 4):
        target = cards[0].rank
        return all(c.rank == target for c in cards)
    return False
|
def getNextURL(links):
    """
    Helper for getAllPatients: extract the 'next' page URL so callers can
    loop over all resources.

    :param links: expected to be a list of link objects with .relation/.url
    :return: the url of the first link whose relation is 'next', else None
    """
    if type(links) is list:
        for entry in links:
            if entry.relation == 'next':
                return entry.url
    return None
|
def dowker_to_gauss(code):
    """
    Convert from Dowker-Thistlethwaite code to signed Gauss code.

    Crossings are numbered by half of their even Dowker label: odd
    positions contribute the negated half of the stored code entry,
    even positions contribute their own half carrying the sign recorded
    for that label.

    EXAMPLES::
        sage: from sage.knots.gauss_code import dowker_to_gauss
        sage: dowker_to_gauss([6,-12,2,8,-4,-10])
        [-3, 1, 6, -2, -1, 3, -4, 4, 2, -5, 5, -6]
        sage: dowker_to_gauss([-4,-6,-2])
        [2, -1, 3, -2, 1, -3]
    TESTS::
        sage: dowker_to_gauss([])
        []
    """
    # sign recorded for each even label, keyed by its absolute value
    signs = {abs(label): (1 if label > 0 else -1) for label in code}
    gauss = []
    for position in range(1, 2 * len(code) + 1):
        if position % 2:
            gauss.append(-(code[(position - 1) // 2] // 2))
        else:
            gauss.append(signs[position] * position // 2)
    return gauss
|
def v5_tail(iterable, n):
    """Return the last n items of the given iterable.

    Uses a bounded deque so only the last n items seen are kept in
    memory. This also fixes the previously documented n == 1 defect:
    the old ``items[-(n-1):]`` slice degenerated to ``items[0:]`` when
    n was 1, so the list grew without bound and the whole input was
    returned instead of just the final item.

    :param iterable: any iterable (one-shot iterators are fine)
    :param n: number of trailing items to keep; n <= 0 yields []
    :return: list of at most the last n items
    """
    from collections import deque
    if n <= 0:
        return []
    return list(deque(iterable, maxlen=n))
|
def count(xs):
    """
    Count the elements of an iterable/generator by exhausting it.
    """
    total = 0
    for _ in xs:
        total += 1
    return total
|
def delete_none(_dict):
    """
    Delete None values recursively from all of the dictionaries, tuples, lists, sets.

    Dicts are pruned IN PLACE (entries whose value — or key — is None are
    removed); lists/sets/tuples are rebuilt as new objects of the same
    type with None items dropped. The (possibly new) container is
    returned; non-container inputs come back unchanged.
    """
    if isinstance(_dict, dict):
        # snapshot the items so entries can be deleted while looping
        for key, value in list(_dict.items()):
            if isinstance(value, (list, dict, tuple, set)):
                # recurse first and reassign the cleaned nested container
                _dict[key] = delete_none(value)
            elif value is None or key is None:
                # drop the entry when either the key or the value is None
                del _dict[key]
    elif isinstance(_dict, (list, set, tuple)):
        # rebuild the same container type, skipping None items and
        # recursively cleaning the survivors
        _dict = type(_dict)(delete_none(item) for item in _dict if item is not None)
    return _dict
|
def splinter_firefox_profile_preferences():
    """Firefox profile preferences, grouped by concern."""
    caching = {
        "browser.cache.memory.enable": False,
        "browser.sessionhistory.max_total_viewers": 0,
    }
    networking = {
        "network.http.pipelining": True,
        "network.http.pipelining.maxrequests": 8,
    }
    startup = {
        "browser.startup.page": 0,
        "browser.startup.homepage": "about:blank",
        "startup.homepage_welcome_url": "about:blank",
        "startup.homepage_welcome_url.additional": "about:blank",
        "browser.startup.homepage_override.mstone": "ignore",
    }
    first_run = {
        "toolkit.telemetry.reportingpolicy.firstRun": False,
        "datareporting.healthreport.service.firstRun": False,
        "browser.cache.disk.smart_size.first_run": False,
    }
    media = {
        "media.gmp-gmpopenh264.enabled": False,  # Firefox hangs when the file is not found
    }
    preferences = {}
    for group in (caching, networking, startup, first_run, media):
        preferences.update(group)
    return preferences
|
def to_camel(snake_str: str) -> str:
    """
    Convert snake_case_string to camelCaseString.

    The first segment is lower-cased; each following segment is
    title-cased before being joined back together.
    """
    words = snake_str.split("_")
    tail = "".join(word.title() for word in words[1:])
    return words[0].lower() + tail
|
def sort_processor_counts(p_string):
    """Sort-key helper: numeric processor count parsed from strings
    like 'p16-...' (single-letter prefix, digits, optional '-suffix')."""
    head = p_string.partition('-')[0]
    # drop the one-character prefix, keep the digits
    return int(head[1:])
|
def dict_strip_quotes(dict_item: dict) -> dict:
    """
    Strips quote characters from dict values.

    Double quotes are stripped first, then single quotes — preserving the
    original two-pass ordering (so a value like '\\'"z"\\'' loses only its
    single quotes). The old implementation built two intermediate dicts;
    this does the same work in a single comprehension.

    :param dict_item: A dictionary whose values are strings.
    :return dict: A new dictionary with quotes stripped from the values.
    """
    return {key: value.strip('"').strip("'")
            for key, value in dict_item.items()}
|
def get_theoretical_onset_tuning_thickness(f_central):
    """
    Theoretical onset tuning thickness for a wavelet.

    Parameters
    ----------
    f_central : float
        wavelet central frequency in Hz

    Returns
    -------
    float
        the wavelet period (1 / f_central) expressed in milliseconds
    """
    period_seconds = 1 / f_central
    return period_seconds * 1000
|
def connector(string):
    """
    Split an expression at its top-level logical connector.

    Returns (left, operator, right); two-character operators start with
    '&', '|' or '-', while 'U' is a single character. Characters inside
    parentheses are ignored. Returns -1 when no connector is found.
    """
    depth = 0
    for pos, ch in enumerate(string):
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        elif depth == 0:
            if ch in ('&', '|', '-'):
                # two-character operator (e.g. '&&', '||', '->')
                return string[:pos], string[pos:pos + 2], string[pos + 2:]
            if ch == 'U':
                # single-character 'until' operator
                return string[:pos], string[pos:pos + 1], string[pos + 1:]
    return -1
|
def _make_argo_task(name, dependencies):
"""Generate an Argo Task spec
"""
task = {
'name': name,
'dependencies': dependencies,
'template': 'run-task',
'arguments': {
'parameters': [{
'name': 'task_name',
'value': name,
}]
}
}
return task
|
def joinTerms(terms, sep=', ', last=None, sort=False):
    """Return a string joining *terms* as reStructuredText ``:term:`` roles.

    When *last* is given, the final term is joined with *last* instead of
    *sep*; when *sort* is True the terms are sorted first.
    """
    items = list(terms)
    if sort:
        items.sort()
    tail = ''
    if last is not None:
        # pull the final term off and attach it with the 'last' separator
        tail = last + ':term:`' + items.pop() + '`'
    body = ('`' + sep + ':term:`').join(items)
    return ':term:`' + body + '`' + tail
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.