content
stringlengths 42
6.51k
|
|---|
def gql_project_version(fragment):
    """Build the GraphQL ``projectVersions`` query with *fragment* spliced in.

    :param fragment: GraphQL fragment selecting the fields to return
    :return: complete query string
    """
    template = '''
    query ($where: ProjectVersionWhere!, $first: PageSize!, $skip: Int!) {{
        data: projectVersions(where: $where, first: $first, skip: $skip) {{
            {0}
        }}
    }}
    '''
    return template.format(fragment)
|
def issubclass_(t1, t2):
    """Like builtin ``issubclass`` but returns False (instead of raising
    TypeError) when the first argument is not a class."""
    return isinstance(t1, type) and issubclass(t1, t2)
|
def empty_to_none(param_mode):
    """Normalise tuned-hyperparameter strings: an empty container literal
    (e.g. ``"{}"`` or ``"[]"``) becomes the string ``'None'`` for
    consistency; anything else is returned unchanged.

    :param param_mode: string holding a Python literal (or arbitrary text)
    :return: ``'None'`` when the literal is an empty container, else *param_mode*
    """
    # ast.literal_eval replaces the original eval(): it only parses
    # literals, so untrusted input can no longer execute code.
    import ast
    try:
        if len(ast.literal_eval(param_mode)) == 0:
            return 'None'
        return param_mode
    except (ValueError, SyntaxError, TypeError, MemoryError, RecursionError):
        # unparseable / non-sized input: pass through untouched
        return param_mode
|
def artists_to_mpd_format(artists):
    """Render track artists as a single MPD-friendly string.

    Artists are sorted by name; artists whose name is falsy are dropped.

    :param artists: iterable of artist objects exposing a ``name`` attribute
    :rtype: string
    """
    ordered = sorted(artists, key=lambda artist: artist.name)
    return ', '.join(artist.name for artist in ordered if artist.name)
|
def clean_headers(headers):
    """Return a copy of *headers* with the Authorization value masked.

    Only the last four characters of the Authorization header survive,
    prefixed with ``****``; all other headers pass through untouched.

    :param headers: dict of HTTP headers
    :returns: sanitized copy of the headers
    :rtype: dict
    """
    sanitized = headers.copy()
    token = sanitized.get("Authorization")
    if token:
        sanitized["Authorization"] = "****" + token[-4:]
    return sanitized
|
def increment(dictionary, k1, k2):
    """Increment both ``dictionary[k1][k2]`` and the per-key counter kept
    under ``dictionary[k1][0]``, creating missing slots on demand.

    :param dictionary: dict of dicts of ints (mutated in place)
    :param k1: first key
    :param k2: second key
    :return: the same (mutated) dictionary
    """
    inner = dictionary.setdefault(k1, {})
    inner.setdefault(0, 0)
    inner.setdefault(k2, 0)
    inner[0] += 1   # total observations for k1
    inner[k2] += 1  # observations for the (k1, k2) pair
    return dictionary
|
def split_columns(x):
    """Parse an ``"a,b"`` pair of 1-based column numbers into 0-based indexes.

    Called by ArgumentParser as a type callback.

    :param x: two comma-separated column numbers, e.g. ``"1,2"``
    :return: tuple of the two 0-based column indexes
    :raises ValueError: when *x* is not a valid pair
    """
    message = """
    Cannot understand the pair of columns you want to print.
    Columns are identified by numbers starting from 1. Each pair
    of columns is identified by two numbers separated by a comma
    without a space 1,2 1,5 6,4\n\n\n"""
    try:
        first, second = (int(piece) for piece in x.split(","))
        # command line counts columns from 1; internally we count from 0
        return first - 1, second - 1
    except ValueError as ex:
        print(message, ex)
        raise ex
|
def str_snake_to_camel(term):
    """Convert a string from snake_case to camelCase.

    The first segment keeps its capitalisation; every following segment has
    its first letter upper-cased.  Unlike the previous version, empty
    segments (``"a__b"``, trailing underscores) no longer raise IndexError.

    Parameters
    ----------
    term : string

    Returns
    -------
    string
    """
    head, *rest = term.split("_")
    # seg[:1].upper() is safe for empty segments where seg[0] would raise.
    return head + "".join(seg[:1].upper() + seg[1:] for seg in rest)
|
def get_union_over_keys(map_to_set):
    """Union every set stored as a value of *map_to_set* into one set.

    :param map_to_set: mapping whose range values are sets
    :return: union of all value sets (empty set for an empty mapping)
    """
    result = set()
    for value_set in map_to_set.values():
        result = result | value_set
    return result
|
def convert_format(fformat):
    """Map a free-form file-format description to a ``(fid, term)`` pair.

    Substring checks run in a fixed priority order; the UM format is
    matched by exact equality only.  Unrecognised formats map to 'other'.

    :param fformat: format description (any case)
    :return: tuple of (format id, terminology name)
    """
    fformat = fformat.lower()
    # (matcher, fid, term) triples, tried in order.
    rules = (
        (lambda s: 'netcdf' in s, 'netcdf', 'netcdf'),
        (lambda s: 'grib' in s, 'grib', 'grib'),
        (lambda s: 'hdf' in s, 'hdf', 'hdf'),
        (lambda s: s == 'um', 'binary', 'other binary'),
        (lambda s: 'geotiff' in s, 'geotiff', 'geotiff'),
        (lambda s: 'mat' in s, 'matlab', 'matlab'),
    )
    for matches, fid, term in rules:
        if matches(fformat):
            return fid, term
    return 'other', 'other'
|
def _maven_artifact(group, artifact, version, packaging = None, classifier = None, override_license_types = None, exclusions = None, neverlink = None):
    """Build the data map describing a Maven artifact's coordinates.

    Args:
      group: *Required* Maven coordinate group (ex: `"com.google.guava"`)
      artifact: *Required* Maven coordinate artifact name (ex: `"guava"`)
      version: *Required* Maven coordinate version (ex: `"27.0-jre"`)
      packaging: *Optional* Maven packaging specifier (ex: `"jar"`)
      classifier: *Optional* Maven artifact classifier (ex: `"javadoc"`)
      override_license_types: *Optional* Array of Bazel license type strings
        overriding autodetection (ex: `["notify"]`)
      exclusions: *Optional* Array of exclusion objects
        (ex: `maven.exclusion("junit", "junit")`)
      neverlink: *Optional* Whether the artifact joins the runtime classpath.

    Returns:
      Dict with "group"/"artifact"/"version" always present and each
      optional coordinate included only when it was explicitly provided.
    """
    result = {"group": group, "artifact": artifact, "version": version}
    # NOTE(review): `!= None` rather than `is not None` is kept — this looks
    # like Bazel/Starlark code, where `is` does not exist.
    optional_fields = [
        ("packaging", packaging),
        ("classifier", classifier),
        ("override_license_types", override_license_types),
        ("exclusions", exclusions),
        ("neverlink", neverlink),
    ]
    for key, value in optional_fields:
        if value != None:
            result[key] = value
    return result
|
def nextGap(gap):
    """Shrink *gap* by the comb-sort factor 10/13, never going below 1."""
    return max(1, (gap * 10) // 13)
|
def api_url(lang):
    """Return the Wikipedia API endpoint for language code *lang*."""
    return f"https://{lang}.wikipedia.org/w/api.php"
|
def combine_args(supported_args, *arg_sets):
    """ Combine args with priority from first in the list to last in the list

    A later set only supplies a value for a key whose current value is None
    or still equals the declared default — so explicit values from earlier
    sets always win.

    NOTE: mutates and returns arg_sets[0]; callers relying on the first set
    being updated in place depend on that.

    :param supported_args: iterable of dicts with 'name' and 'arg_params'
        (the params dict may carry a 'default' entry)
    :param arg_sets: one or more dicts (or None) in priority order
    :return: arg_sets[0], updated in place
    """
    base_dict = arg_sets[0]
    # map each supported arg's name to its parameter spec (holds the default)
    sup_arg_dict = {arg['name']: arg['arg_params'] for arg in supported_args}
    for i in range(1,len(arg_sets)):
        this_dict = arg_sets[i]
        if this_dict is None:
            continue
        # iterate the keys of base_dict only: keys unique to this_dict are
        # intentionally ignored
        for key, value in base_dict.items():
            supported_arg = sup_arg_dict[key]
            if key in this_dict:
                # fill the slot only if it is unset or still at its default;
                # mutating values during items() iteration is safe because
                # the key set never changes
                if value is None or ('default' in supported_arg and value == supported_arg['default']):
                    base_dict[key] = this_dict[key]
    return base_dict
|
def restrict_by(payload, pkey, dest_table=None, tkey=None):
    """Deduplicate *payload* on *pkey* and optionally drop already-stored rows.

    Entries whose *pkey* value is None are discarded; when several entries
    share a key, the last one wins.  If *dest_table* is given, entries whose
    key already appears in its *tkey* column are removed as well.

    :param payload: list of dicts
    :param pkey: payload key used for deduplication
    :param dest_table: optional table object exposing ``fetch(tkey)``
    :param tkey: column name fetched from *dest_table*
    :return: filtered list of dicts
    """
    deduped = {entry[pkey]: entry for entry in payload if entry[pkey] is not None}
    unique_entries = list(deduped.values())
    if not dest_table:
        return unique_entries
    existing_ids = dest_table.fetch(tkey).tolist()
    return [entry for entry in unique_entries if entry[pkey] not in existing_ids]
|
def clamp(x, l, u):
    """Clamp *x* into the closed interval [l, u].

    >>> clamp(5, 1, 10)
    5
    >>> clamp(-1, 1, 10)
    1
    >>> clamp(12, 1, 10)
    10
    """
    if x < l:
        return l
    if x > u:
        return u
    return x
|
def _rescale_param(param, value, dnu):
    """
    Rescales the resolution value if a dnu parameter is chosen.
    Parameters
    ----------
    param : str
        Parameter name.
    value : float, array
        Value(s) to be rescaled
    dnu : float
        Solar value of dnu given in input.
    Returns
    -------
    value : float, array
        The rescaled value(s)
    """
    # Only run for dnu params, do nothing for other parameters
    if param.startswith("dnu") and param != "dnufit":
        print(
            "Note: {0} converted to solar units from {1} muHz".format(param, value),
            "assuming dnusun = {0:.2f} muHz".format(dnu),
        )
        # NOTE: /= mutates numpy arrays in place — a caller passing an array
        # will see its argument changed as well.
        value /= dnu
    return value
|
def is_property(k: str) -> bool:
    """Return True when *k* is a dunder-style property key (``__name__``)."""
    has_prefix = k.startswith("__")
    has_suffix = k.endswith("__")
    return has_prefix and has_suffix
|
def get_compatible_version(version):
    """Return the version usable in a ``~={compatible_version}`` specifier.

    For an ``0.y.z`` version the full string is returned; otherwise only
    the leading ``x.y`` is kept.

    :arg str version: version string such as ``"1.2.3"``
    :return: the compatible version
    :rtype: str
    """
    if not version.startswith("0."):
        version = ".".join(version.split(".")[:2])
    return version
|
def english_score(input_bytes: bytes):
    """Score how English-like *input_bytes* looks (higher = more likely).

    The score is the count of bytes that are the space character or one of
    the most common English letters ('ETAOIN SHRDLU') in either case.
    """
    # ASCII bytes for space plus upper/lower-case ETAOIN SHRDLU letters;
    # iterating a bytes literal yields the same ints the original listed.
    point_worthy = frozenset(b" ADEHILNORSTUadehilnorstu")
    return sum(1 for byte in input_bytes if byte in point_worthy)
|
def _subtract(groupA, groupB):
    """Return the elements of *groupA* not present in *groupB* (order kept)."""
    return [element for element in groupA if element not in groupB]
|
def split_strip(input, delimiter=','):
    """
    Split *input* on *delimiter* and return the stripped, non-empty pieces.

    :param input: string to split (name kept for API compatibility even
        though it shadows the builtin)
    :param delimiter: separator, defaults to ``','``
    :return: list of non-empty stripped strings
    """
    stripped = (piece.strip() for piece in input.split(delimiter))
    return [piece for piece in stripped if piece]
|
def sfdr(iip3=0, mds=0):
    """
    Spurious-free dynamic range computed from IIP3 and the noise floor.

    :param iip3: input-referred 3rd-order intercept point in dBm
    :param mds: minimum detectable signal in dBm
    :return: SFDR, two thirds of the IIP3-to-MDS span
    """
    span = iip3 - mds
    return 2 / 3 * span
|
def writePageHeader(functionName):
    """
    Build the man-page .TH header line for *functionName*.

    The line has the form:
    .TH [name of program] [section number] [center footer] [left footer]
    """
    return (f'.TH {functionName.upper()} 3 '
            f'"Open Source Software Solutions, Inc." '
            f'"OpenSHMEM Library Documentation"\n')
|
def toggle_modal_sso(n1, n2, is_open):
    """Dash callback: flip the modal's open state when either trigger fired,
    otherwise leave it unchanged."""
    if not (n1 or n2):
        return is_open
    return not is_open
|
def get_new_board(dimension):
    """
    Build an empty *dimension* x *dimension* board.

    Every cell holds a single-space string, e.g. for dimension 3:
    [[" ", " ", " "], [" ", " ", " "], [" ", " ", " "]]

    :param dimension: integer side length of the square board
    :return: list of rows, each a list of ``" "`` strings
    """
    return [[" " for _ in range(dimension)] for _ in range(dimension)]
|
def return_words(lst, word_set):
    """
    Filter *lst* down to entries found in *word_set*.

    A word matches either as-is or capitalised, because some words are
    capitalised in the word set.

    @type lst: [str]
    @type word_set: set(str)
    @rtype: [str]
    """
    return [
        word for word in lst
        if word in word_set or word.capitalize() in word_set
    ]
|
def era_minus(data, lg_era):
    """
    ERA- : the pitcher's ERA expressed as a percentage of league ERA.

    :param data: mapping with an ``"era"`` entry
    :param lg_era: league-average ERA
    :returns: ERA / lgERA * 100
    """
    ratio = data["era"] / lg_era
    return ratio * 100
|
def fdr_cutoff(entries, cutoff):
    """Filter entries at a given false-discovery-rate cutoff.

    *entries* is a list of dicts of the form
    ``{'score':, 'label':, 'peptide':}`` where label is -1 for decoy and 1
    for target.  Entries are sorted by descending score, reduced to the
    best-scoring entry per peptide, and the longest prefix whose
    decoy/target ratio stays within *cutoff* is kept.

    :param entries: list of entry dicts
    :param cutoff: maximum acceptable FDR
    :return: target entries of the qualifying prefix ([] if none qualifies)
    """
    # sort by score, best first
    sorted_entries = sorted(entries, key=lambda x: float(x['score']), reverse=True)
    # Keep only the best-scoring entry per peptide.  A set makes the
    # membership test O(1) instead of the former O(n) list scan.
    seen_peptides = set()
    unique_peptide_entries = []
    for entry in sorted_entries:
        if entry['peptide'] not in seen_peptides:
            seen_peptides.add(entry['peptide'])
            unique_peptide_entries.append(entry)
    num_targets = 0
    num_decoys = 0
    cutoff_index = -1
    for i, entry in enumerate(unique_peptide_entries):
        if entry['label'] == -1:
            num_decoys += 1
        elif entry['label'] == 1:
            num_targets += 1
        # FDR is decoys per target; with no targets yet, treat it as 1.0
        fdr = 1.0 if num_targets == 0 else 1.0 * num_decoys / num_targets
        if fdr <= cutoff:
            cutoff_index = i
    if cutoff_index == -1:
        return []
    return [e for e in unique_peptide_entries[:cutoff_index + 1] if e['label'] == 1]
|
def count_disordered(arr, size):
    """Counts the number of items that are out of the expected
    order (monotonous increase) in the given list.

    arr is assumed to hold distinct values drawn from range(size) —
    TODO confirm against callers; duplicate values would distort the
    checked-list bookkeeping.
    """
    counter = 0
    state = {
        # the smallest value of range(size) that actually occurs in arr;
        # raises StopIteration if arr shares nothing with range(size)
        "expected": next(item for item in range(size) if item in arr),
        # values of arr already consumed
        "checked": []
    }
    def advance_state():
        # Move "expected" to the next value not yet consumed that either
        # occurs in arr or lies past `size` (acts as an overflow sentinel).
        state["expected"] += 1
        while True:
            in_arr = state["expected"] in arr
            is_overflow = state["expected"] > size
            not_checked = state["expected"] not in state["checked"]
            if not_checked and (in_arr or is_overflow):
                return
            state["expected"] += 1
    for val in arr:
        if val == state["expected"]:
            advance_state()
        else:
            # value arrived before its turn -> counted as disordered
            counter += 1
        state["checked"].append(val)
    return counter
|
def sanitize_win_path(winpath):
    """
    Remove illegal Windows path characters, replacing each of ``< > : | ? *``
    with an underscore.

    Both str and bytes paths are handled; any other type is returned
    unchanged.

    :param winpath: path to sanitize (str or bytes)
    :return: sanitized path of the same type
    """
    intab = "<>:|?*"
    if isinstance(winpath, str):
        winpath = winpath.translate({ord(c): "_" for c in intab})
    elif isinstance(winpath, bytes):
        # The original second branch re-tested `str` and was unreachable
        # dead code; it clearly targeted the bytes case (a Python 2
        # str/unicode leftover), restored here.
        outtab = b"_" * len(intab)
        winpath = winpath.translate(bytes.maketrans(intab.encode(), outtab))
    return winpath
|
def scalar_prod(n, a):
    """
    Multiply every element of vector *a* by the scalar *n*.

    Generalised from the original fixed 3-element version: sequences of
    any length now work, and 3-element inputs behave exactly as before.

    :param n: scalar multiplier
    :param a: sequence of numbers
    :return: list of the scaled components
    """
    return [n * component for component in a]
|
def Redact(value, from_nth_char=5):
    """Mask *value* beyond its first *from_nth_char* characters with ``*``.

    Values shorter than the threshold come back unchanged.
    """
    visible = value[:from_nth_char]
    masked_length = len(value) - from_nth_char
    return visible + '*' * masked_length
|
def __material_cf(d):
    """ Fixed length data fields for computer files.

    Splits an indexable record into its 8 positional fields:
    (0:4, 4, 5, 6:8, 8, 9, 10, 11:).
    """
    return (
        d[0:4],
        d[4],
        d[5],
        d[6:8],
        d[8],
        d[9],
        d[10],
        d[11:],
    )
|
def searchkit_aggs(aggs):
    """Translate Invenio facets configuration for React-SearchKit JS.

    :param aggs: dict mapping agg name -> Invenio facet configuration
    :returns: list of dicts with title/aggName/field keys
    """
    formatted = []
    for name, config in aggs.items():
        formatted.append({
            "title": name.capitalize(),
            "aggName": name,
            "field": config["terms"]["field"],
        })
    return formatted
|
def and_(*children):
    """Select devices that match all of the given selectors.

    >>> and_(tag('sports'), tag('business'))
    {'and': [{'tag': 'sports'}, {'tag': 'business'}]}
    """
    return {'and': list(children)}
|
def factorial_recursive(n):
    """Compute n! recursively.

    Negative input now raises ValueError; the previous version silently
    fell off the end and returned None for it.

    >>> factorial_recursive(3)
    6
    >>> factorial_recursive(5)
    120
    >>> factorial_recursive(0)
    1
    >>> factorial_recursive(1)
    1
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if n <= 1:
        return 1  # base case: 0! == 1! == 1
    return n * factorial_recursive(n - 1)
|
def pixel2coord(x, y, a, b, xoff, yoff, d, e):
    """Affine transform from pixel (x, y) to global coordinates.

    :return: (xp, yp) with xp = a*x + b*y + xoff and yp = d*x + e*y + yoff
    """
    return (a * x + b * y + xoff, d * x + e * y + yoff)
|
def reverse_complement_sequence(sequence, complementary_base_dict):
    """
    Finds the reverse complement of a sequence.

    Parameters
    ----------
    sequence : str
        The sequence to reverse complement.
    complementary_base_dict : dict
        Maps each base (str) to its complementary base (str).

    Returns
    -------
    str
        The reverse complement of the input sequence.
    """
    return ''.join(complementary_base_dict[base] for base in reversed(sequence))
|
def time_delta(date1, date2):
    """
    Difference in seconds between two "H:M:S" time stamps.

    It is assumed that date1 is later than date2.
    """
    h1, m1, s1 = (int(piece) for piece in date1.split(":"))
    h2, m2, s2 = (int(piece) for piece in date2.split(":"))
    return (h1 - h2) * 3600 + (m1 - m2) * 60 + (s1 - s2)
|
def es_vocal(letra):
    """
    Determine whether *letra* is a single vowel character (either case).

    >>> es_vocal('a')
    True
    >>> es_vocal('x')
    False

    :param letra: string expected to hold exactly one character
    :return: True when it is a vowel, False otherwise
    """
    if len(letra) != 1:
        return False
    return letra in 'aeiouAEIOU'
|
def merge(*objs):
    """Recursively merges objects together.

    Dicts are merged key-by-key.  Note the asymmetry: where the existing
    value at a position is a non-dict, it survives (earlier objects win);
    but a non-dict value coming from a later object overwrites the slot.
    None arguments are skipped; an all-None/empty input yields {}.
    """
    def _merge(a, b):
        if a is None:
            a = {}
        elif not isinstance(a, dict):
            # existing non-dict value wins over whatever b would contribute
            return a
        else:
            # shallow copy so the caller's dict is not mutated
            a = dict(a)
        for key, value in b.items():
            if isinstance(value, dict):
                # recurse; `or {}` treats a falsy existing value as empty,
                # so e.g. an existing 0 or '' is replaced by the merged dict
                value = _merge(a.get(key) or {}, value)
            # non-dict values from b overwrite unconditionally
            a[key] = value
        return a
    rv = None
    for obj in objs:
        if obj is not None:
            rv = _merge(rv, obj)
    # normalise an empty/never-set result to an empty dict
    return rv or {}
|
def optimize_frame_access(list_data, ra_n_times_slower=40):
    """
    Decide sequential vs random access for frame-ordered nodes.
    (originally implemented by Simon Mandlik)

    Nodes (objects with a ``frame_`` attribute) are sorted by frame; for
    each one a tuple ``(node, ra_access, stay_on_same_frame)`` is produced.
    Random access is chosen when the gap from the previous frame is at most
    *ra_n_times_slower* frames.

    :param list_data: iterable of nodes, must have a ``frame_`` attribute
    :param ra_n_times_slower: frame-gap threshold for random access
    :return: list of (node, ra_access [bool], stay_on_same_frame [bool])
    """
    result = []
    prev_frame = 0
    # Iterating the sorted list replaces the former pop(0) loop, which was
    # O(n^2) because every pop shifted the whole remaining list.
    for node in sorted(list_data, key=lambda item: item.frame_):
        frame = node.frame_
        same_frame = frame == prev_frame
        use_random_access = (frame - prev_frame) <= ra_n_times_slower
        result.append((node, use_random_access, same_frame))
        prev_frame = frame
    return result
|
def weighted_position_ranking(top, ranking, j):
    """
    Weight of image *j* within the first *top* entries of *ranking*.

    The weight drops linearly with position: position 0 weighs *top*,
    position top-1 weighs 1, and an image outside the top weighs 0.

    :param top: number of ranking positions considered
    :param ranking: ranked sequence of image ids (for image q)
    :param j: image whose weight is requested
    :return: the weight of image j
    """
    for position in range(top):
        if ranking[position] == j:
            return top - position
    return 0
|
def get_responses(link):
    """Return the responses documented via the @responds decorator.

    When no documentation exists an empty dict comes back rather than a
    default, which better represents "not formally documented".
    """
    return getattr(link, '_responses', {})
|
def sum_all_to(num: int) -> int:
    """ Sum of all integers up to and including *num* (Gauss closed form). """
    return (num * (num + 1)) // 2
|
def _compute_size_by_dict(indices, idx_dict):
    """Copied from _compute_size_by_dict in numpy/core/einsumfunc.py

    Computes the product of the sizes of *indices* looked up in *idx_dict*.

    Parameters
    ----------
    indices : iterable
        Indices to base the product on.
    idx_dict : dictionary
        Dictionary of index sizes

    Returns
    -------
    ret : int
        The resulting product (1 for empty *indices*).

    Examples
    --------
    >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
    90
    """
    product = 1
    for index in indices:
        product *= idx_dict[index]
    return product
|
def split_by_punctuation(text, puncs):
    """Split *text* on the first matching punctuation mark from *puncs*.

    e.g. "hello, world" with [','] -> ['hello', ' world'].  When none of
    the punctuation marks occurs, the whole text is returned as a
    one-element list.

    Args:
        text (str): text to split
        puncs (list): punctuation candidates, tried in order

    Returns:
        list: list with split text
    """
    for punc in puncs:
        if punc in text:
            return text.split(punc)
    return [text]
|
def timestamp(x, year):
    """Build a ``YYYY-MM-DD`` timestamp for the first day of month *x*.

    The month is now zero-padded, so single-digit months produce a valid
    ISO date (the original emitted e.g. ``2020-3-01``).

    :param x: month number (int, or numeric string)
    :param year: year
    :return: formatted date string
    """
    return f"{year}-{int(x):02d}-01"
|
def calc_baseflow(satStore, k_sz):
    """ Calculate baseflow from the saturated zone (linear reservoir).

    Parameters
    ----------
    satStore : int or float
        Storage in the saturated zone [mm]
    k_sz : float
        Runoff coefficient for the saturated zone [day^-1]

    Returns
    -------
    baseflow : float
        Baseflow from the saturated zone [mm day^-1]
    """
    return satStore * k_sz
|
def pre_process(url):
    """
    Strip the scheme prefix and any trailing slash from a url.

    :param url: original url read from file, e.g. https://www.shore-lines.co.uk/
    :return: processed url, e.g. www.shore-lines.co.uk
    """
    # drop the http:// or https:// scheme
    if url.startswith('http://'):
        url = url[7:]
    elif url.startswith('https://'):
        url = url[8:]
    # drop trailing '/' only; the original used strip('/'), which also
    # removed *leading* slashes and could eat part of a malformed url
    return url.rstrip('/')
|
def min_max_rescale(data, data_min, data_max):
    """Linearly rescale *data* into [0, 1] given its original range.

    :param data: value(s) to rescale (scalar or numpy array)
    :param data_min: minimum of the original range
    :param data_max: maximum of the original range
    :return: (data - data_min) / (data_max - data_min)
    """
    span = data_max - data_min
    return (data - data_min) / span
|
def escape_token(token, alphabet):
    """Escape *token* for safe round-tripping.

    Backslashes and underscores are escaped first; then every character
    outside *alphabet* (and every newline) is replaced by a numeric
    escape of the form backslash + ord + ';'.  A trailing '_' terminator
    is appended.
    """
    token = token.replace(u'\\', u'\\\\').replace(u'_', u'\\u')
    pieces = []
    for ch in token:
        if ch in alphabet and ch != u'\n':
            pieces.append(ch)
        else:
            pieces.append(r'\%d;' % ord(ch))
    return u''.join(pieces) + '_'
|
def attributes_pairs(attributes, prefix='', medfix=' - ', suffix=''):
    """
    Names for the lower-triangular (diagonal included) pairs of attributes.

    Convenient for labelling the elements of a flattened mixing matrix.

    :param attributes: sequence of attribute names
    :param prefix: text placed before each pair
    :param medfix: separator between the two attribute names
    :param suffix: text placed after each pair
    :return: list of "prefix A medfix B suffix" strings for all (i, j), j <= i
    """
    return [
        prefix + attributes[i] + medfix + attributes[j] + suffix
        for i in range(len(attributes))
        for j in range(i + 1)
    ]
|
def status_for_results(results):
    """Given a list of (output, ok) pairs, return the trailing ok status
    of each pair, in order."""
    statuses = []
    for pair in results:
        statuses.append(pair[-1])
    return statuses
|
def has_file_allowed_extension(filename, extensions):
    """Checks if a file has an allowed extension.

    The filename is lower-cased first, so the check is case-insensitive on
    the filename side (extensions are expected lowercase).

    Args:
        filename (string): path to a file
        extensions (iterable of string): allowed suffixes

    Returns:
        bool: True if the filename ends with a known extension
    """
    # str.endswith accepts a tuple of suffixes, replacing the former
    # per-extension generator scan with a single C-level call.
    return filename.lower().endswith(tuple(extensions))
|
def transformNull(requestContext, seriesList, default=0):
    """
    Takes a metric or wild card seriesList and an optional value
    to transform Nulls to. Default is 0. This method compliments
    drawNullAsZero flag in graphical mode but also works in text only
    mode.
    Example:
    .. code-block:: none
      &target=transformNull(webapp.pages.*.views,-1)
    This would take any page that didn't have values and supply negative 1 as a default.
    Any other numeric value may be used as well.
    """
    def transform(v):
        # substitute the default for missing datapoints
        if v is None: return default
        else: return v
    for series in seriesList:
        series.name = "transformNull(%s,%g)" % (series.name, default)
        series.pathExpression = series.name
        # Replace the series' datapoints in place: build the transformed
        # values, append them after the originals, then delete the original
        # leading slice.  extend+del keeps the same list-like series object
        # (and therefore its metadata/attributes) alive for the caller.
        values = [transform(v) for v in series]
        series.extend(values)
        del series[:len(values)]
    return seriesList
|
def _open_flag_to_letter(open_flag):
    """Map an open() syscall flag (as reported by ltrace/strace) to "R" or
    "W" for read or write access.  Used to analyse dependencies.

    Unknown flags raise an Exception.
    """
    if open_flag is None:
        return "R"
    if open_flag.startswith("????"):
        return "R"
    # ltrace just displays the integer value of the flag.
    dict_flag_to_letter = {
        '0x80000': "R",  # O_RDONLY|O_CLOEXEC
        '0': "R",        # O_RDONLY
        '0x100': "R",    # O_CREAT
        '0x241': "W",    # O_EXCL | O_WRONLY | ??
        '0x242': "W",    # O_EXCL | O_RDWR | ??
        '0x441': "W",    # O_NOCTTY | O_WRONLY | ??
        '0x802': "W",    # O_EXCL | O_RDWR | ??
        '0xc2': "W",     # O_RDWR | ??
    }
    letter = dict_flag_to_letter.get(open_flag)
    if letter is not None:
        return letter
    # strace translates the integer flag value into symbolic constants.
    for token, access in (("O_RDONLY", "R"), ("O_RDWR", "W"), ("O_WRONLY", "W")):
        if open_flag.find(token) >= 0:
            return access
    raise Exception("Unexpected open flag:" + open_flag)
|
def check_length(message):
    """Check that the commit subject line is at most 72 characters.

    A message with no lines at all passes; the previous version raised
    IndexError on an empty string.

    :param message: full commit message
    :return: True when the first line fits, False otherwise
    """
    lines = message.splitlines()
    return not lines or len(lines[0]) <= 72
|
def pivotize(m):
    """Creates the row-permutation (pivoting) matrix for *m*.

    For every column, the row holding the largest absolute value on or
    below the diagonal is swapped into place within an identity matrix
    (partial pivoting for LU decomposition).

    :param m: square matrix as a list of lists
    :return: the pivoting matrix as a list of lists of floats
    """
    n = len(m)
    identity = [[1.0 if col == row else 0.0 for col in range(n)] for row in range(n)]
    for col in range(n):
        pivot_row = max(range(col, n), key=lambda r: abs(m[r][col]))
        if pivot_row != col:
            identity[col], identity[pivot_row] = identity[pivot_row], identity[col]
    return identity
|
def is_mastercard(card_num):
    """Return True when the first two digits fall in the Mastercard
    51-55 range.

    :param card_num: card number as a string of digits
    """
    prefix = int(card_num[:2])
    return 51 <= prefix <= 55
|
def intersect(list1, list2):
    """
    Elements that exist in both input lists (duplicates removed,
    order unspecified).

    Args:
        list1 (list): input list.
        list2 (list): input list.

    Returns:
        list: intersected list.
    """
    return list(set(list1) & set(list2))
|
def valid_bins(num: int) -> bool:
    """Return True when *num* is a legal bin count (2..1000 inclusive)."""
    return 2 <= num <= 1000
|
def iam_policy_to_dict(bindings):
    """
    Convert GCP-API-format IAM policy bindings to a ``{role: set(members)}``
    dict so the policy can be easily updated.
    """
    return {binding['role']: set(binding['members']) for binding in bindings}
|
def _update_ZS(stored_dict,this_dict) -> dict:
    """Updates stats dictionary with values from a new window result

    Per-zone 'value' entries are merged as a weighted mean, each side
    weighted by its share of visible arable pixels.  NOTE: mutates and
    returns stored_dict (out_dict is an alias, not a copy).

    Parameters
    ----------
    stored_dict:dict
        Dictionary to be updated with new data
    this_dict:dict
        New data with which to update stored_dict

    Returns
    -------
    dict
        stored_dict, updated in place with the merged statistics
    """
    out_dict = stored_dict
    # loop over admin zones in this_dict
    for k in this_dict.keys():
        this_info = this_dict[k]
        try:
            stored_info = stored_dict[k]
        except KeyError: # if stored_dict has no info for zone k (new zone in this window), set it equal to the info from this_dict
            out_dict[k] = this_info
            continue
        # calculate number of visible arable pixels for both dicts by multiplying arable_pixels with percent_arable
        arable_visible_stored = (stored_info["arable_pixels"] * stored_info["percent_arable"] / 100.0)
        arable_visible_this = (this_info["arable_pixels"] * this_info["percent_arable"] / 100.0)
        try:
            # weight of stored_dict value is the ratio of its visible arable pixels to the total number of visible arable pixels
            stored_weight = arable_visible_stored / (arable_visible_stored + arable_visible_this)
        except ZeroDivisionError:
            # if no visible pixels at all, weight everything at 0
            stored_weight = 0
        try:
            # weight of this_dict value is the ratio of its visible arable pixels to the total number of visible arable pixels
            this_weight = arable_visible_this / (arable_visible_this + arable_visible_stored)
        except ZeroDivisionError:
            # if the total visible arable pixels are 0, everything gets weight 0
            this_weight = 0
        ## weighted mean value
        value = (stored_info['value'] * stored_weight) + (this_info['value'] * this_weight)
        ## sum of arable pixels
        arable_pixels = stored_info['arable_pixels'] + this_info['arable_pixels']
        ## directly recalculate total percent arable from sum of arable_visible divided by arable_pixels
        ## NOTE: raises ZeroDivisionError when both windows report 0 arable
        ## pixels — presumably never happens for zones that appear in both;
        ## TODO confirm with the callers
        percent_arable = ((arable_visible_stored + arable_visible_this) / arable_pixels) * 100
        #percent_arable = (stored_info['percent_arable'] * stored_weight) + (this_info['percent_arable'] * this_weight)
        out_dict[k] = {'value':value,'arable_pixels':arable_pixels,'percent_arable':percent_arable}
    return out_dict
|
def compute_confidence_intervals(param_estimate, std_dev, critical_value):
    """Compute a symmetric confidence interval (ci) around an estimate.

    Note: the usual distributional assumptions apply.

    Parameters
    ----------
    param_estimate: float
        Parameter estimate for which the ci should be computed.
    std_dev: float
        Standard deviation of the parameter estimate.
    critical_value: float
        Critical value of the t distribution, e.g. 1.96 for the 95%-ci.

    Returns
    -------
    confidence_interval_dict: dict
        Lower (upper) bound of the ci accessible via 'lower_bound'
        ('upper_bound').
    """
    margin = critical_value * std_dev
    return {
        "lower_bound": param_estimate - margin,
        "upper_bound": param_estimate + margin,
    }
|
def map_diameter(c: int) -> float:
    """ Compute the diameter, (c + 1)(c - 1) / 3.

    The original multiply-by-one-third evaluation order is preserved so
    float rounding is bit-identical to the previous implementation.
    """
    one_third = 1. / 3.
    return one_third * (c + 1) * (c - 1)
|
def find_dups(mappings):
    """
    Searches the mappings for any duplicates in the source or destination paths
    An error is created for each line that is not unique.
    :param mappings: dict {(file#,line#):("old_path","int_path","ext_path",...)}
    :return: errors list [(file#, line#, "Issue")]
    """
    errors = []
    seen_sources = set()
    seen_destinations = set()
    src_dups = set()
    dest_dups = set()
    # pass 1: collect source paths seen more than once (None sources skipped)
    for mapping in mappings.values():
        source = mapping[0]
        if source is None:
            continue
        if source not in seen_sources:
            seen_sources.add(source)
        else:
            src_dups.add(source)
    # pass 2: collect duplicated destinations; the effective destination is
    # the first non-None of int_path, ext_path, ext2_path
    for mapping in mappings.values():
        old_path, int_path, ext_path, ext2_path, status = mapping[:5]
        destination = int_path if int_path is not None else ext_path
        if destination is None:
            destination = ext2_path
        if destination is None:
            continue
        # rows already flagged duplicate/similar are exempt from the check
        if status == "duplicate" or status == "similar":
            continue
        if destination not in seen_destinations:
            seen_destinations.add(destination)
        else:
            dest_dups.add(destination)
    # pass 3: emit one error per offending path on each line; a line whose
    # several destination columns are all duplicated gets several errors
    for key in mappings:
        src, dest1, dest2, dest3 = mappings[key][:4]
        if src in src_dups:
            file_num, line_num = key
            errors.append(
                (file_num, line_num, "Source '{0}' is a duplicate".format(src))
            )
        if dest1 in dest_dups:
            file_num, line_num = key
            errors.append(
                (file_num, line_num, "Destination '{0}' is a duplicate".format(dest1))
            )
        if dest2 in dest_dups:
            file_num, line_num = key
            errors.append(
                (file_num, line_num, "Destination '{0}' is a duplicate".format(dest2))
            )
        if dest3 in dest_dups:
            file_num, line_num = key
            errors.append(
                (file_num, line_num, "Destination '{0}' is a duplicate".format(dest3))
            )
    return errors
|
def seq_format_from_suffix(suffix):
    """
    Guesses input format from suffix
    >>> print(seq_format_from_suffix('gb'))
    genbank
    """
    suffix_map = {'fasta': ['fas','fasta','fa','fna'],
                  'genbank': ['gb','genbank'],
                  'embl': ['embl']}
    for fmt, known_suffixes in suffix_map.items():
        if suffix in known_suffixes:
            return fmt
    raise RuntimeError(suffix+' is not a recognised suffix of an unaligned sequence file')
|
def check_colour_unique(board):
    """
    (list) -> (bool)
    Function checks third condition, i.e. unique numbers in each colourful block
    >>> check_colour_unique(["**** ****","***1 ****","** 3****",\
"* 4 1****"," 9 5 "," 6 83 *","3 1 **"," 8 2***"," 2 ****"])
    True
    """
    # Each colour block j is an L-shape made of a partial column plus a
    # partial row; non-digit cells (spaces, '*') are skipped via ValueError.
    for j in range(9):
        lst_of_digits = []
        #first loop, looking for 'cut' columns
        for i in range(0,9 - j - 1):
            try:
                digit = int(board[i][j])
                lst_of_digits.append(digit)
            except ValueError:
                continue
        #second loop, looking for 'cut' rows
        for i in range(j, 9):
            try:
                digit = int(board[9 - j - 1][i])
                lst_of_digits.append(digit)
            except ValueError:
                continue
        # a repeated digit within the block means the condition fails
        if len(lst_of_digits)!=len(set(lst_of_digits)):
            return False
        lst_of_digits.clear()
    return True
|
def ERR_calc(ACC):
    """
    Calculate Error rate.

    :param ACC: accuracy
    :type ACC: float
    :return: error rate as float, or the string "None" when ACC is not
        numeric (TypeError)
    """
    try:
        error_rate = 1 - ACC
    except TypeError:
        return "None"
    return error_rate
|
def remove(source: list, els=None):
    """
    Remove elements from a list.

    :param list source: Source list
    :param els: Element(s) to be removed; defaults to ['', None]; a
        non-list value is treated as a single element
    :return: list without the removed elements
    """
    if els is None:
        blacklist = ['', None]
    elif isinstance(els, list):
        blacklist = els
    else:
        blacklist = [els]
    return [item for item in source if item not in blacklist]
|
def getJunitTestRunnerClass(version):
    """ Get the correct junit test runner class for the given junit version

    Parameters
    ----------
    version : int
        The major version for junit

    Returns
    -------
    str or None
        Returns str if `version` is either 3 or 4
        Returns None otherwise
    """
    runners = {
        4: "org.junit.runner.JUnitCore",
        # JUnit 3 text batch runner; info:
        # http://www.geog.leeds.ac.uk/people/a.turner/src/andyt/java/grids/lib/junit-3.8.1/doc/cookbook/cookbook.htm
        3: "junit.textui.TestRunner",
    }
    return runners.get(version)
|
def var_replace(vars, value):
    """Replace all instances of ${x} in *value* with vars['x'].

    Only string replacement values are applied; a None *value* passes
    through untouched.
    """
    if value is None:
        return None
    for name, replacement in vars.items():
        if isinstance(replacement, str):
            value = value.replace(f"${{{name}}}", replacement)
    return value
|
def try_anafas_float(floatstr):
    """
    Parse an Anafas-style float field.

    Surrounding whitespace is trimmed.  When the decimal separator is
    omitted, two implied decimals are assumed (Anafas' default) and the
    number is divided by 100.  Unparseable fields yield 0.0.
    """
    try:
        num = float(floatstr.strip())
    except ValueError:
        return 0.0
    if "." not in floatstr:
        # decimal point omitted: apply the two implied decimals
        num /= 100.0
    return num
|
def extract_value(error):
    """
    Returns the value in the error message (as a string).

    The value is whatever follows the final colon of the message — or the
    whole message when no colon exists — stripped of whitespace.

    Parameter error: The error object
    Precondition: error is a ValueError
    """
    assert isinstance(error,ValueError), repr(error)+' is not a ValueError'
    message = error.args[0]
    # rpartition yields ('', '', message) when there is no colon, matching
    # the original rfind() == -1 behaviour of returning the whole message
    _, _, tail = message.rpartition(':')
    return tail.strip()
|
def eliminateExistingImages(conn, candidate, detections, detectionsWithImages):
    """
    Filter out detections whose image we already have, so we avoid
    requesting images again.

    :param conn: database connection (unused here, kept for API symmetry)
    :param candidate: candidate id used to build the image key
    :param detections: iterable of detection rows (dict-like)
    :param detectionsWithImages: collection of keys of existing images
    :return: detection rows whose image still needs to be requested
    """
    imagesToRequest = []
    for row in detections:
        key = '%d_%s_%d_%d' % (candidate, row['tdate'], row['imageid'], row['ipp_idet'])
        if key not in detectionsWithImages:
            imagesToRequest.append(row)
    return imagesToRequest
|
def _repoowner_reponame(repo_str, username):
    """Split a repo string (owner username (or org)/repo name) into
    (owner, repo).

    When only a repo name is given, *username* (the current API user) is
    used as the owner.

    Arguments:
        repo_str str -- Full repo string (owner username (or org)/repo name)
        username str -- Username of current user

    Raises:
        ValueError -- when repo_str has more than two '/'-separated parts
    """
    split = repo_str.split("/")
    if len(split) == 1:
        return username, split[0]
    if len(split) == 2:
        return split[0], split[1]
    # Bug fix: the old message interpolated the (still empty) local `repo`
    # variable and len() of that empty string; report the actual input and
    # its real part count instead.
    raise ValueError(
        "Repo %s is made of %s parts, only 1 and 2 part repo names are supported" % (repo_str, str(len(split)))
    )
|
def canonicalObjName(raw) :
    """Turn object name to identifier safe in external environment.

    Lower-cases *raw* and maps spaces and dots to underscores.
    """
    # todo: conversion of non-ascii letters, changing non-letters to something...
    return raw.lower().translate(str.maketrans({" ": "_", ".": "_"}))
|
def abstract_uri(abstract_id):
    """ Build the article URI for the given abstract id. """
    return ''.join(("article/", abstract_id))
|
def return_number(number):
    """
    Return number as a string, rounded to 2 positions after the decimal point.

    :param number: number to be converted to string
    :return: rounded number as string, or 'None' when number is None
    """
    return 'None' if number is None else str(round(number, 2))
|
def fix_public_key(str_key):
    """eBay public keys are delivered in the format:
    -----BEGIN PUBLIC KEY-----key-----END PUBLIC KEY-----
    which is missing the newlines around the key that ecdsa needs.
    This inserts those newlines and encodes the result as bytes.
    """
    with_newlines = str_key.replace('KEY-----', 'KEY-----\n')
    with_newlines = with_newlines.replace('-----END', '\n-----END')
    return with_newlines.encode('utf-8')
|
def convert_index(index_map, pos, M=None, is_start=True):
    """Working best with lcs_match(), convert the token index to origin text index

    Parameters
    ----------
    index_map: list of int
        Typically, it is a map form origin indices to converted indices
    pos: int
        The origin index to be converted.
    M: int
        The maximum index.
    is_start: bool
        True if pos is a start position.

    Returns
    -------
    int : the converted index regarding index_map
    """
    # Direct hit: pos already has a mapping.
    if index_map[pos] is not None:
        return index_map[pos]
    N = len(index_map)
    # Scan right for the nearest mapped index at or after pos.
    rear = pos
    while rear < N - 1 and index_map[rear] is None:
        rear += 1
    # Scan left for the nearest mapped index at or before pos.
    front = pos
    while front > 0 and index_map[front] is None:
        front -= 1
    # At least one neighbour must be mapped, otherwise the map is unusable.
    assert index_map[front] is not None or index_map[rear] is not None
    # Only the right neighbour is mapped: use it, shifted one left for end
    # positions (start positions clamp to 0) when there is room.
    if index_map[front] is None:
        if index_map[rear] >= 1:
            if is_start:
                return 0
            else:
                return index_map[rear] - 1
        return index_map[rear]
    # Only the left neighbour is mapped: use it, shifted one right for start
    # positions (end positions clamp to M - 1) when bounded by M.
    if index_map[rear] is None:
        if M is not None and index_map[front] < M - 1:
            if is_start:
                return index_map[front] + 1
            else:
                return M - 1
        return index_map[front]
    # Both neighbours mapped: pick the tighter bound depending on whether
    # this is a start (prefer just after front) or end (prefer just before
    # rear) position.
    if is_start:
        if index_map[rear] > index_map[front] + 1:
            return index_map[front] + 1
        else:
            return index_map[rear]
    else:
        if index_map[rear] > index_map[front] + 1:
            return index_map[rear] - 1
        else:
            return index_map[front]
|
def c2f(celsius):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    fahrenheit = 32 + celsius * 9 / 5
    return fahrenheit
|
def fit_header(header, value):
    """Convert headers to fit to our trees."""
    if header == "content-type":
        ct = value.split(";")[0]  # Remove charset and stuff
        # All audio/video/javascript/image cts should behave the same
        # (this is not completly correct, e.g., unsupported cts, but should
        # be good enough); map them onto the representative ct we used for
        # building the trees.
        if "audio" in ct:
            return "audio/wav"
        if "video" in ct:
            return "video/mp4"
        if "javascript" in ct:
            return "application/javascript"
        if "image" in ct:
            return "image/png"
        return ct
    if header == "x-frame-options":
        # If xfo is set (valid value), it cannot be framed by an attacker;
        # invalid values are interpreted as not set by (most?) browsers.
        return "deny" if value in ("deny", "sameorigin") else "empty"
    if header == "location":
        # If location is set, set it to our location.
        # Problem no distinction between same-origin/cross-origin redirect! + if both
        # cookie/no-cookie redirect, we might not see any difference
        # This special case is handeled in dil_predict.py check_single_methods
        return "http://172.17.0.1:8000"
    if header == "content-disposition":
        disposition = value.split(";")[0]  # Remove filename
        # "inline" behaves the same as not set; everything else like attachment.
        return "empty" if disposition == "inline" else "attachment"
    if header == "x-content-type-options":
        # Only nosniff should be accepted.
        return "nosniff" if value == "nosniff" else "empty"
    if header == "cross-origin-opener-policy":
        # unsafe-none should be the same as not set.
        return "empty" if value == "unsafe-none" else "same-origin"
    if header == "cross-origin-resource-policy":
        # cross-origin should be the same as not set.
        return "empty" if value == "cross-origin" else "same-origin"
    return value
|
def _include_location_in_line(line, ip, location):
    """
    Replace every occurrence of an IP address in a line with its
    location-annotated form.

    :param line: Original line to place location string into.
    :type line: str
    :param ip: IP address to be replaced with location.
    :type ip: str
    :param location: IP address string with location data appended.
    :type location: str
    :return: Line with ip addresses followed by location strings.
    :rtype: str
    """
    annotated = line.replace(ip, location)
    return annotated
|
def __fmt_str_quotes(x):
    """Return a string form of x, wrapping plain strings in single quotes.

    Lists and tuples fall through to str() — their repr already quotes any
    contained strings — as does every other type.
    """
    if isinstance(x, str):
        return "'{}'".format(x)
    return str(x)
|
def gc_fib(x):
    """Calculate the x-th Fibonacci number (1, 1, 2, 3, 5, ...).

    Iterative rewrite of the original naive double recursion, which took
    exponential time and hit the recursion limit for large x. Same values
    for all non-negative x.
    """
    a, b = 1, 1
    for _ in range(x):
        a, b = b, a + b
    return a
|
def modulate(v):
    """Modulates a value
    >>> modulate((1, (81740, None)))
    '110110000111011111100001001111110100110000'
    >>> modulate(0)
    '010'
    >>> modulate(1)
    '01100001'
    >>> modulate(-1)
    '10100001'
    >>> modulate(81740)
    '0111111000010011111101001100'
    """
    # nil encodes as the bare marker "00"
    if v is None:
        return "00"
    # pairs encode as "11" followed by both halves, recursively
    if type(v) is tuple:
        if len(v) != 2:
            raise ValueError()
        head, tail = v
        return "11" + modulate(head) + modulate(tail)
    # Scalar: sign marker, unary nibble-count prefix, then the magnitude
    # in binary padded to a whole number of nibbles (zero has no bits).
    sign = "01" if v >= 0 else "10"
    magnitude = abs(v)
    bits = bin(magnitude)[2:] if magnitude else ""
    nibbles = (len(bits) + 3) // 4
    return sign + "1" * nibbles + "0" + bits.zfill(nibbles * 4)
|
def to_bytes(value, encoding='utf8'):
    """
    cast string to bytes() like object, but for python2 support it's bytearray copy
    """
    if isinstance(value, bytes):
        return value
    if isinstance(value, bytearray):
        return bytes(value)
    if isinstance(value, str):
        return value.encode(encoding)
    raise TypeError("Not a string or bytes like object")
|
def filter_hosts(hosts, query):
    """
    Simple filter to group hosts by some basic criteria.

    Returns the set of hosts whose name contains the query substring
    (we use short acronyms for site and to designate production/test/etc).
    """
    return {host for host in hosts if query in host}
|
def consolidate_grades(grade_decimals, n_expect=None):
    """
    Consolidate several grade_decimals into one.
    Arguments:
        grade_decimals (list): A list of floats between 0 and 1
        n_expect (int): expected number of answers, defaults to length of grade_decimals
    Returns:
        float, either:
            average of grade_decimals padded to length n_expect if
            necessary, and subtracting 1/n_expect for each extra, or
            zero
        whichever is larger.
    Usage:
        >>> consolidate_grades([1, 0, 0.5], 4)
        0.375
        >>> consolidate_grades([1, 0.5, 0], 2)
        0.25
        >>> consolidate_grades([1, 0.5, 0, 0, 0], 2)
        0
    """
    if n_expect is None:
        n_expect = len(grade_decimals)
    n_extra = len(grade_decimals) - n_expect
    # Bug fix: build a new list instead of `+=`, which mutated the
    # caller's list in place.
    if n_extra > 0:
        grade_decimals = grade_decimals + [-1] * n_extra  # penalize extras
    elif n_extra < 0:
        grade_decimals = grade_decimals + [0] * (-n_extra)  # pad missing with 0
    avg = sum(grade_decimals) / n_expect
    return max(0, avg)
|
def children_to_list(node):
    """Organize children structure.

    Leaf items (type 'item', no children) lose their empty 'children'
    mapping; every other node becomes a 'folder' whose children mapping is
    converted, recursively, into a list sorted by child name.
    """
    if node['type'] == 'item' and len(node['children']) == 0:
        del node['children']
    else:
        node['type'] = 'folder'
        ordered = sorted(node['children'].values(), key=lambda child: child['name'])
        # Bug fix: the old code assigned the lazy Python 3 `map` object
        # (a Python 2 leftover), so the recursion never ran and
        # node['children'] was not a list.
        node['children'] = [children_to_list(child) for child in ordered]
    return node
|
def fourPL(x, A, B, C, D):
    """4 parameter logistic function: D + (A - D) / (1 + (x/C)^B)."""
    span = A - D
    denom = 1.0 + (x / C) ** B
    return span / denom + D
|
def __check_list(data, varname=None, dtype=None):
    """
    Checks if data is a list of dtype or turns a single dtype value into a list.

    Args:
        data:
            Data to check
        varname (str):
            Name of variable to check (used in error messages)
        dtype (type):
            Data type to check data against
    Returns:
        list(dtype)
    Raises:
        ValueError: if data is neither a dtype value nor a list of dtype values.
    """
    if type(data) == dtype:
        return [data]
    if type(data) == list:
        for elem in data:
            if not type(elem) == dtype:
                # Bug fix: previously the raise was skipped when varname was
                # falsy, silently returning a list with invalid entries.
                raise ValueError('All entries for {} must be {}.'.format(str(varname), str(dtype)))
        return data
    raise ValueError('{} is not the correct type.'.format(str(varname)))
|
def get_location(my_map, location):
    """
    Get the value of the map at the given (x, y) location.
    """
    x, y = location
    row = my_map[y]
    # Map repeats infinitely on the X axis
    return row[x % len(row)]
|
def preconvert_preinstanced_type(value, name, type_):
    """
    Converts the given `value` to an acceptable value by the wrapper.

    Parameters
    ----------
    value : `Any`
        The value to convert.
    name : `str`
        The name of the value.
    type_ : ``PreinstancedBase`` instance
        The preinstanced type.

    Returns
    -------
    value : ``PreinstancedBase`` instance
    Raises
    ------
    TypeError
        If `value` was not given as `type_` instance, neither as `type_.value`'s type's instance.
    ValueError
        If there is no preinstanced object for the given `value`.
    """
    value_type = value.__class__
    # Already a preinstanced instance: pass it through untouched.
    if (value_type is not type_):
        value_expected_type = type_.VALUE_TYPE
        if value_type is value_expected_type:
            # Exact raw-value type: usable as an INSTANCES key as-is.
            pass
        elif issubclass(value_type, value_expected_type):
            # Subclass of the raw-value type (e.g. an IntEnum member):
            # normalize to the exact expected type before lookup.
            value = value_expected_type(value)
        else:
            raise TypeError(f'`{name}` can be passed as {type_.__name__} or as {value_expected_type.__name__} '
                f'instance, got {value_type.__name__}.')
        # Map the raw value onto its preinstanced object; LookupError covers
        # both KeyError and IndexError depending on INSTANCES' container type.
        try:
            value = type_.INSTANCES[value]
        except LookupError:
            raise ValueError(f'There is no predefined `{name}` for the following value: {value!r}.') from None
    return value
|
def get_text_lines(text):
    """
    Get all lines from a text, storing each line as a separate list item.

    Carriage returns are stripped first so CRLF and LF input behave alike.
    :param text: str, text to get lines from
    :return: list<str>
    """
    return text.replace('\r', '').split('\n')
|
def reset_light(_):
    """Clean the light waveform plot when changing event"""
    hidden_style = {"display": "none"}
    return hidden_style
|
def get_progressive_min(array):
    """Returns an array representing the closest to zero so far in the given array.

    Output value at index i is the element of ``array[:i+1]`` with the
    smallest absolute value — the element itself, sign preserved.

    Parameters
    ----------
    array : list of :obj:`~numbers.Number` or :obj:`numpy.array`
        Input

    Returns
    -------
    list
        Progressively "best so far" minimum values from the input array
        (empty list for empty input).
    """
    if len(array) == 0:
        return []
    # Bug fix: the running best was seeded with abs(array[0]), which
    # dropped the sign of a negative first element while later entries
    # kept theirs, making the output inconsistent.
    best = array[0]
    result = [0] * len(array)
    for i, value in enumerate(array):
        if abs(value) < abs(best):
            best = value
        result[i] = best
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.