content stringlengths 42 6.51k |
|---|
def find_new_news(excluding_seen_news, existing_news):
    """Return articles from the API that are not already known.

    Parameters
    ----------
    excluding_seen_news : list of dict
        Candidate articles; each dict has a 'title' key.
    existing_news : list of dict
        Already-known articles; each dict has a 'title' key.

    Returns
    -------
    list of dict
        Articles whose title does not appear in ``existing_news``.
    """
    # Use a set for O(1) title membership instead of scanning a list
    # once per candidate article.
    existing_titles = {article['title'] for article in existing_news}
    return [article for article in excluding_seen_news
            if article['title'] not in existing_titles]
def consecutiveDistanceByProbability(r1, r2, p, xcontact=2):
    """Upper-bound distance constraint for consecutive domains.

    Returns the surface-to-surface distance implied by contact
    probability `p` for two beads of radius `r1` and `r2`.

    Parameters
    ----------
    r1, r2 : int
        Radii of the two beads.
    p : float
        Probability of contact.
    xcontact : int
        Scaling of (r1 + r2) at which a contact is defined.  By default a
        center-to-center distance D = 2*(r1+r2) counts as a contact.
    """
    radius_sum = r1 + r2
    if p > 0:
        center_distance = radius_sum * (1. + (xcontact ** 3 - 1) / p) ** (1. / 3.)
    else:
        # Zero probability: use an effectively unbounded distance.
        center_distance = 100 * radius_sum
    # Convert center-to-center to surface-to-surface distance.
    return center_distance - radius_sum
def _merge_clouds(old_dict, new_dict):
    """Recursively merge `new_dict` into a copy of `old_dict`.

    Like dict.update, except nested dict values are merged rather than
    replaced wholesale.  Neither input is mutated.
    """
    merged = old_dict.copy()
    for key, value in new_dict.items():
        if not isinstance(value, dict):
            merged[key] = value
        elif key in merged:
            # Both sides have a dict here: merge one level deeper.
            merged[key] = _merge_clouds(merged[key], value)
        else:
            merged[key] = value.copy()
    return merged
def cbrt(x):
    """Return the real cube root of x (handles negative inputs)."""
    # Exponentiating a negative float directly would not give the real
    # root, so take the root of the magnitude and restore the sign.
    magnitude = abs(x) ** (1.0 / 3.0)
    return magnitude if x >= 0 else -magnitude
def locales(resp_json, return_obj, options):
    """Locations in space and time at which data was collected.

    Appends one dict per record to `return_obj` and returns it.
    Fields are copied only when truthy, matching the source records.
    """
    for rec in resp_json:
        data = {'db': 'sead',
                'locale_id': 'sead:loc:{0:d}'.format(rec.get('locale_id'))}
        # Plain pass-through fields, in their original output order.
        for field in ('doi', 'locale_name', 'data_type', 'occurrences_count'):
            if rec.get(field):
                data[field] = rec.get(field)
        if rec.get('site_id'):
            data['site_id'] = 'sead:sit:{0:d}'.format(rec.get('site_id'))
        for field in ('max_age', 'min_age', 'lat', 'lon', 'elevation'):
            if rec.get(field):
                data[field] = rec.get(field)
        return_obj.append(data)
    return return_obj
def find_maxima(x):
    """Find local maxima of x.
    Example:
    >>> x = [1, 2, 3, 2, 4, 3]
    >>> find_maxima(x)
    [2, 4]
    Input arguments:
    x -- 1D list of real numbers
    Output:
    idx -- list of indices of the local maxima in x
    """
    if not isinstance(x, list):
        # %s, not %d: a type object cannot be formatted as an integer
        # (the old message itself raised TypeError).
        raise TypeError('Input argument must be a list, got %s instead' % type(x))
    idx = []
    for i in range(len(x)):
        # Interior elements must exceed both neighbors; boundary elements
        # only the one they have.  The previous x[i-1]/x[i+1] version
        # wrapped around at i == 0 and raised IndexError at the last index.
        left_smaller = (i == 0) or (x[i - 1] < x[i])
        right_smaller = (i == len(x) - 1) or (x[i + 1] < x[i])
        if left_smaller and right_smaller:
            idx.append(i)
    return idx
def find_heads(own_snake, data):
    """
    Finds coordinates of all the heads of enemy snakes
    :param own_snake: snake object exposing an `id` attribute
    :param data: game-state dict containing board['snakes']
    :return: list of (x, y) coordinates of enemy heads
    """
    heads = []
    for snake in data['board']['snakes']:
        if snake['id'] == own_snake.id:
            continue  # skip ourselves
        head = snake['body'][0]
        heads.append((head['x'], head['y']))
    return heads
def reject_bad_peaks(peaks, ratio=3):
    """
    Go through the list of peaks and remove any that look really suspicious.
    I refer to this colloquially as "forensic accounting".
    Parameters
    ----------
    peaks: sequence of 2-tuples:
        peak location and "strength" (e.g., SNR) of peaks.
        NOTE: the list is sorted and pruned in place.
    ratio: float
        maximum allowed strength ratio between the brightest peak and the
        4th-brightest before the brightest is discarded (default 3,
        the previously hard-coded value).
    Returns
    -------
    sequence of 2-tuples:
        accepted elements of input list
    """
    diff = 3  # Compare 1st brightest to 4th brightest
    peaks.sort(key=lambda x: x[1])  # Sort by SNR
    # Repeatedly drop the brightest peak while it dwarfs the 4th brightest.
    while len(peaks) > diff and (peaks[-1][1] / peaks[-(diff + 1)][1] > ratio):
        del peaks[-1]
    return peaks
def get_tasks(file):
    """Heuristic method to find tasks within a yml/yaml file"""
    # NOTE(review): `file` appears to be an already-parsed YAML document
    # (a list of dicts, e.g. from yaml.safe_load) -- confirm with callers.
    value_object_list = []  # unused; kept as-is to avoid changing behavior
    name_found_list = []
    for element in file:
        if isinstance(element, dict):
            if 'name' in element:
                if 'tasks' in element:
                    # Ansible-style play: prefer the named tasks inside it.
                    tasks_list = element['tasks']
                    flag = False
                    if tasks_list:
                        for el in tasks_list:
                            if 'name' in el:
                                flag = True
                                name_found_list.append(el)
                    # No named task found inside -> fall back to the play itself.
                    if not flag:
                        name_found_list.append(element)
                else:
                    # Named mapping with no task list: record it directly.
                    name_found_list.append(element)
            else:
                # Unnamed mapping: scan its list values for named dict entries.
                for key, value in element.items():
                    if isinstance(value, list):
                        if value:
                            for val in value:
                                if isinstance(val, dict):
                                    if 'name' in val:
                                        name_found_list.append(val)
    return name_found_list
def CreateStandbyPolicy(messages, initial_delay_sec):
    """Creates standby policy from args.

    Returns None when no initial delay was supplied.
    """
    # pylint: disable=g-explicit-bool-comparison
    if initial_delay_sec is None:
        return None
    policy = messages.InstanceGroupManagerStandbyPolicy(
        initialDelaySec=initial_delay_sec)
    return policy
def generate_args(num):
    """ Returns questionmarks separated by commas as a string to be used by the insert command. """
    # str.join yields '?,?,...,?' with no trailing comma ('' for num == 0).
    return ','.join(['?'] * num)
def nukenewlines(string):
    """Strip newlines and any trailing/following whitespace; rejoin
    with a single space where the newlines were.
    Bug: This routine will completely butcher any whitespace-formatted
    text."""
    if not string:
        return ''
    stripped_lines = (line.strip() for line in string.splitlines())
    return ' '.join(stripped_lines)
def sort_reviews_by_last_updated(reviews):
    """Sort reviews in ascending order by last update date."""
    def last_updated(review):
        return review['lastUpdated']
    return sorted(reviews, key=last_updated)
def replace_with_white(board: list):
    """
    Replaces white boxes with '*'
    >>> board = [\
    "*1** ****",\
    "***1 ****",\
    "** 3****",\
    "* 4 1****",\
    " 9 5 ",\
    " 6 83 *",\
    "3 1 **",\
    " 8 2***",\
    " 2 ****"]
    >>> replace_with_white(board)[0]
    '**** ****'
    """
    # Mask the top-left staircase: row i gets (4 - i) stars on the left,
    # keeps its middle section, and gets 4 stars on the right.
    for i in range(4):
        j = 4 - i
        board[i] = "*" * j + board[i][j:-4] + 4 * '*'
    # Mask the bottom-right staircase: the last 4 rows get an increasing
    # number of stars appended on the right.  Board is mutated in place.
    for i in range(-1, -5, -1):
        j = -4 + (abs(i) - 1)
        board[i] = board[i][:j] + "*" * abs(j)
    return board
def dedupe_list(input):
    """Remove duplicates from the given list.

    Uses dict.fromkeys so first-occurrence order is preserved; the old
    set()-based version returned survivors in arbitrary order.
    (Parameter name shadows the builtin `input`; kept for compatibility
    with keyword callers.)
    """
    return list(dict.fromkeys(input))
def make_filebody(number_of_lines: int) -> str:
    """ Return a string representing the contents of a file with a given number of lines.
    Only used for testing purposes.
    """
    lines = ['{n}/{c}: line\n'.format(n=i, c=number_of_lines)
             for i in range(number_of_lines)]
    return ''.join(lines)
def transform_suffix(filenames, suffix_old, suffix_new):
    """Swap the suffix of every name in a set of filenames.

    inputs:
    filenames - a set with all the filenames
    suffix_old - the suffix to be removed from the filenames
    suffix_new - the suffix to be added to the filenames
    output:
    new_filenames - a set with the renamed files

    NOTE: each name is assumed to actually end in '.' + suffix_old;
    the last len(suffix_old)+1 characters are chopped unconditionally.
    """
    chop = len(suffix_old) + 1  # +1 for the "."
    return {name[:-chop] + "." + suffix_new for name in filenames}
def containsAny(string, char_set):
    """Check whether `string` contains ANY of the chars in `char_set`.

    Returns a bool; the previous `1 in [...]` membership test already
    produced a bool (True == 1), so behavior is unchanged, but any()
    also short-circuits instead of building a list.
    """
    return any(c in string for c in char_set)
def confopt_float(confstr, default=None):
    """Check and return a floating point number.

    Returns `default` when `confstr` cannot be parsed as a float.
    """
    try:
        return float(confstr)
    except (TypeError, ValueError):
        # Narrowed from a bare `except Exception`: only conversion
        # failures should fall back to the default.
        return default
def get_model_config(config, model_id):
    """
    Return config of a single model (None when no model matches).
    """
    matches = (mc for mc in config["models"] if mc["model_id"] == model_id)
    return next(matches, None)
def maybe_add(d, exclude_false=False, **kws):
    """
    Adds keyword arguments to a dict if their values are not None.
    Parameters
    ----------
    d: dict
        The dictionary to add to (mutated in place).
    exclude_false: bool
        Also skip keys whose values are falsy.  This flag was documented
        previously but never implemented; default False keeps the old
        behavior.
    kws: dict
        The keys to maybe add
    Returns
    -------
    dict
        The same dict `d`.
    """
    for k, v in kws.items():
        if v is None:
            continue
        if exclude_false and not v:
            continue
        d[k] = v
    return d
def get_all_possible_mappings(orig, alt):
    """
    Creates a list of lists that contains the mappings of the items in orig
    to all possible indexes from alt.
    :param orig: a list of items
    :param alt: a list of items
    :return: a list of lists; each element is the list of indexes in `alt`
             at which the corresponding `orig` item appears.
    """
    return [[alt_idx for alt_idx, alt_item in enumerate(alt) if alt_item == item]
            for item in orig]
def get_fields_with_datatype(
    dataset_ids: list, field_info: dict, data_format: str
) -> dict:
    """
    Returns the column name and its data type by looking up stream and log_fields json file.
    If a mapping column name is not present in log_fields.json it returns
    the column name as `col<idx>` and its data type as `string`
    Example:
    ```
    {
        "cp": "string",
        "bytes" : "bigint",
        "col1": "string"
    }
    ```
    """
    result = {}
    result["version"] = "string"
    # start set as 1 as the 0th field is version
    # if the column name lookup on field_info fails,
    # it sets the column name as col<idx> ex, col5
    # for JSON, column format is string by default
    for itr, colid in enumerate(dataset_ids, start=1):
        # The fallback dicts supply defaults when `colid` is unknown:
        # name -> "col<idx>", dtype -> "string".
        # NOTE(review): a known colid missing 'name' or 'dtype' raises
        # KeyError here -- presumably guaranteed present by the schema.
        result[field_info.get(colid, {"name": "col" + str(itr)})["name"]] = (
            "string"
            if data_format == "JSON"
            else field_info.get(colid, {"dtype": "string"})["dtype"]
        )
    return result
def mapping_linkedin_user_info_to_sso_user_info(
    cognito_id, cognito_email, linkedin_user_info
):
    """
    Map the LinkedIn ID token info to the user info.

    The profile picture is optional: any missing level of the nested
    structure results in an empty string.
    """
    sso_user_info = dict()
    sso_user_info["cognito_id"] = cognito_id
    sso_user_info["cognito_email"] = cognito_email
    sso_user_info["federated_id"] = "linkedin" + "_" + linkedin_user_info["id"]
    sso_user_info["first_name"] = linkedin_user_info["localizedFirstName"]
    sso_user_info["last_name"] = linkedin_user_info["localizedLastName"]
    try:
        sso_user_info["picture"] = linkedin_user_info["profilePicture"][
            "displayImage~"
        ]["elements"][0]["identifiers"][0]["identifier"]
    except (KeyError, IndexError, TypeError):
        # Narrowed from a bare `except Exception` (with an unused `e`):
        # only structural lookup failures mean "no picture".
        sso_user_info["picture"] = ""
    return sso_user_info
def rule_width(value: float, layer: str, angle_limit: float = 90) -> str:
    """Min feature size DRC rule as a KLayout-style expression string."""
    category = "width"
    error = "{} {} {}um".format(layer, category, value)
    check = "{}.{}({}, angle_limit({}))".format(layer, category, value, angle_limit)
    return "{}.output('{}', '{}')".format(check, error, error)
def human_bytes(byte: int, precision: int = 2) -> str:
    """Return a human readable version of the byte amount"""
    # (threshold, unit) pairs from largest to smallest, decimal (SI) steps.
    units = ((10 ** 12, "TB"), (10 ** 9, "GB"), (10 ** 6, "MB"), (10 ** 3, "kB"))
    for threshold, unit in units:
        if byte >= threshold:
            return f"{round(byte / threshold, precision)} {unit}"
    # Below 1 kB (including negatives): report raw bytes.
    return f"{byte} B"
def simple_preproc(str_list, identifier, unifier):
    """Simple preprocessing applied only for metadata.

    Appends `identifier` to each non-empty ';'-separated token of
    `str_list` and rejoins the tokens with `unifier`.
    """
    try:
        tokens = [token.strip() + identifier if not token == '' else ''
                  for token in str_list.split(';')]
        return unifier.join(tokens)
    except Exception:
        print("Error Processing this field:", identifier)
        print("And this string:", str_list)
        # Bare `raise` re-raises the original exception; the previous
        # `raise Exception` replaced it and lost the traceback.
        raise
def normalize_metadata(metadata_value):
    """Normalize the metadata received from the form."""
    if metadata_value is None:
        return None
    # Removing closing comments (recursively, in case stripping reveals more).
    if '-->' in metadata_value:
        metadata_value = metadata_value.replace('-->', '')
        metadata_value = normalize_metadata(metadata_value)
    # Reject html tags.  The old `('<' or '>') in value` only ever tested
    # for '<' because `('<' or '>')` evaluates to '<'.
    if ('<' in metadata_value or '>' in metadata_value) and '-->' not in metadata_value:
        metadata_value = ''
    # Overly long values are rejected outright.
    if len(metadata_value) > 200:
        metadata_value = ''
    return metadata_value.strip()
def replace_message_sig(message, sig_text):
    """Helper method to do a string replacement of the SIG placeholder."""
    placeholder = "SIG"
    return message.replace(placeholder, sig_text)
def update_arg(original, new_val, name, log=False):
    """
    Decide if update value or keep the original
    :param original: fallback value
    :param new_val: candidate replacement (ignored when None)
    :param name: name of the variable (used in the log line)
    :param log: decide if print or not
    :return: the chosen value
    """
    out_val = original if new_val is None else new_val
    if log:
        print(' - ' + name + ' = {}'.format(out_val))
    return out_val
def text_to_sentences(text, sentence_sep="\n\n", word_sep="\n", label_sep="\t"):
    """
    Converts text to sentences (a list of list of word) and labels.
    """
    def word_label_condition(word_label):
        """
        When words and labels are taken into account
        """
        # Keep only "word<label_sep>label" tokens where both halves are
        # non-empty.
        c1 = label_sep in word_label
        c2 = c1 and (len(word_label.split(label_sep)[0]) > 0)
        c3 = c1 and (len(word_label.split(label_sep)[1]) > 0)
        return c2 and c3
    def sentence_condition(sentence):
        """
        When sentences are taken into account
        """
        # Drop empty sentences produced by consecutive separators.
        return len(sentence) > 0
    # Each sentence becomes a list of (word, label, ...) tuples; note a
    # token containing label_sep more than once yields a tuple longer than 2.
    sentences_with_labels = [[tuple(word_label.split(label_sep)) for word_label in sentence.split(word_sep) if
                              word_label_condition(word_label)] for sentence in text.split(sentence_sep) if
                             sentence_condition(sentence)]
    return sentences_with_labels
def get_locator(*values):
    """
    Gets the first available locator.
    :rtype: :class:`aria.parser.reading.Locator`
    """
    for candidate in values:
        # getattr with a None default covers both "no attribute" and
        # "attribute explicitly None".
        locator = getattr(candidate, '_locator', None)
        if locator is not None:
            return locator
    return None
def merge_dict_overwrite_first(dict1, dict2):
    """Desired result is a new dictionary with the values merged,
    and the second dict's values overwriting those from the first in pythonic syntax."""
    merged = dict(dict1)
    merged.update(dict2)
    return merged
def set_num_precision(number, precision, mode='int'):
    """
    Return the input number with N digits of precision.
    Args:
        number: Input value.
        precision: Number of digits of desired precision.
        mode: 'int' to return an int, anything else returns a float.
    Returns:
        Input value with 'precision' digits of precision.
    """
    # Round via scientific-notation formatting with (precision - 1) decimals.
    rounded = float('{:.{}e}'.format(number, precision - 1))
    return int(rounded) if mode == 'int' else rounded
def gap_right(hist, x, th, W, K=10):
    """
    Checks if this x-coordinate marks the end of a word/block.
    :param hist: distribution of pixels.
    :param x: x-coordinate.
    :param th: threshold value.
    :param W: image width; columns beyond W are not inspected.
    :param K: number of columns of empty pixels to consider as new word/block.
    :return: whether this x-coordinate marks the end of a word/block.
    """
    # The column just before x must contain "ink" (above threshold)...
    gap = hist[x-1] > th
    for i in range(K):
        if x + i > W:
            # Ran past the image edge -- stop scanning.
            break
        # ...and the next K columns must all be empty (<= threshold).
        # Indices beyond the histogram length count as empty.
        gap = gap and (x+i >= len(hist) or hist[x+i] <= th)
    return gap
def abc_q2d(n, m):
    """A, B, C terms for 2D-Q polynomials. oe-20-3-2483 Eq. (A.3).
    Parameters
    ----------
    n : `int`
        radial order
    m : `int`
        azimuthal order
    Returns
    -------
    `float`, `float`, `float`
        A, B, C
    """
    # D is used everywhere (shared denominator of A, B, C).
    # NOTE(review): D is zero for some small (n, m) combinations
    # (e.g. m + n == 2) -- presumably callers stay out of that range.
    D = (4 * n ** 2 - 1) * (m + n - 2) * (m + 2 * n - 3)
    # A
    term1 = (2 * n - 1) * (m + 2 * n - 2)
    term2 = (4 * n * (m + n - 2) + (m - 3) * (2 * m - 1))
    A = (term1 * term2) / D
    # B
    num = -2 * (2 * n - 1) * (m + 2 * n - 3) * (m + 2 * n - 2) * (m + 2 * n - 1)
    B = num / D
    # C
    num = n * (2 * n - 3) * (m + 2 * n - 1) * (2 * m + 2 * n - 3)
    C = num / D
    return A, B, C
def dot_prod(a, b):
    """Returns the dot vector product of two lists of numbers.

    Raises
    ------
    ValueError
        If either argument is not a list, or if the lengths differ
        (previously a longer `b` raised a bare IndexError and a longer
        `a` was silently truncated).
    """
    if type(a) is not list:
        raise ValueError(f"Incorrect parameter type: {type(a)}")
    if type(b) is not list:
        raise ValueError(f"Incorrect parameter type: {type(b)}")
    if len(a) != len(b):
        raise ValueError(f"Length mismatch: {len(a)} != {len(b)}")
    return sum(x * y for x, y in zip(a, b))
def __compute_next(x: int, g: int, y: int, a: int, b: int, p: int, order: int):
    """
    Computes x_(i+1), a_(i+1), b_(i+1) from x_i, a_i, b_i
    (looks like one step of a Pollard's-rho-style random walk for
    discrete logarithms).
    :param x: x_i in the random walk. Positive integer.
    :param g: Generator g of g^x = y (mod p). Positive integer.
    :param y: y of g^x = y (mod p). Positive integer.
    :param a: a_i in the random walk. Non-negative integer.
    :param b: b_i in the random walk. Non-negative integer.
    :param p: p of g^x = y (mod p). Prime number.
    :param order: order of g in (mod p). ie. Smallest i such that g^i = 1 (mod p). Positive integer.
    :returns: x_(i+1), a_(i+1), b_(i+1)
    """
    # The decision of if x belongs to S0, S1 and S2 is done by taking mod 3
    # Compute the next x, a and b
    if x % 3 == 0:
        # S0: square x and double both exponents.
        x = (x * x) % p
        a = (2 * a) % order
        b = (2 * b) % order
    elif x % 3 == 1:
        # S1: multiply by y; bump the y-exponent.
        x = (x * y) % p
        b = (b + 1) % order
    else:
        # S2: multiply by g; bump the g-exponent.
        x = (x * g) % p
        a = (a + 1) % order
    return x, a, b
def to_int32(f):
    """
    Convert an image to an int32 image.
    img = to_int32(f)
    `to_int32` clips the input image between the values -2147483648 and
    2147483647 and converts it to the signed 32-bit datatype.
    Parameters
    ----------
    f : Any image
    Returns
    -------
    img : The converted image
    """
    from numpy import int32, asanyarray
    # Clip before casting, as the docstring promises: a plain astype would
    # wrap out-of-range values instead of saturating them.
    return asanyarray(f).clip(-2147483648, 2147483647).astype(int32)
def B(A: dict) -> int:
    """Function that depends on A, but says it's a primitive type dict."""
    return 1 + A['a']
def squaremeters_to_ha(value):
    """Convert square meters to hectares, rounded to two decimals."""
    hectares = value / 10000.
    # Round by formatting with 2 decimals, then parse back to float.
    return float('{0:4.2f}'.format(hectares))
def _relative_likes(favorite_stories, inverted_favorites, fandom):
    """
    .. versionadded:: 0.3.0
    Returns how many stories a user likes in a fandom over all fandoms
    that they like.
    :param favorite_stories: List of story-ids liked by a user.
    :type favorite_stories: list.
    :param inverted_favorites: Dictionary mapping fandoms to the story-ids in
                               that fandom liked by a user.
    :type inverted_favorites: dict.
    :param fandom: String representing the fandom.
    :type fandom: str.
    :returns: Approximate calculation for how much a user likes a fandom.
    :rtype: float
    .. note:: This is far from a perfect calculation. This should really take
              a few more metrics into account, such as the number of stories
              a person has written for a particular fandom, the sentiment of
              the reviews they left over all stories they reviewed for a
              fandom, etc.
    """
    total = len(favorite_stories)
    # No favorites at all -> no signal.
    if not total:
        return 0.0
    fandom_stories = inverted_favorites.get(fandom)
    # Nothing liked in this fandom -> zero score.
    if not fandom_stories:
        return 0.0
    return len(fandom_stories) / total
def prepare_mdtau(nrot, jobs):
    """
    Returns what mdtau should be set to based on the number of hindered rotors and
    inserts MdTau into the joblist if need be
    """
    mdtau = None
    if nrot > 0 and '1dTau' in jobs:
        # Torsional dimension is capped at 3.
        mdtau = str(min(nrot, 3))
        if 'MdTau' not in jobs:
            jobs.insert(jobs.index('1dTau') + 1, 'MdTau')
    return mdtau, jobs
def get_toll_path(toll, point, color):
    """
    Get info to plot a path in mapbox from toll to point
    :param toll: dict with 'lat', 'long', 'name'
    :param point: dict with 'lat', 'long', 'name'
    :param color: line color
    :return: Dictionary info to plot in mapbox a line
    """
    path = {}
    path['lat'] = [toll['lat'], point['lat']]
    path['lon'] = [toll['long'], point['long']]
    path['type'] = 'scattermapbox'
    path['mode'] = 'lines'
    path['hoverinfo'] = 'text'
    path['line'] = {'width': 1, 'color': color}
    path['text'] = [toll['name'], point['name']]
    path['name'] = point['name']
    return path
def lat_lon_2_distance(lat1, lon1, lat2, lon2):
    """
    Return the distance (in km) between locations (lat1, lon1) and
    (lat2, lon2) using the haversine formula.
    parameter
    ---------
    lat1, lat2: latitude in degrees
    lon1, lon2: longitude in degrees
    return
    ------
    distance in km
    """
    from math import sin, cos, sqrt, atan2, radians
    earth_radius_km = 6373.0  # approximate radius of earth in km
    phi1, lam1, phi2, lam2 = map(radians, (lat1, lon1, lat2, lon2))
    dphi = phi2 - phi1
    dlam = lam2 - lam1
    a = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlam / 2) ** 2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return earth_radius_km * c
def _ShouldTreatDstUrlAsSingleton(src_url_names_container, have_multiple_srcs,
                                  have_existing_dest_subdir, dst_url,
                                  recursion_requested):
    """Checks that dst_url names a single file/object after wildcard expansion.
    It is possible that an object path might name a bucket sub-directory.
    Args:
      src_url_names_container: Bool indicator of whether the source for the
          operation is a container (bucket, bucket subdir, or directory).
      have_multiple_srcs: Bool indicator of whether this is a multi-source
          operation.
      have_existing_dest_subdir: bool indicator whether dest is an existing
          subdirectory.
      dst_url: StorageUrl to check.
      recursion_requested: True if a recursive operation has been requested.
    Returns:
      bool indicator.
    """
    # A recursive copy out of a container never targets a singleton.
    if recursion_requested and src_url_names_container:
        return False
    if dst_url.IsFileUrl():
        return not dst_url.IsDirectory()
    # Cloud URL: singleton only with one source, no existing destination
    # subdirectory, and an object (not a bucket/subdir) destination.
    return (not have_multiple_srcs and
            not have_existing_dest_subdir and
            dst_url.IsObject())
def load(obj, cls, default_factory):
    """Create or load an object if necessary.
    Parameters
    ----------
    obj : `object` or `dict` or `None`
    cls : `type`
    default_factory : `function`
    Returns
    -------
    `object`
        `default_factory()` when obj is None, `cls.load(obj)` for dicts,
        otherwise obj unchanged.
    """
    if obj is None:
        return default_factory()
    return cls.load(obj) if isinstance(obj, dict) else obj
def _create_metadata_row_from_string(element_label, element_string):
    """ create row of metadata to write """
    return {'Metadata_label': element_label,
            'Metadata_value': element_string}
def to_bytes(text, encoding='utf-8'):
    """Make sure text is bytes type."""
    # Falsy values (None, '', b'') pass through untouched.
    if not text:
        return text
    if isinstance(text, bytes):
        return text
    return text.encode(encoding)
def first_in_parens(s):
    """
    Returns: The substring of s that is inside the first pair of parentheses.
    The first pair of parenthesis consist of the first instance of character
    '(' and the first instance of ')' that follows it.
    Examples:
    first_in_parens('A (B) C') returns 'B'
    first_in_parens('A (B) (C)') returns 'B'
    first_in_parens('A ((B) (C))') returns '(B'
    Parameter s: a string to check
    Precondition: s is a string with a matching pair of parens '()'.
    """
    assert type(s) == str
    assert ('(' in s) and (')' in s)
    # Everything after the first '(', then everything before the next ')'.
    after_open = s.partition('(')[2]
    return after_open.partition(')')[0]
def massage_link(linkstring):
    """Don't allow html in the link string. Prepend http:// if there isn't
    already a protocol."""
    for bad_char in ('<', '>', "'", '"'):
        linkstring = linkstring.replace(bad_char, '')
    if linkstring and ':' not in linkstring:
        linkstring = 'http://' + linkstring
    return linkstring
def change_ratio(data):
    """Calculates the ratio N+1/N for each element in data"""
    result = data[:]
    for idx, trace in enumerate(data):
        ratios = []
        # Successive differences paired with the later sample of each pair.
        diffs = [nxt - cur for cur, nxt in zip(trace[:-1], trace[1:])]
        for diff, base in zip(diffs, trace[1:]):
            try:
                ratios.append(diff / base)
            except ZeroDivisionError:
                ratios.append(0.0)
        # Leading 0 keeps the output the same length as the input trace.
        result[idx] = [0] + ratios
    return result
def validate_subnets(value):
    """Return an error fragment when any SubnetId exceeds 32 chars, else ''.

    (Despite the original docstring, nothing is raised here -- the caller
    assembles the returned fragment into a message.)
    """
    if any(len(entry["SubnetId"]) > 32 for entry in value):
        return "have length less than or equal to 32"
    return ""
def sort_coords(line):
    """ order set point of the line """
    a, b, c, d = line
    if b > c:
        # Swap the endpoint pairs when the second coordinate of the first
        # point exceeds the first coordinate of the second point.
        return [c, d, a, b]
    return [a, b, c, d]
def disabled_payments_notice(context, addon=None):
    """
    If payments are disabled, we show a friendly message urging the developer
    to make his/her app free.
    """
    # The context's addon (if present) wins over the explicit argument.
    resolved_addon = context.get('addon', addon)
    return {'request': context.get('request'), 'addon': resolved_addon}
def xml_format(tag):
    """Ensures that tag is encapsulated inside angle brackets.

    Handles the empty string ('' -> '<>'); the previous tag[0] indexing
    raised IndexError on empty input.
    """
    if not tag.startswith("<"):
        tag = "<" + tag
    if not tag.endswith(">"):
        tag += ">"
    return tag
def make_expr_sig(args=None):
    """create experiment signature string from args and timestamp"""
    import time
    # Only the timestamp is currently used; `args` is accepted for
    # interface compatibility with callers that pass parsed arguments.
    return time.strftime("%Y%m%d-%H%M%S")
def remove_padding(data: bytes) -> bytes:
    """
    Removes ISO/IEC 9797-1 Padding method 2 from data.

    Valid padding is a single 0x80 byte followed only by 0x00 bytes.
    Returns b"" when the padding is malformed.  The previous version
    scanned backwards past arbitrary non-zero bytes to the first 0x80,
    which could truncate real message content.
    """
    stripped = data.rstrip(b"\x00")
    if stripped.endswith(b"\x80"):
        return stripped[:-1]
    return b""
def remove_tag_from_name(name, tag):
    """Removes a tag from a name given as a string."""
    # Reduce a path like "dir/file.ext" to its bare stem first.
    stem = name.split("/")[-1].split(".")[0]
    # A stem that IS the tag is returned whole; otherwise strip the tag.
    if stem == tag:
        return tag
    return stem.replace(tag, "")
def snake_case_to_headless_camel_case(snake_string):
    """Convert snake_case to headlessCamelCase.
    Args:
        snake_string: The string to be converted.
    Returns:
        The input string converted to headlessCamelCase.
    """
    head, *tail = snake_string.split('_')
    return head + ''.join(part.capitalize() for part in tail)
def test_closure(a):
    """This is the closure test in the paper."""
    # Kept byte-for-byte: this reproduces a specific closure-analysis
    # example, so the shape of the nesting matters.
    def x1(b):
        def x4(c):
            # `c` is deliberately ignored; x4 returns the captured `b`.
            return b
        return x4
    x2 = x1(a)  # closure with b bound to a
    x3 = x2(1)  # returns the captured a
    return x3
def ensure_trailing_slash(path):
    """Return path if path ends with a / or path + / if it doesn't."""
    if path.endswith('/'):
        return path
    return path + '/'
def stats(data):
    """
    Assumes a matrix of data with variables on the columns
    and observations on the rows. Returns the mean,
    variance and standard error of the data.
    """
    from numpy import average, sqrt
    mean = average(data)
    squared_deviations = (data - mean) ** 2
    var = average(squared_deviations)
    # Standard error = standard deviation / sqrt(sample count).
    stderr = sqrt(var) / sqrt(len(data))
    return mean, var, stderr
def score_silence(item):
    """Compute a goodness score for a silent audio span (start, end).
    Used by `find_spoken_wordspans()` to find where to split phrases.
    """
    start, end = item
    # Leading/trailing silences always win: they mark the recording's
    # actual boundaries.  Interior silences rank by duration, so longer
    # pauses are preferred split points.
    boundary_span = start <= 0.0 or end is None
    if boundary_span:
        return 100000.0  # seconds
    return end - start
def _ipVersionToLen(version):
    """Return number of bits in address for a certain IP version.
    >>> _ipVersionToLen(4)
    32
    >>> _ipVersionToLen(6)
    128
    >>> _ipVersionToLen(5)
    Traceback (most recent call last):
      ...
    ValueError: only IPv4 and IPv6 supported
    """
    address_bits = {4: 32, 6: 128}
    try:
        return address_bits[version]
    except KeyError:
        raise ValueError("only IPv4 and IPv6 supported")
def game_result(player, players, value=1):
    """Return a dict mapping every player except `player` to -value, with
    `player`'s own entry adjusted so that all results sum to zero.

    By default (value=1) `player` is marked as the winner.  Use value=-1
    to mark `player` as the loser; value=0 yields a draw.  Returns None
    when value is None.
    """
    if value is None:
        return None
    results = {opponent: -value for opponent in players if opponent != player}
    results[player] = value * (len(players) - 1)
    return results
def to_lowercase(text):
    """
    Transform all characters to lowercase
    :param text: input text
    :return: transformed text
    """
    lowered = text.lower()
    return lowered
def get_selected_action(body):
    """Get selected value from an action event (None when no actions)."""
    actions = body["actions"]
    # Truthiness covers both None and the empty list.
    if actions:
        return actions[0]["selected_option"]["value"]
    return None
def default(data, dflt):
    """
    Helper function for setting default values.

    Replaces None entries of a list/tuple with `dflt`.  Returns None when
    `data` is None or when every entry of the sequence was None (this
    includes the empty sequence); non-sequence values pass through.
    Args:
        data: value or sequence to fill.
        dflt: replacement for None entries.
    Returns:
        Filled list, original scalar, or None.
    """
    if data is None:
        return None
    if not isinstance(data, (list, tuple)):
        return data
    if all(item is None for item in data):
        return None
    return [dflt if item is None else item for item in data]
def set_required(field, render_kw=None, force=False):
    """
    Returns *render_kw* with *required* set if the field is required.
    Sets the *required* key if the `required` flag is set for the field (this
    is mostly the case if it is set by validators). The `required` attribute
    is used by browsers to indicate a required field.
    ..note::
        This won't change keys already present unless *force* is used.
    """
    render_kw = {} if render_kw is None else render_kw
    already_present = 'required' in render_kw
    if already_present and not force:
        return render_kw
    if field.flags.required:
        render_kw['required'] = True
    return render_kw
def isNumber(s):
    """ Checks to see if this is a JSON number.

    NOTE: accepts anything float() accepts, so 'nan'/'inf' also return
    True even though they are not valid JSON numbers.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # TypeError added: float(None) / float([]) previously propagated
        # instead of returning False.
        return False
def remove_sub_strings(string, substrings):
    """Strips substrings from a given string.

    Substrings are removed in iteration order, so earlier removals can
    expose (or destroy) later matches.
    """
    cleaned = string
    for fragment in substrings:
        cleaned = cleaned.replace(fragment, '')
    return cleaned
def _(arg):
    """If the lowercase string is 't' or 'true', return True else False."""
    normalized = arg.lower().strip()
    return normalized in ('true', 't')
def find_largest_digit_helper(max_digit, current):
    """
    Recursively find the largest digit of `current`, carrying the running
    maximum in `max_digit`.
    :param max_digit: int, largest digit seen so far.
    :param current: int, remaining digits to examine.
    :return: int, the largest digit once all digits have been consumed.
    """
    if current == 0:
        # All digits processed: the accumulator holds the answer.
        return max_digit
    # Peel off the last digit, fold it into the maximum, recurse on the rest.
    return find_largest_digit_helper(max(max_digit, current % 10), current // 10)
def compare_dicts(first, second, fine_if_wrong=None):
    """Compare two dictionaries, based on their key, value pairs.

    Returns (resemblance, differences) where resemblance is the fraction
    of keys (over the union) that do not conflict, and differences maps
    conflicting keys to (first_value, second_value).  Keys listed in
    `fine_if_wrong` never count as conflicts.
    """
    if fine_if_wrong is None:
        fine_if_wrong = set()
    total_keys = len(set(first) | set(second))
    resemblance = 0
    differences = {}
    for key, value in first.items():
        if key in second and key not in fine_if_wrong and second[key] != value:
            differences[key] = (value, second[key])
        else:
            resemblance += 1
    # Keys present only in `second` also count toward resemblance.
    resemblance += sum(1 for key in second if key not in first)
    resemblance /= total_keys
    return resemblance, differences
def multiply_tuple(row_obj):
    """Demonstrates an object formatter function"""
    product = row_obj[2] * row_obj[3]
    return str(product)
def get_linked_edges(verts, filter_edges):
    """ Find all the edges linked to verts that are also in filter_edges """
    linked = (edge for vert in verts for edge in vert.link_edges)
    return [edge for edge in linked if edge in filter_edges]
def computeSimpleInterest(principal, rateInPercent, timeInYears):
    """Computes and returns simple interest (0 for negative rate or time)."""
    if timeInYears < 0 or rateInPercent < 0:
        return 0
    return principal * rateInPercent * timeInYears / 100
def shrink_to_hint(s: str):
    """
    Shrink a string to hint.
    :param str s: Source string
    :return: str
    """
    if len(s) < 4:
        # Too short to reveal anything: mask every character.
        return '*' * len(s)
    # First and last character, stars in between.
    return f'{s[0]}**{s[-1]}'
def deep_update(
    original, new_dict, new_keys_allowed=False, whitelist=None, override_all_if_type_changes=None,
):
    """Updates original dict with values from new_dict recursively.
    If new key is introduced in new_dict, then if new_keys_allowed is not
    True, an error will be thrown. Further, for sub-dicts, if the key is
    in the whitelist, then new subkeys can be introduced.
    :param original: Dictionary with default values (mutated in place).
    :type original: dict
    :param new_dict: Dictionary with values to be updated
    :param new_keys_allowed: Whether new keys are allowed. (Default value = False)
    :type new_keys_allowed: bool
    :param whitelist: List of keys that correspond to dict
        values where new subkeys can be introduced. This is only at the top
        level. (Default value = None)
    :type whitelist: Optional[List[str]]
    :param override_all_if_type_changes: List of top level
        keys with value=dict, for which we always simply override the
        entire value (dict), iff the "type" key in that value dict changes. (Default value = None)
    :type override_all_if_type_changes: Optional[List[str]]
    """
    whitelist = whitelist or []
    override_all_if_type_changes = override_all_if_type_changes or []
    for k, value in new_dict.items():
        if k not in original and not new_keys_allowed:
            raise Exception("Unknown config parameter `{}` ".format(k))
        # Both original value and new one are dicts.
        if isinstance(original.get(k), dict) and isinstance(value, dict):
            # Check new type vs old one. If different, override entire value.
            if (
                k in override_all_if_type_changes
                and "type" in value
                and "type" in original[k]
                and value["type"] != original[k]["type"]
            ):
                original[k] = value
            # Whitelisted key -> ok to add new subkeys (note: the recursive
            # call drops whitelist/override lists, so both apply only at
            # the top level).
            elif k in whitelist:
                deep_update(original[k], value, True)
            # Non-whitelisted key.
            else:
                deep_update(original[k], value, new_keys_allowed)
        # Original value not a dict OR new value not a dict:
        # Override entire value.
        else:
            original[k] = value
    return original
def wrap_360(valin):
    """
    Wraps a value (float) to the [0, 360) degree range.
    Parameters
    ----------
    valin: float
        Input value in degrees
    Returns
    -------
    valout : float
        Equivalent angle in [0, 360) degrees.
    Example
    -------
    # e.g., 370 degrees is equivalent to 10 degrees. \n
    obs.wrap_360(370) #--> 10.
    Note
    ----
    Equivalent to (valin % 360). The previous branch-based version returned
    360. instead of 0. for exact negative multiples of 360 (e.g. -360, -720);
    Python's modulo handles negatives correctly.
    """
    if 0.0 <= valin < 360.0:
        # Already in range: return unchanged (also preserves int inputs).
        return valin
    return valin % 360.0
def remove_numbers(words):
    """Remove all purely-numeric tokens from a word list.

    Unlike words, numbers without any context are not expected to provide
    explanatory value for topic classification, so they are dropped.

    Parameters
    ----------
    words : list of strings
        Original word-token list.

    Returns
    -------
    list of strings
        All tokens from ``words`` that are not made up solely of digits.
    """
    return [token for token in words if not token.isdigit()]
def strictly_increasing(L):
    """Return True iff each element of *L* is strictly smaller than the next.

    Adapted from the accepted answer to:
    https://stackoverflow.com/questions/4983258/python-how-to-check-list-monotonicity
    """
    for prev, curr in zip(L, L[1:]):
        if prev >= curr:
            return False
    return True
def stats_tree(keys):
    """Given a bunch of keys, make a tree such that similarly prefixed stats
    are at the same level underneath each other.

    Keys use '.' as the level separator; e.g. ["a.b", "a.c", "d"] becomes
    {"a": ["b", "c"], "d": []}.

    Fixed for Python 3: ``dict.keys()``/``dict.values()`` return views, not
    lists, so the old ``tree.values()[0]`` raised TypeError and the returned
    key collections were not subscriptable lists.
    """
    keys = list(keys)

    def reduce_tree(tree):
        keys = list(sorted(tree.keys()))
        # Childless, dot-free entries collapse to empty lists.
        for k, v in tree.items():
            if '.' not in k and not v:
                tree[k] = []
        # A single childless entry reduces to its key list.
        if len(tree) == 1 and not next(iter(tree.values())):
            return list(tree.keys())
        # If every dotted key has a unique prefix there is nothing to group.
        prefixes = [k.split('.', 1)[0] for k in tree.keys() if '.' in k]
        if len(prefixes) == len(set(prefixes)):
            return list(tree.keys())
        # Group dotted keys under their first path component.
        for key in keys:
            if '.' not in key:
                continue
            p, k = key.split('.', 1)
            tree.setdefault(p, {})
            if isinstance(tree[p], dict):
                tree[p][k] = {}
            elif not tree[p]:
                tree[p] = {k: {}}
            del tree[key]
        # Recurse into the newly grouped sub-trees.
        for k, v in tree.items():
            if v and isinstance(v, dict):
                tree[k] = reduce_tree(v)
        return tree

    tree = dict([(k, {}) for k in keys])
    reduce_tree(tree)
    return tree
def nraphson(f, df, x0, tol=1e-3, maxit=20000):
    """
    Newton-Raphson equation solving.

    Compute an equation root given the function and its derivative:
        Xn = Xn-1 - f(Xn-1) / f'(Xn-1)

    :param f: Function f(x) handler
    :param df: Derivative f'(x) handler
    :param x0: Initial guess
    :param tol: Relative tolerance on successive iterates
    :param maxit: Max number of iterations
    :return: tuple (x, it, error) — last iterate, iterations used,
        and the final error estimate
    """
    x = x0  # was 0: a zero-iteration call should return the guess, not 0
    x_ = x0
    it = 0
    error = 0
    while it < maxit:
        it += 1
        x = x_ - f(x_) / df(x_)
        # Relative error; fall back to absolute error when the previous
        # iterate is zero (the old code divided by zero there).
        denom = abs(x_)
        error = abs(x - x_) / denom if denom != 0 else abs(x - x_)
        if error < tol:
            break
        x_ = x
    return x, it, error
def get_starttag_string(name, attrs, save_attrs=True):
    """Build the string representation of an HTML/XML start tag.

    Args:
        name: Name of the tag.
        attrs: Attributes of the tag -> ((attr1, value), ...).
        save_attrs: When True, the attributes are included in the output.

    Returns:
        str: e.g. '<a href="x">' (or '<a>' when save_attrs is False).
    """
    parts = [name]
    if save_attrs:
        for attr, value in attrs:
            parts.append('{0}="{1}"'.format(attr, value))
    return '<{}>'.format(' '.join(parts))
def wraplines(s, wrap=60):
    """Insert newlines so no line exceeds ``wrap`` characters.

    Args:
        s: Input string.
        wrap: Maximum number of characters per line.

    Returns:
        str: The input split into newline-separated chunks of at most
        ``wrap`` characters each.
    """
    chunks = (s[start:start + wrap] for start in range(0, len(s), wrap))
    return '\n'.join(chunks)
def do_digest_auth(id):
    """
    Perform Digest authentication by looking up the requested user name
    in the built-in user DB.

    Returns the stored credential for a known user, or None for an
    unknown one (dict.get already covers both cases).
    """
    id_list = {"admin": "admin", "testUser01": "testUser01"}
    return id_list.get(id)
def _json_date(date=None):
"""Given a db datetime, return a steemd/json-friendly version."""
if not date:
return '1969-12-31T23:59:59'
return 'T'.join(str(date).split(' ')) |
def _filter_overlap(index):
    """Filter indexes in list if they overlap.

    Merges runs of overlapping (start, end) tuples into single tuples;
    presumably the input is ordered by start position — TODO confirm
    against callers.
    """
    if len(index) < 2:
        return index
    index_f = []
    i = 0
    j = i + 1
    while j < len(index):
        # Span i overlaps span j: fold j into i and keep extending.
        if index[i][1] > index[j][0]:
            # NOTE(review): this merge only uses index[i][0] and
            # index[j][1], ignoring index[i][1] and index[j][0]; for
            # sorted intervals one would expect
            # (index[i][0], max(index[i][1], index[j][1])) —
            # confirm this asymmetry is intentional.
            index[i] = (min(index[i][0], index[j][1]),
                        max(index[i][0], index[j][1]))
            j += 1
        else:
            # No overlap: commit span i and continue from span j.
            index_f += [index[i]]
            i = j
            j += 1
    # The last examined span is always emitted.
    index_f += [index[i]]
    return index_f[:i + 1]
def plato_to_gravity(degrees_plato: float) -> float:
    """
    Convert degrees Plato to specific gravity via 259 / (259 - °P).
    """
    denominator = 259.0 - degrees_plato
    return 259.0 / denominator
def _get_ref(val: str, start: int) -> tuple:
"""Get the next reference starting from `start` position"""
idx = start
length = len(val)
in_ref = False
ref_name = ''
while idx < length:
ch = val[idx]
if in_ref:
# found end of ref, yeild
if ch == '}':
in_ref = False
if ref_name:
return ref_name, idx + 1
# Ref start inside ref, drop previous and use this one
elif idx != length and ch == '$' and val[idx + 1] == '{':
print(f'Unterminated placeholder starting at pos {start} in "{val}"')
idx += 1
ref_name = ''
# nothing special, just add char
else:
ref_name += ch
idx += 1
# not at the end, and found ref start
elif idx != length - 1 and ch == '$' and val[idx + 1] == '{':
in_ref = True
idx += 2
ref_name = ''
# nothing special, just skip
else:
idx += 1
return None, -1 |
def find_min_op(a: int, b: int, n: int):
    """Count update steps until ``b`` exceeds ``n``.

    Each step replaces (a, b) with (max(a, b), a + b); the number of
    steps performed while b <= n is returned.
    """
    steps = 0
    while b <= n:
        total = a + b
        a = max(a, b)
        b = total
        steps += 1
    return steps
def apply_filters(genes, records, db_info, filters):
    """Apply predefined and additional filters.

    Keeps only records whose quality filter is "PASS" and that satisfy
    every user-supplied filter; genes matching a kept record are marked
    with status "HC LoF found".

    Args:
        genes: list of Gene objects
        records: list of records as Variant objects
        db_info: database configuration as Config object
        filters: list of filters as Filter objects

    Returns:
        list: the records that passed all filters.
    """
    qual_filter = "PASS"
    passed = []
    try:
        for record in records:
            # Quality filter: skip anything not marked PASS.
            if not (record.filter == qual_filter):
                continue
            # Every additional user filter must accept the record.
            if not all(flt.apply(record) for flt in filters):
                continue
            passed.append(record)
            # Flag each gene this record belongs to.
            for gene in genes:
                if gene.name == record.gene_name:
                    gene.set_status("HC LoF found")
    except Exception as error:
        print(error)
        raise
    return passed
def get_station_type(station_name: str) -> str:
    """
    Get the station type, one of 'intl', 'core' or 'remote'
    Args:
        station_name: Station name, e.g. "DE603LBA" or just "DE603"
    Returns:
        str: station type, one of 'intl', 'core' or 'remote'
    Example:
        >>> get_station_type("DE603")
        'intl'
    """
    first = station_name[0]
    if first == "C":
        return "core"
    if first == "R" or station_name.startswith("PL611"):
        return "remote"
    return "intl"
def cap(value: float, minimum: float, maximum: float) -> float:
    """Clamp ``value`` into [minimum, maximum].

    Arguments:
        value {float} -- The value being capped.
        minimum {float} -- Smallest value.
        maximum {float} -- Largest value.

    Returns:
        float -- The capped value, or the original value if within range.
        The maximum bound is checked first, matching the original branch
        order for degenerate (inverted) bounds.
    """
    if value > maximum:
        return maximum
    return minimum if value < minimum else value
def make_token_dict(sentences, pad_token=None, extra_tokens=None):
    """Extract tokens from tokenized *sentences*, remove all duplicates, sort
    the resulting set and map all tokens onto their indices.

    :param sentences: tokenized sentences.
    :type sentences: list([list([str])])
    :param pad_token: add a token for padding.
    :type pad_token: str
    :param extra_tokens: add any tokens for other purposes.
    :type extra_tokens: list([str])
    :return: the dict created and the index of the padding token (it's
        always the last index, if pad_token is not None); the indices of
        the extra tokens.
    :rtype: tuple(dict({char: int}), int, list([int])|None)
    """
    vocabulary = sorted({token for sentence in sentences for token in sentence})
    t2idx = {token: pos for pos, token in enumerate(vocabulary)}

    def add_token(token):
        # Falsy tokens (None, '') are not registered.
        if not token:
            return None
        assert token not in t2idx, \
            "ERROR: token '{}' is already in the dict".format(token)
        idx = len(t2idx)
        t2idx[token] = idx
        return idx

    extra_idxs = (
        [add_token(t) for t in extra_tokens] if extra_tokens is not None else None
    )
    pad_idx = add_token(pad_token)
    return t2idx, pad_idx, extra_idxs
def get_repo_content_path(data: dict):
    """
    Return all content path
    Flattens a nested mapping of {directory: [entries]} into "dir/file"
    path strings; each entry is either a path string or a nested dict of
    the same shape.
    :param data:
    :return: list of all paths:
    :rtype: list:
    """
    paths = []
    for key, files in data.items():
        for file in files:
            if isinstance(file, dict):
                # Recurse into the sub-dict, prefixing each returned
                # path with the sub-dict key.
                # NOTE(review): the recursion passes the whole `file`
                # dict while looping over its keys — if an entry dict
                # ever has more than one key this yields a cross-product
                # of prefixes and paths. Presumably entries are
                # single-key dicts; confirm against callers.
                for k, v in file.items():
                    s = get_repo_content_path(file)
                    for item in s:
                        path = k + '/' + item
                        paths.append(path)
            else:
                # Plain string entry: already a complete path segment.
                path = file
                paths.append(path)
    return paths
def init_method_normalizer(name: str):
    """Normalize an initialization-method name: lowercase it, strip
    underscores, and drop any 'nodeembeddinginitializer' substring."""
    normalized = name.lower()
    normalized = normalized.replace('_', '')
    return normalized.replace('nodeembeddinginitializer', '')
def big_t_abt(s: int) -> int:
    """T_ABT from KPT21.

    Returns 1 for s == 1, s/2 for any other positive power of two, and 0
    otherwise (including s == 0).
    """
    if s == 1:
        return 1
    if s != 0 and (s & (s - 1)) == 0:
        # s is a power of two. Use floor division instead of int(s / 2):
        # float division raises OverflowError for very large ints
        # (e.g. s = 2**2000).
        return s // 2
    return 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.