content
stringlengths 42
6.51k
|
|---|
def get_theme_labels(themes: list):
    """Return a comma-separated string of the 'label' values of the given themes."""
    return ','.join(theme['label'] for theme in themes)
|
def find(data, value_path):
    """
    Follow a dot-separated key path into a deeply nested mapping.

    Returns None as soon as any intermediate value is missing.
    """
    current = data
    for part in value_path.split('.'):
        if current is None:
            return None
        current = current.get(part)
    return current
|
def make_key(dir, cmdline):
    """Join directory and command line into one composite key string."""
    return "|".join((dir, cmdline))
|
def build_token_dict(vocab):
    """Build a bi-directional token<->index mapping (indices start at 1).

    Sorting the vocabulary guarantees a deterministic numbering every time.
    """
    token_to_idx = {}
    idx_to_token = {}
    for idx, token in enumerate(sorted(vocab), start=1):
        token_to_idx[token] = idx
        idx_to_token[idx] = token
    return token_to_idx, idx_to_token
|
def backup_yaml_parse(yamlin):
    """Very simple fallback YAML reader, kept for absolute compatibility.

    Splits ``key: value`` lines into a flat dict. All YAML structure
    (nesting, lists, quoting) is discarded, so be careful!

    Args:
        yamlin: a bytes-like object, or a sequence whose first element is one.
    Returns:
        dict mapping left-stripped keys to left-stripped values; lines
        without a ':' are skipped, and only the first value segment of a
        multi-colon line is kept (matching the historical behaviour).
    """
    head_dict = {}
    try:
        decoded = yamlin.decode()
    except AttributeError:
        # yamlin may be a one-element sequence wrapping the bytes payload
        decoded = yamlin[0].decode()
    for line in decoded.split('\n'):
        parts = line.split(':')
        if len(parts) >= 2:
            head_dict[parts[0].lstrip()] = parts[1].lstrip()
    return head_dict
|
def determine_alt_weather_safety(weather_data, weather_source_config):
    """
    Parse an alt weather source reading dictionary and determine if weather conditions should be
    considered safe.
    Args:
        weather_data (dict): Latest reading; must contain every key listed in the config's
            "bool_flags" plus a "threshold_parameters" sub-dict.
        weather_source_config (dict): Config dictionary for alt weather source, defined in
            huntsman.yaml.
    Returns:
        dict: the same ``weather_data`` dict, mutated in place with a boolean "safe" entry added.
    """
    # if any of the specified bool flags are true, then weather is not safe
    bool_flags_check = not any([weather_data[k] for k in weather_source_config["bool_flags"]])
    # check parameters that should be above a certain threshold (ie temp degrees above dewpoint)
    gt_thresholds = weather_source_config['thresholds']['greater_than']
    # check parameters that should be below a certain threshold (ie humidity, wind speed etc)
    lt_thresholds = weather_source_config['thresholds']['less_than']
    threshold_data = weather_data['threshold_parameters']
    # comparisons are strict: a reading exactly at a threshold counts as unsafe
    gt_thresholds_check = all([threshold_data[k] > v for k, v in gt_thresholds.items()])
    lt_thresholds_check = all([threshold_data[k] < v for k, v in lt_thresholds.items()])
    # store the safety decision in the dictionary and return it
    weather_data['safe'] = all([bool_flags_check, gt_thresholds_check, lt_thresholds_check])
    return weather_data
|
def find_min_dist(p1x, p1y, p2x, p2y):
    """Return the minimum pairwise Euclidean distance between two point tracks.

    Only the first 8 time frames of each track are compared (original design).
    Returns the sentinel 9e4 when the tracks are empty / no pair is closer.

    The previous version computed each distance twice (once for the
    comparison and once for the assignment); it is now computed once.
    """
    min_d = 9e4
    p1x, p1y = p1x[:8], p1y[:8]
    p2x, p2y = p2x[:8], p2y[:8]
    for i in range(len(p1x)):
        for j in range(len(p1x)):
            d = ((p2x[i] - p1x[j]) ** 2 + (p2y[i] - p1y[j]) ** 2) ** 0.5
            if d < min_d:
                min_d = d
    return min_d
|
def _parse_ctab_bond_block(contents):
"""
"""
bond_block = []
for row in contents:
bond_block.append(row.rstrip('\n'))
return bond_block
|
def strip_ext(filename: str) -> str:
    """Remove the file extension, e.g. 'a.c' -> 'a'.

    A name without a dot is returned unchanged; previously the last
    character was silently dropped because rfind() returned -1.
    """
    dot = filename.rfind('.')
    return filename if dot == -1 else filename[:dot]
|
def is_section(line: str, pattern: str) -> bool:
    """Return True when `pattern` occurs anywhere inside `line`."""
    return pattern in line
|
def auco(oracle_rmses, measure_rmses, normalize=False):
    """Computes the Area-Under-the-Confidence-Oracle error, see
    https://doi.org/10.1021/acs.jcim.9b00975 for more details.
    Parameters
    ----------
    oracle_rmses : list
        RMSEs in the ideal case
    measure_rmses : list
        RMSEs when using the uncertainty measure to evaluate
    normalize : bool
        Whether the 100% RMSE (including all predictions) should
        be set to 1.0, default: False
    Returns
    -------
    float
        Sum of all differences between oracle_rmses and measure_rmses
    """
    if normalize:
        # scale both curves so the 100% RMSE (first entry) becomes 1.0
        # (requires array-like inputs supporting elementwise division)
        oracle_rmses = oracle_rmses / oracle_rmses[0]
        measure_rmses = measure_rmses / measure_rmses[0]
    return sum(m - o for o, m in zip(oracle_rmses, measure_rmses))
|
def attach_domain(corpus, domt):
    """Tag every token of a corpus with its domain for transfer learning.

    Produces a list of lists of ((w, t, d), iob) tuples, where d is the
    domain ('src' for source or 'tgt' for target) given by domt.
    Parameters
    ----------
    corpus : list
        List of lists containing tuples of form ((w,t), iob)
    domt : str
        Either 'src' or 'tgt'.
    """
    if domt not in {'src', 'tgt'}:  # Domain type - source or target
        raise ValueError("domt must be 'src' or 'tgt'.")
    tagged = []
    for sentence in corpus:
        tagged.append([((w, t, domt), iob) for ((w, t), iob) in sentence])
    return tagged
|
def common_filenames(file_list1, file_list2):
    """
    Find elements common to both lists.
    :param file_list1: a list to compare
    :param file_list2: a list to compare
    :return: set of common items (note: a set is returned, not a list)
    """
    return set(file_list1).intersection(file_list2)
|
def format_url(host, resource):
    """
    Returns a usable URL out of a host and a resource to fetch.
    """
    return "{}/{}".format(host.rstrip("/"), resource.strip("/"))
|
def list_flatten(nested_list):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sublist in nested_list:
        flat.extend(sublist)
    return flat
|
def character_test(text, allowed):
    """Return True iff every character of `text` occurs in `allowed`.

    Set containment replaces the original strip() trick: it is a single
    O(len(text)) pass and reads as what it means. An empty `text`
    returns True, as before.
    """
    return set(text).issubset(allowed)
|
def _dump_filename(config):
"""Give the name of the file where the results will be dumped"""
return (config['out'] + '/' + config['target']['name'] + '_' +
str(config['target']['spectrum']) + '_' +
str(int(config['time']['tmin'])) + '_' +
str(int(config['time']['tmax'])) + '_' +
str(int(config['energy']['emin'])) + '_' +
str(int(config['energy']['emax'])) + "_"+
config['file']['tag'] + ".results")
|
def compute_score(tag_seq, input_length, score):
    """
    Computes the total score of a tag sequence
    :param tag_seq: Array of String of length input_length. The tag sequence including <START> and <STOP>
    :param input_length: Int. input length including the padding <START> and <STOP>
    :param score: function from current_tag (string), previous_tag (string), i (int) to the score. i=0 points to
        <START> and i=1 points to the first token. i=input_length-1 points to <STOP>
    :return: the sum of transition scores over positions 1..input_length-1
    """
    return sum(score(tag_seq[i], tag_seq[i - 1], i) for i in range(1, input_length))
|
def complement(intervals, first=None, last=None):
    """Complement a list of (from, to) intervals: the gaps not covered.

    Args:
        intervals: list of (from, to) tuples.
        first: optional lower bound; a leading gap (first, min_from) is
            emitted when first < the smallest interval start.
        last: optional upper bound; a trailing gap (max_to, last) is
            emitted when last > the largest interval end.
    Returns:
        list of (from, to) gap tuples.

    Fixes over the previous version: the caller's list is no longer
    mutated (sorted() instead of .sort()), and `first`/`last` equal to 0
    are honoured (`is not None` instead of truthiness/`!= None`).
    """
    if len(intervals) == 0:
        if first is not None and last is not None:
            return [(first, last)]
        return []
    ordered = sorted(intervals)
    new_intervals = []
    last_to = ordered[0][1]
    if first is not None and first < ordered[0][0]:
        new_intervals.append((first, ordered[0][0]))
    for this_from, this_to in ordered:
        if this_from > last_to:
            new_intervals.append((last_to, this_from))
        last_to = max(last_to, this_to)
    if last is not None and last > last_to:
        new_intervals.append((last_to, last))
    return new_intervals
|
def remove_url_trailing_slash(url):
    """
    Returns the input url without any trailing /. This is useful for repository urls
    where https://github.com/lwhjon/repo-labels-cli/ and https://github.com/lwhjon/repo-labels-cli
    are equivalent, hence for consistency we remove the trailing /.

    Unlike the previous `url[-1]` check, the empty string is handled
    safely instead of raising IndexError.
    :param url: The url to be formatted
    :return: Returns the url without any trailing /
    """
    return url[:-1] if url.endswith('/') else url
|
def retr_smaxkepl(peri, masstotl):
    """
    Get the semi-major axis of a Keplerian orbit (in AU) from the orbital period (in days) and total mass (in Solar masses).
    Arguments
        peri: orbital period [days]
        masstotl: total mass of the system [Solar Masses]
    Returns
        smax: the semi-major axis of a Keplerian orbit [AU]
    """
    # Kepler's third law with the constant folded for [days, Msun] -> [AU]
    return (7.496e-6 * masstotl * peri**2) ** (1. / 3.)
|
def suppress_none(value):
    """Return '' for None so it is never rendered as the string 'None'."""
    if value is None:
        return ""
    return value
|
def ns_faint(item_name):
    """Prepends the faint xml-namespace to the item name."""
    namespace = '{http://www.code.google.com/p/faint-graphics-editor}'
    return namespace + item_name
|
def htime(s,
show_seconds=True,
min_digits = 2,
max_digits = 2,
):
""" """
s = int(s)
if s < 0:
s = 0
#d, s = divmod(s, 86400)
#h, s = divmod(s, 3600)
#m, s = divmod(s, 60)
y = s // 31536000 #365 days
mm = s // 2592000 #30 days
d = s // 86400
h = s // 3600
m = s // 60
#(d, h, m, s) = timetuple(s)
x = []
if y and ((len(str(y)) >= min_digits) or (len(str(mm)) > max_digits)):
if y == 1:
x.append('%s year' % y)
else:
x.append('%s years' % y)
elif mm and ((len(str(mm)) >= min_digits) or (len(str(d)) > max_digits)):
if mm == 1:
x.append('%s months' % mm)
else:
x.append('%s months' % mm)
elif d and ((len(str(d)) >= min_digits) or (len(str(h)) > max_digits)):
if d == 1:
x.append('%s day' % d)
else:
x.append('%s days' % d)
elif h and ((len(str(h)) >= min_digits) or (len(str(m)) > max_digits)):
if h == 1:
x.append('%s hour' % h)
else:
x.append('%s hours' % h)
elif m and ((len(str(m)) >= min_digits) or (len(str(s)) > max_digits)):
if m == 1:
x.append('%s minute' % m)
else:
x.append('%s minutes' % m)
elif show_seconds:
if s == 1:
x.append('%s second' % s)
else:
x.append('%s seconds' % s)
if not x:
if show_seconds:
x = ['%s seconds' % s]
else:
x = ['0 minutes']
x.append(' ago')
return ''.join(x)
|
def build_base_url(host, port, protocol):
    """
    Build the base URL for the given parameters and do not explicitly put the
    standard HTTP(s) ports into the URL.
    """
    port = int(port)
    scheme = protocol.lower()
    # the port is shown only for http/https on a non-default port
    default_port = {"http": 80, "https": 443}.get(scheme)
    base_url = "%s://%s" % (protocol, host)
    if default_port is not None and port != default_port:
        base_url += ":%d" % port
    return base_url + "/"
|
def safe_repr(obj):
    """
    Like repr(obj) but falls back to a simpler "<type-name>" string when repr() itself raises.
    """
    try:
        return repr(obj)
    except Exception:
        return '<%s>' % type(obj).__name__
|
def clean_path(path, isShort):
    """Normalize a request path: strip surrounding slashes and drop any
    query string ('?...') or fragment ('#...').

    :param path: raw path string
    :param isShort: when truthy, also remove the 'redfish/v1' prefix segment
    """
    path = path.strip('/')
    for separator in ('?', '#'):
        path = path.split(separator, 1)[0]
    if isShort:
        path = path.replace('redfish/v1', '').strip('/')
    return path
|
def PrettifyCompactedTimestamp(x):
    """
    Formats a compacted timestamp string (YYYYMMDDhhmmss...) into a human readable time representation
    :type x: ``str``
    :param x: The timestamp to be formatted (required)
    :return: A string representing the time
    :rtype: ``str``
    """
    fields = (x[:4], x[4:6], x[6:8], x[8:10], x[10:12], x[12:])
    return '{}-{}-{}T{}:{}:{}'.format(*fields)
|
def _update_gn_executable_output_directory(commands):
"""Update the output path of executables and response files.
The GN and ZN builds place their executables in different locations
so adjust then GN ones to match the ZN ones.
Args:
commands: list of command strings from the GN build.
Returns:
A new list of command strings.
"""
replacements = {
'TOOLCHAIN/main_with_static':
'TOOLCHAIN/obj/public/canaries/main_with_static',
'TOOLCHAIN/main_with_shared':
'TOOLCHAIN/obj/public/canaries/main_with_shared',
'TOOLCHAIN_SHARED/libfoo_shared':
'TOOLCHAIN_SHARED/obj/public/canaries/libfoo_shared',
}
result = []
for cmd in commands:
for key, val in replacements.items():
cmd = cmd.replace(key, val)
result.append(cmd)
return result
|
def make_modifier_plane(plane):
    """Make a string designating a plane orthogonal transformation.
    Args:
        plane: Plane to which the image was transposed.
    Returns:
        String designating the orthogonal plane transformation.
    """
    return "plane" + plane.upper()
|
def _get_ixp_param(request_params):
"""Get the "ixp" request parameter and parse it to an integer.
:returns: None, if no "ixp" is given, or an integer.
:raises ValueError: The "ixp" parameter is not a valid integer.
"""
raw = request_params.get('ixp')
if raw is not None:
return int(raw)
else:
return raw
|
def tuple_transpose(xs):
    """Permutes environment and agent dimension.

    VecMultiEnv is agent-major: a num_agents-length tuple whose i'th
    element is a num_envs-length tuple. This converts to (or from) the
    environment-major layout — a num_envs-length tuple of
    num_agents-length tuples — which is the natural internal
    representation for VecEnv and convenient when sampling from an
    environment's action/observation space.
    """
    expected = len(xs[0])
    for row in xs:
        assert len(row) == expected
    return tuple(zip(*xs))
|
def _convert_repoed_service_to_sorted_perms_and_services(repoed_services):
"""
Repokid stores a field RepoableServices that historically only stored services (when Access Advisor was only data).
Now this field is repurposed to store both services and permissions. We can tell the difference because permissions
always have the form <service>:<permission>. This function splits the contents of the field to sorted sets of
repoable services and permissions.
Args:
repoed_services (list): List from Dynamo of repoable services and permissions
Returns:
list: Sorted list of repoable permissions (where there are other permissions that aren't repoed)
list: Sorted list of repoable services (where the entire service is removed)
"""
repoable_permissions = set()
repoable_services = set()
for entry in repoed_services:
if len(entry.split(':')) == 2:
repoable_permissions.add(entry)
else:
repoable_services.add(entry)
return (sorted(repoable_permissions), sorted(repoable_services))
|
def denormalize_bbox(bbox, rows, cols):
    """Denormalize coordinates of a bounding box: multiply x-coordinates by
    image width and y-coordinates by image height. Inverse of
    :func:`~albumentations.augmentations.bbox.normalize_bbox`.
    Args:
        bbox (tuple): Normalized bounding box `(x_min, y_min, x_max, y_max)`,
            optionally followed by extra fields which pass through untouched.
        rows (int): Image height.
        cols (int): Image width.
    Returns:
        tuple: Denormalized bounding box `(x_min, y_min, x_max, y_max)` plus any extra fields.
    Raises:
        ValueError: If rows or cols is less or equal zero
    """
    x_min, y_min, x_max, y_max = bbox[:4]
    tail = tuple(bbox[4:])
    if rows <= 0:
        raise ValueError("Argument rows must be positive integer")
    if cols <= 0:
        raise ValueError("Argument cols must be positive integer")
    return (x_min * cols, y_min * rows, x_max * cols, y_max * rows) + tail
|
def get_slices(data, slice_size):
    """Slices up and returns the data in slices of slice_size.

    :param data: list to divide into consecutive slices of size slice_size
    :param slice_size: positive integer size of each slice
    :return: list of slices; the last slice holds the remainder and may be
        shorter than slice_size. An empty input now yields [] (the previous
        version raised IndexError on `indexes[-1]`).
    """
    return [data[i:i + slice_size] for i in range(0, len(data), slice_size)]
|
def _sort_circle(the_dict):
"""
Each item in the dictionary has a list. Return a new dict with each of
those lists sorted by key.
"""
new_dict = {}
for k, v in the_dict.items():
new_dict[k] = sorted(v)
return new_dict
|
def option(name, value = None):
    """Compose a command line option"""
    if value:
        return ' --%s=%s' % (name, value)
    return ' --' + name
|
def addBinary(a, b):
    """
    Add two binary strings and return their sum as a binary string
    (no '0b' prefix).
    :type a: str
    :type b: str
    :rtype: str
    """
    total = int(a, 2) + int(b, 2)
    return bin(total)[2:]
|
def isprime(n):
    """Return True if n is prime.

    Uses the 6k±1 wheel: every prime except 2 and 3 has the form 6k-1 or
    6k+1, so after checking 2 and 3 only divisors of that form are tried.

    Fixed: values below 2 (0, 1 and negatives) now return False;
    previously isprime(1) incorrectly returned True.
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    step = 2  # alternates 2, 4 to hit 6k-1 and 6k+1 candidates
    while i * i <= n:
        if n % i == 0:
            return False
        i += step
        step = 6 - step
    return True
|
def format_type(*types):
    """ Format a string from a list of a pokemon's types. """
    parts = [t.capitalize() for t in types if t is not None]
    return " | ".join(parts)
|
def fix_subnets(data1):
    """
    Render subnet data as a plain string with quotes and brackets removed.
    :param data1: any value, typically a list of subnet strings
    :return: str
    """
    text = str(data1)
    for junk in ("'", "[", "]"):
        text = text.replace(junk, "")
    return text
|
def combination(n: int, r: int) -> int:
    """
    Returns the combination i.e nCr of given n and r.

    Computed iteratively as prod_{i=1..r} (n - r + i) / i; every
    intermediate value is integral, so no floating point is involved.
    Out-of-range r (r < 0 or r > n) yields 0.
    (The previous body was a stub that always returned 10.)

    >>> combination(5,3)
    10
    """
    if r < 0 or r > n:
        return 0
    r = min(r, n - r)  # nCr == nC(n-r): iterate the shorter product
    result = 1
    for i in range(1, r + 1):
        result = result * (n - r + i) // i
    return result
|
def complement_nt_letter(letter):
    """Get the complement of the input nucleotide letter (A<->T, C<->G)."""
    return {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}[letter]
|
def size_filter(clusters, cluster_size):
    """
    Filters out clusters which are smaller than cluster_size.
    Parameters
    ----------
    clusters : list of set of tuple of int
        The clusters to filter.
    cluster_size : int
        The minimum size of a cluster needed to pass this filter.
    Returns
    -------
    list of set of tuple of int
        The filtered clusters.
    """
    kept = []
    for cluster in clusters:
        if len(cluster) >= cluster_size:
            kept.append(cluster)
    return kept
|
def is_iterable(iterable):
    """Return True when `iterable` supports iteration (iter() succeeds).
    :param iterable: any object
    :return: bool
    """
    try:
        iter(iterable)
    except TypeError:
        return False
    return True
|
def analyze_friends (names,phones,all_areacodes,all_places):
    """
    Print the number of friends and the states associated with area codes.

    names: tuple of names
    phones: tuple of phone numbers (cleaned)
    all_areacodes: tuple of area codes (3char ints)
    all_places: tuple of places
    Goal: Print out how many friends you have and every unique state
    """
    # For TESTING MAKE THE PHONE NUMBER FIRST 3 DIGITS THE SAME AS THE AREA CODE
    # def get_unique_area_codes():
    #     """
    #     Returns a tuple of all unique area codes
    #     """
    #     area_codes = ()
    #     for ph in phones:
    #         if ph[0:3] not in area_codes:
    #             area_codes += (ph[0:3],)
    #     return area_codes
    def get_States(some_areacodes):
        """
        some_areacodes: tuple of area codes
        Return a tuple of states ASSOCIATED with area codes
        """
        states = ()
        for ac in some_areacodes:
            if ac not in all_areacodes:
                states += ("BAD AREA CODE",)
            else:
                # first matching index wins; duplicates map to the same place
                index = all_areacodes.index(ac)
                states += (all_places[index],)
        return states
    num_friends = len(names) # Gets number of friends
    # unique_areacodes = get_unique_area_codes()
    # NOTE(review): get_States is called with the FULL area-code table, so this
    # prints a state per table entry rather than per friend — confirm intent
    # (the commented-out get_unique_area_codes() suggests a per-friend design).
    unique_states = get_States(all_areacodes)
    print("You have", num_friends, "friends!")
    print("They live in", unique_states)
    # Function ends with the print, no returns
|
def import_class(module_name, class_name):
    """
    Import a class given the specified module name and (possibly dotted)
    class name.
    """
    obj = __import__(module_name)
    for attribute in class_name.split("."):
        obj = getattr(obj, attribute)
    return obj
|
def get_intersection_range(a0, a1, b0, b1):
    """Gets the intersection between [a0, a1] and [b0, b1].

    Returns (start, end) of the overlapping segment. When the intervals do
    not overlap, (0, 0) is returned — note callers cannot distinguish this
    sentinel from a genuine intersection at the origin.
    """
    assert a0 <= a1
    assert b0 <= b1
    start_x = 0
    end_x = 0
    # [b0, b1] fully contains [a0, a1]
    if a0 >= b0 and a1 <= b1:
        start_x = a0
        end_x = a1
    # [a0, a1] fully contains [b0, b1]
    elif a0 < b0 and b1 < a1:
        start_x = b0
        end_x = b1
    # partial overlap: a extends left of b
    elif a0 < b0 and a1 > b0:
        start_x = b0
        end_x = a1
    # partial overlap: a extends right of b
    elif a1 > b1 and a0 < b1:
        start_x = a0
        end_x = b1
    else:
        # disjoint (merely touching endpoints fall here): keep (0, 0)
        pass
    return start_x, end_x
|
def check_for_meta_recipes(name, jdict):
    """
    check_for_meta_recipes
    ======================
    Check whether or not a recipe is a meta-recipe.
    Parameters:
    -----------
    1) name: (str) The name of a package
    2) jdict: (dict) A dictionary of packages to check
    Returns:
    ++++++++
    (bool) True if meta-recipe (both "genome-build" and "species"
    identifiers equal "meta-recipe"), False otherwise or when any key
    is missing.
    """
    try:
        identifiers = jdict["packages"][name]["identifiers"]
        return (identifiers["genome-build"] == "meta-recipe"
                and identifiers["species"] == "meta-recipe")
    except KeyError:
        return False
|
def normalize_known(x, means, widths, index=None):
    """
    Obsolete. Use sklearn.preprocessing

    Center x by `means` and scale by `widths`; when `index` is given the
    per-index mean/width entries are used instead.
    """
    if index is not None:
        return (x - means[index]) / widths[index]
    return (x - means) / widths
|
def anonymize_ip(real_ip: str) -> str:
    """
    Zero the last byte of the IPv4 address `real_ip`.
    .. note::
        It is a good step but it may be possible to find out the physical
        address with some efforts.
    Example:
        "595.42.122.983" becomes "595.42.122.0".
    Raises:
        ValueError: ``real_ip`` must have 4 blocks separated with a dot.
    Args:
        real_ip (str): Full IPv4 address.
    Returns:
        str: ``real_ip`` with the last `byte` zeroed.
    """
    octets = real_ip.split(".")
    if len(octets) != 4:
        raise ValueError("Bad format of the IP address '" + real_ip + "'")
    return ".".join(octets[:3] + ["0"])
|
def get_name(properties, lang):
    """Return the Place name from the properties field of the elastic response.

    'name' here is the POI name in the language of the user request (the
    'name:{lang}' field). When lang is None or no 'name:{lang}' entry
    exists, the local 'name' value (language of the POI's country) is used.
    >>> get_name({}, 'fr') is None
    True
    >>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, None)
    'spontini'
    >>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'cz')
    'spontini'
    >>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'fr')
    'spontinifr'
    """
    localized = properties.get(f'name:{lang}')
    return properties.get('name') if localized is None else localized
|
def _mul_place(i, j, mul_matrix, max_j):
"""
find the vector in mul_matrix corresponding (i, j),
where 1 <= j <= max_j.
"""
return mul_matrix[(i - 1) * max_j + j]
|
def parse_effect_str(effect_str):
    """
    Parse an effect description string.
    Args:
        effect_str (str): e.g. "View-45;BT-t-number;BT-t-number"
    Returns:
        effect_info (dict): keys "BT" (list of (frame_id, duration) int
        pairs), "View" (list of floats) and "keep_length" (set False as
        soon as any BT duration is positive).
    """
    info = {
        "BT": [],
        "View": [],
        "keep_length": True
    }
    for chunk in effect_str.split(";"):
        fields = chunk.split("-")
        kind = fields[0]
        if kind == "BT":
            frame_id, duration = int(fields[1]), int(fields[2])
            info["BT"].append((frame_id, duration))
            if duration > 0:
                info["keep_length"] = False
        if kind == "View":
            info["View"].append(float(fields[1]))
    return info
|
def divides_fully_against_denominators(target, denominators):
    """Determine whether no number in `denominators` divides `target` evenly.

    Args:
        target (int): The number to test against the denominators.
        denominators (list of int): Candidate divisors.
    Returns:
        bool: False if at least one denominator divides target with a
        remainder of zero, True if none does.

    The previous version sorted `denominators` in place — mutating the
    caller's list for no benefit, since check order cannot change the
    result — so the sort was removed.
    """
    return not any(target % num == 0 for num in denominators)
|
def _compute_deviations(counts, middle_value):
"""
For each count, compute the deviation (distance) between it and some value.
:counts: The list of counts.
:middle_value: The value used to compute the deviations.
:return: A list of deviations.
"""
# For each count, compute the deviation (distance) between it and 'middle_value'.
deviations = [abs(count - middle_value) for count in counts]
return deviations
|
def dispatch_strategy(declaration):
    """How are we going to call the underlying implementation of a
    declaration? There are two strategies:
    - use_derived: call the implementation on a derived Type instance
      (e.g. CPUDoubleType). Derived instances deal in Tensors, not
      Variables (a completely different object that does not dispatch back
      to VariableType), so this path wraps/unwraps tensors. It is used for
      differentiable derived implementations and for non-differentiable
      functions that must still dispatch on the derived Type (e.g. size()).
    - use_type: call the implementation on Type, because it is implemented
      concretely and the functions it invokes get dispatched back to
      VariableType, which keeps them differentiable.
    """
    # Abstract functions must run on the derived type. Concrete functions
    # with an explicit derivative also prefer the derived path: it is more
    # performant (internal ATen calls skip history tracking) and keeps
    # factory results at _version 0.
    prefers_derived = (declaration['abstract']
                       or declaration['requires_tensor']
                       or declaration['derivative'] is not None)
    if prefers_derived:
        return 'use_derived'
    # Concrete and absent from derivatives.yaml: assume it is composed of
    # differentiable functions (if that assumption fails, gradcheck will).
    return 'use_type'
|
def obj_name(obj, oid):
    """Return name of folder/collection object based on id.
    Args: obj - dict
          oid - string
    The placeholder name 'No Folder' maps to the root path '/'.
    """
    name = obj[oid]['name']
    return '/' if name == 'No Folder' else name
|
def get_doc_by_input_hash(dataset, hash):
    """Return the doc in `dataset` whose "_input_hash" equals `hash`.
    Assumes there will only be one match!
    """
    matches = [doc for doc in dataset if doc["_input_hash"] == hash]
    return matches[0]
|
def initialize_cache(
        decoding_states,
        attention_keys=None,
        memory=None,
        memory_bias=None):
    """ Creates a cache dict for tf.while_loop.
    Args:
        decoding_states: A Tensor or a structure of Tensors for decoding while loop.
        attention_keys: A Tensor. The attention keys for encoder-decoder attention.
        memory: A Tensor. The attention values for encoder-decoder attention.
        memory_bias: A Tensor. The attention bias for encoder-decoder attention.
    Returns: A dict.
    """
    cache = {"decoding_states": decoding_states}
    # encoder-related entries (not influenced by beam search) are added
    # only when provided
    optional = (("attention_keys", attention_keys),
                ("memory", memory),
                ("memory_bias", memory_bias))
    cache.update({key: value for key, value in optional if value is not None})
    return cache
|
def rev_comp_motif( motif ):
    """
    Return the reverse complement of the input motif (IUPAC degenerate
    codes supported; unknown characters raise KeyError).
    """
    COMP = {"A": "T", "T": "A", "C": "G", "G": "C",
            "W": "S", "S": "W", "M": "K", "K": "M",
            "R": "Y", "Y": "R", "B": "V", "D": "H",
            "H": "D", "V": "B", "N": "N", "X": "X",
            "*": "*"}
    return "".join(COMP[base] for base in reversed(motif))
|
def get_mu(X, Y, Z):
    """ Find the mean molecular weight
    Positional Arguments:
        X -> Hydrogen Abundence
        Y -> Helium Abundence
        Z -> Metallicity
    Returns:
        The mean molecular weight
    """
    denominator = 2*X + (3/4)*Y + (1/2)*Z
    return 1/denominator
|
def _get_key(data):
"""
Retrieve the key for a particular image
@param data: Dictionary of information from the Alaska Satellite Facility
@return Dictionary key for data
"""
return data['track'], data['frameNumber']
|
def _change_recur(amount, coins, n):
"""Helper function for num_coin_changes_recur()."""
# Base cases.
if amount < 0:
return 0
if amount == 0:
return 1
# When number of coins is 0 but there is still amount remaining.
if n <= 0 and amount > 0:
return 0
# Sum num of ways with coin n included & excluded.
n_changes = (_change_recur(amount - coins[n - 1], coins, n)
+ _change_recur(amount, coins, n - 1))
return n_changes
|
def ring_to_vector(l):
    """
    Convert a list of ring sizes into a fixed-length count vector.
    For example, [3, 5, 5] (one 3-ring and two 5-rings) becomes
    [0, 0, 1, 0, 2, 0, 0, 0, 0].
    Args:
        l: (list of integer) ring_sizes attributes; may be None/empty.
    Returns:
        (list of integer) 9-element list whose (i-1)-th entry is the number
        of i-sized rings this atom is involved in.
    """
    counts = [0] * 9
    for size in (l or []):
        counts[size - 1] += 1
    return counts
|
def pretty_print_hex(a, l=16, indent=''):
    """
    Format a list/bytes/bytearray object into a formatted ascii hex string,
    `l` bytes per line, each line prefixed with `indent` and ending in '\\n'.
    """
    data = bytearray(a)
    lines = []
    for start in range(0, len(data), l):
        chunk = data[start:start + l]
        lines.append(indent + ''.join('%02X ' % b for b in chunk) + '\n')
    return ''.join(lines)
|
def byte_to_zwave_brightness(value):
    """Convert brightness in 0-255 scale to 0-99 scale.
    `value` -- (int) Brightness byte value from 0-255.
    Any non-positive input maps to 0; positive inputs map to at least 1.
    """
    if value <= 0:
        return 0
    return max(1, round((value / 255) * 99))
|
def camelcasify(token):
    """Convert capitalized underscore tokens to camel case"""
    parts = token.split('_')
    return ''.join(part.lower().capitalize() for part in parts)
|
def fullname(o):
    """Get fully qualified class name (builtins keep their bare name)."""
    module = o.__module__
    if module in (None, str.__module__):
        return o.__name__
    return "{}.{}".format(module, o.__name__)
|
def group_by_sum(count_data_list, group_key, count_key='agency_count'):
    """
    Aggregate `count_key` values grouped by `group_key`.
    Input rows look like
    { "agency__id": 1, "agency_count": 39, "approval_status": "APPROVED", "is_enabled": true}
    Rows missing `group_key` are skipped. A '_total_count' entry with the
    grand total is added before returning.
    Returns:
        dict
    """
    sums = {}
    for row in count_data_list:
        if group_key not in row:
            continue
        group_value = row[group_key]
        sums[group_value] = sums.get(group_value, 0) + row[count_key]
    sums['_total_count'] = sum(sums.values())
    return sums
|
def calc_alpha_init(alpha, decay):
    """
    Calculate the numerator such that at t=0, a/(decay+t)=alpha.
    A missing or non-positive decay leaves alpha unchanged.
    """
    if decay and decay > 0:
        return float(alpha * decay)
    return alpha
|
def updateESGUIDs(guids):
    """ Update the NULL valued ES guids """
    # guids are used as dictionary keys in some places, so every NULL must
    # become a distinct placeholder, e.g.
    # 'NULL,NULL,NULL,sasdasdasdasdd'
    # -> 'DUMMYGUID0,DUMMYGUID1,DUMMYGUID2,sasdasdasdasdd'
    null_count = guids.count('NULL')
    for i in range(null_count):
        guids = guids.replace('NULL', 'DUMMYGUID%d' % (i), 1)
    return guids
|
def fib(n):
    """
    Simple recursive Fibonacci.

    Fixed: the previous body computed ``n + fib(n-1)`` — the triangular
    numbers, not the Fibonacci sequence its docstring promised — and
    recursed without bound for n < 1.

    :param n: 1-based index into the sequence (fib(1) == fib(2) == 1)
    :return: the n-th Fibonacci number
    :raises ValueError: if n < 1
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    if n <= 2:
        return 1
    return fib(n - 1) + fib(n - 2)
|
def encrypt_letter(letter):
    """ (str) -> str
    Precondition: len(letter) == 1 and letter.isupper()
    Return letter encrypted by shifting 3 places to the right (a Caesar
    shift that wraps from 'Z' back to 'A').
    >>> encrypt_letter('V')
    'Y'
    """
    base = ord('A')
    # map to 0-25, shift with wraparound, map back to a letter
    return chr(base + (ord(letter) - base + 3) % 26)
|
def rle_to_string(runs):
    """Join run-length values into a space-separated string."""
    return " ".join(map(str, runs))
|
def get_config_value(config, section, option):
    """Read a value from the test config; None when section or option is missing."""
    try:
        section_map = config[section]
        return section_map[option]
    except KeyError:
        return None
|
def pf(x, A, B, C):
    """ Power function A * x**B + C, used for fitting the CCS vs. m/z data """
    return A * x ** B + C
|
def remove_lib_prefix(module):
    """Removes the lib prefix, as we are not using them in CMake."""
    return module[3:] if module.startswith("lib") else module
|
def bool_to_true_false(val):
    """Convert a truthy/falsy value to the string 'TRUE' or 'FALSE'."""
    if val:
        return 'TRUE'
    return 'FALSE'
|
def scrub(content):
    """Replaces triple back ticks with triple (modifier letter) grave accents."""
    backticks = "\N{GRAVE ACCENT}" * 3
    lookalikes = "\N{MODIFIER LETTER GRAVE ACCENT}" * 3
    return content.replace(backticks, lookalikes)
|
def taille(arbre):
    """Return the number of nodes in the binary tree `arbre` (0 for None)."""
    if arbre is None:
        return 0
    return 1 + taille(arbre.get_gauche()) + taille(arbre.get_droite())
|
def prime_divisors_sieve(lim):
    """Computes the list of prime divisors for values up to lim included."""
    # Pretty similar to a totient sieve: index i holds the set of primes
    # dividing i.
    divisors = [set() for _ in range(lim + 1)]
    for p in range(2, lim + 1):
        if divisors[p]:
            continue  # p already has a smaller prime factor, so it is composite
        for multiple in range(p, lim + 1, p):
            divisors[multiple].add(p)
    return divisors
|
def strip_bpe(text):
    """Undo subword-nmt BPE segmentation by removing the '@@ ' joints.

    See https://github.com/rsennrich/subword-nmt for the encoding side.
    """
    merged = text.replace("@@ ", "")
    return merged.strip()
|
def sort_by_index(names):
    """
    Order image file names by the numeric index embedded after "right"/"left".
    :param names: list of names like "left12.png" or "right3.png"
    :return: the same names sorted by that numeric index
    """
    by_index = {}
    for name in names:
        # Names carry either a "right" or a "left" marker before the index.
        marker = "right" if "right" in name else "left"
        index_text = name.split(marker)[1].split(".png")[0]
        by_index[int(index_text)] = name
    return [by_index[key] for key in sorted(by_index)]
|
def setup_probe_value(type, arg_values):
    """
    Normalise user-supplied probe arguments.

    type is the probe type; arg_values is the list of raw user args.
    """
    if type in ('asn', 'msm'):
        # These types expect a single integer id.
        return int(arg_values[0])
    if type == 'probes':
        # Build a comma-separated list of probe ids.
        return ','.join(str(value) for value in arg_values)
    # For everything else just pass the first value through untouched.
    return arg_values[0]
|
def transform_field(x, attr_type):
    """Convert a raw entity field value into its display representation.

    Returns None for missing/'NaN' values and for unrecognised attr_types.
    """
    if x is None or x == 'NaN':
        return None
    if attr_type in ('KEYWORD_SET', 'GEOLOCATION', 'DATE_TIME'):
        return x
    if attr_type == 'NUMBER':
        # Empty strings count as 0; otherwise truncate the float towards zero.
        return int(float(x)) if x != '' else 0
    if attr_type == 'id':
        # Render URLs as markdown links; plain ids pass through.
        return '[{}]({})'.format(x, x) if x.startswith('http') else x
|
def rgba_from_argb_int(color):
    """
    Split an ARGB-packed int into an RGBA tuple.
    Returns:
        (int, int, int, float)
        Red, green and blue channels in 0-255, alpha scaled to [0, 1].
    """
    alpha = ((color >> 24) & 0xFF) / 255.
    red = (color >> 16) & 0xFF
    green = (color >> 8) & 0xFF
    blue = color & 0xFF
    return red, green, blue, alpha
|
def get_references(term):
    """Convert a reference string to the corresponding reference property schema.

    Args:
        term: a string with the format "source:id_content"
    Returns:
        a tuple (property_line, new_source_map). property_line is the reference
        property in schema, and new_source_map maps the source name to the
        identifier when the source is not one of the known ones.
        For example:
            ("pubMedID: 1007323", {aNewSource: 100100})
    """
    # Everything after the first ':' belongs to the identifier.
    source, _, id_content = term.partition(':')
    known_properties = {
        'PMID': 'pubMedID',
        'pmid': 'pubMedID',
        'GO': 'goID',
        'RESID': 'residID',
        'doi': 'digitalObjectID',
    }
    new_source_map = {}
    property_name = known_properties.get(source)
    if property_name is None:
        # Unknown source: surface it to the caller instead of a property line.
        new_source_map[source] = id_content
        property_line = None
    else:
        property_line = '{}: "{}"'.format(property_name, id_content)
    return (property_line, new_source_map)
|
def _escape_filename(filename):
"""Escape filenames with spaces by adding quotes (PRIVATE).
Note this will not add quotes if they are already included:
>>> print _escape_filename('example with spaces')
"example with spaces"
>>> print _escape_filename('"example with spaces"')
"example with spaces"
"""
#Is adding the following helpful
#if os.path.isfile(filename):
# #On Windows, if the file exists, we can ask for
# #its alternative short name (DOS style 8.3 format)
# #which has no spaces in it. Note that this name
# #is not portable between machines, or even folder!
# try:
# import win32api
# short = win32api.GetShortPathName(filename)
# assert os.path.isfile(short)
# return short
# except ImportError:
# pass
if " " not in filename:
return filename
#We'll just quote it - works on Windows, Mac OS X etc
if filename.startswith('"') and filename.endswith('"'):
#Its already quoted
return filename
else:
return '"%s"' % filename
|
def revcomp(sequence):
    """
    Find the reverse complementary sequence.
    :param sequence: The RNA sequence in string form (A/U/C/G/N, any case)
    :return: The reverse complement sequence in string form (uppercase)
    :raises KeyError: on characters outside A/U/C/G/N
    """
    pairs = {"A": "U", "U": "A", "C": "G", "G": "C", "N": "N"}
    return "".join(pairs[base.upper()] for base in reversed(sequence))
|
def find_opposite(direction: str):
    """
    Find the opposite of the current movement direction.
    :param direction: The direction Pac-Man is currently moving ('u'/'d'/'l'/'r')
    :return string: The opposite direction, or None for unrecognised input
    """
    opposites = {'u': 'd', 'd': 'u', 'l': 'r', 'r': 'l'}
    return opposites.get(direction)
|
def even_split(n, k):
    """
    n and k must be ints.
    returns a list of as-even-as-possible shares when n is divided into k pieces.
    Excess is left for the end. If you want random order, shuffle the output.
    >>> even_split(2,1)
    [2]
    >>> even_split(2,2)
    [1, 1]
    >>> even_split(3,2)
    [1, 2]
    >>> even_split(11,3)
    [3, 4, 4]
    """
    if type(n) is not int or type(k) is not int:
        raise TypeError("n and k must be ints")
    # BUG FIX: the previous version used true division (n/k), which yields
    # floats in Python 3 (e.g. [2.0] instead of [2]), breaking the doctests.
    base, remainder = divmod(n, k)
    # k - remainder small shares first, then `remainder` shares one larger.
    return [base] * (k - remainder) + [base + 1] * remainder
|
def rename_method_df_none(df_column, rename_key):
    """Translate each method name via *rename_key*; unmapped names become None."""
    lookup = rename_key.get
    return [lookup(name) for name in df_column]
|
def sanitize_line(line, commenter='#'):
    """Drop any trailing comment from *line* and strip surrounding whitespace."""
    code, _, _ = line.partition(commenter)
    return code.strip()
|
def check_bonus1(flag):
    """Calculate bonus from skills and statuses.

    Ported from map/status.cpp:6789 status_calc_aspd(). Currently a stub:
    neither branch contributes anything, so the result is always 0.
    """
    bonus = 0
    return bonus
|
def error_message(e):
    """
    Build a custom error message from an error's (errno, strerror) args.
    :param e: error raised
    :type e: PermissionError|OSError
    :return: custom error message
    :rtype: str
    """
    code, text = e.args
    return "Error: [Errno {}] {}".format(code, text)
|
def sum_n(n):
    """
    Sum of first n natural numbers.

    Uses the closed form n*(n+1)/2 instead of recursion, so large n no
    longer hits the recursion limit and negative n no longer recurses
    forever (it now raises instead).

    :raises ValueError: if n is negative
    >>> sum_n(10)
    55
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    return n * (n + 1) // 2
|
def _check_res(res):
"""Helper for avoiding empty dictionary as function argument in
morphological dictionaries"""
if not res:
res = {
"suffixes": [], "prefixes": [], "roots": [], "other": [],
"original_word": []
}
return res
|
def _get_tag_label(token):
"""Splits a token into its tag and label."""
tag, label, = token.split('|')
return tag, label
|
def shift_slice(unshifted_list, element_to_shift_on):
    """Rotate the list so it starts at the first occurrence of the pivot element.

    Everything before the first occurrence of *element_to_shift_on* is moved
    to the back of the list.
    Note:
        Assumes the element to shift on has been stripped from the front
        of the list.
    Args:
        unshifted_list (List[str]): The list of elements.
        element_to_shift_on (str): The element to shift on.
    Returns:
        List[str]: The shifted list.
    Example:
        >>> shift_slice(['1', '1', '2', '3', '4', '1'], element_to_shift_on='1')
        ['1', '1', '2', '3', '4', '1']
        >>> shift_slice(['2', '3', '4', '1', '1', '1'], element_to_shift_on='1')
        ['1', '1', '1', '2', '3', '4']
    """
    # Empty list, or already starting on the pivot: nothing to rotate.
    if not unshifted_list or unshifted_list[0] == element_to_shift_on:
        return unshifted_list
    pivot = unshifted_list.index(element_to_shift_on)
    return unshifted_list[pivot:] + unshifted_list[:pivot]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.