content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def split_gens(gens_df, chroms):
    """Split a multi-locus gens dataframe into per-chromosome dataframes.

    :param gens_df: gens dataframe with a "chrom" column.
    :param chroms: list of chromosome notations to select.
    :return: list of dataframes, one per entry in `chroms`, each containing
        only the rows whose "chrom" value equals that entry.
    """
    per_chrom = []
    for chrom in chroms:
        per_chrom.append(gens_df.loc[gens_df["chrom"] == chrom])
    return per_chrom
|
12d33733b99aa8d4eeb2f9b5f674e2fc152bb230
| 59,169
|
def num_hours(s):
    """Count the hourly bins spanned by the series' datetime index.

    Args:
        s(:obj:`pandas.Series`): Datetime-indexed data to be measured
    Returns:
        :obj:`int`: Number of hourly groups produced by resampling `s`
    """
    return len(s.resample("H"))
|
2b150d75fa557977684898d2328e42cab183ee91
| 59,178
|
def get_all(client, query_params):
    """Fetch the server list from *client*, expanding *query_params* as keyword filters."""
    servers = client.get_servers(**query_params)
    return servers
|
9eb23476b08de929803bcd54544da2fa18c56f52
| 59,189
|
import re
def remove_win_special_char(before_str):
    """
    Strip characters that Windows forbids in file names.
    :param before_str: input string
    :return: the string with forbidden characters removed
    """
    # Characters \ / : * ? " < > | are illegal in Windows file names.
    forbidden = r'[\\/:*?"<>|]'
    return re.sub(forbidden, '', before_str)
|
15be8b10261748291c6eabd475499c1ce2bdc0d3
| 59,193
|
def GetComment(node) :
    """Get the first rdfs:comment we find on this node (or "No comment")."""
    comments = (triple.text for triple in node.arcsOut
                if triple.arc.id == 'rdfs:comment')
    return next(comments, "No comment")
|
78cbce8cdeee52a342c9842538afdbbee63b7e8b
| 59,195
|
def generate_oauth_headers(access_token: str) -> dict:
    """Build the standard OAuth bearer authorization header.
    :param access_token: OAuth access token
    :return: Request headers dict carrying the bearer token
    """
    return {'Authorization': f'Bearer {access_token}'}
|
7692f8fc6c9863a607682b0d1be28e7774729945
| 59,197
|
def create_test_function(test_manifest_loc, test_name, check_parse=True, regen=False):
    """
    Return a test method closed over `test_manifest_loc` and named `test_name`.

    If check_parse is True, the method exercises parse_manifest; otherwise it
    exercises package data normalization.
    """
    # Closure over the test parameters; the branch is resolved at call time
    # from the captured `check_parse` flag.
    def test_manifest(self):
        if check_parse:
            self.check_parse_manifest(test_manifest_loc, regen)
        else:
            self.check_get_normalized_package_data(test_manifest_loc, regen)

    # Give the function a proper name for reports and test discovery;
    # names arriving as bytes are decoded first.
    name = test_name.decode('utf-8') if isinstance(test_name, bytes) else test_name
    test_manifest.__name__ = name
    return test_manifest
|
17eb6cb3c17695da14200f1bd5224352db2d754c
| 59,201
|
def zfill(string, width):
    """zfill(x, width) -> string
    Pad a numeric string x with zeros on the left, to fill a field
    of the specified width. The string x is never truncated.

    A leading '-' or '+' sign stays at the front with the padding inserted
    after it, and an empty input is padded to all zeros — both matching
    str.zfill (the previous version raised IndexError on '').
    """
    size = len(string)
    if size >= width: return string
    sign = ''
    # Guard against empty input: string[0] would raise IndexError.
    if string and string[0] in ('-', '+'):
        sign, string = string[0], string[1:]
    return sign + '0' * (width - size) + string
|
26c51cde003f54f0c9d01b9e9f3ccc4cc4acaf83
| 59,208
|
import ast
def get_classes(tree):
    """Yield ``"class <name>"`` strings for each ClassDef node in AST `tree`."""
    return ("class " + node.name for node in tree
            if isinstance(node, ast.ClassDef))
|
e6162d7d250eb4860ae0cac974a014537c3027e9
| 59,209
|
import re
import click
import time
def increment_time(time_str, offset):
    """Shift an 'hhmmss' time string by `offset` seconds (wrapping past midnight)."""
    match = re.match(r'^(\d{2})(\d{2})(\d{2})$', time_str)
    if not match:
        click.echo("can't extract h:m:s from time str: {}".format(time_str))
        return
    hours, minutes, seconds = (int(group) for group in match.groups())
    total = hours * 3600 + minutes * 60 + seconds + offset
    if total < 0:
        # Negative totals would require adjusting the date; unsupported for now.
        click.echo("negative time is not yet supported")
        return
    return time.strftime('%H%M%S', time.gmtime(total))
|
026bd390cdb0e6cee77ae427f845241190b79339
| 59,210
|
import torch
def generate_sent_masks(enc_hiddens, source_lengths):
    """ Generate sentence masks for encoder hidden states.
    @param enc_hiddens (Tensor): encodings of shape (b, src_len, h), where b = batch size,
                                 src_len = max source length, h = hidden size.
    @param source_lengths (List[int]): actual length of each sentence in the batch; len = batch size.
    @returns enc_masks (Tensor): float mask of shape (b, src_len) with 1 at
                                 valid positions and 0 at padding positions.
    """
    batch_size, max_len = enc_hiddens.size(0), enc_hiddens.size(1)
    enc_masks = torch.zeros(batch_size, max_len, dtype=torch.float)
    # Mark the first `length` positions of each row as valid.
    for row, length in enumerate(source_lengths):
        enc_masks[row, :length] = 1
    return enc_masks
|
e7fe6336aa84a25af4fcbca7a7a54f2bb0e09624
| 59,211
|
def is_segment(other):
    """Return True if `other` is a Segment.

    Helper so callers can test for segments without importing the Segment
    class: it simply reads the object's `is_segment` attribute, defaulting
    to False when absent.
    """
    flag = getattr(other, "is_segment", False)
    return flag
|
15043983eb4948eba3a6d1308d11b239c950e0e8
| 59,218
|
from pathlib import Path
def get_set_local_dir(basename='pymodaq_local'):
    """Define, create and return a local folder for configuration files.

    Parameters
    ----------
    basename: (str) name given to the configuration folder

    Returns
    -------
    Path: the local path
    """
    folder = Path.home().joinpath(basename)
    if not folder.is_dir():  # pragma: no cover
        try:
            folder.mkdir()
        except Exception:
            # Home is not writable: fall back to a folder inside the package tree.
            folder = Path(__file__).parent.parent.joinpath(basename)
            info = f"Cannot create local folder from your **Home** defined location: {Path.home()}," \
                   f" using PyMoDAQ's folder as local directory: {folder}"
            print(info)
            if not folder.is_dir():
                folder.mkdir()
    return folder
|
08b38abf7e011acce68df9e577ce8d6c90da6857
| 59,225
|
def custom_locale_negotiator(request):
    """ The :term:`custom locale negotiator`. Returns a locale name.

    - First it looks for the ``request.params['_LOCALE_']`` value.
    - Then it looks for the ``request.cookies['_LOCALE_']`` value.
    - Then it looks for the ``Accept-Language`` header value,
      which is set by the user in his/her browser configuration.
    - Finally, if the locale could not be determined via any of
      the previous checks, the negotiator returns the
      :term:`default locale name`.
    """
    key = '_LOCALE_'
    locale = request.params.get(key)
    if locale is not None:
        return locale
    locale = request.cookies.get(key)
    if locale is not None:
        return locale
    settings = request.registry.settings
    return request.accept_language.best_match(
        settings.available_languages.split(),
        settings.default_locale_name)
|
08ef0ed01647623e38fcdec4c6daf071884381cc
| 59,227
|
def nf_input_to_cl(inp):
    """Convert an input description dict into a command line argument string.

    Uses the quoted `default` value when present, otherwise a `$name`
    variable reference; `separate` controls whether a space follows the prefix.
    """
    glue = " " if inp.get("separate") else ""
    default = inp.get("default")
    if default:
        value = f"'{default}'"
    else:
        value = f"${inp['name']}"
    return f"{inp['prefix']}{glue}{value}"
|
54943a85ffd0b8c7f5e8b5e5b6d5223b767e6b91
| 59,228
|
def _time_to_minutes(time_dhms):
""" Converting time from 'd-hh:mm:ss' to total minutes """
x = time_dhms.split('-')
if len(x) > 1:
days = int(x.pop(0))
else:
days = 0
x = x[0].split(':')
hours = int(x[0])
minutes = int(x[1])
# return number of minutes
return days * (24 * 60) + hours * 60 + minutes
|
bb7e4b20c9e0b138f7ddd63fc41cc5fd2a97f5d2
| 59,229
|
def reindex_columns_partial(df, cols):
    """
    Reorder a DataFrame so that the given columns come first.

    :param df: The DataFrame to reorder
    :param cols: The columns which should come first, in order.
    :return: The reindexed DataFrame
    :raises KeyError: if any requested column is missing from `df`.
    """
    leading = list(cols)
    unseen = leading[:]
    order = leading[:]
    for column in df.columns:
        if column in unseen:
            # Requested column exists; it is already at the front of `order`.
            unseen.remove(column)
        else:
            order.append(column)
    if unseen:
        raise KeyError("Unable to find keys: {}".format(",".join(unseen)))
    return df.reindex(columns=order)
|
338120b91c4c07b99b3d60eae69be7299d594a66
| 59,230
|
def check_bool(x):
    """Coerce `x` to a bool.

    Accepts a real bool, or a string equal to "true"/"false" in any case;
    anything else raises RuntimeError.
    """
    if isinstance(x, bool):
        return x
    lowered = x.lower()
    if lowered == "true":
        return True
    if lowered == "false":
        return False
    raise RuntimeError("{} is not a boolean value.".format(x))
|
eb362767b42234af7db79ac8e2fbd37dce84c66e
| 59,231
|
def find_sysroot(rust_toolchain):
    """Locate the rustc sysroot from the `rust_toolchain`
    Args:
        rust_toolchain (rust_toolchain): The currently configured `rust_toolchain`.
    Returns:
        str: A path assignable as `SYSROOT` for an action.
    """
    # Strip the short path suffix off the full path of any rust_lib file;
    # what remains is the sysroot prefix.
    anchor = rust_toolchain.rust_lib.files.to_list()[0]
    prefix = anchor.path.split(anchor.short_path, 1)[0]
    return prefix.rstrip("/")
|
b0f60f7ff797c1fa3107bc681cba7922c249a079
| 59,234
|
def show_toolbar(request):
    """Prevent DjDT from appearing in Django-CMS admin page iframes"""
    path = request.get_full_path()
    # Hide the toolbar on any CMS or admin URL.
    return not ('cms/' in path or 'admin/' in path)
|
63e0771f2c6b97395b56b2b945d79ea9b74abefc
| 59,236
|
def find_handshape_old(string, handshape_base="๎๎๎๎๎๎
๎๎๎๎๎๎", handshape_diacritic="๎๎๎๎๎๎๎๎๎ฐ๎ฑ๎ฒ๎ณ๎ด๎ฆ๎ต๎ถ๎ท๎ธ๎น๎บ๎"): # updated function above
    """
    Alternative version without re.

    Scan `string` and collect runs of handshape characters. A run starts at a
    character from `handshape_base` and is extended by further base characters
    or by characters from `handshape_diacritic`; all other characters end the
    current run and are accumulated into `rest`.

    NOTE(review): a diacritic seen while not inside a handshape run is dropped
    entirely (neither stored as a handshape nor added to `rest`) — confirm
    this is intended.

    :param string: input string to scan.
    :param handshape_base: characters that may start or extend a handshape run.
    :param handshape_diacritic: characters that extend a run but cannot start one.
    :return: dict with 'handshape' (list of run strings) and 'rest' (leftover text).
    """
    in_handshape = False  # whether we are currently inside a handshape run
    handshapes = []
    rest = ''
    for char in string:
        if char in handshape_base:
            if in_handshape:
                handshapes[-1] += char # append to existing handshape
            else:
                in_handshape = True # switch on environment of handshape
                handshapes += [char] # append a new segment to the list
        elif char in handshape_diacritic:
            # Diacritics only attach to an open run; otherwise they vanish.
            if in_handshape:
                handshapes[-1] += char
        else:
            # Any other character closes the run and goes to the remainder.
            in_handshape = False
            rest += char
    return {'handshape': handshapes, "rest": rest}
|
26edd5d03940464319fce2db662873357cc2b9d1
| 59,238
|
def retrieve_tags_info(alignment):
    """
    Extract all tag information from the alignment line.

    Arguments
    ----------
    alignment: one line of the sam/bam file, already split into fields

    Returns
    ----------
    a dict mapping each tag name to its [type, value] pair
    """
    tags = {}
    # Optional tag fields start at index 11; walk them from the end backwards.
    for field in alignment[:10:-1]:
        parts = field.split(":", 2)
        if len(parts) == 3:
            tags[parts[0]] = parts[1:]
    return tags
|
9457b6e12d1256feda53d55859816ecb64629775
| 59,242
|
def invalid_params_warning(command):
    """Warning message asking the user to retry with the correct format.

    `command` (the rejected input) is accepted for interface compatibility
    but is not echoed back in the reply.
    """
    return '*Please try again using the correct format:*\n> Try @synapse help'
|
8d9e4463c1fb69feddabfd5eb34220c1376c1998
| 59,243
|
def remove_duplicates_in_list(seq):
    """Return a new list with duplicates removed, keeping first-seen order.

    Parameters
    ----------
    seq : list
        the list

    Returns
    -------
    newSeq : list
        the new list with duplicate elements removed

    Examples
    --------
    >>> a = [1, 2, 4, 1, 2, 3, 6, 2, 2, 5, 5, 10, 3, 20, 21, 20, 8, 6]
    >>> remove_duplicates_in_list(a)
    [1, 2, 4, 3, 6, 5, 10, 20, 21, 8]
    """
    # dict preserves insertion order, so the keys are the first occurrences.
    return list(dict.fromkeys(seq))
|
b7e24e2a096202237064e38976e601268a78fa93
| 59,246
|
import re
def ExtractMacro(filename, macro):
    """
    Return the string value of the macro `macro' defined in `filename'.

    Matches lines of the form ``#define MACRO "x.y.z"`` (a dotted, quoted,
    three-part value). Returns '' when the macro is not found or the file
    cannot be read.
    """
    # Simple regex is far from a complete C preprocessor but is useful
    # in many cases
    regexp = re.compile(r'^\s*#\s*define\s+%s\s+"(.+[.].+[.].+)"\s*$' % macro)
    try:
        # `with` guarantees the file handle is closed (previously it leaked).
        with open(filename) as stream:
            for line in stream:
                m = regexp.match(line)
                if m:
                    return m.group(1)
    except EnvironmentError:
        pass
    return ''
|
c20e40ca32f911f2313870d0afc3b5bcda69cb8a
| 59,248
|
def get_method_full_signature(class_name, method_name, method_signature):
    """
    Build the full JVM-style method signature from its parts.

    :param class_name: class name (empty/None yields None)
    :param method_name: method name
    :param method_signature: method signature
    :return: 'L<class>;.<method>:<signature>' or None when class_name is falsy
    """
    if not class_name:
        return None
    wrapped_class = 'L' + class_name + ';'
    return wrapped_class + '.' + method_name + ':' + method_signature
|
c2d8dc7dc3a13036aea2d6782d7873d67a751219
| 59,249
|
def _url(server_url, physical_port, action):
"""
Helper function to build an url for given port and target
Args:
server_url: a str, the url for mux server, like http://10.0.0.64:8080/mux/vms17-8
physical_port: physical port on switch, an integer starting from 1
action: a str, either "output" or "drop"
Returns:
The url for posting flow update request, like http://10.0.0.64:8080/mux/vms17-8/1/drop(output)
"""
return server_url + "/{}/{}".format(physical_port - 1, action)
|
4108d2b00580f0fd128791cb914ae61f87934daa
| 59,250
|
def parseFileName(url):
    """Return the last path segment of `url`, decoding '%20' to spaces."""
    return url.split('/')[-1].replace("%20", " ")
|
cd044cb619e0301f6131d2a918b19031229b6fad
| 59,251
|
def get_time_integer(time):
    """Convert a 'hh:mm:ss' string to its total number of seconds."""
    h, m, s = (int(part) for part in time.split(":"))
    return h * 3600 + m * 60 + s
|
52c1f07ebeda55531c7f181a5155dd96f6af35b2
| 59,252
|
def get_reaction_label(rmg_reaction):
    """
    Returns the AutoTST reaction string in the form of r1+r2_p1+p2 (e.g., `CCC+[O]O_[CH2]CC+OO`).
    `reactants` and `products` are lists of class:`Molecule`s.
    """
    def side_label(species_list):
        # SMILES of a single species, or '+'-joined SMILES for multiple.
        if len(species_list) > 1:
            return '+'.join(spc.molecule[0].toSMILES() for spc in species_list)
        return species_list[0].molecule[0].toSMILES()

    return '_'.join([side_label(rmg_reaction.reactants),
                     side_label(rmg_reaction.products)])
|
481f1d24c46144ee218c4f10c021da3e85de1551
| 59,257
|
def find_snapshots_to_delete(from_front, to_front):
    """ Find all snapshots deleted in from_front that have not yet been
    deleted in the clone to_front.

    Revisions above to_front's highest used revision, or already deleted
    there, are skipped; the rest are sanity-checked against to_front's
    session data before being returned.
    """
    to_delete = []
    highest_rev = to_front.get_highest_used_revision()
    already_deleted = set(to_front.get_deleted_snapshots())
    for revision in from_front.get_deleted_snapshots():
        if revision > highest_rev or revision in already_deleted:
            continue
        name, fingerprint = from_front.get_deleted_snapshot_info(revision)
        # The clone must still agree on what this revision contained.
        assert to_front.get_session_info(revision)['name'] == name
        assert to_front.get_session_fingerprint(revision) == fingerprint
        to_delete.append(revision)
    return to_delete
|
e1298e5c9a1c2cb33ae46740b12e4b33980b5d37
| 59,258
|
def _get_dim(data):
"""
Data dimensionality with error checking
"""
if data.ndim == 2:
ndim = 2
elif data.ndim == 3:
ndim = 3
else:
raise RuntimeError('Unsupported number of dimensions {}. We only supports 2 or 3D arrays.'.format(data.ndim))
return ndim
|
de9b272f1f6acc3a49bf1111a63647d1697136d7
| 59,263
|
def to_update_param(d: dict) -> dict:
    """
    Convert a data dict to mongoengine-style update parameters by prefixing
    every key with ``set__``.
    """
    return {"set__" + key: value for key, value in d.items()}
|
6e1c69a8315da5723d207020d31eb5853b02dbdc
| 59,274
|
import itertools
def product_dict(**kwargs):
    """
    Return the cartesian product of a dict of lists, as a list of dicts —
    one dict per combination, keyed like the input.
    """
    keys = list(kwargs)
    return [dict(zip(keys, combo))
            for combo in itertools.product(*kwargs.values())]
|
ad891b8451d8af2a0992200331253ff0639fbb0c
| 59,277
|
def time_int_to_str(value: int) -> str:
    """Convert a number of seconds to a zero-padded 'MM:SS' string."""
    if not value:
        # Covers 0 and None alike.
        return '00:00'
    minutes, seconds = divmod(value, 60)
    return f'{str(minutes).zfill(2)}:{str(seconds).zfill(2)}'
|
43037bbd356585b9f9fccca138d58d67aa7af1de
| 59,282
|
def get_survival_function(cv_pipeline, X_test_df):
    """Get model-predicted survival functions for the test data, keyed by
    'samples' (the index values) and 'functions' (the predictions)."""
    estimator = cv_pipeline.best_estimator_
    return {
        'samples': X_test_df.index.values,
        'functions': estimator.predict_survival_function(X_test_df),
    }
|
57acf1d6a1dd7c951867410919afee3308557c96
| 59,286
|
def get_trimmed_string(value):
    """
    Return str(value) with a trailing '.0' stripped. Float values are first
    rounded to 1 decimal place.
    """
    if isinstance(value, float):
        value = round(value, 1)
    text = str(value)
    return text[:-2] if text.endswith('.0') else text
|
ae6f8d89e31d1074826b657191e062e4ecafdef6
| 59,293
|
def partner_point_each_sd(all_iterations_of_sd, beta, store_grad):
    """
    Compute the partner point of every steepest-descent iterate.

    Parameters
    ----------
    all_iterations_of_sd : 2-D array, shape (iterations + 1, d)
        All points visited by steepest descent.
    beta : float or integer
        Small constant step size used to form the partner points.
    store_grad : 2-D array, shape (iterations + 1, d)
        Gradient at each point of `all_iterations_of_sd`.

    Returns
    -------
    2-D array, shape (iterations + 1, d)
        Partner points, i.e. each iterate moved by -beta times its gradient.
    """
    return all_iterations_of_sd - beta * store_grad
|
a77f358723ea21cac06b29c614f9ed913bc92a7f
| 59,294
|
from typing import Tuple
def find_line_col(text: str, offset: int) -> Tuple[int, int]:
    """
    Returns the line and column corresponding to an offset in a given text.

    Args:
        text: The text to search.
        offset: The 0-based character offset. The function will essentially look for the position of the
            character at ``text[offset]``. It can also be equal to ``len(text)`` in which case the function
            will report the position of a potential character after the last character in the text.

    Returns:
        A (line, column) tuple corresponding to `offset`. The line and column are 1-based.

    Notes:
        - The offset, line and column all refer to character, not byte, offsets.
        - This only handles input where the lines are separated by a single ``\\n`` character.
        - If ``text[offset]`` is a newline, its reported column will be 1 more than the position of the
          last character in the line.
        - If `offset` is ``len(text)``, the virtual character is placed one column right of the last
          character when the text does not end with a newline, or on column 1 of the next line when it
          does. (The previous implementation wrongly reported the next line in the no-trailing-newline
          case, contradicting this contract.)
        - Not optimized for huge data; meant for one-off analyses such as syntax-error messages.

    Raises:
        IndexError: if `offset` is negative or beyond ``len(text)``.
    """
    if (offset < 0) or (offset > len(text)):
        raise IndexError(f"Offset {offset} lies outside text of length {len(text)}")
    # Line number = 1 + newlines strictly before `offset`; the column counts
    # from the character just after the last such newline (or from the start).
    line = text.count('\n', 0, offset) + 1
    line_start = text.rfind('\n', 0, offset) + 1
    return line, offset - line_start + 1
|
6d95ecdbc40d1720a380c87fc69f896a4176b100
| 59,301
|
def hrs_bw(begin, end):
    """
    Return the floating point number of hours between the beginning and
    the end events.
    """
    elapsed = end - begin
    return elapsed.total_seconds() / 3600
|
04a58934b9d76738ce3ad9c36efcc87f84588678
| 59,302
|
def f_prime(x: float) -> float:
    """The derivative of f(x) = (x-2)^4, namely 4*(x-2)^3."""
    return 4.0 * (x - 2.0) ** 3
|
189d530028ed02b0e634a35aa6e4e28812c5fa90
| 59,308
|
import torch
def draw_binary_line(x0, y0, x1, y1, imsize=224, width=1):
    """Non-differentiable way to draw a line with no fuzz
    :param x0: int, x coordinate of point 0
    :param y0: int, y coordinate of point 0
    :param x1: int, x coordinate of point 1
    :param y1: int, y coordinate of point 1
    :param imsize: size of image frame
    :param width: width of line
    :return template: torch Tensor of imsize x imsize
    """
    # Force an odd width so the line can be centered; hw is the half-width.
    if width % 2 == 0:
        width += 1
    hw = int((width - 1) / 2)
    template = torch.zeros((imsize, imsize))
    dx, dy = x1 - x0, y1 - y0
    # Bresenham-style setup: iterate along the longer axis.
    is_steep = abs(dy) > abs(dx)
    if is_steep:
        # Work in swapped coordinates so the loop always steps in x.
        x0, y0 = y0, x0
        x1, y1 = y1, x1
    swapped = False
    if x0 > x1:
        # Always draw left-to-right; `swapped` records the reversal but is
        # never read afterwards — presumably a leftover from a variant that
        # returned the point list. TODO confirm.
        x0, x1 = x1, x0
        y0, y1 = y1, y0
        swapped = True
    dx = x1 - x0
    dy = y1 - y0
    # Accumulated error decides when to step in y.
    error = int(dx / 2.0)
    ystep = 1 if y0 < y1 else -1
    y = y0
    for x in range(x0, x1 + 1):
        if is_steep:
            if hw > 0:
                # Thick line: paint a (2*hw) square patch, clamped to the
                # frame. NOTE(review): the upper slice bound min(.+hw, imsize)
                # is exclusive, so the patch is hw shorter on the high side
                # than on the low side — confirm this asymmetry is intended.
                template[max(y-hw, 0):min(y+hw, imsize),
                         max(x-hw, 0):min(x+hw, imsize)] = 1
            else:
                # Coordinates were swapped above, so index (y, x) here.
                template[y, x] = 1
        else:
            if hw > 0:
                template[max(x-hw, 0):min(x+hw, imsize),
                         max(y-hw, 0):min(y+hw, imsize)] = 1
            else:
                # NOTE(review): indexes as template[x, y] — rows are x and
                # columns are y in the non-steep case; confirm callers expect
                # this orientation.
                template[x, y] = 1
        error -= abs(dy)
        if error < 0:
            y += ystep
            error += dx
    return template
|
50902c408c4faa531224c058cece324a00aea2e2
| 59,311
|
def strings(n, k):
    """Number of distinct strings of length k over an alphabet of n symbols, i.e. n**k."""
    return n ** k
|
f908384278304b4d8d87cc6727d8827e5e28ad7f
| 59,313
|
def enumerations(item_lists: list, recursive=False) -> list:
    """
    Return the cartesian product of the given lists, as a list of lists,
    in lexicographic order of the input positions.  `recursive` selects
    between an equivalent recursive and stack-based implementation.

    [
        ['a', 'b'],
        ['X', 'Y'],
        [1, 2, 3],
    ]
    =>
    [
        ['a', 'X', 1],
        ['a', 'X', 2],
        ['a', 'X', 3],
        ['a', 'Y', 1],
        ['a', 'Y', 2],
        ['a', 'Y', 3],
        ['b', 'X', 1],
        ['b', 'X', 2],
        ['b', 'X', 3],
        ['b', 'Y', 1],
        ['b', 'Y', 2],
        ['b', 'Y', 3]
    ]
    """
    # Recursive helper: fills item_list position by position, emitting a copy
    # once every index has been assigned.
    def _enumerations(output: list, item_lists: list, item_list: list, index: int):
        if index == len(item_list):
            output.append(item_list.copy())
        else:
            for item in item_lists[index]:
                item_list[index] = item
                _enumerations(output, item_lists, item_list, index + 1)
    if not item_lists:
        return list()
    output = list()
    if recursive:
        _enumerations(output, item_lists, [None] * len(item_lists), 0)
    else:
        # Iterative variant: a stack of partial combinations.  Items are
        # pushed in reverse so pops come out in the original order.
        stack, item_lists_size = list(), len(item_lists)
        for item in item_lists[0][::-1]:
            stack.append([item])
        while len(stack) > 0:
            template = stack.pop()
            template_size = len(template)
            # The partial's length tells us which list to extend from next.
            if template_size == item_lists_size:
                output.append(template)
            else:
                for item in item_lists[template_size][::-1]:
                    stack.append(template + [item])
        return output
|
2b7fea3e395562c0a6b8b0c3cac6b2672123d0ee
| 59,315
|
def calculate_z_serial_purepython(maxiter, zs, cs):
    """
    Calculate the Julia-set escape count for every coordinate, serially.

    Args:
        - maxiter: max number of iterations before breaking (prevents
          iterating forever, which is possible with the Julia set).
        - zs: complex coordinate grid.
        - cs: list of complex constants, one per coordinate.

    For each z in `zs`, repeatedly apply z = z^2 + c while abs(z) < 2 and
    record how many updates occurred before escaping (or hitting maxiter).
    This is a CPU-bound, purely serial reference implementation.
    """
    output = [0] * len(zs)
    for index in range(len(zs)):
        z, c = zs[index], cs[index]
        escape_count = 0
        while escape_count < maxiter and abs(z) < 2:
            z = z * z + c
            escape_count += 1
        output[index] = escape_count
    return output
|
02279fdf924882501936867822635a4f19da1d6f
| 59,321
|
def str_insert(string, index, content):
    """Insert a substring into an existing string at a certain index."""
    return f"{string[:index]}{content}{string[index:]}"
|
0bb5c6e76858bb7a2bcd8aa6aa1f726269a61605
| 59,325
|
def build_run_cmd(raw_cmd, start_date=None, end_date=None, database=None):
    """Replace placeholder inputs in the model command with given values.

    Parameters
    ----------
    raw_cmd : str
        Raw command, which should contain placeholders <start_date>, <end_date>
        and <database>.
    start_date : str or datetime.datetime , optional
        Dataset start date to pass to command (metrics script should use this
        to restrict database queries to the given date range), by default None
    end_date : str or datetime.datetime , optional
        Dataset end date to pass to command, by default None
    database : str, optional
        Name of the database to pass to command (metrics script should use
        this to modify the database it connects to), by default None

    Returns
    -------
    str
        `raw_cmd` with each present placeholder replaced by its value.

    Raises
    ------
    ValueError
        If a placeholder is present in `raw_cmd` but its value is None, or if
        `raw_cmd` contains none of the placeholders. (The previous version
        cleared its "none found" flag for absent placeholders too, so the
        second error could never trigger.)
    """
    placeholders = {
        "<start_date>": start_date,
        "<end_date>": end_date,
        "<database>": database,
    }
    no_placeholders_found = True
    for key, value in placeholders.items():
        # Only placeholders actually present in the command matter.
        if key in raw_cmd:
            if value is None:
                raise ValueError(f"No value given for {key}")
            no_placeholders_found = False
            raw_cmd = raw_cmd.replace(key, str(value))
    if no_placeholders_found:
        raise ValueError(
            "Command doesn't include any of the possible placeholders: "
            f"{list(placeholders.keys())}"
        )
    return raw_cmd
|
009c837ea9b355b3ec135577c6aff5593ffaa879
| 59,327
|
def find_in_IL(il, addr):
    """ Return every instruction at the given address within the IL function passed in """
    return [instruction
            for block in il
            for instruction in block
            if instruction.address == addr]
|
3251e03f0dc045bd54e2d922805fe58f17f46663
| 59,328
|
def scalb(x, i):
    """
    Shift the fixed-point format of `x` by `i` bits without precision loss.

    Unlike ``>>``/``<<`` this changes the fixed-point format itself. In
    general `i` can only be constant: changing the format dynamically makes
    little sense in FPGA.

    Example: scalb on a 0.5 [0:-17] value with i=8 yields 128.0 [8:-9], and
    with i=-8 yields 0.001953125 [-8:-25].
    """
    return x.scalb(i)
|
060522171ce6182cb06de04df023142a864dd3ee
| 59,336
|
def applyFunc(func, num, input):
    """Apply `func` to `input` repeatedly, `num` times, returning the result.

    Assumes func takes its sole argument and returns something equivalent."""
    result = input
    for _ in range(num):
        result = func(result)
    return result
|
e2c26b3a13b6c352f884f82209e065aa6a3edc95
| 59,339
|
import inspect
def find_caller(level):
    """Return '<file>:<line>' for the caller of the function that called
    find_caller. Intended for use with exception messages.

    Inputs: level - integer - 0 for the immediate caller of find_caller,
            1 for the caller above that, and so on.
    """
    # stack()[0] is this frame, so level+1 walks up to the requested caller;
    # tuple slots 1 and 2 are the filename and line number.
    frame_file, frame_line = inspect.stack()[level + 1][1:3]
    return '{:s}:{:d}'.format(frame_file, frame_line)
|
a02464b6c289773e2ba6b448fe473a3d98b6623e
| 59,340
|
def _append_params(oauth_params, params):
"""Append OAuth params to an existing set of parameters.
Both params and oauth_params is must be lists of 2-tuples.
Per `section 3.5.2`_ and `3.5.3`_ of the spec.
.. _`section 3.5.2`: https://tools.ietf.org/html/rfc5849#section-3.5.2
.. _`3.5.3`: https://tools.ietf.org/html/rfc5849#section-3.5.3
"""
merged = list(params)
merged.extend(oauth_params)
# The request URI / entity-body MAY include other request-specific
# parameters, in which case, the protocol parameters SHOULD be appended
# following the request-specific parameters, properly separated by an "&"
# character (ASCII code 38)
merged.sort(key=lambda i: i[0].startswith('oauth_'))
return merged
|
6c87fd567376d78fb8b13e3ed6022f86f04a20ba
| 59,342
|
import torch
import warnings
def whiten(obs, check_finite=True):
    """
    Normalize a group of observations on a per feature basis.

    Before running k-means, it is beneficial to rescale each feature
    dimension of the observation set with whitening: each feature is divided
    by its standard deviation across all observations to give it unit
    variance.

    Parameters
    ----------
    obs : ndarray N x M x D
        Each row of the array is an observation; the columns are the
        features seen during each observation.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Default: True.
        NOTE(review): currently unused by the implementation.

    Returns
    -------
    result : ndarray
        The values in `obs` scaled by the standard deviation of each column.
        Columns with zero standard deviation are left unchanged (a
        RuntimeWarning is emitted).
    """
    std_dev = torch.std(obs, dim=1, keepdim=True)  # (N, 1, D)
    flat_columns = std_dev == 0
    if flat_columns.any():
        # Divide flat columns by 1 instead of 0 so they pass through intact.
        std_dev = std_dev.masked_fill(flat_columns, 1.0)
        warnings.warn("Some columns have standard deviation zero. "
                      "The values of these columns will not change.",
                      RuntimeWarning)
    return obs / std_dev
|
31842b7a2446da1eabb6a4bc6acc7625bb466f95
| 59,345
|
from operator import and_
def _is_in_directory(table, user_id, db_dirname):
"""
Return a WHERE clause that matches entries in a directory.
Parameterized on table because this clause is re-used between files and
directories.
"""
return and_(
table.c.parent_name == db_dirname,
table.c.user_id == user_id,
)
|
e4f4ce7afb495259ac0a5ebf085cf0149b9a85a9
| 59,347
|
def data_filter(df, min_price, max_price, sqrt_ft, num_bedroom, city_name):
    """Function to filter the given dataframe as per selection inputs
    Parameters
    ----------
    df: panda.DataFrame
        A cleaned dataframe
    min_price: int
        Minimum price
    max_price: int
        Maximum price
    sqrt_ft: int
        Minimum square feet
    num_bedroom: int
        Number of bedroom
    city_name: string
        A city
    Returns
    -------
    A panda.DataFrame
        The filtered dataframe based on user selection criteria
    Examples
    -------
    >>> data_filter(cleaned_df, 2000, 3000, 900, 2, "Vancouver")
    """
    # first check input type
    if not isinstance(min_price, (int, float)):
        raise TypeError("The minimum price entered is not a number")
    elif not isinstance(max_price, (int, float)):
        raise TypeError("The maximum number entered is not a number.")
    elif not isinstance(sqrt_ft, int):
        raise TypeError("The square feet entered is not an integer.")
    elif not isinstance(num_bedroom, int):
        raise TypeError("The number of bedroom entered is not an integer")
    elif not isinstance(city_name, str):
        raise TypeError("The city entered is not a string.")
    # then check input range
    elif not 0 <= min_price <= max_price:
        raise ValueError("Please enter appropriate positive price range.")
    elif num_bedroom < 0:
        raise ValueError("Please enter non-negative bedroom number.")
    # function body
    # Rows with null bedroom/area/city values are kept (treated as matching);
    # the city comparison is case-insensitive via casefold.
    # NOTE(review): `.str.casefold()` inside query() presumably requires the
    # python engine on some pandas versions — confirm against the pinned
    # pandas release.
    filtered_df = df.query(
        "(@min_price <= price <= @max_price)\
        and (num_bedroom.isnull() or num_bedroom >= @num_bedroom)\
        and (area_sqft.isnull() or area_sqft >= @sqrt_ft)\
        and (city.isnull() or city.str.casefold() == @city_name.casefold())"
    )
    return filtered_df
|
7b6f3390c88c19f901a7188d6caafffe41b759b1
| 59,348
|
def binaryToCounts(number, length):
    """
    Pop the last `length` bits off a binary list and decode them.

    The trailing `length` elements of `number` (a list of '0'/'1' strings)
    are interpreted as one binary photon-count value; they are removed from
    the list **in place** and the decimal value plus the shortened list are
    returned.

    Example: binaryToCounts(['1', '0', '1', '1'], 3) returns (3, ['1']) —
    3 is the decimal value of the last three bits ('0','1','1') and ['1']
    is what remains of the original list.
    """
    bits = ''.join(number[-length:])
    value = int(bits, 2)
    # Mutate the caller's list: drop the bits we just consumed.
    del number[-length:]
    return value, number
|
420d5a5b4a1dc46cf72934d278f56d60346e0ba9
| 59,354
|
def mark_exact(citation):
    """Wrap `citation` in the HTML mark element used to highlight exact matches."""
    template = '<mark class="exact-match">%s</mark>'
    return template % citation
|
58494550bb925d5770969dd25e849e24ce5392d4
| 59,360
|
def f2f(value):
    """Convert a fortran-formatted double precision number string (e.g.
    '2.323d2') to a python float by mapping the d/D exponent marker to e."""
    normalized = value.replace('d', 'e').replace('D', 'e')
    return float(normalized)
|
9fb2016281824e6a313b04afcca4c585fc117d4e
| 59,361
|
def get_audit_ccs(assessment):
    """Return audit CCs for the given assessment.

    Args:
        assessment: An instance of Assessment model.
    Returns:
        List of CC entries from the audit's issue tracker issue, or an empty
        list when the issue is missing or has no cc_list.
    """
    issue = assessment.audit.issuetracker_issue
    if issue is not None and issue.cc_list:
        return issue.cc_list.split(",")
    return []
|
0630f9c69acf0b2f61c362b2bed1dc35eb2bd8f2
| 59,374
|
import re
def add_select(query: str) -> str:
    """Prefix *query* with "SELECT " unless it already begins with SELECT or WITH."""
    # Note: doesn't work if there are comments in the beginning of the query
    already_complete = re.match(r"\s*(SELECT|WITH)\b", query, re.I) is not None
    return query if already_complete else "SELECT " + query
|
938ef98f121cf331ea846adf795dd11c912eb951
| 59,376
|
def sym(v):
    """
    Return the linear combination of basis-vector symbols that corresponds
    to vector *v* (itself a linear combination of basis vectors).
    """
    # Coefficients of v in the basis-vector expansion, then pair each with
    # the matching basis symbol and accumulate.
    coefficients = v.blade_coefs(v.Ga.mv())
    total = 0
    for index in range(v.Ga.n):
        total += coefficients[index] * v.Ga.basis[index]
    return total
|
51cffcadddb302051143c55bed02a6134e19aa4a
| 59,378
|
def flattenjson(obj, delim="__"):
    """
    Flatten a nested dict into a single-level dict.

    Nested dict values are merged in with their key paths joined by *delim*;
    non-dict values are kept unchanged.

    Arguments:
        obj -- dict, the (possibly nested) object to flatten
        delim -- string, separator inserted between parent and child keys
    Returns:
        A new flat dict.
    """
    flat = {}
    for key, value in obj.items():
        if isinstance(value, dict):
            for sub_key, sub_value in flattenjson(value, delim).items():
                flat[key + delim + sub_key] = sub_value
        else:
            flat[key] = value
    return flat
|
470719e9570e20ee1ddc93e4d684f04981825f21
| 59,382
|
def add_ago_to_since(since: str) -> str:
    """
    Backwards-compatibility hack: append " ago" to bare relative offsets
    such as "7 days", which would otherwise be treated as 7 days in the
    future.

    :param str since:
    :returns: Since with ago added if necessary
    :rtype: str
    """
    tokens = since.split(" ")
    relative_grains = {"days", "years", "hours", "day", "year", "weeks"}
    if len(tokens) == 2 and tokens[1] in relative_grains:
        return since + " ago"
    return since
|
48810f2cc4be3d444b5efa83d898d68abe543b1f
| 59,385
|
import json
def read_frequencies(frequencies_file):
    """Load an auspice tip-frequencies JSON file.

    Top-level entries whose value is a dict containing a "frequencies" key
    are treated as per-strain frequency trajectories; all other entries
    (e.g. "pivots", "generated_by") are treated as parameters.

    Args:
        frequencies_file: path to the tip-frequencies JSON file.

    Returns:
        (frequencies, parameters) tuple of dicts: ``frequencies`` maps
        strain name to its list of frequencies; ``parameters`` holds the
        remaining top-level entries.
    """
    with open(frequencies_file) as fh:
        frequencies_json = json.load(fh)

    parameters = {}
    frequencies = {}
    for key, values in frequencies_json.items():
        # isinstance guard: a bare `"frequencies" in values` would do a
        # substring test when `values` is a string (e.g. a "generated_by"
        # message containing the word "frequencies") and misclassify it.
        if isinstance(values, dict) and "frequencies" in values:
            frequencies[key] = values["frequencies"]
        else:
            parameters[key] = values
    return frequencies, parameters
|
60979cd669cbb0ea1588f5b4c16c3f21827c5176
| 59,389
|
def create_vcf_path(interval_path):
    """Build the cohort VCF path corresponding to an interval file's stem."""
    return "cohort_variants/{}.vcf".format(interval_path.stem)
|
60deb6945f498e8e9abd3e13f8c37a4469a06f1e
| 59,397
|
import requests
def request(url, to_json=False):
    """
    Fetch *url* and return its body, raising ValueError on a bad response.

    Returns the decoded UTF-8 text by default, or the parsed JSON body when
    *to_json* is true.
    """
    response = requests.get(url)
    if not response.ok:
        raise ValueError(
            f"Failed to get a good response when retrieving from {url}. Response: {response.status_code}"
        )
    return response.json() if to_json else response.content.decode("utf-8")
|
4e672095f7257c1deb86c60697cf97c00daef8fd
| 59,401
|
def calc_total_stocking_density(herbivore_list):
    """Sum the stocking densities of all herbivore classes in the list."""
    return sum(herb.stocking_density for herb in herbivore_list)
|
3a6c24f55451d2346d4d4d50fe88884b9fba7c3b
| 59,405
|
def add(lhs: list[int], rhs: list[int], base: int) -> list[int]:
    """
    Add two arbitrary-precision numbers digit-wise.

    Both operands are digit lists (most significant digit first) in the
    given base; the returned list encodes lhs + rhs in that same base.
    """
    # Left-pad both operands with zeros to a common width.
    width = max(len(lhs), len(rhs))
    a = [0] * (width - len(lhs)) + lhs
    b = [0] * (width - len(rhs)) + rhs

    digits = []  # built least-significant-first, reversed at the end
    carry = 0
    for x, y in zip(reversed(a), reversed(b)):
        carry, digit = divmod(x + y + carry, base)
        digits.append(digit)
    # A final carry becomes a new leading digit.
    if carry:
        digits.append(carry)
    digits.reverse()
    return digits
|
053b66310ef693f1c40d6da0f83a0dacb9a282c3
| 59,407
|
def get_connectivity_table(molecule, inverse_map):
    """
    Generate a connectivity table for a molecule using map indices.

    Parameters
    ----------
    molecule: oechem.OEMol
    inverse_map: dict {atom_idx: atom_map}

    Returns
    -------
    connectivity_table: list of lists
        [[atom1, atom2, bond_order] ...] with 0-based mapped indices.
    """
    table = []
    for bond in molecule.GetBonds():
        begin = inverse_map[bond.GetBgnIdx()] - 1
        end = inverse_map[bond.GetEndIdx()] - 1
        table.append([begin, end, bond.GetOrder()])
    return table
|
77df5cd24bd057c622ffd812379e5b078d5c886a
| 59,408
|
def identity(value):
    """
    Return *value* unchanged.

    Besides being a plain identity function, this object doubles as a
    sentinel: the compiler compares against it to recognise a no-op.
    """
    return value
|
c85bd2603a963e032555171eca66373b3a043963
| 59,409
|
import time
def get_timestamp(with_milliseconds=True):
    """Return the current Unix time as a string.

    Args:
        with_milliseconds: when False, the fractional part is dropped,
            e.g. '1639108065' instead of '1639108065.941239'.

    Returns:
        Str of seconds since the Epoch.
    """
    stamp = str(time.time())
    if with_milliseconds:
        return stamp
    # Keep only the integer seconds before the decimal point.
    return stamp.partition(".")[0]
|
1e9fbba08244cd8f9df00b94cd69ba10bf7e5e8b
| 59,410
|
import math
def vectorLength(vector):
    """Return the Euclidean length of the given 2-D vector.

    Uses math.hypot, which avoids the intermediate overflow/underflow that
    sqrt(x**2 + y**2) suffers for very large or very small components.
    """
    x, y = vector
    return math.hypot(x, y)
|
d79cbfdb0515e5aefd85cb07cfd94bcd02c3c310
| 59,413
|
def stitch(s):
    """Return the mutated sequence ('seq') with its barcode ('tag') appended."""
    sequence, barcode = s['seq'], s['tag']
    return sequence + barcode
|
e5ac1dfc3bda4e584e5aef65bd843bee980bfbd6
| 59,417
|
def parse_command(logfile):
    """
    Extract the commands recorded in *logfile*.

    A command starts at a line beginning with '[job' and ending with
    'docker \\'; continuation lines end with a backslash.  Returns a list
    of commands, each itself a list of lines with surrounding backslashes
    stripped.
    """
    commands = []
    current = []
    collecting = False
    with open(logfile, 'r') as handle:
        for raw in handle:
            line = raw.strip('\n')
            if line.startswith('[job') and line.endswith('docker \\'):
                collecting = True
            if not collecting:
                continue
            current.append(line.strip('\\'))
            # A line without a trailing backslash terminates the command.
            if not line.endswith('\\'):
                collecting = False
                commands.append(current)
                current = []
    return commands
|
92105b3b1076e68961e09565fd80ecf2169a6749
| 59,418
|
def xorCrypt(str, key=6):
    """Symmetrically encrypt/decrypt a string by XOR-ing each character with *key*."""
    return "".join(chr(key ^ ord(ch)) for ch in str)
|
2e1b6775e7c27c451c7ced9008ec6f8dbcd0cf0c
| 59,421
|
import requests
def get_top_pairs(fsym='BTC', tsym='USD', limit=5, optional_params=None):
    """ get the top [limit] pairs (from fsym to tsym)
    Args:
        Required:
            fsym (str): From symbol
            tsym (str): To symbol
        Optional:
            limit (int): number of pairs to return: default (5)
            optional_params (dict): extra query parameters, e.g.
                sign (bool): if true, server will sign request
    Return:
        top pairs (str): get top pairs by volume for a currency (using aggregate data).
    """
    url = "https://min-api.cryptocompare.com/data/top/pairs"
    params = {'fsym': fsym, 'tsym': tsym, 'limit': limit}
    # NOTE: default is None rather than a mutable {} so the dict cannot be
    # shared (and accidentally mutated) across calls.
    if optional_params:
        params.update(optional_params)
    r = requests.get(url=url, params=params)
    return r.text
|
637ac2e82b3a627345d4cce724a1eb49980f2451
| 59,429
|
def VtuFilename(project, id, ext):
    """
    Create a vtu filename from a project, ID and file extension
    """
    return f"{project}_{id}{ext}"
|
5ac516771cfe2f875ed2c891ee752ba44fdd58d9
| 59,430
|
def pause_group(torrent_client, params):
    """
    Pause several torrents.

    params['info_hashes']: list of str - the list of info-hashes in lowercase
    :return: 'OK'
    """
    info_hashes = params['info_hashes']
    for info_hash in info_hashes:
        torrent_client.pause_torrent(info_hash)
    return 'OK'
|
6041dcbddb7ec527f7c0f9fc97c82f3f5815e3af
| 59,432
|
def tidyId(nodeId):
    """
    Strip the 'n' prefix that yEd adds to node IDs.
    """
    return nodeId[1:] if nodeId.startswith('n') else nodeId
|
e1bb57b5c4453fab69ea2524330d60294de67c0e
| 59,433
|
def get_resolution(image, bbox_coords):
    """Return (x_resolution, y_resolution): bbox extent per image pixel."""
    height, width = image.shape[:2]
    x_res = (bbox_coords[2] - bbox_coords[0]) / width
    y_res = (bbox_coords[3] - bbox_coords[1]) / height
    return x_res, y_res
|
d8d3820a28b6cfd3f11b58bab645045c282efdd1
| 59,439
|
def parse_protocol_from_path(spec_path, protocol_info):
    """
    Parse a piece of CA-imaging protocol information from a path.

    :param spec_path: path containing the protocol information,
        e.g. "/egl3/urx/ramp210421/".
    :param protocol_info: which item to retrieve; one of "strain",
        "neuron", "prot_type", "o2conc".
    :return: the requested information, or "" when it cannot be parsed.
    """
    parts = spec_path.strip("/").split("/")
    if protocol_info == "strain":
        return parts[0]
    if protocol_info == "neuron":
        return parts[1].upper()
    if protocol_info == "prot_type":
        lowered = parts[2].lower()
        if "ramp" in lowered:
            return "ramp"
        if "shift" in lowered:
            return "shift"
        return ""
    if protocol_info == "o2conc":
        return parts[2].lower().replace("shift", "").replace("ramp", "")
    return ""
|
52c570222dac4df490941d0bde3eb27e85a85d49
| 59,441
|
def is_perm_palindrome(s):
    """
    Return True if *s* is a permutation of a palindrome
    (spaces ignored, case-insensitive).

    >>> is_perm_palindrome('Tact Coa')
    True
    >>> is_perm_palindrome('ab')
    False
    """
    # A string permutes to a palindrome iff at most one character
    # occurs an odd number of times.
    odd_chars = set()
    for ch in s:
        if ch == ' ':
            continue
        ch = ch.lower()
        if ch in odd_chars:
            odd_chars.discard(ch)
        else:
            odd_chars.add(ch)
    return len(odd_chars) <= 1
|
ca052e2a7491fef067379efe3eaa5c8a5df73477
| 59,445
|
import json
def read_json(fname):
    """Read a UTF-8 JSON file and return the parsed object.

    :param fname: str, path to file
    :return: dict, json
    """
    with open(fname, encoding='utf8') as handle:
        return json.load(handle)
|
a65c9580c7f3cd46b93888f3d71851602e85635b
| 59,447
|
def get_dict_without_keys(dct, *keys):
    """
    Return a shallow copy of a dictionary without specific keys.

    Parameters
    ----------
    dct: dict
        A python dictionary.
    keys: int or str
        The keys to leave out of the copy.

    Returns
    -------
    new_dict: dict
        A new dictionary without *keys.
    """
    return {key: value for key, value in dct.items() if key not in keys}
|
6eeed4459df6b84c2106611768b14de76e88dc0f
| 59,453
|
from typing import Sequence
def bow_tag_tokens(tokens: Sequence[str], bow_tag: str = "<w>"):
    """Prefix every token in the sequence with the beginning-of-word (BoW) marker."""
    return [f"{bow_tag}{token}" for token in tokens]
|
f75bdc28e5dd79b43429b16dcc649bf53ac3ae03
| 59,455
|
import re
def safe_name(name):
    """
    Convert an IAM user name to a UNIX user name:
    1) characters outside [\\w+=,.@-] are removed
    2) +/=/,/@ become plus/equals/comma/at
    """
    # IAM users only allow [\w+=,.@-]
    name = re.sub(r'[^\w+=,.@-]+', '', name)
    for symbol, word in (('+', 'plus'), ('=', 'equals'), (',', 'comma'), ('@', 'at')):
        name = name.replace(symbol, word)
    return name
|
077cb576f3c8cb6b4da905ba9c6f6410fbf691fe
| 59,456
|
def format_input(string: str) -> str:
    """Remove any literal 'r/' subreddit marker, then lowercase the result."""
    without_marker = string.replace("r/", "")
    return without_marker.lower()
|
3812427cd29e82442e19613e6de4c2a4bf870da1
| 59,457
|
def generate_shifts(key):
    """Generate the vigenere shift amounts (1 for 'z' .. 26 for 'a') from the key."""
    return [ord('z') - ord(letter) + 1 for letter in key]
|
4b58feefcc1c232dc502a69bc35ce61bfceaabb8
| 59,458
|
def erase_boundary(labels, pixels, bg_id):
    """
    Overwrite a border of width *pixels* on every face of a 3-D label
    volume with the background id (in place).

    Args:
        labels: 3-D array of class labels (modified in place)
        pixels: border width in voxels; non-positive widths are a no-op
        bg_id: id number of the background class

    Returns:
        labels: the edited label map (the same object that was passed in)
    """
    # Guard: with pixels == 0 the slices below would become labels[-0:],
    # which selects the WHOLE array and wipes every label to background.
    if pixels <= 0:
        return labels
    labels[:pixels, :, :] = bg_id
    labels[-pixels:, :, :] = bg_id
    labels[:, :pixels, :] = bg_id
    labels[:, -pixels:, :] = bg_id
    labels[:, :, :pixels] = bg_id
    labels[:, :, -pixels:] = bg_id
    return labels
|
c08736514032b467d82680c10d191f5d80960f5a
| 59,462
|
def revert_model_name(name):
    """Translate a display model name back to its internal model name."""
    internal_names = {
        'service': 'clusterobject',
        'component': 'servicecomponent',
        'provider': 'hostprovider',
    }
    return internal_names.get(name, name)
|
1eb8f69767adef8d3a02760f737877a4e5a6b041
| 59,466
|
def get_people_in_meeting(meeting_txt):
    """
    Parse a meetbot summary file containing a section of the form

        People present (lines said)
        ---------------------------
        * username (117)
        * username2 (50)

    and return a list of (nick, lines_said) tuples, where nick is the
    lowercased IRC nick with surrounding spaces/underscores stripped and
    lines_said is the count as a string.  Lines mentioning 'openstack'
    (bot entries) are skipped.
    """
    meeting_people = []
    in_people = False
    with open(meeting_txt) as txt_file:
        for line in txt_file:
            if line == "People present (lines said)\n":
                in_people = True
                continue
            # Only '* nick (count)' lines inside the section are relevant.
            if not in_people or '*' not in line:
                continue
            if 'openstack' in line:
                continue
            # line[2:-2] drops the leading '* ' and the trailing ')\n'.
            ircnic, linessaid = line[2:-2].split('(')
            meeting_people.append((ircnic.strip(" _").lower(), linessaid))
    return meeting_people
|
049ac20d12ce368f5993fc9f6530eea2ca3b2280
| 59,470
|
import torch
def viterbi_decode(tag_sequence, transition):
    """
    Perform Viterbi decoding in log space over a sequence given a transition matrix
    specifying pairwise (transition) potentials between tags and a matrix of shape
    (sequence_length, num_tags) specifying unary potentials for possible tags per
    timestep.

    Parameters
    ==========
    tag_sequence: torch.Tensor, required.
        A tensor of shape (sequence_length, num_tags) representing scores for
        a set of tags over a given sequence.
    transition: torch.Tensor, required.
        A tensor of shape (num_tags, num_tags) representing the binary potentials
        for transitioning between a given pair of tags.

    Returns
    =======
    viterbi_path: list of int, the tag indices of the maximum likelihood tag sequence
    viterbi_score: the score of the viterbi path (a 0-dim torch tensor)
    """
    seq_len, vocab = tag_sequence.size()
    # path_scores[t][j]: best score of any tag path ending in tag j at step t.
    # path_indices[t-1][j]: argmax predecessor tag (backpointer) for tag j at step t.
    path_scores = []
    path_indices = []
    path_scores.append(tag_sequence[0, :])
    # Evaluate the scores for all possible paths.
    for t in range(1, seq_len):
        # Add pairwise potentials to current scores; unsqueeze broadcasts the
        # previous scores across all candidate next tags.
        summed_potentials = path_scores[t - 1].unsqueeze(-1) + transition
        # Max over dim 0 (the previous tag) gives the best score and
        # backpointer for every current tag.
        scores, paths = torch.max(summed_potentials, 0)
        path_scores.append(tag_sequence[t, :] + scores.squeeze())
        path_indices.append(paths.squeeze())
    # Construct the most likely sequence backwards by following backpointers
    # from the best final tag.
    viterbi_score, best_path = torch.max(path_scores[-1].cpu(), 0)
    viterbi_path = [int(best_path.numpy())]
    for backward_t in reversed(path_indices):
        viterbi_path.append(int(backward_t[viterbi_path[-1]]))
    # Reverse the backward path.
    viterbi_path.reverse()
    return viterbi_path, viterbi_score
|
f45c98a78c80cbcc0f0335d69733d7cf19241688
| 59,473
|
def _whitespace_trimmed(string: str) -> str:
    """Strip leading and trailing whitespace from *string*."""
    # str.strip() trims both ends in one call; lstrip().rstrip() was redundant.
    return string.strip()
|
c0a8deeb2fa9d90bbda68d12919fffdb7dd7d821
| 59,475
|
from typing import List
def from_bits(bits: List[int]) -> int:
    """
    Convert a set of bits, least significant bit first, to an integer.

    :param bits: List of bits, least significant bit first.
    :return: Integer representation of the bits.
    """
    # Each set bit at position p contributes 2**p; non-1 entries are ignored.
    return sum(2 ** position for position, bit in enumerate(bits) if bit == 1)
|
b3c9e1a895c4cbb0c6e7d4166293c92d9c76aaca
| 59,476
|
import math
def slide_split(num: int, num_val: int, num_test: int) -> list:
    """A slide dataset split scheme.

    The test window slides from the end of the dataset towards the start,
    so that over all folds the test sets cover the whole dataset (except
    for the first few samples).

    Args:
        num (int): Total number of samples in the dataset
        num_val (int): Number of samples for the validation dataset
        num_test (int): Number of samples for the test dataset

    Returns:
        list: The list of dataset split folds. Each fold (an entry in the
        list) is a 3-tuple (train_index, val_index, test_index) of
        sample-index lists.
    """
    assert num_val + num_test < num, "Sum of num_val and num_test should be less than num."
    index_all = list(range(num))
    index_splits = []
    # Integer floor division; equivalent to math.floor for positive ints.
    num_folds = (num - num_val) // num_test
    for fold_idx in range(num_folds):
        # Layout: ... left ... val_index ... center ... test_index ... right
        left = num - (fold_idx + 1) * num_test - num_val
        center = num - (fold_idx + 1) * num_test
        right = num - fold_idx * num_test
        val_index = index_all[left:center]
        test_index = index_all[center:right]
        train_index = list(set(index_all) - set(val_index) - set(test_index))
        index_splits.append((train_index, val_index, test_index))
    # (debug print of each fold removed -- callers only need the return value)
    return index_splits
|
1ce97b0de6cb26b720d5f35c05d5c940cc77e9ee
| 59,478
|
def read_id_from_file(path):
    """Return the id stored on the first line of the file at *path*."""
    with open(path) as id_file:
        first_line = id_file.readline()
    return first_line.strip()
|
19058ec4367bdba683e6e5bff109714c4e81be5f
| 59,479
|
def find_next_path_down(current_path, path_to_reduce, separator):
    """
    Reduce ``path_to_reduce`` so that it contains only one more level of
    detail than ``current_path``.

    :param current_path: The path used to determine the current level
    :type current_path: :class:`basestring`
    :param path_to_reduce: The path to find the next level down
    :type path_to_reduce: :class:`basestring`
    :param separator: The string used to separate the parts of path
    :type separator: :class:`basestring`
    :return: The path one level deeper than that of ``current_path``
    :rtype: :class:`unicode`
    """
    # One more separator than the current path means one level deeper.
    next_level = current_path.count(separator) + 1
    # Keep only the first next_level components, then restore the trailing
    # separator.
    kept_parts = path_to_reduce.split(separator, next_level)[:next_level]
    return separator.join(kept_parts) + separator
|
aa4e27996167b600e3608303529c295286e7f2a0
| 59,480
|
def get_name(header_string):
    """Return the first part of an SDRF header, lowercased and without spaces."""
    collapsed = header_string.replace(' ', '')
    return collapsed.partition('[')[0].lower()
|
42cccdc0908c5ac6bef2c23816640e4921930c8a
| 59,483
|
from typing import List
def parse_instruction(line: str) -> List[str]:
    """Tokenise a hex-grid instruction line into directions (e, se, sw, w, nw, ne)."""
    valid_moves = ["e", "se", "sw", "w", "nw", "ne"]
    moves: List[str] = []
    pending = ""
    for ch in line:
        combined = pending + ch
        if combined in valid_moves:
            # Two-character (or bare) direction completed.
            moves.append(combined)
            pending = ""
        elif ch in valid_moves:
            # Single-character direction on its own; any pending prefix is kept.
            moves.append(ch)
        else:
            # 'n' or 's' starts a two-character direction.
            pending += ch
    return moves
|
f25c471c6338c3286bec9bd8e067fc2937aa33b0
| 59,487
|
import itertools
def pcycle(l):
    """
    Endlessly repeat the input sequence.

    :param l: input pipe generator
    :return: infinite datastream cycling over *l*
    """
    return itertools.cycle(l)
|
0b6d8061762e90418f6b7b3f8d20440d1305e717
| 59,489
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.