content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def normalize_accents(text: str) -> str:
    """
    Remove accents (diacritical marks) from text.

    Uses Unicode canonical decomposition (NFD) and drops the combining
    marks, so it covers every accented letter — including ones the old
    hand-written replacement table missed (e.g. 'Í', 'ò', 'Ú').

    :param text: text with undesired accents
    :return: clean text
    >>> normalize_accents('suspensám')
    'suspensam'
    >>> normalize_accents('quăm')
    'quam'
    >>> normalize_accents('aegérrume')
    'aegerrume'
    >>> normalize_accents('ĭndignu')
    'indignu'
    >>> normalize_accents('îs')
    'is'
    >>> normalize_accents('óccidentem')
    'occidentem'
    >>> normalize_accents('frúges')
    'fruges'
    """
    import unicodedata

    # NFD splits each accented character into its base letter followed by
    # combining mark(s); filtering out the marks leaves the bare letters.
    decomposed = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))
from typing import Optional
def convert_str_or_none(val: Optional[str]) -> Optional[str]:
    """Coerce *val* to ``str``, passing ``None`` through unchanged."""
    if val is None:
        return None
    return str(val)
def dict2str(dictionary, **kwargs):
    """Converts a dict to a string expression.

    Args:
        dictionary (dict): A dictionary to convert to a string.
    Keyword Args:
        inverse_dict(boolean): Swap key and value positions in the output
            (default: ``False``).
    Returns:
        str: The dictionary as flattened text, e.g. ``"{'e': 1, 'f': 2}"``.
        String keys/values are wrapped in single quotes; everything else
        uses ``str()``.
    Example:
        ``>>> dict2str({'e': 1, 'f': 2})``
        ``Out[]: "{'e': 1, 'f': 2}"``
    """
    inverse_dict = bool(kwargs.get("inverse_dict"))

    def _fmt(obj):
        # Quote strings the way a dict literal would; str() everything else.
        return "'%s'" % obj if isinstance(obj, str) else str(obj)

    pairs = []
    for k, v in dictionary.items():
        skey, sval = _fmt(k), _fmt(v)
        if inverse_dict:
            pairs.append("{0}: {1}".format(sval, skey))
        else:
            pairs.append("{0}: {1}".format(skey, sval))
    # Joining also fixes the old bug where an empty dict produced "{"
    # with no closing brace.
    return "{" + ", ".join(pairs) + "}"
import torch
def decode(loc, priors, variances):
    """
    Decode regressed offsets back into center-form segments.

    :param loc: location predictions for loc layers, shape: [num_priors, 2]
    :param priors: center form priors, shape: [num_priors, 2]
    :param variances: list of variances
    :return: decoded segments, center form, shape: [num_priors, 2]
    """
    # Shift prior centers by the predicted (scaled) offset.
    centers = priors[:, :1] + loc[:, :1] * priors[:, 1:] * variances[0]
    # Scale prior widths by the exponential of the predicted log-scale.
    widths = priors[:, 1:] * torch.exp(loc[:, 1:] * variances[1])
    return torch.cat([centers, widths], dim=1)
def linear_search(a, v):
    """Scan *a* left to right; return the index of the first element equal to *v*, or None if absent."""
    return next((idx for idx, element in enumerate(a) if element == v), None)
def clean_lines(lines):
    """Strip '#' comments and whitespace from each line, dropping lines that end up empty."""
    stripped = (raw.split('#')[0].strip() for raw in lines)
    return [content for content in stripped if content]
import re
def get_exclamations(text):
    """Count runs of '!' (optionally attached to a lowercase word or hashtag) in *text*."""
    matches = re.findall("[a-z#]*[!]+", text.lower())
    return len(matches)
import torch
def apply_mask(hidden_states, masks):
    """Zero the hidden vectors of masked tokens in a batch (in place).

    Args:
        hidden_states (torch.tensor): shape(batch_size, seq_len(PADDED), hidden_dim)
        masks (torch.tensor): shape(batch_size, seq_len(PADDED)) where masked tokens=0, unmasked tokens=1
    Returns:
        masked_hidden_states (torch.tensor): shape(batch_size, seq_len(PADDED), hidden_dim)
        (the same tensor object, modified in place)
    """
    hidden_dim = hidden_states.shape[-1]
    # Use a bool mask: indexing with a uint8 ByteTensor is deprecated, and
    # bitwise-inverting uint8 (1 -> 254) does not give a logical NOT,
    # whereas ~bool is well defined.
    keep = masks.view(-1).to(torch.bool)
    hidden_states.view(-1, hidden_dim)[~keep, :] = 0
    return hidden_states
def autodec(fwhm):
    """
    Automatically calculates the appropriate decimal place to track based on a full-width-at-half-maximum.

    :param fwhm: full-width-at-half-maximum (must be positive)
    :return: decimal power
    :rtype: int
    :raises ValueError: if *fwhm* is not positive (the original loop would
        spin forever on 0 or negative input, since shifting never reaches 1)
    """
    if fwhm <= 0:
        raise ValueError("fwhm must be positive, got %r" % (fwhm,))
    shift = fwhm
    n = 0
    # Scale up by powers of ten until the value crosses 1.
    while shift < 1.:
        n += 1
        shift = fwhm * 10 ** n
    return n + 1
def alphabetical_value(name):
    """Sum of each letter's 1-based position in the alphabet.

    Example: COLIN is worth 3 + 15 + 12 + 9 + 14 = 53.
    """
    return sum(ord(ch) - 96 for ch in name.lower())
def dummy_search_results(start=1, count=0, name='annotation'):
    """Generate a dummy Elasticsearch-style result dict with *count* hits numbered from *start*."""
    hits = [
        {
            '_id': 'id_{}'.format(number),
            '_source': {'name': '{}_{}'.format(name, number)},
        }
        for number in range(start, start + count)
    ]
    return {'hits': {'total': len(hits), 'hits': hits}}
def _is_required_build_variant(build_variant: str) -> bool:
"""
Determine if the given build variants is a required build variant.
:param build_variant: Name of build variant to check.
:return: True if the given build variant is required.
"""
return build_variant.endswith("-required") | cb12cf8ed2614fa9afde7473666d86540db78927 | 99,638 |
import time
def time_str(arg):
    """Try to parse *arg* as a time with progressively more specific formats.

    Returns ``(arg, struct_time)`` for the first format that matches,
    or None if none do.
    """
    pieces = "%Y", "-%m", "-%d", " %H", ":%M", ":%S", ".%f"
    fmt = ""
    for piece in pieces:
        fmt += piece
        try:
            return arg, time.strptime(arg, fmt)
        except ValueError:
            continue
    return None
from datetime import datetime
def is_valid_article(date : datetime, state : str, date_start : datetime, date_end : datetime) -> bool:
    """
    Decide whether the article metadata is usable.

    Params
    ------
    date: datetime.datetime
        Published datetime of the article
    state: str
        detected state of the incident in the article
    date_start: datetime.datetime
        article search beginning timeframe
    date_end: datetime.datetime
        article search ending timeframe

    Returns
    -------
    bool:
        True when a state string was detected and the publish date falls
        inside the [date_start, date_end] window (inclusive).
    """
    has_state = isinstance(state, str)
    in_window = date_start <= date <= date_end
    return has_state and in_window
def digest_line(line):
    """
    Pull the number of players (token 0) and the last marble value
    (token 6) out of a puzzle-input line.
    """
    tokens = line.split()
    return tokens[0], tokens[6]
import math
def myfloor(number: float) -> int:
    """We take as input a real number and give as output the greatest integer less than or equal to the number
    >>> myfloor(2.4)
    2
    >>> myfloor(2)
    2
    >>> myfloor(2.0)
    2
    >>> myfloor(-3.1)
    -4
    """
    # The hand-rolled sign-splitting implementation that used to live here
    # (commented out) is exactly what math.floor does; keep the stdlib call.
    return math.floor(number)
def analytical_pulse_duration(q):
    """
    Estimate pulse duration analytically from the electron bunch charge.

    :param q: electron bunch charge [nC]
    :return t: Duration of pulse [s]
    """
    duration_fs = (q * 1e3) / 9.8  # duration expressed in femtoseconds
    return duration_fs * 1e-15  # convert fs -> s
def filter_rows_with_index(r, cutoff):
    """Return the indices of items in *r* whose absolute value exceeds *cutoff*."""
    return [position for position, value in enumerate(r) if abs(value) > cutoff]
def noop_decorator(param_name, params):
    """Decorator factory that performs no transformation.

    Both arguments are accepted only for interface compatibility; the
    decorated function is returned unchanged.
    """
    def passthrough(func):
        return func
    return passthrough
def get_bbox(index, hdf5_data):
    """Retrieves the bounding box from hdf5 data for a specific index.
    Args:
        index (int): index of image.
        hdf5_data (h5py file): h5py file containing bounding box information.
    Returns:
        dictionnary: label, left, top, width and height values for the bounding box
        (each key maps to a list with one int per digit in the image).
    """
    attrs = {}
    # Resolve the HDF5 object reference for this image's bbox record.
    item = hdf5_data['digitStruct']['bbox'][index].item()
    for key in ['label', 'left', 'top', 'width', 'height']:
        attr = hdf5_data[item][key]
        # Multi-digit images store one object reference per digit, each of
        # which must be dereferenced through the file; single-digit images
        # store the value inline.  NOTE(review): this uses the legacy h5py
        # ``.value`` accessor, which was removed in h5py 3.x — confirm the
        # pinned h5py version supports it.
        values = [int(hdf5_data[attr.value[i].item()].value[0][0])
                  for i in range(len(attr))] if len(attr) > 1 else [int(attr.value[0][0])]
        attrs[key] = values
    return attrs
from typing import Iterable
def _check_wires(wires):
"""Standard checks for the wires argument.
Args:
wires (int or list): (subset of) wires of a quantum node
Return:
tuple(list, int): tuple containing the list of wires and number of wires
Raises:
ValueError: if the wires argument is invalid
"""
if isinstance(wires, int):
wires = [wires]
msg = "Wires must be a positive integer or a " \
"list of positive integers; got {}.".format(wires)
if not isinstance(wires, Iterable):
raise ValueError(msg)
if not all([isinstance(w, int) for w in wires]):
raise ValueError(msg)
if not all([w >= 0 for w in wires]):
raise ValueError(msg)
return wires, len(wires) | d95a76a3d507de7e9858ee9a7638988400842bc9 | 99,673 |
def drop_prefix(prefix, text):
    """Return *text* with *prefix* removed from the front, if present.

    Args:
        prefix: the prefix to drop.
        text: the full string.
    """
    return text[len(prefix):] if text.startswith(prefix) else text
import itertools
def get_all_possible_words(k):
    """
    Return every binary word of length *k* as a list of 0/1 tuples.
    """
    combos = itertools.product([0, 1], repeat=k)
    return list(combos)
import numbers
def strip_empties(d):
    """Return a like data structure with empty values removed.

    Empty lists, strings and dicts (and their keys) are dropped at every
    nesting level.  ``False`` and numeric ``0`` are deliberately kept,
    since the plain ``if value`` test would discard them.
    """
    def _keep(candidate):
        # bool/number must survive the emptiness check.
        if isinstance(candidate, (bool, numbers.Number)):
            return True
        return candidate

    if isinstance(d, list):
        cleaned = (strip_empties(item) for item in d)
        return [item for item in cleaned if _keep(item)]
    if isinstance(d, dict):
        cleaned = ((key, strip_empties(val)) for key, val in d.items())
        return {key: val for key, val in cleaned if _keep(val)}
    # Scalars (including strings) pass through untouched.
    return d
from operator import mod
def degrees(theta):
    """Normalize the angle *theta* into the range [0, 360) degrees."""
    return theta % 360
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi | 138c7b166d8279b869f4df984e1fd3a601ae7eb4 | 99,687 |
def getLineParts(line):
    """
    Split the UCI line into its whitespace-separated parts.
    Returns a list of strings, with apostrophes removed.

    Single- or double-quoted spans are kept together as one part even when
    they contain spaces (e.g. ``config ssid 'Free WiFi'`` yields three
    parts, the last being ``Free WiFi``).
    """
    parts = line.split(" ")
    # Nothing to work with in this case. It is probably an erroneous line,
    # because they should usually have at least two parts.
    if len(parts) <= 1:
        return parts
    groups = []
    current = []  # NOTE(review): never used below — looks like dead code.
    while len(parts) > 0:
        word = parts.pop(0)
        # Suppress empty words that are not enclosed in quotations.
        if len(word) == 0:
            continue
        # Remember which quote character (if any) opened this word.
        opening_quote = None
        if word.startswith("'") or word.startswith('"'):
            opening_quote = word[0]
            word = word[1:]
        closing_quote = None
        if opening_quote is not None and word.endswith(opening_quote):
            closing_quote = word[-1]
            word = word[:-1]
        group = [word]
        # Simple case: line part is a single word.
        if opening_quote is None or opening_quote == closing_quote:
            groups.append(group)
            continue
        # Build up a multi-word part until we find the closing quote.
        # Example: config ssid 'Free WiFi' # <- Space inside quotation marks.
        while len(parts) > 0:
            word = parts.pop(0)
            if word.endswith(opening_quote):
                closing_quote = word[-1]
                word = word[:-1]
            group.append(word)
            if opening_quote == closing_quote:
                break
        groups.append(group)
    # Re-join the words of each (possibly multi-word) group with spaces.
    return [" ".join(g) for g in groups]
def parse_args(parser):
    """
    Register the expected commandline arguments on *parser* and return it.
    """
    # (flags, options) table keeps the four registrations uniform.
    specs = [
        (('-i', '--input'), dict(type=str, required=True,
                                 help='path to the input text files')),
        (('-o', '--output'), dict(required=True,
                                  help='output folder to save text files')),
        (('-a', '--audio'), dict(required=True,
                                 help='audio folder to save wavs files')),
        (('-m', '--mels'), dict(required=True,
                                help='mels folder to save pt files')),
    ]
    for flags, options in specs:
        parser.add_argument(*flags, **options)
    return parser
def get_rssi_text(rssi):
    """Map an RSSI reading (dBm) to a human-readable signal-quality label."""
    # Thresholds checked strongest-first; first one satisfied wins.
    levels = (
        (-50, "Excellent"),
        (-60, "Very Good"),
        (-70, "Good"),
        (-80, "Low"),
        (-90, "Very Low"),
        (-100, "Poor"),
    )
    for threshold, label in levels:
        if rssi >= threshold:
            return label
    return "Bad"
def seconds_to_timecode(seconds):
    """
    Converts seconds into a conditioned and simplified timecode string
    :param seconds: float, seconds
    :return: string, timecode in 'HH:MM:SS.SSS' format
    """
    h, s = divmod(seconds, 3600)
    m, s = divmod(s, 60)
    s = round(s * 1000) / 1000  # limit seconds to millisecond precision
    hh = str(int(h))
    mm = str(int(m))
    ss = str(s)
    if len(hh) < 2:
        hh = '0' + hh
    if len(mm) < 2:
        mm = '0' + mm
    # Pad the seconds field based on the digits before the decimal point.
    # (The previous check ``ss.find('.') > 1`` skipped padding for values
    # like 5.5, producing malformed timecodes such as "00:00:5.5".)
    wholes = ss.split('.')[0]
    if len(wholes) < 2:
        ss = '0' + ss
    return '{0}:{1}:{2}'.format(hh, mm, ss)
def get_descriptive_string(value, pred_function):
    """
    Format a predicate and its argument in a human-readable way.

    Args:
        value: the argument of the predicate
        pred_function: a predicate function
    """
    template = "F({}), where F(x) evaluates\n\t {}"
    return template.format(value, pred_function)
def create_alias(dataset, attributes, delimiter='-'):
    """Create an alias key for a dataset by joining selected attributes.

    Parameters
    ----------
    dataset : dict
        Metadata dictionary representing a single dataset.
    attributes : list of str
        Attributes whose values are joined to form the alias.
    delimiter : str, optional (default: '-')
        Separator placed between attribute values.

    Returns
    -------
    str
        Dataset alias.

    Raises
    ------
    ValueError
        ``attributes`` is empty.
    AttributeError
        ``dataset`` does not contain one of the ``attributes``.
    """
    if not attributes:
        raise ValueError(
            "Expected at least one element for attributes, got empty list")
    # Validate first so the error reports the first missing attribute.
    for attribute in attributes:
        if attribute not in dataset:
            raise AttributeError(
                f"Dataset {dataset} does not contain attribute '{attribute}' "
                f"for alias creation")
    return delimiter.join(dataset[attribute] for attribute in attributes)
def degN (*args):
    """Build a degree-N polynomial function from its coefficients.

    The i-th positional argument is the coefficient of x**i, so
    ``degN(5, 1, 2)`` returns f(x) = 2x^2 + x + 5 (N = len(args) - 1).
    """
    def polynomial(x):
        return sum(coeff * x ** power for power, coeff in enumerate(args))
    return polynomial
def fill_tag(df):
    """Return a copy of *df* whose missing ``tag`` values are taken from ``auto_tag``.

    The result column is converted to a pandas ``category`` dtype.
    """
    out = df.copy()
    filled = out.tag.where(out.tag.notna(), out.auto_tag)
    out['tag'] = filled.astype('category')
    return out
def get_change(m):
    """Minimum number of coins of denominations 1, 5 and 10 needed to make *m*.

    Greedy works here because each denomination divides the next.
    """
    remainder = m % 10
    coins = m // 10          # 10-coins
    coins += remainder // 5  # 5-coins
    coins += remainder % 5   # 1-coins
    return coins
def is_key_string(string):
    """
    True when *string* is a key string: it begins with an underscore
    and has at least one more character after it.
    """
    return string.startswith('_') and len(string) > 1
def _get_server_id(servers, identity):
"""
Fetch and return server UUID by server name if found.
"""
for server in servers.items:
if identity in (server.properties.name, server.id):
return server.id
return None | 38f8612ad39d59229769659003c639d71689232e | 99,721 |
def parse_file(cc, filename):
    """
    This function actually parses the test. It does so using some pretty simple
    logic. Consequentially, you can't deviate too much from the expected format.
    Specifically we assume:
    - The start of the tutorial block (i.e., "<comment_char>TUTORIAL") is the
      first non-whitespace token on the line.
    - The start of a skipping directive (i.e.,
      "<comment_char>TUTORIAL_START_SKIP") is the first non-whitespace token on
      the line.
    - The end of a skipping directive (i.e.,
      "<comment_char>TUTORIAL_STOP_SKIP") is the first non-whitespace token on
      the line.
    C++ is the first non-whitespace token on the line.
    - all comment blocks are continuous (no blank lines in the middle)
    :param cc: The character denoting the comment
    :param filename: The full path to the file
    :return: A tuple ``(comments, code, which_first)``: lists of tutorial
        comment blocks and code blocks (each a list of lines), plus a flag
        that is True if a tutorial comment appeared before any code, False
        if code came first, or None if the file had neither.
    """
    # These are parsing strings we'll need to look for
    tutor_start = cc + "TUTORIAL"
    tutor_skip_start = cc + "TUTORIAL_START_SKIP"
    tutor_skip_stop = cc + "TUTORIAL_STOP_SKIP"
    comments = []  # Will be the comments we find
    code = []  # Will be the code snippets we fine
    in_comment = False  # True signals we are in a tutorial comment block
    skipping = False  # True while inside a START_SKIP/STOP_SKIP region
    which_first = None  # Records whether a comment or code block came first
    with open(filename, 'r') as input_file:
        for line in input_file:
            no_ws = line.lstrip()  # The line w/o proceeding whitespace
            # Dispatch conditions (prefix tests; order of the elif chain
            # below encodes directive precedence)
            is_skip_start = tutor_skip_start == no_ws[:len(tutor_skip_start)]
            is_skip_stop = tutor_skip_stop == no_ws[:len(tutor_skip_stop)]
            is_tutor_start = tutor_start == no_ws[:len(tutor_start)]
            is_comment = cc == line.lstrip()[:len(cc)]
            # Actually do the dispatching
            if skipping:  # Currently skipping
                if is_skip_stop:  # and we were told to stop
                    skipping = False
            elif is_skip_stop:  # Not skipping, but told to stop
                raise Exception("TUTORIAL_STOP_SKIP w/o TUTORIAL_START_SKIP")
            elif is_skip_start:  # Told to start skipping
                skipping = True
            elif is_tutor_start:  # Told to start tutorial comment
                if which_first is None:
                    which_first = True
                if in_comment:
                    raise Exception("Can't nest TUTORIAL sections")
                comments.append([])  # Start a new comment block
                in_comment = True
            elif is_comment and in_comment:  # Part of tutorial comment
                comments[-1].append(no_ws[len(cc):])  # omit comment character
            elif in_comment:  # 1st line outside a comment block
                in_comment = False
                code.append([])
                code[-1].append(line)
            else:  # n-th line outside a comment block
                if which_first is None:
                    which_first = False
                if len(code) == 0:  # If code came first, there's no array yet
                    code.append([])
                code[-1].append(line)
    return comments, code, which_first
from bs4 import BeautifulSoup
import re
def detect_error_type(content):
    """Classify an error page by inspecting its HTML.

    >>> detect_error_type('<input name="login">Login requested')
    'Cookie expired or is invalid, login requested'
    >>> detect_error_type('<div id="objects_container"><span class="bb">' + \
        'The page you requested cannot be displayed at the moment. ' + \
        'It may be temporarily unavailable, the link you clicked on may ' + \
        'be broken or expired, or you may not have permission to view ' + \
        'this page.</span></div>')
    'Page temporarily unavailable / broken / expired link'
    >>> detect_error_type('<html></html>')
    'Failed to parse page'
    """
    soup = BeautifulSoup(content, "lxml")
    login_field = soup.find("input", attrs={"name": "login"})
    if login_field:
        return "Cookie expired or is invalid, login requested"
    unavailable_spans = soup.find_all(
        "span", string=re.compile("It may be temporarily unavailable"))
    if unavailable_spans:
        return "Page temporarily unavailable / broken / expired link"
    return "Failed to parse page"
def intid(id):
    """Convert id to an integer, if possible.

    Else id is returned unchanged. This is used to access ids in
    Gnucash data files easier. The ids are often integers.
    Example:
    >>> intid(5)
    5
    >>> intid('0012')
    12
    >>> intid('abc')
    'abc'
    """
    try:
        return int(id)
    except (TypeError, ValueError):
        # int() only raises these two for unconvertible input; the old
        # blanket ``except Exception`` could hide unrelated bugs.
        return id
def get_by_name(container, name, name_field="name"):
    """Return the item in *container* whose ``name_field`` equals *name*, else None.

    Raises an Exception if multiple items match, since that violates the
    ONNX standard.
    """
    matches = [idx for idx, item in enumerate(container)
               if getattr(item, name_field) == name]
    if len(matches) > 1:
        raise Exception("Found multiple get_by_name matches, undefined behavior")
    if not matches:
        return None
    return container[matches[0]]
def compute_approach(power, interest):
    """
    Computes the approach to managing a
    stakeholder according to the power/interest model.

    Parameters
    ----------
    power : str
        The stakeholder's level of power, either
        `high` or `low`.
    interest : str
        The stakeholder's level of interest, either
        `high` or `low`.

    Returns
    -------
    str
        The approach to managing this stakeholder:
        `monitor closely`, `keep satisfied`,
        `keep informed`, `monitor`, or `unknown`.
    """
    # Power/interest grid as a lookup table; anything off-grid is unknown.
    approaches = {
        ("high", "high"): "monitor closely",
        ("high", "low"): "keep satisfied",
        ("low", "high"): "keep informed",
        ("low", "low"): "monitor",
    }
    return approaches.get((power, interest), "unknown")
def get_launch_test_fixture(item):
    """Return the launch test fixture name, `None` if this isn't a launch test."""
    marker = item.get_closest_marker('launch')
    return None if marker is None else marker.kwargs.get('fixture')
def clean_players_name_string(df, col='name'):
    """ Clean the imported file 'name' column because it has different patterns between seasons

    Args:
        df: merged df for all the seasons that have been imported
        col: name of the column for cleanup
    Returns:
        the same DataFrame with the column cleaned in place
    """
    # replace "_" with a space (plain substring replacement)
    df[col] = df[col].str.replace('_', ' ', regex=False)
    # strip digits; regex=True must be explicit — pandas >= 2.0 defaults
    # str.replace to literal matching, which would leave '\d+' untouched
    df[col] = df[col].str.replace(r'\d+', '', regex=True)
    # trim surrounding whitespace
    df[col] = df[col].str.strip()
    return df
def loc(x):
    """
    Interpret a string of 0/1 characters as a binary number.

    :param x: string of 0/1
    :return: Decimal number
    """
    return int(x, base=2)
import pickle
def loadTestData(project_code):
    """Loads the serialized pickle file that holds all the data we have on the test runs.

    Args:
        project_code: The internal code to the project we wish to load data for.
    Returns:
        The unpickled test-run data object (also printed to stdout).
    """
    # SECURITY NOTE(review): pickle.load executes arbitrary code from the
    # file — only ever load files this application itself has written.
    with open ("CrashTrakr_data_" + project_code, mode="rb") as test_data_file:
        test_data = pickle.load(test_data_file)
    print(test_data)
    return test_data
import re
def get_table_headers(table):
    """
    Extract the header cell texts from the <thead> section of an HTML table string.
    """
    thead_pattern = re.compile(r'(?ims)\<thead\>(.*?)\</thead\>')
    header_html = thead_pattern.findall(table)[0]
    cell_pattern = re.compile(r'(?ims)\<th.*?\>([^<]+?)\<.*?/th\>')
    return cell_pattern.findall(header_html)
def drop_duplicate_claims(df):
    """ Drops rows with duplicate values in claim column. Modifies DF in place! """
    before = len(df['claim'])
    df.drop_duplicates(subset='claim', inplace=True)
    after = len(df['claim'])
    print('Dropped {} duplicate rows.'.format(before - after))
    return df
def _simplifyValues(*values):
"""Given a set of numbers, convert items to ints if they are
integer float values, eg. 0.0, 1.0."""
newValues = []
for v in values:
i = int(v)
if v == i:
v = i
newValues.append(v)
return newValues | 2bbbcbe4da2da7c4577d7a06eb461e527e10884a | 99,751 |
import re
def active(request, pattern):
    """
    Checks the current request to see if it matches a pattern.
    If so, it returns 'active'.
    To use, add this to your Django template:
    .. code-block:: html
        {% load tags %}
        <li class="{% active request home %}"><a href="/">Home</a></li>
    """
    return 'active' if re.search(pattern, request.path) else ''
import re
def parse_diag_dump(raw_result):
    """
    Parse the 'diag-dump' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the diag-dump command \
        in a dictionary of the form:
     ::
        {
            'result': 0
        }
        (0 when the success banner is found, 1 otherwise; empty dict for
        empty input)
    """
    diag_dump_re = (
        r'(Diagnostic dump captured for feature)'
    )
    result = {}
    for line in raw_result.splitlines():
        if re.search(diag_dump_re, line):
            result['result'] = 0
            break
        result['result'] = 1
    return result
def decipher_kid_rsa(msg, key):
    """
    Decrypt Kid-RSA ciphertext: ``msg`` is the ciphertext and ``key`` is
    the private key ``(n, d)``; the plaintext is ``msg * d mod n``.

    Examples
    ========
    >>> from sympy.crypto.crypto import (
    ...     kid_rsa_public_key, kid_rsa_private_key,
    ...     decipher_kid_rsa, encipher_kid_rsa)
    >>> a, b, A, B = 3, 4, 5, 6
    >>> d = kid_rsa_private_key(a, b, A, B)
    >>> msg = 200
    >>> pub = kid_rsa_public_key(a, b, A, B)
    >>> pri = kid_rsa_private_key(a, b, A, B)
    >>> ct = encipher_kid_rsa(msg, pub)
    >>> decipher_kid_rsa(ct, pri)
    200
    """
    modulus, private_exp = key
    return (msg * private_exp) % modulus
def LineRamp(t, duration, Initial, Final):
    """Linearly interpolate from Initial to Final as t goes from 0 to duration."""
    frac = t / duration
    return (1 - frac) * Initial + frac * Final
def my_task(**kwargs) -> None:
    """Testing stub: accepts any keyword arguments and returns None."""
    return None
from typing import Tuple
def calculate_line_changes(diff: str) -> Tuple[int, int]:
    """Return a two-tuple (additions, deletions) of a diff.

    File-header lines ("+++"/"---") are not counted.
    """
    additions = 0
    deletions = 0
    for line in diff.splitlines():
        # startswith instead of line[0]: blank lines (valid in diffs)
        # would make line[0] raise IndexError.
        if line.startswith("+") and not line.startswith("+++"):
            additions += 1
        elif line.startswith("-") and not line.startswith("---"):
            deletions += 1
    return additions, deletions
import typing
import itertools
def possible_kmers(k: int = 11) -> typing.Generator:
    """
    Lazily generate every DNA k-mer of length *k* over the alphabet ATCG.

    :param k:
    :return:
    """
    return (''.join(letters) for letters in itertools.product('ATCG', repeat=k))
from typing import List
from typing import Any
import logging
import json
def load_servers_json() -> List[List[Any]]:
    """
    Load the default Electrum server list from the local ``servers.json``.

    :returns: A list of server info lists for all default Electrum servers
    """
    logging.info("Reading server list from file..")
    with open("servers.json", "r") as handle:
        return json.load(handle)
import re
def is_gcs_path(gcs_path_string):
    """
    True when the string looks like a GCS location, i.e.
    "gs://bucket_name" or "gs://bucket_name/file/path".
    """
    pattern = r'gs://([^/]+)(/.+)?'
    return re.match(pattern, gcs_path_string) is not None
def longest_proper_prefix(P):
    """Solution to exercise R-13.2.

    What is the longest (proper) prefix of the string "cgtacgttcgtacg" that
    is also a suffix of this string?
    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    For a string S of length n:
        1. A prefix is a substring of S of the form S[0:k] for 0 <= k <= n.
        2. A suffix is a substring of S of the form S[j:n] for 0 <= j <= n.
    A proper prefix of a string is a prefix that is not equal to the string
    itself; S[0:k] for 0 <= k < n.

    Collect every proper prefix and every suffix, intersect the two sets,
    and return the longest common string.

    For "cgtacgttcgtacg" (n = 14) the answer is "cgtacg" (length 6): the
    6-character prefix "cgtacg" also forms the final 6 characters.  No
    longer proper prefix recurs as a suffix — e.g. the 7-character prefix
    "cgtacgt" would have to equal the suffix "tcgtacg", which it does not.
    (An earlier version of this docstring claimed the answer was the
    13-character "cgtacgttcgtac"; that string is not a suffix, since the
    13-character suffix is "gtacgttcgtacg".)
    """
    n = len(P)
    prefixes = {P[:k] for k in range(n)}       # proper prefixes only (k < n)
    suffixes = {P[j:] for j in range(n + 1)}   # every suffix, incl. '' and P
    common = prefixes & suffixes
    return max(common, key=len)  # longest string that is both
from typing import Counter
def make_label_vocab(list_of_labels):
    """
    Build a vocabulary mapping each label to its index and frequency.

    :param list_of_labels: list or array, list of labels
    :return: dict mapping label -> {"id": sorted position, "cnt": occurrences}
    """
    counts = Counter(list_of_labels)
    return {
        label: {"id": index, "cnt": counts[label]}
        for index, label in enumerate(sorted(counts))
    }
def relative_y(xarr, yarr, *args):
    """Shift *yarr* so its first value becomes zero; pass xarr and extras through."""
    shifted = yarr - yarr[0]
    return (xarr, shifted) + args
def get_error_point(view, error):
    """Get the error text point.

    lxml uses 1-based line and column numbers but ST wants them
    0-based, so subtract 1 from both."""
    # ``view`` is a Sublime Text view; text_point converts (row, col)
    # into a character offset within the buffer.
    return view.text_point(error.line - 1, error.column - 1)
def describe_humidity(humidity):
    """Classify relative humidity: 'good' strictly between 40 and 60, else 'bad'."""
    return "good" if 40 < humidity < 60 else "bad"
def _counters(counters):
"""Return the formatted text for counters.
"""
long_name = max(counters.names(), key=len)
pattern = '{:.<' + str(max(len(long_name) + 2, 25)) + '} {: >4}'
return [pattern.format(name.replace("_", " ").capitalize(), counters[name]) for name in counters] | 0f6b4a220331c0af5898b5a5ba21aef2f52711ed | 99,803 |
import unicodedata
def remove_non_ascii(word_list):
    """Strip non-ASCII characters from every word in *word_list*.

    Keyword arguments:
        word_list: list of words
    """
    def _to_ascii(word):
        # NFKD decomposition lets accented letters fall back to their
        # ASCII base before the lossy encode drops the rest.
        return unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')

    return [_to_ascii(word) for word in word_list]
def clip(min_bound, max_bound, value):
    """Limit the range ``value`` may take.

    Arguments:
        min_bound: the lowest allowed value.
        max_bound: the highest allowed value.
        value: the value to be clamped between min_bound and max_bound.
    Return:
        min_bound if value < min_bound; max_bound if value > max_bound;
        otherwise value.
    """
    # Clamp to the upper bound first, then the lower bound (matches
    # max(min_bound, min(value, max_bound)) even when bounds are inverted).
    clipped = value if value < max_bound else max_bound
    return clipped if clipped > min_bound else min_bound
def _do_get_latest_training_dataset_version(training_dataset_name, featurestore_metadata):
"""
Utility method to get the latest version of a particular training dataset
Args:
:training_dataset_name: the training dataset to get the latest version of
:featurestore_metadata: metadata of the featurestore
Returns:
the latest version of the training dataset in the feature store
"""
training_datasets = featurestore_metadata.training_datasets
matches = list(
filter(lambda td: td.name == training_dataset_name, training_datasets.values()))
versions = list(map(lambda td: int(td.version), matches))
if (len(versions) > 0):
return max(versions)
else:
return 0; | d00f742de5d09cc193ea4ae5f78aeed9cc4afe12 | 99,819 |
def _select_sequence(items, keep=(), drop=()):
"""Helper function for `select` that works on sequences (basically
collections that support enumeration).
Parameters
----------
items: Sequence
List, tuple, or iterable sequence of some sort to select items from.
keep: Iterable[str]
Sequence of indices to keep.
drop: Iterable[str]
Sequence of indices to drop. You should specify either `keep` or
`drop`, not both.
Returns
-------
Same type as `items` (usually a list or tuple).
"""
type_ = type(items)
if keep:
return type_(x for i, x in enumerate(items) if i in set(keep))
return type_(x for i, x in enumerate(items) if i not in set(drop)) | 0faf73afe33f1f383adb94fa50661abd5b9c2da8 | 99,822 |
import requests


def get_activities_json(access_token, limit=None, page=None):
    """Get a list of activity summary dicts.

    https://developers.strava.com/docs/reference/#api-Activities-getLoggedInAthleteActivities

    `stravalib` implementation for comparison:
    `acts = client.get_activities(limit=10)`

    Args:
        access_token (str): Fresh access token for the Strava API.
        limit (int): Maximum number of activities to be returned in the summary.
            Default None, which will allow Strava API to set the default
            (30 as of this writing).
        page (int): Page number of results to fetch. Default None (Strava
            returns the first page).
    Returns:
        list: dicts with summary data for the requested number of the
        activities associated with the athlete whose access_token is used.
    """
    # curl -X GET `https://www.strava.com/api/v3/athlete/activities` \
    #   (?before=&after=&page=&per_page=)
    #   -H "Authorization: Bearer [[token]]"
    # before/after/page/per_page are query-string parameters, so they must be
    # passed via `params=`; the original used `data=`, which requests places
    # in the request body of the GET, where the Strava API ignores it.
    params = {}
    if limit is not None:
        params['per_page'] = limit
    if page is not None:
        params['page'] = page
    resp = requests.get(
        'https://www.strava.com/api/v3/athlete/activities',
        params=params,
        headers={'Authorization': f'Bearer {access_token}'},
    )
    return resp.json()
def from_bytes(data: bytearray) -> int:
    """
    Retrieves an integer value from a bytearray.

    The bytes are interpreted as an unsigned big-endian integer.

    :param bytearray data: Input bytes.
    :return: Value.
    :rtype: int
    """
    return int.from_bytes(data, 'big', signed=False)
def validate_keys(kwargs, good_kwargs):
    """Validate keyword arguments against the set of known keywords.

    Any keyword not present in ``good_kwargs`` raises ``KeyError``; keywords
    missing from ``kwargs`` are filled in with their default values. Trailing
    underscores are stripped from keys in the returned dict.

    :param kwargs: the input keyword dictionary, with only the defined keywords.
    :param good_kwargs: the valid keywords with their default values.
    :return: the output keyword dictionary, with all the keywords resolved.
    :raises KeyError: if ``kwargs`` contains unknown keywords.
    """
    unknown = set(kwargs) - set(good_kwargs)
    if unknown:
        raise KeyError("Unknown parameters: {}".format(", ".join(unknown)))
    return {
        key.rstrip("_"): kwargs.get(key, good_kwargs.get(key))
        for key in good_kwargs
    }
import json


def CreateConfig(path):
    """
    load the config using path

    :param path: {str}
        path to the JSON config file
    :return:
        dictionary that contains all the configs
    """
    with open(path, 'r') as handle:
        return json.load(handle)
def validmove(loc):
    """Return True when loc lies on the 4x4 grid and is not the origin (0, 0)."""
    if loc == (0, 0):
        return False
    x, y = loc[0], loc[1]
    return 0 <= x <= 3 and 0 <= y <= 3
def frame_from_timecode(timecode, fps=24.0):
    """
    Return the frame corresponding to the given timecode, for the given fps.

    :param timecode: String, "HH:MM:SS:FF" timecode.
    :param fps: Float representing frames-per-second.
    :returns: Int representing a number of frames.
    """
    # Bail out with 0 for empty/invalid timecodes and for drop-frame
    # timecodes (which contain ";" and are unsupported).
    if not timecode or ":" not in timecode or ";" in timecode:
        return 0
    hours, minutes, seconds, frames = (int(part) for part in timecode.split(":"))
    total_seconds = hours * 3600 + minutes * 60 + seconds
    return int(round(total_seconds * fps + frames))
def rgb_to_hex(r, g, b):  # pylint: disable=invalid-name
    """
    Convert numeric r, g, b color channels to a hex standard #RRGGBBAA color
    format (alpha is always FF).

    Arguments:
    - r - red channel in (0, 255).
    - g - green channel in (0, 255).
    - b - blue channel in (0, 255).
    """
    return f"#{r:02X}{g:02X}{b:02X}FF"
def get_all_classes(self):
    """Return the list of class names in this dataset."""
    return list(self.name_to_class_info)
def split_list(l, N):
    """Split list l into N contiguous sublists of near-equal size.

    The first ``len(l) % N`` sublists receive one extra element, so no items
    are lost. (The original step-based slicing silently discarded the
    remainder when ``len(l)`` was not divisible by ``N`` and raised
    ``ValueError`` — range step 0 — when ``len(l) < N``.)

    :param l: list to split
    :param N: number of sublists to produce
    :return: list of N sublists covering all of l
    """
    size, extra = divmod(len(l), N)
    bounds = [0]
    for i in range(N):
        # Spread the remainder over the first `extra` sublists.
        bounds.append(bounds[-1] + size + (1 if i < extra else 0))
    return [l[bounds[i]:bounds[i + 1]] for i in range(N)]
import math


def E2M(Ecc, e):
    """
    Function: E2M
    Purpose:  Maps the eccentric anomaly angle into the corresponding
              mean elliptic anomaly angle. Both 2D and 1D elliptic
              orbit are allowed.
    Inputs:
       Ecc = eccentric anomaly (rad)
       e = eccentricity (0 <= e < 1)
    Outputs:
       M = mean elliptic anomaly (rad)
    Raises:
       ValueError when e is outside [0, 1).
    """
    if not (0.0 <= e < 1.0):
        raise ValueError('Error: E2M() received e = {}, the value of e should be 0 <= e < 1'.format(str(e)))
    return Ecc - e * math.sin(Ecc)
def fix_case(target: str, base: str) -> str:
    """Return the lower-case string *target* with the case of *base*.

    Each character of ``target`` is upper-cased wherever the corresponding
    character of ``base`` is upper-case. ``base`` must be at least as long
    as ``target`` (shorter bases raise IndexError).

    The original computed the result twice, binding the first pass to an
    unused local ``ret`` (dead code); this does the join once.
    """
    return "".join(
        target[i].upper() if base[i].isupper() else target[i]
        for i in range(len(target))
    )
def clip_pt_to_im(pt, im_size):
    """Clamp a 2D point into the image frame.

    :param pt: 2D point (x, y).
    :param im_size: Image size (width, height).
    :return: Clipped 2D point [x, y] within [0, width-1] x [0, height-1].
    """
    x = min(max(pt[0], 0), im_size[0] - 1)
    y = min(max(pt[1], 0), im_size[1] - 1)
    return [x, y]
def get_url(domain_name):
    """
    Build the GoDaddy OTE domain-suggestion URL for a domain name.

    :param domain_name: (string) domain name
    :return: URL string
    """
    base = "https://api.ote-godaddy.com/v1/domains/suggest"
    return f"{base}?query={domain_name}&country=IN&waitMs=1000"
def check_coverage_single_pos(cov_df, variants_bed_sub, rsid, threshold = 20):
    """
    Return "PASS" when the value looked up for ``rsid`` exceeds ``threshold``,
    otherwise "FAILED".

    NOTE(review): despite the name, this function never reads ``cov_df``; the
    value compared against ``threshold`` is the "end" coordinate taken from
    ``variants_bed_sub``, not a coverage value — presumably the coverage was
    meant to come from ``cov_df``. Confirm against callers.
    NOTE(review): the original docstring claimed the function returns "CHECK"
    for HLAs and "the major star allele"; no such branches exist in this code.

    Parameters
    ----------
    cov_df: Pandas Dataframe
        Dataframe which stores the coverage of every position queried.
        NOTE(review): currently unused.
    variants_bed_sub: Pandas Dataframe
        Subset of the variants table; must contain ``rsid`` and ``end``
        columns with exactly one row matching ``rsid`` (``.item()`` raises
        otherwise).
    rsid : String
        The rsID of the position to check.
    threshold: Integer
        Cut-off used to assign the flag.

    Returns
    -------
    flag: String
        "PASS" or "FAILED".
    """
    position = variants_bed_sub.loc[variants_bed_sub.rsid == rsid, "end"].item()
    if position > threshold:
        flag = "PASS"
    else:
        flag = "FAILED"
    return flag
def calc_pyramid_levels(xy_final_shape, tile_size):
    """
    Calculate pyramid level shapes for a given image dimension and tile size.

    Halving stops once any dimension of the next level would not exceed
    tile_size; the last computed shape is not included in the result.

    Parameters
    ----------
    xy_final_shape: np.ndarray
        final shape in xy order
    tile_size: int
        size of the tiles in the pyramidal layers

    Returns
    -------
    res_shapes: list
        list of tuples of the shapes of the downsampled images (yx order)
    """
    current = xy_final_shape[::-1]
    levels = []
    while True:
        levels.append(tuple(current))
        if not all(current > tile_size):
            break
        current = current // 2
    # Drop the last level, matching the original's `res_shapes[:-1]`.
    return levels[:-1]
def _parse_bool(el):
"""parse a boolean value from a xml element"""
value = str(el)
return not value.strip() in ('', '0') | 05e0c2a68a2d3d1e8eb622b963b3e7df17b5161f | 99,866 |
import click
def select(choices, prompt="Please choose one", default=0, required=True):
    """ Let the user pick one of several choices.
    :param choices: Available choices along with their description
    :type choices: iterable of (object, str) tuples
    :param prompt: Prompt text shown when asking for the index
    :param default: Index of default choice
    :type default: int
    :param required: If False, the user may enter -1 and `None` is returned;
        if True, an in-range index is mandatory. (The original docstring
        stated this inverted.)
    :returns: The object the user picked, or None (only when not required).
    """
    choices = list(choices)
    for idx, choice in enumerate(choices):
        _, choice_label = choice
        # Only colorize labels that do not already contain ANSI escape codes.
        if '\x1b' not in choice_label:
            choice_label = click.style(choice_label, fg='blue')
        click.echo(
            u"{key} {description}".format(
                key=click.style(u"[{}]".format(idx), fg='green'),
                description=choice_label))
    while True:
        choice_idx = click.prompt(prompt, default=default, type=int, err=True)
        # -1 acts as the "no selection" sentinel, allowed only when not required.
        cutoff = -1 if not required else 0
        if choice_idx < cutoff or choice_idx >= len(choices):
            click.echo(
                "Value must be between {} and {}!"
                .format(cutoff, len(choices)-1), err=True)
        elif choice_idx == -1:
            return None
        else:
            return choices[choice_idx][0]
def lowest_common_ancestor(taxa1, taxa2, taxa_tree):
    """Return the lowest common ancestor of taxa1 and taxa2.

    Computes the most specific taxonomic rank shared by both taxa —
    e.g. 'Staphylococcus aureus' vs 'Staphylococcus lugdunensis' yields
    'Staphylococcus'. Returns None when no ancestor is shared.

    Ancestor lists come from ``taxa_tree.ancestors(<taxon>)``; the first
    ancestor of *taxa2* also present among *taxa1*'s ancestors wins.
    """
    shared = set(taxa_tree.ancestors(taxa1))
    return next(
        (ancestor for ancestor in taxa_tree.ancestors(taxa2) if ancestor in shared),
        None,
    )
import re


def normalize_stacktrace(trace):
    """Replace absolute paths to test.py in a stacktrace.

    Makes traces consistent between runs/machines by rewriting any
    'File "<path>test.py"' occurrence to 'File "test.py"'.
    """
    # \. — the original pattern used an unescaped dot (and a non-raw string),
    # which would also match names like "testXpy"; the intent is clearly the
    # literal filename test.py.
    return re.sub(r'File "(.*)test\.py"', 'File "test.py"', trace)
def authentication_required(fn):
    """Decorator for methods that require an authenticated client.

    The wrapped method asserts that ``self.api_key`` is truthy before
    delegating to *fn*. The check is kept as an ``assert`` so callers that
    catch AssertionError keep working, but note it is stripped under
    ``python -O``.
    """
    import functools

    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original decorator lost.
    @functools.wraps(fn)
    def wrapped(self, *args, **kwargs):
        assert self.api_key, "You must be authenticated to use this method"
        return fn(self, *args, **kwargs)
    return wrapped
def nested_update_with_layers(dict_1, dict_2, layers, **kwargs):
    """Update dict_1 with dict_2, filtering away some layers.

    Example:
        >>> nested_update_with_layers(
        ...     dict_1 = {},
        ...     dict_2 = {"a": {"ii": {"1": 121, "2": 122},
        ...                     "iii": {"3": 133, "4": 134}}},
        ...     layers = ("abc", "roman", "arabic"),
        ...     roman="ii"
        ... )
        OUT: {"a": {"1": 121, "2": 122}}

    Args:
        dict_1 (dict): The dictionary to update. Can be an empty dictionary.
        dict_2 (dict): The dictionary to update it with. This will in general be a
            homogeneous multi-layered string-keyed dictionary.
        layers (iter of str): The names of the layers of dict_2. Needs to have the
            same length as the number of times you would index dict_2.
        kwargs: The filters to apply to dict_2 while updating dict_1. Each key-word
            has to be the name of a layer. Those layers will not be recreated from
            dict_2 into dict_1; instead, the key-word argument is used as an index,
            selecting only one branch of dict_2 to update into dict_1.

    Returns:
        dict: dict_1, updated in place.
    """
    # Discard filters that do not name a known layer. Iterate over a copy of
    # the keys: the original looped over kwargs directly while popping from
    # it, which raises "dictionary changed size during iteration".
    for layer_name in list(kwargs):
        if layer_name not in layers:
            key = kwargs.pop(layer_name)
            # Original message interpolated the value where the layer name belongs.
            print(
                f"Can't filter based in {layer_name}={key} "
                f"because {layer_name} is not in layers={layers}. This will be ignored."
            )
    if layers and layers[0] in kwargs:
        # This layer is filtered: descend only into the selected branch,
        # consuming the filter for deeper levels.
        new_kwargs = kwargs.copy()
        key = new_kwargs.pop(layers[0])
        nested_update_with_layers(dict_1, dict_2[key], layers[1:], **new_kwargs)
    else:
        # Recreate this layer. The original iterated `for key, value in dict_2`
        # (unpacking keys -> ValueError) and tested isinstance on dict_2 rather
        # than on the value; both are fixed here.
        for key, value in dict_2.items():
            if isinstance(value, dict):
                if key not in dict_1:
                    dict_1[key] = {}
                nested_update_with_layers(dict_1[key], value, layers[1:], **kwargs)
            else:
                dict_1[key] = value
    return dict_1
def get_value(value):
    """Return the first line of *value*, or None when it is falsy."""
    if not value:
        return None
    return value.split('\n')[0]
def isEven(num):
    """Return True when num is even, False otherwise."""
    remainder = num % 2
    return remainder == 0
def create_pipeline(path_to_yml_files, script_name, num_jobs=1, num_gpus=0,
                    run_in='host', blocking=False, prefix='-p', extra_cmd_args=''):
    """
    Build a pipeline description for :py:mod:`scripts.pipeline` from a list of
    experiment configuration files, one job per file.

    Args:
        path_to_yml_files (list): Paths to the .yml files holding the generated
            experiment configurations from the sweep.
        script_name (str): Script to use; should exist in :py:mod:`scripts`.
        num_jobs (int, optional): Max number of jobs run concurrently (used as
            max_workers in :py:class:`runners.script_runner_pool.ScriptRunnerPool`).
            Defaults to 1.
        num_gpus (int, optional): GPUs to use for each job. Defaults to 0.
        run_in (str, optional): Whether to run on 'host' or 'container'.
            Defaults to 'host'.
        blocking (bool, optional): Whether to block on each job (forces jobs to
            run sequentially). Defaults to False.
        prefix (str, optional): Prefix placed before the config path (either
            '-p' or '-y'). Defaults to '-p'.
        extra_cmd_args (str, optional): Extra command line arguments for the
            script, given as they would appear on the command line.
            Defaults to ''.

    Returns:
        dict: Pipeline description that is later dumped to YAML and passed to
        :py:mod:`scripts.pipeline`.
    """
    jobs = [
        {
            'script': script_name,
            'config': f"""{prefix} "{path}" {extra_cmd_args}""",
            'run_in': run_in,
            'blocking': blocking,
            'num_gpus': num_gpus,
        }
        for path in path_to_yml_files
    ]
    return {'jobs': jobs, 'num_jobs': num_jobs}
def get_categories_population_dictionary(labels, n_classes=9):
    """Return a mapping (category) -> population count.

    Labels greater than or equal to ``n_classes`` are ignored; every class id
    in ``range(n_classes)`` appears in the result, even with a count of 0.
    """
    counts = {category: 0 for category in range(n_classes)}
    for label in labels:
        if label < n_classes:
            counts[label] += 1
    return counts
def extract_line(matrix, point, xyincrement):
    """Collect values from a square matrix along a straight line.

    Starting at ``point`` (x, y), repeatedly steps by ``xyincrement``
    (x-increment, y-increment), collecting ``matrix[y][x]`` until the walk
    leaves the matrix bounds.
    """
    x, y = point[0], point[1]
    xinc, yinc = xyincrement[0], xyincrement[1]
    size = len(matrix)
    collected = []
    while 0 <= x < size and 0 <= y < size:
        collected.append(matrix[y][x])
        x += xinc
        y += yinc
    return collected
import base64
import pickle


def b64pickle(obj):
    """Pickle *obj* (highest protocol) and return it as an ASCII base64 string."""
    raw = pickle.dumps(obj, -1)
    return base64.b64encode(raw).decode("ascii")
def parse_configuration(configuration):
    """Split a configuration label into (name, detail).

    "Python (C:\Python34\python.exe)" becomes ("Python", "C:\Python34\python.exe")
    "BBC micro:bit" becomes ("BBC micro:bit", "")
    """
    name, sep, detail = configuration.partition("(")
    if not sep:
        # No parenthesised detail present.
        return configuration, ""
    return name.strip(), detail.strip(" )")
def import_py(lib, name, args, ret, defs=None, echk=None):
    """ Import a native function.
    Args:
        lib  Library to load from (e.g. a ctypes CDLL)
        name Function name in the library
        args List of argument classes
        ret  Return value class
        defs Defaulted tuple of last parameters
        echk Optional filter/error checking function
    Returns:
        Function instance (or a wrapper applying the defaults)
    Raises:
        RuntimeError if the function is not exported by the library
    """
    # Assertions
    if defs is not None:
        assert isinstance(defs, tuple), "Expected a 'tuple' for argument 'defs'"
        assert len(defs) <= len(args), "More defaults provided than possible arguments"
    # Import and declaration
    func = getattr(lib, name, None)
    if func is None:
        raise RuntimeError("Function " + repr(name) + " is not exported in " + repr(lib._name))
    func.argtypes = args
    # ctypes reads the return type from 'restype'; the original assigned
    # 'rettype', which ctypes silently ignores (return values defaulted to int).
    func.restype = ret
    # Optional error check
    if echk is not None:
        func.errcheck = echk
    # Application of the optional parameters
    if defs is None:
        return func
    def call(*curr):
        """ Call 'func', replacing missing and defaulted arguments.
        Args:
            ... Forwarded arguments
        Returns:
            Forwarded returned value
        """
        assert len(args) <= len(curr) + len(defs), "Not enough parameters provided in native function call"
        return func(*(curr + defs[len(curr) - len(args) + len(defs):]))
    return call
import re


def sub_toc(line):
    """Substitute the HTML markup into a line of the TOC.

    Lines that do not match the five-column TOC layout are returned unchanged.
    """
    pattern = (
        r'([0-9]+)( +)\|( +)([0-9:\.]+)( +)\|( +)([0-9:\.]+)( +)\|( +)'
        r'([0-9]+)( +)\|( +)([0-9]+)'
    )
    markup = (
        r'<span class="log4">\1</span>\2<strong>|</strong>'
        r'\3<span class="log1">\4</span>\5<strong>|</strong>'
        r'\6<span class="log1">\7</span>\8<strong>|</strong>'
        r'\9<span class="log1">\10</span>\11<strong>|</strong>'
        r'\12<span class="log1">\13</span>'
    )
    return re.sub(pattern, markup, line)
import re


def str_camel_to_snake(term):
    """Convert a string from camelCase to snake_case.

    Parameters
    ----------
    term : string
        A camelCase string

    Returns
    -------
    string
        A new snake_case string
    """
    # First break "wordWord" boundaries, then catch remaining lower/digit-to-
    # upper transitions, and finally lower-case the whole result.
    step_one = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", term)
    step_two = re.sub("([a-z0-9])([A-Z])", r"\1_\2", step_one)
    return step_two.lower()
import pytz
def set_tz(dt):
    """
    Return a timezone-aware version of a datetime object.

    Naive datetimes are stamped with UTC; already-aware datetimes are
    returned unchanged. Times from the Twitter API arrive as UTC+0000
    regardless of where the tweet was made or what the Twitter settings
    are, so UTC is the correct zone to assume for naive values.

    :param datetime.datetime dt: datetime object.
    :return: A timezone-aware datetime object.
    """
    return dt if dt.tzinfo else dt.replace(tzinfo=pytz.UTC)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.