content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def wrap_quote(text):
    """Format *text* as a Markdown block-quote line.

    Args:
        text: The string to quote.

    Returns:
        str: The input prefixed with "> " and terminated with a newline.
    """
    # Parameter renamed from ``str`` to stop shadowing the builtin.
    return f"> {text}\n"
import pickle
def load_pickle(full_path):
    """Deserialize and return the object stored in the pickle file at *full_path*."""
    with open(full_path, 'rb') as handle:
        return pickle.load(handle)
def get_ancestor_terms(go_term, go_dag, include_go_term=True):
    """Collect every ancestor of a GO term in the Gene Ontology DAG.

    Parameters
    ----------
    go_term : string
        The GO term. Example: "GO:0008150"
    go_dag : goatools.obo_parser.GODag
        The Gene Ontology DAG, parsed from the source obo file
    include_go_term : bool
        Whether to include `go_term` itself in the set of ancestors

    Returns
    -------
    ancestors : set
        The set of ancestors for this term; includes `go_term` itself
        when `include_go_term` is True.
    """
    paths = go_dag.paths_to_top(go_term)
    if paths is None:
        # No paths found: the term is its own only "ancestor".
        ancestors = {go_term}
    else:
        ancestors = set()
        for path in paths:
            for node in path:
                ancestors.add(node.id)
    if not include_go_term:
        ancestors.remove(go_term)
    return ancestors
import functools
def with_post_processing(fn, post_process_fn):
    """Generates a function applying `post_process_fn` to the unpacked outputs of `fn`."""
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # fn's return value is star-unpacked into post_process_fn's arguments.
        outputs = fn(*args, **kwargs)
        return post_process_fn(*outputs)
    return wrapped
from typing import Sequence
def adjacent_n_tuples(objects: Sequence, n: int) -> zip:
    """Return the sequence cyclically split into n-length adjacent tuples.

    See Also
    --------
    adjacent_pairs : alias with n=2

    Examples
    --------
    Normal usage::
        list(adjacent_n_tuples([1, 2, 3, 4], 2))
        # returns [(1, 2), (2, 3), (3, 4), (4, 1)]
        list(adjacent_n_tuples([1, 2, 3, 4], 3))
        # returns [(1, 2, 3), (2, 3, 4), (3, 4, 1), (4, 1, 2)]
    """
    # Build n cyclic rotations of the sequence and zip them in lockstep.
    rotations = []
    for k in range(n):
        rotations.append(list(objects[k:]) + list(objects[:k]))
    return zip(*rotations)
def user_menu(game, user_action):
    """Allow the user to view their inventory or check the current score.

    Player data is taken from the game state; the command in `user_action`
    selects what to display. An unrecognized command prompts
    "No menu option found".

    Args:
        game: contains the current game state
        user_action: the user action typed in by keyboard

    Returns:
        True if successfully executed
    """
    player = game.player
    if 'inventory' in user_action:
        print(player.inventory)
        return True
    if 'score' in user_action:
        print(f"Your current score is: {player.stats['point']}")
        return True
    print("No menu option found")
    return True
def any_schema(schemas):
    """
    Build a schema matching any of the given schemas.
    A single schema is returned unwrapped (no anyOf) so validation
    error messages stay simple.
    """
    candidates = list(schemas)
    return candidates[0] if len(candidates) == 1 else {'anyOf': candidates}
def str_true(v):
    """Render a truthy value as 'TRUE' and a falsy value as the empty string."""
    return "TRUE" if v else ""
def split_data(data, validate_split=0.1, test_split=0.1):
    """Split `data` into train, validation and test sets along axis 0.

    By default the dataset is split into:
        Train    : 80%
        Validate : 10%
        Test     : 10%
    The test rows come first, then validation, then train.
    """
    total = data.shape[0]
    test_end = int(test_split * total)
    validate_end = test_end + int(validate_split * total)
    return data[validate_end:], data[test_end:validate_end], data[:test_end]
def nl2br(text):
    r"""Replace every "\n" in *text* with "<br/>"; None or empty yields ""."""
    if not text:
        return ""
    return text.replace("\n", "<br/>")
def evaluate_functions_at_coordinates(list_of_functions, coordinates):
    """
    Evaluate functions at the given coordinate points.
    Functions should be of the form v = f(x, y, z); the result is one
    tuple of function values per (x, y, z) coordinate.
    """
    results = []
    for x, y, z in coordinates:
        results.append(tuple(f(x, y, z) for f in list_of_functions))
    return results
import requests
def request_smiles(lig_id):
    """Fetch the SMILES string for a given ligand ID"""
    url = "https://data.rcsb.org/rest/v1/core/chemcomp/{}".format(lig_id)
    response = requests.get(url)
    if response.status_code != requests.codes.ok:
        # On failure, dump the body for debugging and hand back the code.
        print(response.text)
        return response.status_code
    return response.json()['rcsb_chem_comp_descriptor']['smiles']
from typing import Optional
def read_file_content(filename: str) -> Optional[str]:
    """Get the contents of a file.

    Returns:
        Optional[str]: The stripped contents of the file as a string,
        or None when the file cannot be opened or read.
    """
    try:
        with open(filename, 'r') as handle:
            content = handle.read()
    except OSError:
        return None
    return content.strip()
def replace_at_offset(hdr, offset, replace=None):
    """Replace bytes at offset, either with replacement bytes or a number of zero bytes.

    Args:
        hdr: Original bytes-like object.
        offset: Position at which the replacement starts.
        replace: Replacement bytes, or an int meaning that many zero bytes.

    Returns:
        bytes: A copy of `hdr` with the replacement spliced in.
    """
    if type(replace) is int:
        replace = b"\x00" * replace
    assert type(replace) is bytes, "Wrong input type"
    # The bound must cover the whole replacement, not just its first byte —
    # the original only checked `offset`, letting the write below raise
    # IndexError when the replacement ran past the end of hdr.
    assert len(hdr) >= offset + len(replace), "Offset too large"
    new_hdr = bytearray(hdr)
    new_hdr[offset:offset + len(replace)] = replace
    return bytes(new_hdr)
def fill_empty_seats(seats_names_dict, seats):
    """
    Ensure every seat in `seats` appears in the mapping, assigning
    the name 'empty' to seats that have no name yet.
    :param seats_names_dict: seats:names dict
    :param seats: list of seats that should be in the dict
    :return: seats:names dict (same object, updated in place)
    """
    for seat in seats:
        seats_names_dict.setdefault(seat, 'empty')
    return seats_names_dict
def deep_update(d, d_update):
    """
    Updates the values (deep form) of a given dictionary

    Parameters:
    -----------
    d : dict
        dictionary that contains the values to update
    d_update : dict
        dictionary to be updated
    """
    # Iterate a snapshot of d_update's items so the recursion cannot be
    # disturbed by mutation during iteration.
    for k, v in list(d_update.items()):
        if isinstance(v, dict):
            if k in d:
                # Both sides hold a dict under k: merge recursively.
                deep_update(d[k], v)
            else:
                # Key absent in target: adopt the whole sub-dict.
                d[k] = v
        elif isinstance(d, list):
            # NOTE(review): when the target is a list, each non-dict entry is
            # appended as its own single-key dict — confirm this is intended.
            d.append({k: v})
        else:
            # Plain value: overwrite (or create) the key in the target.
            d[k] = v
    return d
def get_bafa_subs_hp(q_nom, spf):
    """
    BAFA subsidy for a/w heat pumps with nominal power <= 100 kW and spf >= 3.5

    heat pump must be used for:
    - combined space heating and dhw
    - only space heating if dhw is generated with renewable energy
    - generating heat for lhn
    Values only for retrofit; subsidy for new buildings is lower (not implemented)

    Parameters
    ----------
    q_nom : thermal nominal power of heat pump in kW
    spf : seasonal performance factor ("Jahresarbeitszahl")

    Returns
    -------
    subs : amount of subsidy
    """
    subs = 0
    # base subsidy (spf >= 3.5)
    if q_nom <= 100 and spf >= 3.5:
        if q_nom < 32.5:
            subs = 1300
        else:
            subs = 40 * q_nom
    # innovation subsidy (spf >= 4.5) pays more and must be applied AFTER the
    # base subsidy — the original checked it first, so the base branch always
    # overwrote the higher innovation amount for spf >= 4.5.
    if q_nom <= 100 and spf >= 4.5:
        if q_nom < 32.5:
            subs = 1950
        else:
            subs = 60 * q_nom
    return subs
def workflow_exists(data_base, workflow_name):
    """Check if a certain workflow exists in the DB"""
    rows = data_base.execute(
        "SELECT path FROM workflows WHERE workflow_name = ?;", (workflow_name,))
    # Exhaust the cursor; any row at all means the workflow is present.
    found = False
    for _row in rows:
        found = True
    return found
import re
def enumerated_file_names_sort(x, num_block_to_sort_by=0):
    """
    Key function for 'sorted'.

    Sorts strings by one of the number blocks they contain, e.g. file names
    with a prefix and index: [img1_0.png, img1_1.png, img1_2.png]. When a
    string holds several number blocks, choose which one to sort by with
    partial(enumerated_file_names_sort, num_block_to_sort_by=1)

    :param x: input of the key function
    :param num_block_to_sort_by: the index of the number block to sort by
    :return: the num_block_to_sort_by'th number block of x, as an int
    """
    number_blocks = re.findall(r'\d+', x)
    return int(number_blocks[num_block_to_sort_by])
def create_error(request, status, code='', title='', detail=''):
    """creates a JSON API error - http://jsonapi.org/format/#errors

    "status" - The HTTP status code applicable to this problem,
               expressed as a string value.
    "code"   - An application-specific error code, expressed as a
               string value.
    "title"  - A short, human-readable summary of the problem that
               should not vary between occurrences (except for
               localization).
    "detail" - A human-readable explanation specific to this
               occurrence of the problem.
    """
    # The request id (when a request is available) ties the error back to
    # the originating HTTP request.
    request_id = request.META.get('HTTP_REQUEST_ID', '') if request else ''
    return {
        'id': request_id,
        'status': str(status),
        'code': code,
        'title': title,
        'detail': detail,
    }
def get_requested_formats(path):
    """Returns a list of requested formats.
    The possible values are 'persons' and 'notes'; any other trailing
    path segment requests both."""
    last_segment = path.rsplit('/', 1)[-1]
    if last_segment in ('persons', 'notes'):
        return [last_segment]
    return ['persons', 'notes']
def _mat_vec_dot_fp(x, y):
"""Matrix (list of list) times vector (list)."""
return [sum(a * b for a, b in zip(row_x, y)) for row_x in x] | 9ab85561ee5eee056489324e5f5c594cb573c979 | 113,244 |
async def mock_plex_server(entry, setup_plex_server):
    """Init from a config entry and return a mocked PlexServer instance.

    Test-fixture helper: delegates to the ``setup_plex_server`` factory
    coroutine, forwarding the given config entry unchanged.
    """
    return await setup_plex_server(config_entry=entry)
import math
def _n_choose_k(n, k):
"""Computes nCk."""
return math.factorial(n) // math.factorial(k) // math.factorial(n - k) | 27fe989a5abd8e760b537db7a0a6fea11380d481 | 113,251 |
def test_plot_corner(results_to_test):
    """
    Tests plot_corner() with plotting simulated posterior samples
    for all 8 parameters and for just four selected parameters
    """
    # Full corner plot over every fitted parameter.
    Figure1 = results_to_test.plot_corner()
    assert Figure1 is not None
    # Restricted corner plot over a chosen subset of parameters.
    Figure2 = results_to_test.plot_corner(param_list=['sma1', 'ecc1', 'inc1', 'mtot'])
    assert Figure2 is not None
    return Figure1, Figure2
def get_text(raw):
    """Decode a raw bytes-like object (or iterable of ints) into an ASCII string."""
    return bytearray(raw).decode('ascii')
def remove_last_dir(url):
    """
    Strip the lowest directory layer from the url
    (e.g. /dir/folder --> /dir)

    url: the url to be processed
    returns: the processed url
    """
    last_slash = url.rfind('/')
    return url[:last_slash]
def relu(x):
    """ReLu activation function: clamp negative inputs to zero.

    Args:
        x (float): input value
    Returns:
        float: x when positive, otherwise 0
    """
    return x if x > 0 else 0
import random
def resample(a, b):
    """
    Pool two lists together and randomly split them back into two halves.

    Example:
        [a, b, c] & [1, 2, 3] -> [b, 3, 2] & [c, 1, a]

    :param a: first list
    :param b: second list
    :return: pair of randomly mixed lists
    """
    pool = a + b
    random.shuffle(pool)
    half = len(pool) // 2
    return pool[:half], pool[half:]
def remove_prefix(text, prefix):
    """
    Strip a known prefix (a word found in the dictionary) from the start
    of the text before continuing tokenization.

    Args:
        text: text to tokenize
        prefix: the leading word to remove
    Returns:
        text without the leading prefix (unchanged when the prefix is absent)
    """
    if text.startswith(prefix):
        return text[len(prefix):]
    return text
def col_data_info_schema(db, table_name):
    """Gets metadata for a PostgreSQL table's columns.

    Args:
        db: database handle exposing a ``.query(sql, **params)`` API with
            named-parameter binding (records-style — TODO confirm).
        table_name: name of the table to inspect.

    Returns:
        All (table_name, column_name, data_type) rows from
        information_schema.columns, in ordinal (column) order.
    """
    qry = '''SELECT table_name, column_name, data_type
    FROM information_schema.columns
    WHERE table_name=:table_name
    ORDER BY ordinal_position'''
    return db.query(qry, table_name=table_name).all()
import math
def yaw_pitch_roll(q):
    """
    Calculate Euler angles from a pyquaternion Quaternion.

    Output: - `roll`:  rotation about the new X-axis
            - `pitch`: rotation about the new Y-axis
            - `yaw`:   rotation about the new Z-axis
    Returned as the tuple (yaw, pitch, roll).
    """
    q = q.normalised
    w, x, y, z = q.w, q.x, q.y, q.z
    roll = math.atan2(2 * (w * x + y * z), 1 - 2 * (x ** 2 + y ** 2))
    pitch = math.asin(2 * (w * y - z * x))
    yaw = math.atan2(2 * (w * z + x * y), 1 - 2 * (y ** 2 + z ** 2))
    return yaw, pitch, roll
def to_expr(term):
    """Convert a Term or Pauli operator (X, Y, Z, I) into an Expr.

    Args:
        term (Term, X, Y, Z or I): object exposing a ``to_expr()`` method.

    Returns:
        Expr: the converted expression.
    """
    return term.to_expr()
def add_gaps(df):
    """Add train-test gap columns (gap = max(train - test, 0)) to the dataframe."""
    for train_col in df.columns:
        if not train_col.startswith("train_"):
            continue
        test_col = train_col.replace("train_", "test_")
        if test_col not in df.columns:
            continue
        gap_col = train_col.replace("train_", "gap_")
        # Negative gaps are floored at zero.
        df[gap_col] = (df[train_col] - df[test_col]).clip(lower=0)
    return df
def _WarnIfGitIgnoreHasSources(input_api, output_api):
"""Warn if .gitignore has source files in it."""
for f in input_api.AffectedFiles():
if f.LocalPath().endswith('.gitignore'):
with open(f.LocalPath(), 'r') as f:
lines = f.readlines()
bad_lines = [l.strip() for l in lines if l.strip().endswith(('.c', '.h'))]
if not bad_lines:
break
return [
output_api.PresubmitError('\n'.join([
'.gitignore contains source files which may be needed for building, ',
'please remove the .gitignore entries for the following lines:',
'\n ' + ' \n'.join(bad_lines)
]))
]
return [] | eea418f76bdafbe1e06b9e66aa468fa0d12a628c | 113,285 |
def SfromL( L, nmax=25, epsilon= 2**(-50) ):
    """
    Compute sequence of generalized inverses from given Schroder value L.

    Args:
        L (real): main arg, range 0.0 to 1.0
        nmax (integer): default 25. Max length to allow for S.
        epsilon (real): smallest change in L you don't care about.
    Returns:
        Sequence as list of integers

    Normally epsilon should be 2 ** -(number of significant bits in L), and for
    IEEE 64-bit that's 52 bits (the rest being sign and exponent).
    Fearing trouble with round-offs, I set the default to 50 bits.
    If you're using some alternative form of real numbers, say a fixed point
    format built for a fixed range like -1 to 1, or 0 to 1, then set epsilon
    to something suitable for that type.
    """
    # Prevent accidental use of negative L, or L too close to zero
    # which can lead to infinite loop
    if L<1e-22: # 1e-22 is a guess; no real thinking was done
        return [73]
    S = []
    while len(S) <= nmax:
        # Count how many doublings bring L into [0.5, 1).
        count = 0
        while L < 0.5:
            L = 2.0*L
            count +=1
        S.append( count)
        if count > 52:
            # More doublings than a double has significand bits: L underflowed.
            break;
        if abs(L-0.5) < epsilon:
            # Converged to 0.5 within tolerance; the sequence is complete.
            break
        # Reflect and continue with the complementary value.
        L = 1-L
        if L<1e-22:
            break
    return S
import six
def to_unicode(value):
    """
    Ensure that the provided text value is represented as unicode.

    :param value: Value to convert.
    :type value: ``str`` or ``unicode``
    :rtype: ``unicode``
    """
    if not isinstance(value, six.string_types):
        raise ValueError('Value "%s" must be a string.' % (value))
    if isinstance(value, six.text_type):
        # Already unicode: hand back untouched.
        return value
    return six.u(value)
import ast
def _evaluate_expression(expression, variable_map):
    """Evaluate a python expression.

    The expression must be able to be evaluated as a python expression.

    Args:
        expression (string): A string expression that returns a value.
        variable_map (dict): A dict mapping string variable names to their
            python object values. This is the variable map that will be used
            when evaluating the expression.

    Returns:
        Whatever value is returned from evaluating ``expression`` with the
        variables stored in ``variable_map``.

    Raises:
        AssertionError: if the expression references identifiers that are
            neither builtins nor keys of ``variable_map``.
    """
    # __builtins__ can be either a dict or a module. We need its contents as a
    # dict in order to use ``eval``.
    if not isinstance(__builtins__, dict):
        builtins = __builtins__.__dict__
    else:
        builtins = __builtins__
    builtin_symbols = set(builtins.keys())
    # Collect every bare identifier used anywhere in the expression's AST.
    active_symbols = set()
    for tree_node in ast.walk(ast.parse(expression)):
        if isinstance(tree_node, ast.Name):
            active_symbols.add(tree_node.id)
    # This should allow any builtin functions, exceptions, etc. to be handled
    # correctly within an expression.
    missing_symbols = (active_symbols -
                       set(variable_map.keys()).union(builtin_symbols))
    if missing_symbols:
        raise AssertionError(
            'Identifiers expected in the expression "%s" are missing: %s' % (
                expression, ', '.join(missing_symbols)))
    # The usual warnings should go with this call to eval:
    # Don't run untrusted code!!!
    return eval(expression, builtins, variable_map)
def canon_pairwise_tag(tag: str) -> str:
    """
    Canonicalize pairwise tag to specify unencrypted storage.

    :param tag: input tag
    :return: tag prefixed with '~' if not already
    """
    prefix = '' if str(tag).startswith('~') else '~'
    return '{}{}'.format(prefix, tag)
def getIndexes(string1, string2):
    """Return every position (overlaps included) at which string2 occurs in string1."""
    positions = []
    pos = string1.find(string2)
    # Stop once find fails (-1) or the match would sit at/after the end.
    while -1 < pos < len(string1):
        positions.append(pos)
        pos = string1.find(string2, pos + 1)
    return positions
import re
def convert_operation_name_to_task_id(operation_name):
    """Converts an Operation name to a task ID.

    Everything after the final 'operations/' segment is the task ID;
    names without that segment are returned unchanged.
    """
    match_obj = re.search(r'^.*operations/(.*)$', operation_name)
    if match_obj:
        return match_obj.group(1)
    return operation_name
import math
def cie76(c1, c2):
    """
    Color comparison using the CIE76 algorithm (LAB Delta E).

    Returns a value between 0 and 100: 0 is a perfect match,
    100 is opposing colors.
    http://zschuessler.github.io/DeltaE/learn/
    https://en.wikipedia.org/wiki/Color_difference
    E* = 2.3 corresponds to a JND (just noticeable difference)
    """
    # Only the first three (L, a, b) components participate.
    squared = sum(math.pow(c2[i] - c1[i], 2) for i in range(3))
    return math.sqrt(squared)
from typing import Union
from typing import Dict
from typing import Optional
def get_prefixed_name(
        qname: str, namespaces: Union[Dict[str, str], Dict[Optional[str], str]]) -> str:
    """
    Get the prefixed form of a QName, using a namespace map.

    :param qname: an extended QName or a local name or a prefixed QName.
    :param namespaces: a dictionary with a map from prefixes to namespace URIs.
    :raises ValueError: if `qname` looks like an extended QName but is malformed.
    """
    try:
        if qname[0] == '{':
            # Clark notation: {uri}local
            ns_uri, local_name = qname[1:].split('}')
        elif qname[1] == '{' and qname[0] == 'Q':
            # EQName notation: Q{uri}local
            ns_uri, local_name = qname[2:].split('}')
        else:
            # Already a local or prefixed name: nothing to map.
            return qname
    except IndexError:
        # Too short to be an extended QName (e.g. a 1-char local name).
        return qname
    except (ValueError, TypeError):
        raise ValueError("{!r} is not a QName".format(qname))
    # Reverse-sorted so that, among prefixes mapping to the same URI, the
    # lexicographically greatest wins; a None prefix sorts like ''.
    for prefix, uri in sorted(namespaces.items(), reverse=True,
                              key=lambda x: x if x[0] is not None else ('', x[1])):
        if uri == ns_uri:
            return '%s:%s' % (prefix, local_name) if prefix else local_name
    else:
        # No prefix maps to the URI: return the extended form unchanged.
        return qname
import typing
import inspect
def tidy_fn_call(fn: typing.Callable, args: typing.Iterable, kwargs: dict):
    """
    Move positional arguments into `kwargs` by matching them against `fn`'s
    signature, like a function-call parser.

    Parameters
    ----------
    fn : typing.Callable
        The function to be called (it won't be called).
    args : typing.Iterable
        The positional arguments.
    kwargs : dict
        The keyword arguments.

    Returns
    -------
    tuple
        args, kwargs — leftover positional args and the merged keyword map
        (entries whose value is None are dropped).
    """
    args = list(args)
    params = inspect.signature(fn).parameters
    for name in params:
        if not args:
            break  # no more positional args to assign
        if name in kwargs:
            continue  # already specified as a keyword
        # Consume positional args front-to-back so they line up with the
        # parameter order (the original popped from the END, reversing them).
        kwargs[name] = args.pop(0)
    # Drop entries explicitly set to None (the original dropped every falsy
    # value, which also discarded legitimate 0/False/'' arguments).
    kwargs = {
        k: v
        for k, v in kwargs.items()
        if v is not None
    }
    return args, kwargs
import random
def sim_point(p_s: float) -> bool:
    """Simulate a tennis point by drawing from a uniform distribution.

    Args:
        p_s (float): probability the server wins the point
    Returns:
        bool: True if the server won, False if not
    """
    draw = random.uniform(0, 1)
    return draw <= p_s
def transform(df):
    """Fill nulls with 0, add 10 to the Age column and return only distinct rows.

    Args:
        df: a Spark-style DataFrame with (at least) an ``Age`` column —
            TODO confirm against the caller.
    Returns:
        The transformed DataFrame.
    """
    # Replace nulls first so the Age arithmetic never sees missing values.
    df = df.na.fill(0)
    df = df.withColumn('Age', df['Age'] + 10)
    df = df.distinct()
    return df
def std_scalar(comment, valueType='integer', option=0, **kwargs):
    """Description for standard scalar column.

    Builds a column-description dict backed by the StandardStMan data
    manager; extra keyword arguments are merged in (duplicating one of the
    fixed keys raises TypeError from the dict() call).
    NOTE(review): looks like a casacore table description — confirm against
    the consuming table-creation code.
    """
    return dict(comment=comment, valueType=valueType, dataManagerType='StandardStMan',
                dataManagerGroup='StandardStMan', option=option, maxlen=0, **kwargs)
def table_with_9999_columns_10_rows(bigquery_client, project_id, dataset_id):
    """Generate a table of maximum width via CREATE TABLE AS SELECT.

    The first column is named 'rowval', and has a value from 1..rowcount
    Subsequent columns are named col_<N> and contain the value N*rowval, where
    N is between 1 and 9999 inclusive.

    Returns:
        str: the fully-qualified "project.dataset.table" ID of the new table.
    """
    table_id = "many_columns"
    row_count = 10
    # 9999 generated projections: col_1 = r*1, col_2 = r*2, ... col_9999.
    col_projections = ",".join(f"r * {n} as col_{n}" for n in range(1, 10000))
    sql = f"""
    CREATE TABLE `{project_id}.{dataset_id}.{table_id}`
    AS
    SELECT
    r as rowval,
    {col_projections}
    FROM
    UNNEST(GENERATE_ARRAY(1,{row_count},1)) as r
    """
    query_job = bigquery_client.query(sql)
    # Block until the CTAS completes so the table exists on return.
    query_job.result()
    return f"{project_id}.{dataset_id}.{table_id}"
def cut_groups(data, col, cutoffs):
    """Cut data into subsets according to cutoffs.

    Parameters
    ----------
    data : pandas.DataFrame
        Data to split.
    col : str
        Name of column in data to compare with.
    cutoffs : list(int)
        List of cutoffs, like as [min-value, 30, 60, max-value].

    Returns
    -------
    list(pandas.DataFrame)
        List of sub-data as DataFrame.

    Examples
    --------
    >>> cut_groups(data, "X", [0, 0.4, 0.6, 1.0])
    [pandas.DataFrame, pandas.DataFrame, pandas.DataFrame]
    """
    subsets = []
    last = len(cutoffs) - 2
    for i in range(len(cutoffs) - 1):
        lower = data[col] >= cutoffs[i]
        # The final interval is closed on the right so the max value is kept.
        if i == last:
            upper = data[col] <= cutoffs[i + 1]
        else:
            upper = data[col] < cutoffs[i + 1]
        subsets.append(data[lower & upper])
    return subsets
def match(pattern, address):
    """
    Match ip address patterns.

    This is not regex: a trailing star does prefix matching, and None or
    an empty string matches any address.
        match('192.168.0.1', '192.168.0.1') == True
        match('192.168.0.2', '192.168.0.1') == False
        match('192.168.*', '192.168.23.56') == True
        match('192.168.0.*', '192.168.0.1') == True
        match('192.168.0.*', '192.168.0.35') == True
        match('193.*', '192.168.0.1') == False

    :param pattern: pattern to match against.
    :param address: the address to check
    :return: True if the address matches the pattern.
    """
    if not pattern:
        # None or "" acts as a wildcard.
        return True
    if pattern.endswith('*'):
        return address.startswith(pattern[:-1])
    return pattern == address
def is_prime(num: int) -> bool:
    """
    Checks whether a natural number is prime.

    Parameters:
        num: the number to be checked
    Returns:
        True if the number is prime, otherwise False
    Raises:
        ValueError: if num is not a natural number (num <= 0)
    """
    if num <= 0:
        raise ValueError("Input argument should be a natural number")
    if num == 1:
        return False
    # Trial division only needs to reach sqrt(num): any factor above it
    # pairs with one below it. The original scanned all the way to num-1.
    i = 2
    while i * i <= num:
        if num % i == 0:
            return False
        i += 1
    return True
def rename(row, renaming):
    """Rename keys in a dictionary in place.

    For each (k, v) in renaming.items(): move row[k] to row[v].

    Args:
        row: dict to mutate.
        renaming: mapping of old key -> new key; falsy means no-op.
    Returns:
        The same row dict, for convenience (the original returned None
        after renaming, inconsistently with the no-op path).
    """
    if not renaming:
        return row
    for old_key, new_key in renaming.items():
        if old_key == new_key:
            # Identity mapping: the original's assign-then-delete would have
            # deleted the value outright here.
            continue
        row[new_key] = row[old_key]
        del row[old_key]
    return row
def make_observer(world, team, name='ATOMIC'):
    """
    :param world: the PsychSim World
    :param team: names of agents to be modeled
    :type team: List[str]
    :param name: name of the agent to be created, defaults to 'ATOMIC'
    :type name: str
    :return: the newly created agent, also added to the given World
    :rtype: Agent
    """
    # NOTE(review): `team` is currently unused — confirm whether the observer
    # is supposed to model those agents here or elsewhere.
    # avoid_beliefs=False so the observer maintains its own belief state.
    agent = world.addAgent(name, avoid_beliefs=False)
    return agent
def remove_number(words):
    """Return the words that are not made purely of digits."""
    return [word for word in words if not word.isdigit()]
def is_container(obj):
    """
    Test if an object is a container (iterable) but not a string
    """
    if isinstance(obj, str):
        return False
    return hasattr(obj, '__iter__')
def duplicated_rows(data):
    """
    Report how many duplicated rows the DataFrame contains (printed, not returned).
    """
    n_dupes = data.duplicated().sum()
    print('Duplicated rows')
    print()
    print('The dataset has {} rows duplicated'.format(n_dupes))
    print('---------------------------------')
    return None
def get_multiplier(factor):
    """
    Convert the factor into a number.

    :param factor: the string 'mb', 'm', or 'k' (case-insensitive)
    :return: 10000000, 1000000, 1000 or 1 for anything else
    """
    multipliers = {'mb': 10000000, 'm': 1000000, 'k': 1000}
    return multipliers.get(factor.lower(), 1)
def intersection(collection_a, collection_b):
    """
    The intersection between two collections considered
    as sets (duplicated items will be removed).
    """
    return set(collection_a) & set(collection_b)
def color_scale(color, level):
    """Scale each channel of an RGB tuple by level/256 (level in 0-255)."""
    return tuple((channel * level) >> 8 for channel in color)
def _check_child_op_type(op, child_op_type):
"""
:param op: operation
:param child_op_type: str
:return: Return True if op has 1 child and type of that child matches child_op_type
"""
if len(op.outputs) != 1:
return False
child_ops = list(op.outputs[0].child_ops)
if len(child_ops) != 1:
return False
if child_ops[0].op_type == child_op_type:
return True
return False | cd6e5f342e2d4a668fd0c127b23ae5a26b1ab39a | 113,373 |
def _get_session_id_from_cookie(request, cookie_name, cookie_signer):
"""
Attempts to retrieve and return a session ID from a session cookie in the
current request. Returns None if the cookie isn't found or the value cannot
be deserialized for any reason.
"""
cookieval = request.cookies.get(cookie_name)
if cookieval is not None:
try:
session_id = cookie_signer.loads(cookieval)
return session_id
except ValueError:
pass
return None | e88e186a7e0e38984706454b4cf9936ef1b639da | 113,376 |
from typing import Union
from pathlib import Path
import hashlib
def generate_md5(filename: Union[str, Path], blocksize: int = 2 ** 20) -> str:
    """Generate the md5sum of a file without loading it all into memory.

    Parameters
    ----------
    filename: string or Path
        The file to generate the md5sum for.
    blocksize: int; default 2**20
        The amount of memory to use when generating the md5sum string.

    Returns
    -------
    md5sum: string
        The md5sum string.
    """
    digest = hashlib.md5()
    with open(str(filename), "rb") as handle:
        # iter() with a b"" sentinel yields chunks until EOF.
        for chunk in iter(lambda: handle.read(blocksize), b""):
            digest.update(chunk)
    return digest.hexdigest()
def insert_run_at_position(par, pos, txt=''):
    """Insert a new run with text {txt} into paragraph {par}
    at given position {pos}.
    Returns the newly created run.
    """
    # NOTE(review): relies on python-docx private attributes (_p, _r) —
    # confirm against the python-docx version in use.
    p = par._p
    # add_run appends at the end; move the run's XML element to the
    # requested position (+1 to skip the paragraph-properties child).
    new_run = par.add_run(txt)
    p.insert(pos + 1, new_run._r)
    return new_run
def _quantization_annotation(lhs_prec, rhs_prec,
rhs_is_weight):
"""Returns an annotation to be appended to the name of the quantizable op."""
bfloat16_prec = 'bf16'
def _replace_with_bf16_if_prec_is_none(prec):
return prec if prec is not None else bfloat16_prec
lhs_prec = _replace_with_bf16_if_prec_is_none(lhs_prec)
rhs_prec = _replace_with_bf16_if_prec_is_none(rhs_prec)
quant_annotation = '_lhs{}_rhs{}_lw{}'.format(lhs_prec, rhs_prec,
int(rhs_is_weight))
return quant_annotation | caeb3ac457ba5a38d12f6e98eb4dbc44f85e695e | 113,380 |
import json
def load_json_rcfile(fname):
    """Loads a JSON run control file and returns the parsed object."""
    with open(fname, "r", encoding='utf-8') as handle:
        return json.load(handle)
def get_cidr(netmask):
    """Convert a dotted-quad netmask to its CIDR prefix length, as a string."""
    # Concatenate all four octets as 8-bit binary, then count the leading
    # ones by stripping trailing zeros.
    bits = ''.join(bin(int(octet))[2:].zfill(8) for octet in netmask.split('.'))
    return str(len(bits.rstrip('0')))
def _convert_to_seconds(time):
"""Will convert any time into seconds.
If the type of `time` is not valid,
it's returned as is.
Here are the accepted formats::
>>> convert_to_seconds(15.4) # seconds
15.4
>>> convert_to_seconds((1, 21.5)) # (min,sec)
81.5
>>> convert_to_seconds((1, 1, 2)) # (hr, min, sec)
3662
>>> convert_to_seconds('01:01:33.045')
3693.045
>>> convert_to_seconds('01:01:33,5') # coma works too
3693.5
>>> convert_to_seconds('1:33,5') # only minutes and secs
99.5
>>> convert_to_seconds('33.5') # only secs
33.5
:param time: time_string
:type time: string
:return: time in seconds
:rtype: float
"""
factors = (1, 60, 3600)
if isinstance(time, str):
time = [float(part.replace(",", ".")) for part in time.split(":")]
if not isinstance(time, (tuple, list)):
return time
return sum(mult * part for mult, part in zip(factors, reversed(time))) | a5dba1cabec8c4133ee5b04e26ffc15844f8cf5a | 113,389 |
def maybe_green(app, callable, *args, **kwargs):
    """Run a ``callable`` in the green pool if needed

    :param app: lux application
    :param callable: callable to execute
    :param args:
    :param kwargs:
    :return: a synchronous or asynchronous result
    """
    pool = app.green_pool
    if not pool:
        # No pool configured: execute synchronously.
        return callable(*args, **kwargs)
    return pool.submit(callable, *args, **kwargs)
def powmod(x, r, n):
    """Computes (x ** r) % n using Python's built-in three-argument pow.

    Args:
        x (int): Base
        r (int): Power
        n (int): Modulo

    Returns:
        int: (x ** r) % n
    """
    return pow(x, r, n)
def row_to_dict(row):
    """Convert a mapping-like Row object into a plain dictionary."""
    return dict(row)
import collections
def transpose_dict_with_sets(dict_in):
    """
    Invert a key -> set-of-values mapping: every value becomes a key whose
    set contains all original keys that mapped to it (so a in X[b] implies
    b in Y[a]).
    """
    transposed = collections.defaultdict(set)
    for source_key, values in dict_in.items():
        for value in values:
            transposed[value].add(source_key)
    return transposed
def _LinearInterpolate(x0, target, x1, y0, y1):
"""Perform linear interpolation to estimate an intermediate value.
We assume for some F, F(x0) == y0, and F(x1) == z1.
We return an estimate for what F(target) should be, using linear
interpolation.
Args:
x0: (Float) A location at which some function F() is known.
target: (Float) A location at which we need to estimate F().
x1: (Float) A second location at which F() is known.
y0: (Float) The value of F(x0).
y1: (Float) The value of F(x1).
Returns:
(Float) The estimated value of F(target).
"""
if x0 == x1:
return (y0 + y1) / 2
return (y1 - y0) * (target - x0) / (x1 - x0) + y0 | f8d9d431de42377ecb12d027a75af2578a696084 | 113,403 |
def num_conv(number, plural=False):
    """Converts card numbers into their proper names

    Args:
        number (int): The number intended to be converted
        plural (bool): Determines if the result should be in plural or single form

    Returns:
        The proper name to be used for the card number
    """
    # Face cards (and the Ace, which may appear as 1 or 14) get names;
    # every other number is rendered as its digits.
    names = {1: "Ace", 14: "Ace", 11: "Jack", 12: "Queen", 13: "King"}
    base = names.get(number, str(number))
    return base + "s" if plural else base
def reverse_word(string,
                 word_start,
                 word_end):
    """
    Reverse ``string[word_start:word_end + 1]`` in place and return the sequence.

    Args:
        string: a mutable sequence (e.g. a list of characters).
        word_start: index of the first element of the span.
        word_end: index of the last element of the span (inclusive).

    Returns:
        The same sequence, with the span reversed.
    """
    start, end = word_start, word_end
    # Swap symmetric pairs walking inward.  The previous `end - start > 1`
    # guard incorrectly skipped two-element spans, leaving them unreversed;
    # the `start < end` loop condition alone handles every span length.
    while start < end:
        string[start], string[end] = string[end], string[start]
        start += 1
        end -= 1
    return string
def pairwise(iterable):
    """
    Splits a list of items into pairs: s -> (s0,s1), (s2,s3), (s4, s5), ...
    """
    # zip-ing one iterator with itself consumes two items per output tuple;
    # a trailing odd element is silently dropped.
    it = iter(iterable)
    return zip(it, it)
def fib(n):
    """
    Compute the n-th Fibonacci number.

    Iterative implementation: the naive double recursion is exponential in
    ``n``; this runs in O(n) time and O(1) space with identical results
    (for n <= 1 the argument itself is returned, as before).
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
from pathlib import Path
from typing import Any
import json
def templatevariable(template: Path, name: str) -> Any:
    """Return the value of a template variable.

    Reads ``cookiecutter.json`` inside *template* and looks up *name*.
    """
    config_file = template / "cookiecutter.json"
    variables = json.loads(config_file.read_text())
    return variables[name]
def user_input_validator(validator, question):  # pragma: no cover
    """Prompt until the upper-cased input passes ``validator`` or equals 'Q'.

    validator: function, test for user input. Returns a truthy
               value for a valid input, falsey for invalid.
    question: str, prompt for user to answer
    """
    while True:
        answer = input(question).upper()
        if validator(answer) or answer == 'Q':
            return answer
        print('Invalid user input, input again\n')
from typing import Union
def uniprot_name_to_id(name_to_id_map: dict, name: str) -> Union[str, None]:
    """Uniprot name to ID mapping.

    :param name_to_id_map: mapping dict[name] -> id
    :param name: name
    :return: id string, or None when *name* is not in the map
    """
    # dict.get already returns None for missing keys: one lookup, no branch.
    return name_to_id_map.get(name)
def match(first, second):
    """
    Tell if two strings match, regardless of letter capitalization

    input
    -----
    first : string
        first string
    second : string
        second string

    output
    -----
    flag : bool
        if the two strings are approximately the same
    """
    if len(first) != len(second):
        return False
    # A position matches when the character from `first`, lower- or
    # upper-cased, equals the character from `second` exactly.
    return all(a.lower() == b or a.upper() == b for a, b in zip(first, second))
def get_bcl2fastq_read_type_map(read_info, sample_index_read=None, dual_indexed=False, ignore_dual_index=False):
    """
    Get a mapping between ReadInfo read name (R1,I1,I2,R2) and bcl2fastq
    output file naming (R1/R2/R3/R4/I1)

    The guarantee here is that the 10X sample index will always be on I1,
    if generated. If dual-indexing is specified, the secondary index will
    be on I2. Upstream pipestances can expect read1 to be in R1, sample
    indexes to be on I1, and read2 to be on R2.

    :param read_info: The ReadInfo block from RunInfo.xml
    :param sample_index_read: The ReadInfo read (I1, I2) to use as the sample index
    :param dual_indexed: Whether the run carries a secondary index read
    :param ignore_dual_index: Whether the dual index was ignored (and thus not sequenced)
    :return: dict mapping each ReadInfo read name to its bcl2fastq label
    """
    read_map = {}
    reads_counted = 0
    # Note: the loop previously used enumerate() but never used the index.
    for r in read_info:
        read_name = r["read_name"]
        if read_name == sample_index_read:
            read_map[read_name] = 'I1'
        elif ignore_dual_index and r["index_read"]:
            # we didn't read this index -- see make_bases_mask_val
            continue
        elif dual_indexed and r["index_read"]:
            read_map[read_name] = 'I2'
        else:
            # Non-index reads are numbered R1, R2, ... in encounter order.
            reads_counted += 1
            read_map[read_name] = 'R%d' % reads_counted
    return read_map
def is_true(val):
    """Coerce any Python object to its boolean truth value.

    Args:
        val (object): any python object

    Returns:
        bool: ``bool(val)``
    """
    # Double negation yields exactly bool(val).
    return not not val
from collections import defaultdict
def dict_with_lengths(words):
    """
    Group words by their length.

    The function takes a list of words as its argument.
    Returns a dict with a length as key and a list of words with that length as value.
    For example,
    INPUT:  ["apple", "ball", "cat", "dog", "egg", "fruit"]
    OUTPUT: {5: ["apple", "fruit"], 4: ["ball"], 3: ["cat", "dog", "egg"]}
    (The previous docstring example listed incorrect groupings; within each
    length the original input order is preserved.)
    """
    answer = defaultdict(list)
    for word in words:
        answer[len(word)].append(word)
    return answer
def energy_overlap(sp1, sp2):
    """
    Calculate the overlap energy range of two spectra, i.e. lower bound is the
    maximum of two spectra's minimum energy.
    Upper bound is the minimum of two spectra's maximum energy

    Args:
        sp1: Spectrum object 1
        sp2: Spectrum object 2

    Returns:
        Overlap energy range as a two-element list [lower, upper]
    """
    lower = max(sp1.x.min(), sp2.x.min())
    upper = min(sp1.x.max(), sp2.x.max())
    return [lower, upper]
def get_qid(url):
    """
    Returns the Qid from the entity url
    """
    # Everything after the final '/' is the Qid.
    return url.rsplit('/', 1)[-1]
def dict_to_opts(d):
    """Turn a mapping into a flat command-line option list.

    We assume that if a value is None then k is a flag and should be
    appended with no value.  List values contribute one string per element.

    :param d: mapping of option name -> value (None = flag, list = repeated)
    :return: list of strings, e.g. ``{"a": 1}`` -> ``["--a", "1"]``
    """
    opts = []
    for key, value in d.items():
        opts.append(f"--{key}")
        if value is None:
            continue  # flag: no value follows
        if isinstance(value, list):  # isinstance, not `type(v) is list`
            opts.extend(str(item) for item in value)
        else:
            opts.append(str(value))
    return opts
def _open_to_close_tag(tag):
"""
Given an opening xml tag, return the matching close tag
eg. '<YAMAHA_AV cmd="PUT"> becomes </YAMAHA_AV>
"""
index = tag.find(' ')
if index == -1:
index = len(tag) - 1
return '</' + tag[1:index] + '>' | 4ed4e4e98ce37ab6dc7cf37246fc58284d0d1cac | 113,455 |
def pairs(seq):
    """Return a sequence in pairs.

    Arguments:
        seq (sequence): A sequence with an even number
            of elements. If the number is uneven, the
            last element will be ignored.

    Returns:
        A zip object with tuple pairs of elements.

    Example:
        >>> list(pairs([1,2,3,4]))
        [(1, 2), (3, 4)]
    """
    it = iter(seq)
    # zip pulls two items from the same iterator per output tuple.
    return zip(it, it)
import pathlib
def get_pyproject_path(base_dir=pathlib.Path(".")) -> pathlib.Path:
    """
    Get the path to the pyproject.toml file in the given `base_dir` (which
    defaults to the current directory).
    """
    return base_dir.joinpath("pyproject.toml")
def value_key(cell_letter, cell_number):
    """Return a Cell/Row combination to use as a key (e.g. 'A1')."""
    return f"{cell_letter}{cell_number}"
def make_experiment_name(experiment):
    """Create a readable name containing the name and value of the variables."""
    parts = []
    for name, value in experiment.items():
        # Floats are shortened to 4 significant digits; everything else verbatim.
        fmt = "%s=%.4g" if isinstance(value, float) else "%s=%s"
        parts.append(fmt % (name, value))
    return ';'.join(parts)
def get_books_by_title(args, books):
    """
    Get books whose titles contain the arguments (case-insensitive).

    :param args: args object containing all arguments
    :param books: A list of book objects read from csv file
    :return: A list of book objects whose title matches the arguments,
             or None when no title arguments were supplied.
    """
    if not args.title:
        return None
    matches = []
    for arg in args.title:
        needle = arg.lower()  # hoisted: lowercase once per argument, not per book
        for book in books:
            # `book not in matches` (idiomatic form of `not book in`) dedupes
            # books that match more than one argument.
            if needle in book.title.lower() and book not in matches:
                matches.append(book)
    return matches
import math
def area_of_pixel(pixel_size, center_lat):
    """Calculate m^2 area of a wgs84 square pixel.

    Adapted from: https://gis.stackexchange.com/a/127327/2397

    Args:
        pixel_size (float): length of side of pixel in degrees.
        center_lat (float): latitude of the center of the pixel. Note this
            value +/- half the `pixel-size` must not exceed 90/-90 degrees
            latitude or an invalid area will be calculated.

    Returns:
        Area of square pixel of side length `pixel_size` centered at
        `center_lat` in m^2.
    """
    a = 6378137  # meters (WGS84 semi-major axis)
    b = 6356752.3142  # meters (WGS84 semi-minor axis)
    e = math.sqrt(1 - (b/a)**2)

    def zone_term(lat_deg):
        # Spheroid area term evaluated at latitude `lat_deg`.
        sin_lat = math.sin(math.radians(lat_deg))
        zm = 1 - e * sin_lat
        zp = 1 + e * sin_lat
        return math.pi * b**2 * (
            math.log(zp/zm) / (2*e) + sin_lat / (zp*zm))

    north = zone_term(center_lat + pixel_size/2)
    south = zone_term(center_lat - pixel_size/2)
    # Scale the latitude band's area by the pixel's longitudinal fraction.
    return abs(pixel_size / 360. * (north - south))
def make_shard_endpoints(total_length, shard_size=int(1e6)):
    """
    Partition the half-open integer interval [0, total_length) into a
    sequence of half-open subintervals [s0,e0), [s1,e1), ... [s_n, e_n)
    such that s0 = 0, s_(k+1) = e_k, e_n = total_length, and each of these
    subintervals (except possibly the last) has length equal to the given
    shard_size. Return the sequence of pairs of endpoints of the
    subintervals.
    """
    # range() steps through the shard start offsets; min() clips the final end.
    return [(start, min(start + shard_size, total_length))
            for start in range(0, total_length, shard_size)]
def get_giturl_from_url(url):
    """extracts the actual git url from an url string
    (splits off the branch name after the optional '#')

    :param url: an url string, with optional '#' branch name appended
    :returns: the actual git url
    """
    git_url, _, _branch = url.partition('#')
    return git_url
def get_required_key(key, config):
    """
    Standardizes error checking for loading a key from config
    """
    try:
        value = config[key]
    except KeyError:
        message = '"{}" is a required key in your config file.'.format(key)
        raise ValueError(message)
    return value
def getObjectPath(obj):
    """ Return the path of this object: its context's path plus its own name.
    """
    parent_path = obj.getContext().getPathName()
    return "{}/{}".format(parent_path, obj.name)
def rk4(ode, y, t, h):
    """
    Classic fourth-order Runge-Kutta increment (acquired L10).

    ode ... ordinary differential equation to solve, called as ode(t, y)
    y   ... A vector function containing D and dD/dt
    t   ... time to integrate over
    h   ... integration step

    Returns the weighted slope combination (k1 + 2*k2 + 2*k3 + k4) / 6.
    """
    half = h / 2.
    k1 = h * ode(t, y)
    k2 = h * ode(t + half, y + k1 / 2.)
    k3 = h * ode(t + half, y + k2 / 2.)
    k4 = h * ode(t + h, y + k3)
    return k1 / 6. + k2 / 3. + k3 / 3. + k4 / 6.
import pathlib
from typing import Optional
from typing import List
def files_to_list(file_path: pathlib.Path, separator: Optional[str] = '|') -> List[pathlib.Path]:
    """Returns list of file path, which are listed in input file (one file path at a line).

    Args:
        file_path: Path to the input file.
        separator: Each line from the input file will be split at this separator. The first
            (0 index) item will be treated as an audio file path. If None, lines will not be
            split (the whole line is an audio file path).

    Returns: list of file paths, resolved relative to the input file's directory.
    """
    from_dir = file_path.parent
    with file_path.open() as f:
        lines = f.readlines()
    if separator is not None:
        # Bug fix: previously split on a hard-coded '|' regardless of the
        # `separator` argument, so custom separators were silently ignored.
        entries = [line.split(separator)[0].strip() for line in lines]
    else:
        entries = [line.strip() for line in lines]
    return [from_dir / entry for entry in entries]
def union_dicts(d1, d2):
    """Union dictionaries, collecting values per key into lists.

    {1 : 2, 3 : 4} and {1 : 4, 8 : 6} ==> {8: [6], 1: [2, 4], 3: [4]}"""
    merged = {key: [value] for key, value in d1.items()}
    for key, value in d2.items():
        merged.setdefault(key, []).append(value)
    return merged
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.