content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def clip(value: float, min_value: float, max_value: float) -> float:
    """Constrain *value* to the inclusive range [min_value, max_value]."""
    if value < min_value:
        return min_value
    return max_value if value > max_value else value
def get_max_ts(ts1, ts2):
    """Return the larger of two timestamps, tolerating null (falsy) values.

    When one timestamp is missing the other is returned; when both are
    missing the (falsy) second one is returned.
    """
    if not ts1:
        return ts2
    if not ts2:
        return ts1
    return max(ts1, ts2)
from pathlib import Path
from typing import Tuple
def check_for_file_overwrite(file: Path, default_yes: bool = True) -> Tuple[bool, bool]:
    """Check whether *file* exists and, if so, ask the user about overwriting.

    Args:
        file: Path to test.
        default_yes: When True, an empty response means "overwrite".

    Returns:
        Tuple ``(abort, exists)``: ``abort`` is True when the user declined
        the overwrite; ``exists`` is True when the file already exists.
    """
    exists = file.exists()
    abort = exists
    # Bug fix: the original prompted unconditionally, even when the file
    # did not exist. Also fixed the mismatched prompt bracket "[Y/n)".
    if exists:
        if default_yes:
            resp = input(f"The file {file} already exists. Overwrite? [Y/n]")
            if resp in ("", "Y", "y", "yes"):
                abort = False
        else:
            resp = input(f"The file {file} already exists. Overwrite? [y/N]")
            if resp in ("Y", "y", "yes"):
                abort = False
    return abort, exists
import colorsys
def rgb_to_hsv(rgb):
    """Convert a PIL-style RGB tuple to the HSV tuple of the same color.

    Both tuples use three integers in the 0-255 range.

    Args:
        rgb (Tuple[int]): RGB tuple to convert.

    Returns:
        Tuple[int]: the (h, s, v) tuple, each component in 0-255.
    """
    scaled = tuple(channel / 255 for channel in rgb)
    hsv = colorsys.rgb_to_hsv(*scaled)
    return tuple(round(component * 255) for component in hsv)
def is_field(field):
    """Return True when *field* is a valid field object.

    Valid means: it exposes ``name`` and ``value`` attributes, neither is
    None, and ``name`` is a string.
    """
    if not all(hasattr(field, attr) for attr in ("name", "value")):
        return False
    if field.name is None or field.value is None:
        return False
    return isinstance(field.name, str)
import json
import logging
def load_session(file_name, program):
    """Load and parse the given Sublime session file.

    Returns a tuple ``(session, items, folders, files)``; each element is
    None when the file is missing or cannot be parsed (the failure is
    logged).
    """
    try:
        with open(file_name, encoding="utf-8") as handle:
            session = json.load(handle)
        if program == "text":
            items = session["workspaces"]["recent_workspaces"]
            folders = session["folder_history"]
            files = [window["file_history"] for window in session["windows"]]
            files.append(session["settings"]["new_window_settings"]["file_history"])
        else:
            items = session["recent"]
            folders = files = None
        return (session, items, folders, files)
    except FileNotFoundError:
        logging.exception("Unable to locate session file")
    except ValueError:
        logging.exception("Session file could not be parsed; invalid JSON?")
    except KeyError:
        logging.exception("Session file could not be parsed; invalid format?")
    return (None, None, None, None)
def get_home_road(game, event):
    """Return (team_abbr, side) for the team an event belongs to.

    ``side`` is 'home' or 'road' depending on the event's team field.
    """
    side = 'home' if event['data']['team'] == 'home' else 'road'
    return game[f'{side}_abbr'], side
import re
def remove_fabrikate_prefix(components):
    """Strip a leading 'fabrikate-' from each component's name, in place.

    Returns the same list for convenience.
    """
    prefix = re.compile(r'^fabrikate-')
    for component in components:
        component.name = prefix.sub('', component.name)
    return components
def ex_obj_to_inq(objType):
    """Map an object-type string to its ex_inquiry string.

    The result can be passed to ex_inquiry_map() to obtain the number of
    objects of the given type. Returns -1 for unknown types.
    """
    inquiry_by_entity = {
        # assemblies / blobs
        'EX_ASSEMBLY': 'EX_INQ_ASSEMBLY',
        'EX_BLOB': 'EX_INQ_BLOB',
        # blocks
        'EX_EDGE_BLOCK': 'EX_INQ_EDGE_BLK',
        'EX_FACE_BLOCK': 'EX_INQ_FACE_BLK',
        'EX_ELEM_BLOCK': 'EX_INQ_ELEM_BLK',
        # sets
        'EX_NODE_SET': 'EX_INQ_NODE_SETS',
        'EX_EDGE_SET': 'EX_INQ_EDGE_SETS',
        'EX_FACE_SET': 'EX_INQ_FACE_SETS',
        'EX_ELEM_SET': 'EX_INQ_ELEM_SETS',
        'EX_SIDE_SET': 'EX_INQ_SIDE_SETS',
        # maps
        'EX_NODE_MAP': 'EX_INQ_NODES',
        'EX_EDGE_MAP': 'EX_INQ_EDGE',
        'EX_FACE_MAP': 'EX_INQ_FACE',
        'EX_ELEM_MAP': 'EX_INQ_ELEM',
    }
    return inquiry_by_entity.get(objType, -1)
def create_palindrome_v1(start, end):
    """Recursively build a numeric palindrome running from *start* in to
    *end* (at the centre) and back out to *start*.

    Parameters: start, end (int) -- positive integers; no validation done.
    Returns: the palindrome sequence as a str.

    >>> create_palindrome_v1(1, 1)
    '1'
    >>> create_palindrome_v1(3, 5)
    '34543'
    >>> create_palindrome_v1(5, 2)
    '5432345'
    >>> create_palindrome_v1(9, 1)
    '98765432123456789'
    """
    if start == end:
        return str(start)
    # Step toward the centre regardless of direction.
    step = 1 if start < end else -1
    outer = str(start)
    return outer + create_palindrome_v1(start + step, end) + outer
import random
def rand_between(start: int, stop: int) -> int:
    """Return a uniformly random integer in the inclusive range [start, stop].

    Parameters:
        start (int): lower limit of the random number
        stop (int): upper limit of the random number
    """
    return random.randrange(start, stop + 1)
import math
def lorentzian(x, cen=0, gamma=1, peak=None):
    """1-dimensional Lorentzian profile.

    Parameters
    ----------
    x : array or number
    cen : [0] center, x0
    gamma : [1] half width at half maximum
    peak : [None] when None, peak = 1 / (pi * gamma), so the distribution
        integrates to 1
    """
    height = 1.0 / (math.pi * gamma) if peak is None else peak
    reduced = (1.0 * x - cen) / gamma
    return height / (1.0 + reduced ** 2)
import json
def json_description(shape, **metadata):
    """Return a JSON image description from a data shape plus extra metadata.

    >>> json_description((256, 256, 3), axes='YXS')  # doctest: +SKIP
    b'{"shape": [256, 256, 3], "axes": "YXS"}'
    """
    payload = dict(metadata, shape=shape)
    return json.dumps(payload)
from typing import Dict
from typing import List
def parse_tab_seperated_txt(txt_path: str) -> Dict[str, List[float]]:
    """Parse a tab-separated text file into a dict of float columns.

    The first line holds the column names; every following line holds one
    float per column.

    Args:
        txt_path: The path to the text file.

    Returns:
        Dict[str, List[float]]: mapping of column name to column values.
    """
    # Bug fixes: the original opened a hard-coded "q4.txt" instead of
    # txt_path, called float() on a whole list of fields, and assigned the
    # same (broken) column data to every column.
    rtn: Dict[str, List[float]] = {}
    with open(txt_path) as file:
        lines = file.readlines()
    col_names = lines[0].rstrip("\n").split("\t")
    data_rows = [line.rstrip("\n").split("\t") for line in lines[1:] if line.strip()]
    for idx, col in enumerate(col_names):
        rtn[col] = [float(row[idx]) for row in data_rows]
    return rtn
from typing import Dict
def get_column_formats_by_sheet_name() -> Dict[str, Dict[str, Dict[str, str]]]:
    """
    Get the Excel cell formats for each column in each sheet of the output
    XLSX file.

    :return: Nested dictionary of Excel cell formats keyed by sheet name,
        then column name.
    """
    # Shared cell formats.
    plain: Dict[str, str] = {}
    integer: Dict[str, str] = {"num_format": "#0"}
    two_dp: Dict[str, str] = {"num_format": "#,##0.00"}
    # NOTE(review): "#,####0.0000" looks unusual ("#,##0.0000" would be the
    # conventional grouped 4-dp format) -- kept as-is to preserve output.
    four_dp: Dict[str, str] = {"num_format": "#,####0.0000"}
    return {
        "mouse_grouping": {
            "group": integer,
            "mouse_id": integer,
            "tumor_size": two_dp,
        },
        "group_statistics": {
            "group": integer,
            "num_mice_in_group": integer,
            "mouse_ids_in_group": plain,
            "tumor_size_mean": two_dp,
            "overall_mean_diff": four_dp,
        },
    }
import yaml
def load_config(config_file):
    """Open and parse the YAML configuration file given in parameter.

    Returns the parsed data, or None when the file contains invalid YAML
    (an error message is printed in that case).
    Note: a missing file still raises OSError from open().
    """
    with open(config_file, 'r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # Best-effort: report the parse error and fall through to None.
            print("[Error] - Error while opening the YAML conf file: {}".format(exc))
def get_duration(num_samples, sample_rate):
    """Duration in seconds of some consecutive samples: the number of
    samples times the sample period.

    :Parameters:
        num_samples : nonnegative number
            the number of samples.
        sample_rate : positive number
            the sample rate in Hertz.
    """
    seconds = num_samples / sample_rate
    return seconds
def split_remove_add(item_list):
    """Partition *item_list* into (to_add, to_remove).

    Items prefixed with '-' go to the remove list (prefix stripped);
    everything else goes to the add list.
    """
    to_add = [item for item in item_list if not item.startswith('-')]
    to_remove = [item[1:] for item in item_list if item.startswith('-')]
    return to_add, to_remove
import requests
def download_shoptet_catalog(catalog_url):
    """Download the Shoptet catalog CSV at *catalog_url* and return it
    decoded from the cp1250 encoding."""
    raw = requests.get(catalog_url).content
    return raw.decode('cp1250')
from datetime import datetime
def share_same_hour(d1: datetime, d2: datetime) -> bool:
    """Return True when both datetimes fall within the same calendar hour
    (same date and same hour)."""
    def floor_hour(d: datetime) -> datetime:
        return d.replace(minute=0, second=0, microsecond=0)
    return floor_hour(d1) == floor_hour(d2)
def MREF(mft_reference):
    """Given a MREF/mft_reference, return the record-number part
    (the low 48 bits)."""
    RECORD_NUMBER_MASK = 0xFFFFFFFFFFFF  # low 6 bytes
    return mft_reference & RECORD_NUMBER_MASK
def clean_filename(filename):
    """Remove any leading path components and surrounding whitespace
    (e.g. a trailing carriage return) from *filename*."""
    basename = filename.rsplit('/', 1)[-1]
    return basename.strip()
def decode_predictions(predictions, top=3, dictionary=None):
    """Render the top-scoring classes of each prediction as text.

    Args:
        predictions: Batched numpy array of class scores (batch x classes).
        top: Number of highest-scoring classes to report.
        dictionary: Optional mapping from class index (int or str key) to
            class name.

    Returns:
        One entry per batch element: a right-justified, newline-separated
        string of the top classes and their probabilities, best first.
    """
    def label_for(index):
        if dictionary is None:
            return "Class {}".format(index)
        return dictionary.get(index, dictionary.get(str(index), "Class {}".format(index)))

    results = []
    for scores in predictions:
        best = scores.argsort()[-top:][::-1]
        lines = ["{}: {:.4f}".format(label_for(i), scores[i]) for i in best]
        width = max(len(line) for line in lines)
        results.append("\n".join(line.rjust(width) for line in lines))
    return results
def return_two(a, b):
    """Return the two inputs in swapped order.

    Attributes:
        a (int): first input
        b (int): second input
    Return:
        tuple: (b, a)
    """
    swapped = (b, a)
    return swapped
import pickle
def load_pickle(file_path):
    """Load and return the data stored in the pickle file at *file_path*.

    Args:
        file_path: string path to the pickle file.
    """
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)
def expand_onnx_options(model, optim):
    """
    Expands shortened options. Long names hide some part
    of graphs in :epkg:`asv` benchmark. This trick converts
    a string into real conversions options.
    @param model model class (:epkg:`scikit-learn`)
    @param optim option
    @return expanded options
    It is the reverse of function @see fn shorten_onnx_options.
    The following options are handled:
    .. runpython::
        :showcode:
        :warningout: DeprecationWarning
        from sklearn.linear_model import LogisticRegression
        from mlprodict.tools.asv_options_helper import expand_onnx_options
        for name in ['cdist', 'nozipmap', 'raw_scores']:
            print(name, ':', expand_onnx_options(LogisticRegression, name))
    """
    # NOTE(review): per the docstring, *model* is a class (e.g.
    # LogisticRegression); `model.__class__` is then the metaclass `type`,
    # not the model class itself -- confirm whether the intended key is
    # `model` or `model.__class__`.
    if optim == 'cdist':
        options = {model.__class__: {'optim': 'cdist'}}
    elif optim == 'nozipmap':
        options = {model.__class__: {'zipmap': False}}
    elif optim == 'raw_scores':
        options = {model.__class__: {'raw_scores': True, 'zipmap': False}}
    else:
        # Unknown strings pass through unchanged.
        options = optim  # pragma: no cover
    return options
def filter_dict_to_mongo_query(filters):
    """Build a Mongo query on "base_uri" from a filters dict.

    A single entry in filters["base_uris"] becomes an exact match; multiple
    entries become an "$in" clause.
    """
    uris = [str(uri) for uri in filters["base_uris"]]
    if len(uris) == 1:
        return {"base_uri": uris[0]}
    return {"base_uri": {"$in": uris}}
def upgrade(module, port_path):
    """Upgrade outdated MacPorts ports via ``port upgrade outdated``.

    Returns (changed, msg) on success; calls module.fail_json on failure.
    """
    rc, out, err = module.run_command("%s upgrade outdated" % port_path)
    # rc is 1 when there is nothing to upgrade, so inspect stdout first.
    if out.strip() == "Nothing to upgrade.":
        return (False, "Ports already upgraded")
    if rc == 0:
        return (True, "Outdated ports upgraded successfully")
    module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
def make_id_filter(pdb_ids, pdb_chains):
    """Generate a dataset filter accepting only specific PDB ID/chain pairs.

    Parameters
    ----------
    pdb_ids : list
        PDB IDs to accept (case-insensitive; upper-cased internally).
    pdb_chains : list
        Chains corresponding positionally to `pdb_ids`.

    Returns
    -------
    Function
        Returns True for records whose (pdb_id, pdb_chain) pair is listed.
    """
    allowed = {f"{pid.upper()}_{chain}" for pid, chain in zip(pdb_ids, pdb_chains)}
    def func(record):
        return f"{record.pdb_id}_{record.pdb_chain}" in allowed
    return func
def norm_filters(weights, p=1):
    """Compute the p-norm of each convolution filter.

    Args:
        weights: 4-D convolution weight tensor,
            shape (#filters, #channels, k_w, k_h).
        p: the exponent value in the norm formulation.

    Returns:
        1-D tensor holding one norm per filter.
    """
    assert weights.dim() == 4
    flat = weights.view(weights.size(0), -1)
    return flat.norm(p=p, dim=1)
def format_match_string(marked_chr_list):
    """Render a list of (char, marked) pairs as a string, wrapping each
    maximal run of marked characters in parentheses."""
    pieces = []
    previous = False
    for ch, marked in marked_chr_list:
        if marked != previous:
            pieces.append('(' if marked else ')')
            previous = marked
        pieces.append(ch)
    if previous:
        # Close a run that extends to the end of the input.
        pieces.append(')')
    return ''.join(pieces)
import importlib
def check_installed(package_name):
    """Return True when *package_name* can be imported, False otherwise."""
    try:
        importlib.import_module(package_name)
    except ImportError:
        return False
    return True
def suite_part_list(suite, stage):
    """Return a list of all the suite parts for this stage.

    For the 'run' stage this is every group flagged as a run group; for any
    other stage it is the single phase group for that stage.
    """
    if stage == 'run':
        return [group for group in suite.groups if suite.is_run_group(group)]
    return [suite.phase_group(stage)]
def bool_string(val):
    """String-to-bool parser for argparse's ``type=`` argument.

    Accepts t/true/y/yes and f/false/n/no, case-insensitively.

    Raises:
        ValueError: for any other value. (Fix: the original raised a plain
            Exception, which argparse's type machinery does not handle --
            ValueError produces a proper "invalid value" error message.)
    """
    lowered = val.lower()
    if lowered in ('t', 'true', 'y', 'yes'):
        return True
    if lowered in ('f', 'false', 'n', 'no'):
        return False
    raise ValueError('Please use t/true or f/false for boolean parameters.')
def paste_js(clipboard):
    """Build a JavaScript/jQuery snippet that pastes *clipboard* over the
    current selection of the focused DOM element.

    NOTE(review): *clipboard* is interpolated into a JS template literal
    without escaping; backticks or ``${`` inside it would break or inject
    into the generated script -- confirm inputs are trusted.
    """
    lines = [
        "var focused = document.activeElement;",
        "var start = focused.selectionStart;",
        "var end = focused.selectionEnd;",
        "var val = focused.value;",
        f"var new_val = val.slice(0, start) + `{clipboard}` + val.slice(end, val.length);",
        "focused.value = new_val;",
        f"var cursorPos = start + `{clipboard}`.length;",
        "focused.setSelectionRange(cursorPos, cursorPos);",
    ]
    return "\n".join(lines)
def param_invalid_value_info(param_name, default_value):
    """Build the warning message for an invalid hyper-parameter setting."""
    return (
        "Parameter warning: the configuration of hyper-parameter "
        f"'{param_name}' is not valid, will use default value '{default_value}'"
    )
def costs(hours, cost_per_hour, machines=1):
    """cost = hours * $/hour * machines"""
    total = hours * cost_per_hour
    return total * machines
def hard_get(data: dict, set_name: str):
    """Fetch a required setting value from a dict.

    :param data: dict with data
    :param set_name: setting name
    :return: setting value
    :raise: ValueError if the value does not exist
    """
    if set_name in data:
        return data[set_name]
    raise ValueError(f"Provide value for {set_name.upper()}")
from pathlib import Path
def dir_contains(file, directory):
    """Check if *file* is, or is contained by, *directory*.

    The comparison is purely lexical (no filesystem access, no symlink or
    '..' resolution): the file's path is walked upward one component at a
    time looking for an exact match.

    Fix: the original loop stopped before comparing the root itself, so
    ``dir_contains("/", "/")`` wrongly returned False.
    """
    current = Path(file)
    target = Path(directory)
    while True:
        if current == target:
            return True
        if current.parent == current:  # reached the filesystem root
            return False
        current = current.parent
def _list_complement(A, B):
    """Return the relative complement of A in B, i.e. the elements of B
    that are not in A (set notation: B \\ A -- the original docstring's
    "A\\B" had the operands reversed).

    Note: duplicates and ordering are lost because both inputs are passed
    through set().
    """
    return list(set(B) - set(A))
def get_frame_name(frame):
    """Get the frame name for a frame number.

    Args:
        frame (int): Frame number.

    Returns:
        str: the frame number left-padded with '0' to length 6.
    """
    return f"{frame:0>6}"
def find_best_odds(books):
    """Find, per outcome (column), the best odd and which bookmaker set it.

    Given a two-dimensional array such as::

        [[5, 7.7, 1.2],
         [4, 6.25, 1.6]]

    returns ``([5, 7.7, 1.6], [0, 0, 1])`` -- the best odds and the row
    index of the bookmaker offering each.

    Raises ValueError unless at least two bookmakers' odds are given.
    """
    if len(books) < 2:
        raise ValueError("Must contain at least two bookers odds.")
    n_outcomes = len(books[0])
    best = [0] * n_outcomes
    book_id = [0] * n_outcomes
    for bookie_idx, odds in enumerate(books):
        for outcome, odd in enumerate(odds):
            if odd > best[outcome]:
                best[outcome] = odd
                book_id[outcome] = bookie_idx
    return (best, book_id)
def dble_pwr_law(time_bins, tau, alpha, beta, factor=1):
    """Double power-law star formation history.

    Parameters
    ----------
    time_bins : list or numpy.ndarray
        Time bins.
    tau : float or int
    alpha : float or int
    beta : float or int
    factor : float or int, optional
        Overall scale (default 1).

    Returns
    -------
    The star formation history evaluated on the given time bins.
    """
    scaled = time_bins / tau
    return factor / (scaled ** alpha + scaled ** (-beta))
def counting_stats(response_stat_collection: dict) -> int:
    """Count the correct total of features across all collections.

    :param response_stat_collection: the "collection" field of a response's
        statistics, mapping collection name -> feature count
    :returns: sum of all feature counts
    """
    return sum(response_stat_collection.values())
def _get_reward(old_error, new_error):
"""Return RL agent reward.
Reward for RL agent is difference between new and previous error for output.
Plus small amount for error (prioritize higher error)
"""
return (old_error - new_error) + 0.2 * new_error | a6413cc700af343a11e62366395db9cc2bbda70b | 117,248 |
def strip(x):
    """Strip surrounding whitespace when *x* is a str.

    Parameters
    ----------
    x : any
        A str to be stripped; anything else is returned unchanged.
    """
    return x.strip() if isinstance(x, str) else x
def NSS_loss_2(x, y):
    """Normalized Scanpath Saliency loss between a model output *x* and a
    fixation label *y* (torch tensors; single images or batches).

    Each saliency map is z-normalized, sampled at fixation locations
    (``y > 0``), and averaged; the negative batch-average NSS is returned.
    """
    # Promote single images to a batch of one.
    if len(x.shape) == 2:
        x = x.unsqueeze(0)
        y = y.unsqueeze(0)
    total = 0
    batch = x.shape[0]
    for idx in range(batch):
        pred, target = x[idx, :, :], y[idx, :, :]
        # Normalize the saliency map to zero mean / unit std.
        normalized = (pred - pred.mean()) / pred.std()
        # Mean value at fixation locations.
        fixated = normalized.masked_select(target > 0)
        total += fixated.mean()
    return -1 * total / batch
def channel_filter(channel, channel_spec):
    """Return True iff *channel* matches *channel_spec*.

    channel_spec may be a single int (exact match) or a tuple of accepted
    channels; any other type raises ValueError.
    """
    if isinstance(channel_spec, tuple):
        return channel in channel_spec
    if isinstance(channel_spec, int):
        return channel == channel_spec
    raise ValueError("Incorrect type for channel_spec" + str(type(channel_spec)))
import hashlib
def hash_string(string):
    """Return the hex MD5 digest of *string* (UTF-8 encoded).

    NOTE: MD5 is fine for fingerprinting, not for security purposes.
    """
    digest = hashlib.md5(string.encode('utf-8'))
    return digest.hexdigest()
def sum_economic_loss(effect_list):
    """Total of the ``economic_loss`` values across an effects list."""
    return sum(effect.economic_loss for effect in effect_list)
from typing import Callable
from typing import Counter
def mangle() -> Callable[[str], str | None]:
"""Appends number to already seen strings, making them distinct
>>> mangled = mangle()
>>> mangled('a')
'a'
>>> mangled('b')
'b'
>>> mangled('a')
'a:1'
"""
store = Counter[str]()
def call(name: str) -> str | None:
if name is None:
return None
seen = store[name]
store[name] += 1
if not seen:
return name
return f'{name}:{seen}'
return call | c337d47e2c470be5f1c57fe1f88fc5f0a5020433 | 117,270 |
def link_to_changes_in_release(release, releases):
    """Markdown hyperlink showing all edits in a release, or empty string.

    :param release: A release version, as a string.
    :param releases: Releases in descending order, newest to oldest.
    :return: Markdown link comparing *release* to the previous one, or ''
        when the previous release is not known (earliest release).
    """
    if release == releases[-1]:
        return ''  # earliest release we know about: nothing to diff against
    previous = releases[releases.index(release) + 1]
    return '\n[Changes in %s](https://github.com/catchorg/Catch2/compare/v%s...v%s)' % (release, previous, release)
def compose_sequences(sequence, sequences):
    """Compose a sequence with other sequences.

    Each entry of *sequences* is a tuple ``(wrapper_class, *args)``; the
    wrappers are applied in order, innermost first::

        sequence = compose_sequences(Dataset(), [
            (Batcher, 32),
            (LambdaTransform, lambda x: x.expand_dims(-1)),
        ])
    """
    for wrapper, *extra in sequences:
        sequence = wrapper(sequence, *extra)
    return sequence
def get_poem(soup):
    """Parse a page to get the title and the content of a poem.

    :param soup: BeautifulSoup object with the parsed HTML.
    :return: 2-tuple (title, poem text), the latter taken from the element
        with class "blog-post".
    """
    title = soup.title.text
    body = soup.find(attrs={"class": "blog-post"}).text
    return title, body
from datetime import datetime
import json
def serialize_to_json(data):
    """Serialize an object to JSON, with datetimes formatted per RFC 3339.

    Args:
        data: the object to serialize.

    Returns:
        The JSON string.

    Raises:
        TypeError: for values that are neither JSON-native nor datetime.
    """
    def datetime_serializer(element):
        if isinstance(element, datetime):
            # NOTE(review): "Z" is appended unconditionally, which assumes
            # naive datetimes are UTC -- confirm with callers.
            return element.isoformat("T") + "Z"
        # Bug fix: the original returned the element unchanged, which made
        # json.dumps call `default` recursively forever; the `default`
        # contract is to raise TypeError for unhandled types.
        raise TypeError(
            f"Object of type {type(element).__name__} is not JSON serializable"
        )
    return json.dumps(data, default=datetime_serializer)
def is_valid_name(name):
    """Validate a task name.

    param: name (str) - a string with the task's name.
    A name is valid when it is 3 to 25 characters long (inclusive) and
    consists only of alphabetic characters (per str.isalpha()).
    The "info" field can be empty and has no specific requirements, so it
    needs no validation.

    returns: bool -- True when the name is valid, False otherwise.
    """
    # NOTE(review): the original docstring only mentioned the length rule,
    # but the code also requires alphabetic-only names; behavior kept.
    # Idiom fix: return the boolean expression directly instead of the
    # `if ...: return True / else: return False` pattern.
    return name.isalpha() and 3 <= len(name) <= 25
import math
def mag(obj, base=10):
    """Return the order of magnitude of *obj* (or of its length, when it is
    sized), or -1 when the magnitude is undefined (e.g. size 0).

    Args:
        obj: a sized object, or a number.
        base: logarithm base (default 10).
    """
    try:
        size = len(obj)
    except TypeError:
        # Not sized: treat the object itself as the number.
        size = obj
    try:
        return int(math.log10(size) if base == 10 else math.log(size, base))
    except (ValueError, TypeError):
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only log-domain or type errors
        # mean "no magnitude".
        return -1
def csv_line(value_parser):
    """
    Return a function that parses a line of comma separated values.
    Examples:
        >>> csv_line(int)('1, 2, 3')
        [1, 2, 3]
        >>> csv_line(float)('0.5, 1.5')
        [0.5, 1.5]
    For example, it can be passed to the type argument of
    `argparse.ArgumentParser.add_argument`::
        parser.add_argument(
            ...,
            type=csv_line(float),
            help='Comma separated value of floats')
    """
    def convert(string):
        return [value_parser(field) for field in string.split(',')]
    return convert
import sqlite3
def run_query(db, query, commit=False):
    """Run a query against a sqlite3 database, connection-to-close.

    Parameters
    ----------
    db : str
        Name of the database to connect to.
    query : str
        Query to run on the database.
    commit : bool, default False
        Set to True to commit changes to the db.

    Returns
    -------
    Result rows of the query (cursor.fetchall()).
    """
    conn = sqlite3.connect(db)
    curs = conn.cursor()
    # Fix: close the cursor and connection even when the query raises,
    # instead of leaking them.
    try:
        result = curs.execute(query).fetchall()
        if commit:
            conn.commit()
    finally:
        curs.close()
        conn.close()
    return result
from pathlib import Path
def get_sql(script_name: str) -> str:
    """Read and return the contents of the SQL script ``db/<script_name>.sql``."""
    script_path = Path() / "db" / f"{script_name}.sql"
    return script_path.read_text()
from pathlib import Path
def _cope_names(input_dir, selected_cope=None):
    """Get COPE names from the first level analysis.

    Parameters
    ----------
    input_dir:
        BIDS derivative - FSL feat first level outputs.
        All feat directories should be derived from the same design, so the
        contrast names are read from the first feat directory found.
    selected_cope:
        A list of copes of interest, names matching the first level design.
        When None (or not a list), all contrasts are returned.

    Return
    ------
    selected_contrasts: List of tuple
        (1-based cope index, cope name) pairs.
    """
    first_level = Path(input_dir)
    # Any subject's feat dir will do; the design is assumed shared.
    feat_dir = first_level.glob("sub-*/sub-*[0-9].feat/")
    con = list(feat_dir)[0] / "design.con"
    # Debug output of the design.con file actually used.
    print(con)
    with open(con) as f:
        # Lines look like: /ContrastName1  <name>  -- keep the last token.
        contrast_names = [
            line.split()[-1] for line in f.readlines() if "ContrastName" in line
        ]
    selected_contrasts = []
    for i, cn in enumerate(contrast_names):
        cope_idx = i + 1  # FSL copes are 1-based
        if type(selected_cope) is list:
            # Keep only the contrasts whose name was requested; warn once
            # per unknown requested name.
            for sc in selected_cope:
                if sc in contrast_names:
                    if sc == cn:
                        selected_contrasts.append((cope_idx, cn))
                    else:
                        pass
                else:
                    print(f"selected contrast {sc} doesn't exisit. Typo?")
        else:
            # No selection given: keep every contrast.
            selected_contrasts.append((cope_idx, cn))
    return selected_contrasts
def _get_non_mgmt_ethernets(instance):
    """
    Returns ethernets of instance which are not used for a management IP.

    Only ethernets with a MAC address are included; any ethernet whose IP
    address is flagged as management is excluded. Results are ordered by MAC.

    Args:
        instance: BaseObject instance
    """
    return instance.ethernet_set.filter(
        mac__isnull=False
    ).exclude(
        ipaddress__is_management=True
    ).order_by('mac')
def create_seq_db(data):
    """Convert each user's interactions to a single sequence row.

    Args:
        data (pandas.DataFrame): interactions with "col_user" and
            "col_item" columns.

    Returns:
        pandas.DataFrame: one row per user with "col_user" and
        "col_sequence" (the user's item ids as a list of ints).
    """
    sequences = data.groupby("col_user").col_item.agg(
        col_sequence=lambda items: [int(i) for i in items]
    )
    return sequences.reset_index()
def Reshape(self, name, in_t, shape, attrs=None):
    """Create a Reshape op, add it to the Graph, and return its output tensor.

    Args:
        name: node name; the output tensor is renamed "<name>_out_0".
        in_t: input tensor.
        shape: shape input tensor for the Reshape op.
        attrs: optional dict of op attributes.

    Returns:
        The single output tensor of the new Reshape node.
    """
    # Fix: `attrs=dict()` was a shared mutable default argument; use None
    # as the sentinel instead (backward compatible for all callers).
    if attrs is None:
        attrs = {}
    out_t = self.layer(op="Reshape", name=name,
                       inputs=[in_t, shape], outputs=[name],
                       attrs=attrs)[0]
    out_t.name = "{}_out_0".format(name)
    return out_t
def GetMaxRow(panes, direction, layer):
    """
    GetMaxRow() is an internal function which returns the highest row index
    used by any pane docked in the given direction and layer (0 when none).
    """
    max_row = 0
    for pane in panes:
        in_dock = pane.dock_direction == direction and pane.dock_layer == layer
        if in_dock and pane.dock_row > max_row:
            max_row = pane.dock_row
    return max_row
def get_sequence_header_length(seq_header_len):
    """Return the length of a SEQUENCE header for a body of the given size
    (presumably ASN.1/DER: 1 tag byte plus 1-3 length bytes)."""
    if seq_header_len > 255:
        return 4
    return 3 if seq_header_len > 127 else 2
def get_free_item_obj(free_items, sku):
    """Fetch the free-item dict whose "item" field equals *sku*.

    :param free_items: List of free-item dicts.
    :param sku: SKU string to look up.
    :rtype: Dictionary, or None when no entry matches.
    """
    matches = (entry for entry in free_items if entry["item"] == sku)
    return next(matches, None)
def max_sub_array_sum(array, number):
    """Maximum sum over any window of *number* consecutive elements.

    Time: O(n), Space: O(1).

    :param array: sequence of numbers
    :param number: window length
    :return: the highest window sum, or None for an empty array

    Fixes vs. the original: partial windows at the start of the array are
    no longer counted, and all-negative arrays now return the true maximum
    window sum instead of 0.
    """
    if not array:
        return None
    # Seed with the first window (the whole array when number > len(array)),
    # then slide: add the entering element, drop the leaving one.
    window = sum(array[:number])
    best = window
    for i in range(number, len(array)):
        window += array[i] - array[i - number]
        best = max(best, window)
    return best
import hashlib
def generate_faker_seed(value):
    """Generate a deterministic seed value for faker.

    The value is stringified, MD5-hashed, and the first 16 hex digits of
    the digest are interpreted as an integer.
    """
    text = value if isinstance(value, str) else str(value)
    digest = hashlib.md5(text.encode("utf-8")).hexdigest()
    return int(digest[:16], 16)
def read_genome(filename: str) -> str:
    """Read a genome from a FASTA (.fa) file.

    Concatenates all sequence lines, skipping '>' header lines.

    filename: relative or absolute path of the .fa file.
    Returns: the genome as one string.
    """
    parts = []
    with open(filename) as handle:
        for line in handle:
            if not line.startswith(">"):
                parts.append(line.rstrip())
    return "".join(parts)
import pickle
def load_object(path: str):
    """Unpickle and return the object saved at *path*.

    :param path: path to a pickled object file.
    :return: the unpickled object.
    """
    with open(path, "rb") as handle:
        return pickle.load(handle)
def subsample_fourier(x, k):
    """Subsampling in the Fourier domain.

    Subsampling in the temporal domain amounts to periodization in the
    Fourier domain, so the input is folded into k segments along its last
    axis and averaged.

    Parameters
    ----------
    x : tensor
        Input whose last axis is the frequency index; its length should be
        divisible by k (a power of 2 avoids errors).
    k : int
        The subsampling factor.

    Returns
    -------
    res : tensor
        x periodized along the last axis, of size x.shape[-1] // k there.
    """
    n = x.shape[-1]
    folded = x.reshape(x.shape[:-1] + (k, n // k))
    return folded.mean(axis=(-2,))
def create_url_with_offset(*, url: str, offset: int, size: int) -> str:
    """Build a url_with_offset string used internally by Ray spilling.

    When Ray spills objects it fuses several of them into one file, so
    each object tracks its own URL carrying the byte offset and size of
    its slice. The produced value can be handed to self._get_base_url to
    recover the backing filename.

    Example) file://path/to/file?offset=""&size=""

    Args:
        url(str): url to the object stored in the external storage.
        offset(int): byte offset of this object from the start of the file.
        size(int): size in bytes of the object stored at the url.
    Returns:
        url_with_offset string used to locate the object later.
    """
    return "{}?offset={}&size={}".format(url, offset, size)
def string_contains_space(string):
    """
    Return True if the string contains any whitespace character.

    Note: despite the name, this matches every character for which
    str.isspace() is true (space, tab, newline, ...), matching the
    original loop's behavior.
    """
    return any(char.isspace() for char in string)
def extr_hotdays_calc(data, thr_p95):
    """
    Count extreme hot days: entries of *data* strictly above the 95th
    percentile threshold from climatology.

    Parameters
    ----------
    data: array
        1D array of daily mean temperatures.
    thr_p95: float
        95th percentile daily mean value from climatology.
    """
    exceeds_threshold = data > thr_p95
    return exceeds_threshold.sum()
from collections import OrderedDict


def manipulate_df(df, user_id, questionnaire_id, collected_at, created_at, updated_at, keep_answer_timestamps=False):
    """
    Flatten a questionnaire-answers dataframe into a single ordered row.

    Each row of *df* must expose ``label``, ``value`` and ``collected_at``
    attributes via itertuples (i.e. columns with those names).

    :param df: dataframe of answers with 'label', 'value', 'collected_at' columns
    :param user_id: id stored under the 'user_id' key
    :param questionnaire_id: id stored under the 'questionnaire_id' key
    :param collected_at: questionnaire-level collection timestamp (only kept
        when keep_answer_timestamps is True)
    :param created_at: creation timestamp, always kept
    :param updated_at: update timestamp (only kept when keep_answer_timestamps)
    :param keep_answer_timestamps: also record each answer's own
        '<label>_collected_at' timestamp
    :return: an OrderedDict that becomes one row of the final dataframe
    """
    # collections.OrderedDict: the deprecated typing.OrderedDict runtime
    # alias was replaced (PEP 585); behavior is unchanged.
    row = OrderedDict()
    row['user_id'] = user_id
    row['questionnaire_id'] = questionnaire_id
    row['created_at'] = created_at
    if keep_answer_timestamps:
        row['collected_at'] = collected_at
        row['updated_at'] = updated_at
        for answer in df.itertuples():
            row[answer.label + '_collected_at'] = answer.collected_at
            row[answer.label] = answer.value
    else:
        for answer in df.itertuples():
            row[answer.label] = answer.value
    return row
def get_intersection(**kws):
    """Get the intersection of all input keyword parameters, ignoring
    empty inputs (empty list or tuple).

    Bug fixed: the previous implementation re-used ``s == set()`` both as
    the "not started yet" test and after intersections, so once an
    intersection became empty the next non-empty input was unioned in
    (e.g. a=[1,2], b=[3,4], c=[2,3] wrongly returned [2, 3]).

    Returns
    -------
    res : list

    Examples
    --------
    >>> a, b, c = [], [], []
    >>> print(get_intersection(a=a,b=b,c=c))
    []
    >>> a, b, c = [1], [2], []
    >>> print(get_intersection(a=a,b=b,c=c))
    []
    >>> a, b, c = [1,2], [2], []
    >>> print(get_intersection(a=a,b=b,c=c))
    [2]
    >>> a, b, c = [1,2], [2], [2,3]
    >>> print(get_intersection(a=a,b=b,c=c))
    [2]
    >>> a, b, c = [1,2], [3,4], [2,3]
    >>> print(get_intersection(a=a,b=b,c=c))
    []
    """
    result = None  # sentinel: distinguishes "nothing seen yet" from an empty intersection
    for values in kws.values():
        if not values:
            continue  # empty inputs are ignored, not intersected
        if result is None:
            result = set(values)
        else:
            result &= set(values)
    return [] if result is None else list(result)
def is_within_region(readpos, contiglength, readlength, regionlength):
    """Return True if the read falls inside either end region of the contig."""
    near_start = readpos - readlength <= regionlength
    near_end = readpos >= contiglength - regionlength
    return near_start or near_end
from typing import Set
def add_cell(cell: str, visited: Set[str]) -> bool:
    """
    Record *cell* in *visited* unless it is '*' or ' ' (which are ignored
    and always accepted). Return False only when the cell was already
    recorded, True otherwise.

    >>> add_cell('5', {'1', '2', '3'})
    True
    >>> add_cell('5', {'1', '2', '5'})
    False
    >>> add_cell('*', {'1', '2', '3'})
    True
    >>> add_cell(' ', {'1', '2', '3'})
    True
    """
    if cell == '*' or cell == ' ':
        return True
    seen_before = cell in visited
    if not seen_before:
        visited.add(cell)
    return not seen_before
def sext_3(value):
    """Sign-extend a 3-bit number to 32 bits.

    Bit 2 is the sign bit; when set, the upper 29 bits are filled with ones.
    """
    return (0xfffffff8 | value) if value & 0x4 else value
def display_number(number):
    """Render *number* with ',' as the thousands separator."""
    return f"{number:,}"
def get_elapsed(start, end):
    """
    Format the duration between two timestamps.

    :param start: start time (seconds)
    :param end: end time (seconds)
    :return: a human-readable string in seconds (< 60s) or minutes
    """
    elapsed = end - start
    if elapsed < 60:
        return '{0:.2g}'.format(elapsed) + " seconds"
    return '{0:.2g}'.format(elapsed / 60.0) + " minutes"
def path_to_str(x):
    """Return the string representation of *x* (e.g. a pathlib.Path)."""
    return f"{x!s}"
import csv
def load_from_csv(path):
    """
    Load all segments from a '|'-delimited csv file.

    :param path: string, path to the csv file
    :return: list, one list of fields per row
    """
    with open(path) as csvfile:
        return list(csv.reader(csvfile, delimiter="|"))
def _get_update_parent_dict(parent_record: dict, nr_of_children: int, spouses: list):
"""
Increment the married counter, add the nr of children and replace the existing
spouses with the spouses list passed in.
:param parent_record: the parent record for the parent currently under consideratio n
:param nr_of_children: a scalar number of children in this current family iteration
:param spouses: currently just a pass through to build the dictionary
:return: a dictionary containing the updated fields *only*, i.e. 'married_count', 'children_count'
and 'spouses'
"""
married_count = 1
if 'married_count' in parent_record:
married_count += parent_record['married_count']
children_count = nr_of_children
if 'children_count' in parent_record:
children_count += parent_record['children_count']
update_dict = {'married_count': married_count,
'children_count': children_count,
'spouses': spouses}
return update_dict | 241bdc4c3ec40ee0a03338da310b75c9cf7a3729 | 117,386 |
def avgCl(lst):
    """Return the channel-wise average [R, G, B] of a list of RGB colours."""
    count = len(lst)
    return [sum(colour[channel] for colour in lst) / count for channel in range(3)]
def dict_to_string(variable_dict):
    """Serialize a dict as '§'-separated key/value pairs (k§v§k§v...)."""
    return '§'.join(f'{key!s}§{value!s}' for key, value in variable_dict.items())
import random
def simulate_ip() -> str:
    """ Simulates a random IPv4 address.

    Each octet is drawn uniformly from 0-255; the previous 1-1000 range
    produced strings that are not valid IPv4 addresses.

    Returns:
        A random dotted-quad IPv4 address (str)
    """
    return ".".join(str(random.randint(0, 255)) for _ in range(4))
def pytest_collection_modifyitems(items):
    """
    Run the module init test first so that any later failure caused by
    module reinitialization is surfaced implicitly.
    """
    target = 'test_xmlsec.py::TestModule::test_reinitialize_module'
    # Stable sort: the init test sorts to the front, all else keeps order.
    items.sort(key=lambda item: 0 if target in item.nodeid else 1)
def dict_merge(d1, d2):
    """
    Recursively merge two dicts into a new dict.

    Neither input is modified. Values from *d2* win; nested dicts present
    in both are merged recursively.
    """
    merged = dict(d1)
    for key, value in d2.items():
        # Recurse only when both sides hold a dict for this key.
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = dict_merge(merged[key], value)
        else:
            merged[key] = value
    return merged
def quoted(s):
    """ Given a string or bytes, return a quoted string as per RFC 3501, section 9."""
    if isinstance(s, str):
        escaped = s.replace('\\', '\\\\').replace('"', '\\"')
        return '"{}"'.format(escaped)
    escaped = s.replace(b'\\', b'\\\\').replace(b'"', b'\\"')
    return b'"' + escaped + b'"'
def to_celsius(kelvin: float, round_digit=2) -> float:
    """Convert a temperature from Kelvin to Celsius.

    Args:
        kelvin (float): temperature in Kelvin
        round_digit (int, optional): decimal places to round to. Defaults to 2.
    Returns:
        float: temperature in Celsius
    """
    celsius = kelvin - 273.15
    return round(celsius, round_digit)
def _make_list_of_str(arg):
"""
Convert a str to list of str or ensure a list is a list of str
:param list[str] | str arg: string or a list of strings to listify
:return list: list of strings
:raise TypeError: if a fault argument was provided
"""
def _raise_faulty_arg():
raise TypeError(
f"Provided argument has to be a list[str] or a str, "
f"got '{arg.__class__.__name__}'"
)
if isinstance(arg, str):
return [arg]
elif isinstance(arg, list):
if not all(isinstance(i, str) for i in arg):
_raise_faulty_arg()
else:
return arg
else:
_raise_faulty_arg() | bb8f2eab2656e33916b06d9892299991dd4c99ed | 117,401 |
def _tryint(v):
"""
Tries to convert v to an integer. If it fails, return 0.
"""
try:
return int(v)
except:
return 0 | 644c5141603cdfd84e0b22d20a026bdbb15447c0 | 117,403 |
def top(df, n=5, column=None):
    """Return the last n rows of *df* after sorting ascending by *column*
    (i.e. the n largest values, largest last).
    """
    ordered = df.sort_values(by=column)
    return ordered.iloc[-n:]
def get_parent_map(joints):
    """Map each joint name to the name of its parent node."""
    return {name: joint['parent'] for name, joint in joints.items()}
import functools
import warnings
def deprecate_function(msg, stacklevel=2):
    """Decorator factory that emits a DeprecationWarning before each call.

    Args:
        msg (str): Warning message to emit.
        stacklevel (int): Warning stacklevel to use, defaults to 2.

    Returns:
        Callable: a decorator producing the deprecated wrapper.
    """
    def decorator(func):
        def emit_and_call(*args, **kwargs):
            warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
            return func(*args, **kwargs)
        # Preserve the wrapped function's metadata (__name__, __doc__, ...).
        return functools.wraps(func)(emit_and_call)
    return decorator
import re
def is_extract_metadata(pipeline):
    """
    Report whether the pipeline will extract metadata.

    * pipeline: a list where each item is a line from a CellProfiler
      `*.cppipe` file.

    Returns True if any line reads 'Extract metadata?:Yes' (optionally
    indented).
    """
    pattern = re.compile(r"^\s*Extract metadata\?:Yes$")
    return any(pattern.match(line) is not None for line in pipeline)
def get_reads(datafile):
    """
    Input:
        1. datafile - name of a sam file
    Output:
        1. A list of the file's non-header lines (header lines start with
           '@'); per the original doc these hold read starting positions
           and methylation call strings.
    """
    with open(datafile) as handle:
        return [line for line in handle if not line.startswith("@")]
def filter_output_columns(output_columns, filter_columns):
    """
    Keep only the output columns present in the allowed list.

    :param output_columns: column list to filter
    :param filter_columns: allowed columns; when falsy (None or empty)
        the original list is returned unchanged
    :return: filtered list of columns
    """
    if not filter_columns:
        return output_columns
    return [column for column in output_columns if column in filter_columns]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.