content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_function(func_name, modules):
    """
    Look up a function by name in a list of module descriptors.

    :param func_name: function name, optionally fully qualified ("pkg.mod.func")
    :param modules: list of module dicts produced by :func:`get_submodules_of`
    :return: the function object, or None when not found
    """
    if '.' in func_name:
        # Fully qualified: split into module path and bare function name.
        module_name, func_name = func_name.rsplit('.', 1)
        match = next((m for m in modules if m['module_name'] == module_name), None)
        if match is None:
            return None
        try:
            return getattr(match['module_object'], func_name)
        except AttributeError:
            return None
    # Bare name: scan every module's function table.
    for mod in modules:
        for entry in mod['functions']:
            if entry['function_name'] == func_name:
                return entry['function_object']
    return None
from typing import Type
import importlib
from typing import cast
def _import_class(path: str, /) -> Type:
"""Import a class from a provided path."""
module_path, _, class_name = path.rpartition(".")
package, _, module_name = module_path.rpartition(".")
module = importlib.import_module(name=module_name, package=package)
return cast(Type, getattr(module, class_name)) | cec6e1f30bb983b2b4b82f06893dbbf84b517971 | 92,101 |
def count_duplicates(t):
    """
    Count the number of duplicate sets in a given list (t). Return an integer value.
    Sets greater than two are considered a single duplicate set.
    """
    tmp = sorted(t)
    count = 0
    for i in range(len(tmp) - 1):
        # Count only the first pair of each run of equal values. Bug fix:
        # guard i == 0 explicitly -- the old tmp[i - 1] check wrapped around
        # to tmp[-1], so a duplicate run at the very start of the sorted list
        # was missed (e.g. [1, 1] returned 0 instead of 1).
        if tmp[i] == tmp[i + 1] and (i == 0 or tmp[i] != tmp[i - 1]):
            count += 1
    return count
import json
def data_as_dict(path):
    """Read the JSON file at *path* and return the parsed object."""
    with open(path) as handle:
        return json.load(handle)
import re
def get_dver_from_string(s):
    """Return the EL major version embedded in *s* (e.g. "el7" -> "7"),
    or None when no such token is present."""
    found = re.search(r'\bel(\d+)\b', s)
    return found.group(1) if found else None
def _hex2rgb(c_hex):
"""Convert hex to rgb.
Parameters
----------
c_hex : str
Hex color.
Returns
-------
list
RGB color.
Examples
--------
rgbcolor = _hex2rgb("#FFFFFF")
"""
# Pass 16 to the integer function for change of base
return [int(c_hex[i:i + 2], 16) for i in range(1, 6, 2)] | d5ccc3f4833339a77f64247d3efff86042291693 | 92,119 |
def tokenize(seq, delim=' ', punctToRemove=None, addStartToken=True, addEndToken=True):
    """
    Tokenize *seq*: strip the given punctuation characters, split the string
    on *delim*, and optionally wrap the result with <START>/<END> markers.
    """
    text = str(seq)
    if punctToRemove is not None:
        for p in punctToRemove:
            text = text.replace(p, '')
    tokens = text.split(delim)
    if addStartToken:
        tokens = ['<START>'] + tokens
    if addEndToken:
        tokens = tokens + ['<END>']
    return tokens
def create_list(text):
    """
    Split a big string on whitespace and return the list of words.
    """
    words = text.split()
    return words
def normalize_From(node):
    """ Return a list of strings of Python 'from' statements, one import on
    each line, for the given compiler From node.
    """
    prefix = 'from %s import' % ('.' * node.level + node.modname)
    lines = []
    for name, asname in node.getChildren()[1]:
        stmt = '%s %s' % (prefix, name)
        if asname is not None:
            stmt = '%s as %s' % (stmt, asname)
        lines.append(stmt + '\n')
    return lines
import click
def core_options(f):
    """Add core operation options to commands via a decorator.
    These are applied to the main (but not all) cli commands like
    `parse`, `lint` and `fix`.

    Each ``click.option(...)(f)`` call wraps *f* in turn, so the options
    below are attached in reverse order on the generated help output.
    """
    f = click.option(
        "--dialect", default=None, help="The dialect of SQL to lint (default=ansi)"
    )(f)
    f = click.option(
        "--templater", default=None, help="The templater to use (default=jinja)"
    )(f)
    # Rule selection: --rules narrows, --exclude-rules removes from the set.
    f = click.option(
        "--rules",
        default=None,
        # short_help='Specify a particular rule, or comma separated rules, to check',
        help=(
            "Narrow the search to only specific rules. For example "
            "specifying `--rules L001` will only search for rule `L001` (Unnecessary "
            "trailing whitespace). Multiple rules can be specified with commas e.g. "
            "`--rules L001,L002` will specify only looking for violations of rule "
            "`L001` and rule `L002`."
        ),
    )(f)
    f = click.option(
        "--exclude-rules",
        default=None,
        # short_help='Specify a particular rule, or comma separated rules to exclude',
        help=(
            "Exclude specific rules. For example "
            "specifying `--exclude-rules L001` will remove rule `L001` (Unnecessary "
            "trailing whitespace) from the set of considered rules. This could either "
            "be the whitelist, or the general set if there is no specific whitelist. "
            "Multiple rules can be specified with commas e.g. "
            "`--exclude-rules L001,L002` will exclude violations of rule "
            "`L001` and rule `L002`."
        ),
    )(f)
    f = click.option(
        "--ignore",
        default=None,
        help=(
            "Ignore particular families of errors so that they don't cause a failed "
            "run. For example `--ignore parsing` would mean that any parsing errors "
            "are ignored and don't influence the success or fail of a run. Multiple "
            "options are possible if comma separated e.g. `--ignore parsing,templating`."
        ),
    )(f)
    # Diagnostics: benchmarking output and logger restriction.
    f = click.option(
        "--bench",
        is_flag=True,
        help="Set this flag to engage the benchmarking tool output.",
    )(f)
    f = click.option(
        "--logger",
        type=click.Choice(["parser", "linter", "rules"], case_sensitive=False),
        help="Choose to limit the logging to one of the loggers.",
    )(f)
    return f
def rdr(interest, exp_inflation_rate, **kwargs):
    """
    Real Discount Rate: factors expected inflation out of a nominal
    interest rate for economic analysis.
    """
    return (interest - exp_inflation_rate) / (1 + exp_inflation_rate)
from typing import Any
from typing import Sequence
def indexof(needle: Any, haystack: Sequence[Any]) -> int:
    """Return the index of *needle* in *haystack*, matching by identity.

    Unlike ``list.index`` (which compares by equality), this locates the
    exact same object via ``is``, so two distinct-but-equal objects are
    told apart.

    Raises:
        ValueError: If the needle object is not present in the haystack.

    For example::
        >>> a = {}
        >>> b = {}
        >>> indexof(b, [1, a, 2, b])
        3
    """
    for position, candidate in enumerate(haystack):
        if candidate is needle:
            return position
    raise ValueError("{!r} is not in {!r}".format(needle, haystack))
from typing import List
from typing import Tuple
def get_matrix_diags(matrix: List[List[str]]) -> List[Tuple[int, int]]:
    """
    Return the starting coordinates of every diagonal of *matrix* in the
    order appropriate to the e4 route: last-column starts from the bottom
    row upward, then first-row starts from the right-most column leftward.
    """
    rows, cols = len(matrix), len(matrix[0])
    # Bottom-to-top along the last column.
    starts = [(r, cols - 1) for r in reversed(range(rows))]
    # Right-to-left along the first row, skipping the shared corner.
    for c in reversed(range(cols)):
        if (0, c) not in starts:
            starts.append((0, c))
    return starts
import re
def is_tar_file(path):
    """Check if *path* appears to be a tar archive (by extension, case-insensitive)."""
    return path.lower().endswith(('.tar', '.tgz', '.tar.gz', '.tar.bz2'))
def find_largest(arr):
    """Return the index of the largest value in *arr*.

    Note: this has always returned the *index* (largest_i), not the value;
    that contract is preserved.
    """
    largest = arr[0]
    largest_i = 0
    for i, val in enumerate(arr):
        # Bug fix: the comparison was inverted ('<'), so the function
        # tracked the smallest element instead of the largest.
        if val > largest:
            largest = val
            largest_i = i
    return largest_i
def retrieve_solutions(store):
    """ Retrieve the solution ids ('stats'/'init_id') from the data store
    as a list of strings.
    """
    ids = store['stats']['init_id'].astype(str)
    return ids.values.tolist()
import re
def parse_length(value, def_units='px'):
    """Parse *value* as an SVG length and return it in pixels; percentages
    are returned as a negative scale (-1 = 100%)."""
    if not value:
        return 0.0
    parsed = re.match(r'^\s*(-?\d+(?:\.\d+)?)\s*(in|[cm]m|p[tcx]|%)?', value)
    if parsed is None:
        raise Exception('Unknown length format: "{}"'.format(value))
    num = float(parsed.group(1))
    units = parsed.group(2) or def_units
    if units == '%':
        return -num / 100.0
    # Conversion factors to pixels (90 dpi SVG user units).
    per_unit = {'px': 1.0, 'pt': 1.25, 'pc': 15.0,
                'in': 90.0, 'mm': 3.543307, 'cm': 35.43307}
    if units in per_unit:
        return num * per_unit[units]
    raise Exception('Unknown length units: {}'.format(units))
def f11(xx):
    """
    Analytic force expression replacing the external point number.
    :param xx: the distance between two bodies (or markers)
    """
    denominator = 0.5 * xx * xx + 1.0
    return 20.0 / denominator
import json
def dump_utf8(obj, indent=None):
    """ JSON-encode *obj* without escaping non-ASCII characters (keys sorted). """
    return json.dumps(obj, sort_keys=True, indent=indent, ensure_ascii=False)
def get_id(frame):
    """Return a list of zero-padded 8-character ID strings, one per photo in
    the given photo_frame object.
    """
    total = len(frame.photos)
    ids = []
    for i in range(total):
        text = str(i)
        if len(text) > 8:
            raise ValueError('Over 99 999 999 images')
        ids.append(text.zfill(8))
    return ids
def convert_is_active(x) -> bool:
    """Convert the string 'active' to True, anything else to False."""
    # Idiom fix: the comparison is already a bool; the ternary was redundant.
    return x == "active"
from typing import Iterable
def iscollection(value):
    """Return whether `value` is iterable but not string or bytes."""
    if isinstance(value, (str, bytes)):
        return False
    return isinstance(value, Iterable)
def flag_bad_overlaps(overlaps, min_num_matches):
    """
    Flag overlaps with fewer than *min_num_matches* peak matches by setting
    their entry in the 'good' column to False.

    :param overlaps: Table of overlaps with 'peaks' and 'good' columns.
    :param min_num_matches: int. Minimum peak matches for a well-fit overlap.
    :return: The same table, mutated in place.
    """
    poorly_fit = [row for row in range(len(overlaps))
                  if overlaps[row]['peaks'] < min_num_matches]
    overlaps['good'][poorly_fit] = False
    return overlaps
def standarize_colors(color):
    """Convert color to PIL standard color ('transparent' -> RGBA zeros)."""
    if color == "transparent":
        return (0, 0, 0, 0)
    return color
def _calculate_rejection_probabilities(init_probs, target_probs):
"""Calculate the per-class rejection rates.
Args:
init_probs: The class probabilities of the data.
target_probs: The desired class proportion in minibatches.
Returns:
A list of the per-class rejection probabilities.
This method is based on solving the following analysis:
Let F be the probability of a rejection (on any example).
Let p_i is the proportion of examples in the data in class i (init_probs)
Let a_i is the rate the rejection sampler should *accept* class i
Let t_i is the target proportion in the minibatches for class i (target_probs)
```
F = sum_i(p_i * (1-a_i))
= 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1
```
An example with class `i` will be accepted if `k` rejections occur, then an
example with class `i` is seen by the rejector, and it is accepted. This can
be written as follows:
```
t_i = sum_k=0^inf(F^k * p_i * a_i)
= p_i * a_j / (1 - F) using geometric series identity, since 0 <= F < 1
= p_i * a_i / sum_j(p_j * a_j) using F from above
```
Note that the following constraints hold:
```
0 <= p_i <= 1, sum_i(p_i) = 1
0 <= a_i <= 1
0 <= t_i <= 1, sum_i(t_i) = 1
```
A solution for a_i in terms of the other variabes is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
"""
# Make list of t_i / p_i.
ratio_l = [0 if x[0] == 0 else x[1] / x[0] for x in
zip(init_probs, target_probs)]
# Calculate list of rejection probabilities.
max_ratio = max(ratio_l)
return [1 - ratio / max_ratio for ratio in ratio_l] | cbeffac3002436794d604d936cb1d3ce0cf56fff | 92,169 |
def loadX3C(name):
    """Load an X3C instance in the format:
    n elements
    s set1
    s set2
    ...
    Lines starting with "c" are comments. Return a pair (n, sets) where each
    set is a 3-element list.

    NOTE(review): the old docstring claimed a dummy empty set at index 0,
    but the code never added one; the docstring is corrected here rather
    than changing long-standing behaviour.
    """
    n = 0
    sets = []
    # Context manager guarantees the file is closed even when parsing raises
    # (the old open()/close() pair leaked the handle on exceptions).
    with open(name, "r") as f:
        for line in f:
            s = line.split()
            if len(s) < 1:
                continue
            if s[0] == "c":
                continue
            elif s[0] == "n":
                n = int(s[1])
            elif s[0] == "s":
                sets.append([int(s[1]), int(s[2]), int(s[3])])
    return (n, sets)
def _makeConvNamed(cols):
"""Return a function to be used to convert a list of parameters
from positional style to named style (convert from a list of
tuples to a list of dictionaries."""
nrCols = len(cols)
def _converter(params):
for paramIndex, paramSet in enumerate(params):
d = {}
for i in range(nrCols):
d[cols[i]] = paramSet[i]
params[paramIndex] = d
return params
return _converter | 2da2d0829c30b924d286f977127a9caa2ebbbb1d | 92,177 |
import torch
def tensor_std(t, eps=1e-8):
    """Column-wise standard deviation over dim 0; entries with std < eps are
    replaced by 1 for stability (disabled features)."""
    sigma = t.std(0, keepdim=True)
    return sigma.masked_fill(sigma < eps, 1.0)
def HasRootMotionBone(obj, rootBoneName):
    """
    Return True when the armature has a parentless bone named @rootBoneName.
    @obj (bpy.types.Object). Object.type is assumed to be 'ARMATURE'
    @rootBoneName (string). Name of the root motion bone to compare with
    """
    return any(bone.parent is None and bone.name == rootBoneName
               for bone in obj.data.bones)
import requests
import io
def fetch(url_or_path):
    """Fetch an HTTP(S) url into an in-memory binary buffer, or open the
    local file for binary reading."""
    target = str(url_or_path)
    if target.startswith(("http://", "https://")):
        response = requests.get(url_or_path)
        response.raise_for_status()
        # BytesIO seeded with the content starts at position 0.
        return io.BytesIO(response.content)
    return open(url_or_path, "rb")
def karpathy_transform(I):
    """Karpathy's Pong transform: zero both background colours (144, 109)
    and binarise everything else (paddles, ball) to 1. Mutates I in place."""
    background = (I == 144) | (I == 109)
    I[background] = 0
    I[I != 0] = 1
    return I
def help_msgs(key):
    """Return the command line `--help` message for option *key*."""
    # Bug fix: the 'procs' entry had a stray trailing 'process' literal that
    # implicit string concatenation duplicated ("...1 processprocess"), and
    # 'simulations' said "50,0000" (taken to mean 50,000, matching the
    # stated ~0.1% precision -- confirm against the simulator default).
    msgs={'starting-cards': 'Any number of predefined cards to have in your starting hand. '
                            'The default is no predefined cards (i.e. all 5 starting cards are randomly '
                            'generated on each simulation). See example 1 and 2 printed above for details.',
          'draws': 'The number of draws to run. By default 3 which corresponds to a full hand. '
                   'If you want to simulate a preflop scenario pass the number 0. For one draw scenario pass 1',
          'target': 'The hand you want to simulate the odds of. For example to target 8 Low simply pass 8 to this '
                    'option. The default target is J (Jack low).',
          'simulations': 'The number of simulations to run. By default it is 50,000 which offers '
                         'roughly 0.1% precision around the true odds.',
          'procs': 'The number of parallel processes to use to run the simulations. By defaults this number is '
                   'equal to the number of cores on your machine minus 1. A number less than or equal to 1 '
                   'will default to 1 process',
          'plot': 'displays a chart showing how the simulation\'s results are converging to their '
                  'final value'}
    return msgs[key]
def _pretty_size(n):
"""Convert a size in bytes to a human-readable string"""
if not n:
return None
size = int(n)
shortened = None
for suffix in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 1024:
shortened = '{} {}'.format(round(size), suffix)
break
size /= 1024
return shortened | 563069f290141e0b31a46e5b3f07e5b904c5e49b | 92,200 |
def removeNoneList(elements):
    """
    Normalise *elements* to a list (wrapping a lone object) and drop None
    entries.

    Parameters
    ----------
    elements: object or List[object]

    Returns
    -------
    List[object] with no None values
    """
    items = elements if isinstance(elements, list) else [elements]
    return [item for item in items if item is not None]
from typing import List
from typing import Any
def read_nodes(path: str) -> List[Any]:
    """Read node names from *path*, one per line, stripped of whitespace.

    :param path: path to file
    :return: list of nodes
    """
    with open(path) as handle:
        return [raw.strip() for raw in handle]
import networkx as nx
def create_networkx_undirected_graph(net, unique_source, unique_sink):
"""
Create a NetworkX undirected graph from a Petri net, returning also correspondences for the unique
source and the unique sink places that were discovered
Parameters
-------------
net
Petri net
unique_source
Unique source place
unique_sink
Unique sink place
Returns
-------------
graph
NetworkX graph
unique_source_corr
Correspondence in the NetworkX graph of the unique source place
unique_sink_corr
Correspondence in the NetworkX graph of the unique sink place
inv_dictionary
Correspondence between NetworkX nodes and Petri net entities
"""
graph = nx.Graph()
dictionary = {}
inv_dictionary = {}
for place in net.places:
value = len(dictionary)
dictionary[place] = value
inv_dictionary[value] = place
graph.add_node(dictionary[place])
for transition in net.transitions:
value = len(dictionary)
dictionary[transition] = value
inv_dictionary[value] = transition
graph.add_node(dictionary[transition])
for arc in net.arcs:
graph.add_edge(dictionary[arc.source], dictionary[arc.target])
unique_source_corr = dictionary[unique_source] if unique_source in dictionary else None
unique_sink_corr = dictionary[unique_sink] if unique_sink in dictionary else None
return graph, unique_source_corr, unique_sink_corr, inv_dictionary | b047bf19c2e49eb5ca4aa4f06568e311ae2a6a6b | 92,208 |
import base64
def get_base64_hash_digest_string(hash_object):
    """Takes hashlib object and returns its base64-encoded digest as a str."""
    digest = hash_object.digest()
    return base64.b64encode(digest).decode('utf-8')
def get_first_syl(w):
    """
    Split word *w* into its first syllable (the leading run of one repeated
    character) and the remaining suffix: ((char, run_length), rest).
    """
    assert len(w) > 0
    first = w[0]
    # lstrip removes exactly the leading run of the first character.
    run = len(w) - len(w.lstrip(first))
    return ((first, run), w[run:])
def difference(d1, d2):
    """Return a dictionary with items from *d1* not contained in *d2*.

    A key present in both but with different values is included too.
    """
    return {k: v for k, v in d1.items() if k not in d2 or d2[k] != v}
def allinstance(collection, legal_type):
    """
    Check that every item in *collection* is an instance of *legal_type*.

    Parameters
    ----------
    collection: list, tuple, or set
    legal_type: type

    Returns
    -------
    bool
    """
    if not isinstance(collection, (list, tuple, set)):
        bad = type(collection).__name__
        raise TypeError(f'allinstance expects either list, tuple, or set, not "{bad}" in first parameter')
    if not isinstance(legal_type, type):
        raise TypeError(f'allinstance expects type, not "{legal_type}" in second parameter')
    for item in collection:
        if not isinstance(item, legal_type):
            return False
    return True
def split_string(string):
    """Given a string, return the core string and the number suffix.

    Returns (string_core, num_core); num_core is None when there is no
    numeric suffix. NOTE(review): for an all-digit string the first digit
    stays in the core (historic quirk, preserved).
    """
    assert isinstance(string, str)
    # Walk backwards while characters are ASCII digits.
    i = 1
    for i in range(1, len(string) + 1):
        if string[-i] not in '0123456789':
            break
    idx = len(string) - i + 1
    # Obtain string_core and num_core:
    string_core = string[:idx]
    suffix = string[idx:]
    # Bug fix: the suffix was parsed with eval(), which raises SyntaxError
    # on leading zeros (e.g. "007") in Python 3 and is unsafe in general;
    # int() parses any decimal suffix correctly.
    num_core = int(suffix) if suffix else None
    return string_core, num_core
def convert_string_to_list(value, delimiter):
    """
    Split *value* on *delimiter*; values without a .split method (e.g. None
    or an existing list) are returned unchanged.

    Parameters:
        value (str): string to be split.
        delimiter (str): delimiter used to split the string.

    Returns:
        list: a string converted to a list.
    """
    if hasattr(value, 'split'):
        return value.split(delimiter)
    return value
def clang_find_declarations(node):
    """Finds declarations one level below the Clang node."""
    declarations = []
    for child in node.get_children():
        if child.kind.is_declaration():
            declarations.append(child)
    return declarations
def is_gf_line(line):
    """Return True when *line* is a Stockholm '#=GF' annotation line."""
    return line[:4] == '#=GF'
def promotion_from_char(piece_char):
    """
    Map a promotion piece character (n/b/r/q) to its numeric piece type.
    """
    return {'n': 1, 'b': 2, 'r': 3, 'q': 4}[piece_char]
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
    """ Read a follower-information file and map screen_name -> follower id set.

    Each kept line looks like "<id> <screen_name> <follower_id>...". Names in
    *blacklist* are skipped (with a message); only names whose follower count
    lies in (min_followers, max_followers] are retained.
    """
    result = {}
    with open(fname, 'rt') as fh:
        for raw in fh:
            fields = raw.split()
            if len(fields) <= 3:
                continue
            name = fields[1].lower()
            if name in blacklist:
                print('skipping exemplar', name)
                continue
            followers = {int(tok) for tok in fields[2:]}
            if min_followers < len(followers) <= max_followers:
                result[name] = followers
    return result
def get_article_markup(tree):
    """
    Serialise the parsed tree with an XML declaration prepended, producing
    the markup string to write to file.
    :param tree: A BeautifulSoup tree object
    :return: String
    """
    declaration = '<?xml version="1.0" encoding="UTF-8"?>\n'
    return declaration + str(tree)
import math
def ceil(value):
    """Rounds a number up to the nearest whole number (math.ceil wrapper)."""
    rounded_up = math.ceil(value)
    return rounded_up
def read_input(filename):
    """
    Read data from an input file whose lines look like:
    eg.
    5 14 1 5 6 3 10
    where the first number is N, the second K, and the rest the data set.
    Lines with fewer than two numbers are skipped; a length mismatch between
    N and the data raises TypeError.

    args:
        filename :: string
            path to input file
    returns:
        out :: list of [n, K, ts] entries.
    """
    entries = []
    with open(filename, 'r') as fh:
        for raw in fh:
            fields = raw.split()
            try:
                n, K = int(fields[0]), int(fields[1])
            except IndexError:
                continue
            ts = [int(tok) for tok in fields[2:]]
            if len(ts) != n:
                raise TypeError('N not equal to length of T')
            entries.append([n, K, ts])
    return entries
import re
def parse_ignore_file(ignorefile, include_star=True):
    """Parse a .gitignore or .cfignore file into a list of fnmatch patterns.

    Inline comments are stripped, directory patterns optionally get a '*'
    appended, and a few standard entries are always included. Read errors
    are printed and yield only the standard entries.
    """
    if ignorefile is None:
        return []
    patterns = []
    try:
        with open(ignorefile, 'r') as fh:
            for raw in fh.read().split('\n'):
                if not raw or raw.startswith('#'):
                    continue
                pattern = re.sub(r'\s*#.*', '', raw).strip()
                if include_star and pattern.endswith('/'):
                    pattern += '*'
                patterns.append(pattern)
    except Exception as e:
        print(e)
        patterns = []
    patterns.extend(['.git/', '.gitignore', '.cfignore', 'manifest.yml'])
    return patterns
from typing import Callable
import time
def timer(function: Callable) -> object:
    """Decorator: print the wrapped function's elapsed wall time per call."""
    import functools

    # Improvement: functools.wraps preserves the wrapped function's
    # __name__/__doc__ metadata, which the old wrapper clobbered.
    @functools.wraps(function)
    def wrapper(*arg, **kwargs):
        """Forward all arguments, timing the call."""
        start = time.time()
        result = function(*arg, **kwargs)
        end = time.time()
        used = (end - start) * 1000
        print("-> elapsed time: %.2f ms" % used)
        return result
    return wrapper
from string import ascii_lowercase
def alphabeticalToDecimal(alpha):
    """
    Convert a lowercase bijective base-26 string to its int index:
    'a' -> 0, 'b' -> 1, ..., 'z' -> 25, 'aa' -> 26, 'ab' -> 27.
    :param alpha: str
    :return: int
    """
    assert isinstance(alpha, str) and alpha
    # Horner-style accumulation over the letters, left to right.
    index = -1
    for letter in alpha:
        index = (index + 1) * 26 + ascii_lowercase.index(letter)
    return index
import struct
def read_packet_body(packet, length, header_size):
    """
    Extract *length* bytes of packet body starting after the header.
    On error (packet too short, or not a bytes buffer), returns None.
    :param packet: Full packet
    :param length: Length of body in bytes
    :return: Packet body as a bytes string
    """
    try:
        return struct.unpack_from('!%ds' % length, packet, header_size)[0]
    except (struct.error, TypeError):
        return None
def like_ons_geography(xycell):
    """
    Return True when the cell's .value _appears_ to be an ONS geography
    code: a 9-character string of one uppercase letter followed by 8 digits.
    """
    code = xycell.value
    # Same predicate order as the historic conjunction (short-circuits on
    # non-strings before touching characters).
    return (isinstance(code, str)
            and code[0].isalpha()
            and code[0].isupper()
            and code[1:].isnumeric()
            and len(str(code)) == 9)
def _parse_all_table_tags(soup):
"""Internal function to grab all <table> BeautifulSoup tags corresponding to data tables in the HTML"""
tables = []
# Parse out statistics tables based on class identifiers
for potential_table in soup.find_all('table'):
if potential_table.get('class') is not None and \
'stats_table' in potential_table.get('class') and \
'form_table' not in potential_table.get('class'):
tables.append(potential_table)
return tables | 0aa3afd6c52e80559d491f9413c9892b9b2fb420 | 92,258 |
from typing import Any
def generate_filename(document_date: str, args: Any) -> str:
    """Generate a filename: the CLI-provided one when set, else tdc-<date>."""
    chosen = args.output_file
    return str(chosen) if chosen else f"tdc-{document_date}"
def number_datafile(run_number, prefix="PLP"):
    """
    Resolve a run number or file name to a data file name.

    An int (or int-like) run_number becomes "<prefix><7-digit num>.nx.hdf";
    a string file name gets the .nx.hdf extension appended when missing.

    Parameters
    ----------
    run_number : int or str
    prefix : str, optional
        The instrument prefix. Only used if `run_number` is an int

    Returns
    -------
    file_name : str

    Examples
    --------
    >>> number_datafile(708)
    'PLP0000708.nx.hdf'
    >>> number_datafile(708, prefix='QKK')
    'QKK0000708.nx.hdf'
    >>> number_datafile('PLP0000708.nx.hdf')
    'PLP0000708.nx.hdf'
    """
    try:
        num = abs(int(run_number))
    except ValueError:
        # Not int-like: assume a (possibly extension-less) file name.
        if run_number.endswith(".nx.hdf"):
            return run_number
        return run_number + ".nx.hdf"
    return "{0}{1:07d}.nx.hdf".format(prefix, num)
def payment_series(df, thresh=5):
    """Return true if user has series of payments of same amount.
    Default threshold series length is set to 5 based on
    data inspection: there is a substantial number of users
    who celarly seem to pay monthly but for whom the precise
    amount changes slightly after about 5 or 6 identical payments.
    Relies on equal rather than subsequent payments because
    subsequent payments are sensible to payments for mutliple
    cars (two parallel payment streaks on alternating dates),
    as well as to random/misclassified payments.
    """
    def helper(g):
        # Over amount-sorted rows, diff is 0 within a run of equal amounts,
        # so cumsum stays constant across the run; the most frequent cumsum
        # value therefore gives the longest run length.
        # NOTE(review): the first row of each group yields NaN from diff(),
        # which value_counts() drops -- confirm the run count is intended to
        # exclude that row.
        diff = g.amount.diff()
        longest = diff.cumsum().value_counts().max()
        g['series'] = longest >= thresh
        return g
    if df.empty:
        # Preserve the output schema for empty inputs.
        df['series'] = None
        return df
    # Sorting by user and amount puts equal payments adjacent per user.
    df = df.sort_values(['user_id', 'amount'])
    return df.groupby('user_id').apply(helper)
from typing import Dict
def ipywidgets_js_factory() -> Dict[str, Dict[str, str]]:
    """Create a default ipywidgets js dict.

    Maps each script URL to the HTML attributes (integrity, crossorigin,
    CDN override) to emit on its <script> tag when embedding widgets.
    """
    # see: https://ipywidgets.readthedocs.io/en/7.6.5/embedding.html
    return {
        # Load RequireJS, used by the IPywidgets for dependency management
        "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js": {
            "integrity": "sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=",
            "crossorigin": "anonymous",
        },
        # Load IPywidgets bundle for embedding.
        "https://unpkg.com/@jupyter-widgets/html-manager@^0.20.0/dist/embed-amd.js": {
            "data-jupyter-widgets-cdn": "https://cdn.jsdelivr.net/npm/",
            "crossorigin": "anonymous",
        },
    }
def solve(result, answer):
    """ Validate that the user's *answer* string matches *result*. """
    try:
        parsed = int(answer)
    except ValueError:
        return False
    return result == parsed
def base_repr(number: int, base: int = 10, padding: int = 0) -> str:
    """
    Return a string representation of *number* in the given base (2..36),
    optionally left-padded with *padding* zeros.
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if base > len(digits):
        raise ValueError("Bases greater than 36 not handled in base_repr.")
    elif base < 2:
        raise ValueError("Bases less than 2 not handled in base_repr.")
    pieces = []
    remaining = abs(number)
    while remaining:
        remaining, digit = divmod(remaining, base)
        pieces.append(digits[digit])
    if padding:
        pieces.append("0" * padding)
    if number < 0:
        pieces.append("-")
    # pieces is least-significant first; an empty result means zero.
    return "".join(reversed(pieces or "0"))
from datetime import datetime
def prior_month(dte):
    """
    Compute the datetime of the first day of the prior month.

    Argument:
    dte - a date
    """
    if dte.month == 1:
        # January wraps back to December of the previous year.
        return datetime(dte.year - 1, 12, 1)
    return datetime(dte.year, dte.month - 1, 1)
import torch
def complex_modulus(input_array):
    """Computes complex modulus.

    Parameters
    ----------
    input_array : tensor
        Input tensor whose last dimension holds [real, imaginary] parts.

    Returns
    -------
    modulus : tensor
        Same size as input_array; modulus[..., 0] holds the complex
        modulus and modulus[..., 1] = 0.
    """
    squared_norm = (input_array ** 2).sum(-1)
    modulus = torch.zeros_like(input_array)
    modulus[..., 0] = squared_norm.sqrt()
    return modulus
def inputs_recursive_indent(text, depth):
    """
    Wrap *text* in a <td> whose left padding reflects *depth* in the <tr>.
    """
    padding = (depth - 1) * 10
    return f'<td style="padding-left:{padding}px">{text}</td>'
def get_common(a, b):
    """Retrieve elements in common between sets a and b"""
    return a.intersection(b)
from typing import Dict
from typing import Optional
from typing import Any
def get_injection_requests(
    type_hints: Dict[str, type], cname: str, component: Optional[Any] = None
) -> Dict[str, type]:
    """
    Filter *type_hints* down to the variables that still need injection.

    Private names and attributes already present on *component* are
    skipped; generic aliases are reduced to their origin class. A non-type
    annotation raises TypeError with guidance.

    :param type_hints: The type hints to inspect.
    :param cname: The component name.
    :param component: The component if it has been instantiated.
    """
    requests = {}
    for attr_name, annotation in type_hints.items():
        # Skip private variables and attributes that are already set.
        if attr_name.startswith("_") or hasattr(component, attr_name):
            continue
        # typing generics (e.g. List[int]) inject their origin class (list).
        origin = getattr(annotation, "__origin__", None)
        if origin is not None:
            annotation = origin
        if not isinstance(annotation, type):
            raise TypeError(
                f"Component {cname} has a non-type annotation {attr_name}: {annotation}\n"
                "Lone non-injection variable annotations are disallowed, did you want to assign a static variable?"
            )
        requests[attr_name] = annotation
    return requests
import dill
def pairwise_worker(payload):
    """Worker that finds similar pairs within one chunk of an upper
    triangular pairwise-comparison matrix.

    *payload* is ``(similarity, I, J, offset_i, offset_j)`` where
    ``similarity`` is a dill-pickled predicate and the offsets locate the
    chunk in the full matrix. Returns a list of global (i, j) index pairs.
    """
    similarity_blob, row_items, col_items, offset_i, offset_j = payload
    similarity = dill.loads(similarity_blob)
    on_diagonal = offset_i == offset_j
    if on_diagonal:
        # Diagonal chunks compare the row items against themselves.
        col_items = row_items
    matches = []
    for i, left in enumerate(row_items):
        # On the diagonal only the strict upper triangle is visited.
        start = i + 1 if on_diagonal else 0
        for j in range(start, len(col_items)):
            if similarity(left, col_items[j]):
                matches.append((offset_i + i, offset_j + j))
    return matches
def select_by_attributes(gdf, field, value, operator='IN'):
    """
    Filter a ``geopandas.GeoDataFrame`` data.

    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        The vector data.
    field : str
        Column to be filtered.
    value : int or float
        Value to compare when filtering.
    operator : str, default=IN
        Arithmetic operator to be used when filtering.
        Options include: IN, NOT IN, EQUALS, NOT EQUALS, LT, GT, LTE,
        and GTE.

    Returns
    -------
    filtered_data : geopandas.GeoDataFrame
        The filtered data.

    Raises
    ------
    ValueError
        If ``operator`` is not one of the supported options.  (The original
        implementation silently returned ``None`` for unknown operators.)
    """
    # Map each operator name to the boolean mask it produces on the column.
    operations = {
        'IN': lambda col: col.isin(value),
        'NOT IN': lambda col: ~col.isin(value),
        'EQUALS': lambda col: col == value,
        'NOT EQUALS': lambda col: col != value,
        'LT': lambda col: col < value,
        'GT': lambda col: col > value,
        'LTE': lambda col: col <= value,
        'GTE': lambda col: col >= value,
    }
    if operator not in operations:
        raise ValueError(
            f"Unsupported operator {operator!r}; expected one of "
            f"{sorted(operations)}"
        )
    return gdf[operations[operator](gdf[field])]
def filter_sentence_length(sentence_pairs, max_length):
    """Keep only sentence pairs where both sides have fewer than
    *max_length* space-separated words.

    Args:
        sentence_pairs (list): list of [source, target] sentence pairs.
        max_length (int): exclusive upper bound on words per sentence.

    Returns:
        list: the surviving [source, target] pairs.
    """
    def short_enough(sentence):
        return len(sentence.split(" ")) < max_length

    return [[src, tgt] for src, tgt in sentence_pairs
            if short_enough(src) and short_enough(tgt)]
def _default_extract_pre(hook, args):
    """Default extract_fn when ``timing='pre'``.

    Args:
        hook (VariableMonitorLinkHook): unused by the default extractor.
        args (_ForwardPreprocessCallbackArgs): callback arguments.

    Returns:
        chainer.Variable: the first input variable passed to the link.
    """
    link_inputs = args.args
    return link_inputs[0]
def files_paths(paths):
    """Return only the entries of *paths* that are regular files."""
    return [entry for entry in paths if entry.is_file()]
def create_fold_time(df, frac, date_column):
    """create splits based on time
    Args:
        df (pd.DataFrame): the dataset dataframe
        frac (list): list of train/valid/test fractions
        date_column (str): the name of the column that contains the time info
    Returns:
        dict: a dictionary of split dataframes, where keys are train/valid/test
            (plus 'split_time', the (start, end) date range of each split) and
            values correspond to each dataframe
    """
    # Order chronologically so positional slicing corresponds to time order.
    df = df.sort_values(by = date_column).reset_index(drop = True)
    train_frac, val_frac, test_frac = frac[0], frac[1], frac[2]
    # Date separating (train+valid) from test: the last date within the first
    # (train_frac + val_frac) share of rows.
    split_date = df[:int(len(df) * (train_frac + val_frac))].iloc[-1][date_column]
    # Rows on the split date itself go to test (>=), so the realized fractions
    # can deviate slightly when many rows share the same timestamp.
    test = df[df[date_column] >= split_date].reset_index(drop = True)
    train_val = df[df[date_column] < split_date]
    # Second cut inside train_val: train takes dates <= split_date_valid,
    # valid takes the strictly later ones.
    split_date_valid = train_val[:int(len(train_val) * train_frac/(train_frac + val_frac))].iloc[-1][date_column]
    train = train_val[train_val[date_column] <= split_date_valid].reset_index(drop = True)
    valid = train_val[train_val[date_column] > split_date_valid].reset_index(drop = True)
    # 'split_time' records the date interval covered by each split.
    return {'train': train, 'valid': valid, 'test': test, 'split_time': {'train_time_frame': (df.iloc[0][date_column], split_date_valid),
                                                                      'valid_time_frame': (split_date_valid, split_date),
                                                                      'test_time_frame': (split_date, df.iloc[-1][date_column])}}
import io
def data_to_binary(obj, serializer, **kwargs):
    """Serialize *obj* to bytes using the given serializer callback.

    Args:
        obj (any): Object to serialize.
        serializer (Callable): Callback taking ``(file_like, obj, **kwargs)``
            that writes the serialized form into the file-like object.
        kwargs: Options forwarded to the serializer.

    Returns:
        bytes: The serialized binary data.
    """
    buffer = io.BytesIO()
    try:
        serializer(buffer, obj, **kwargs)
        return buffer.getvalue()
    finally:
        buffer.close()
def interpolate_tempfilt_loop(tempfilt, zgrid, zi, output):
    """
    Linearly interpolate an Eazy "tempfilt" grid at redshift ``zi``.

    ``tempfilt`` is a [NFILT, NTEMP, NZ] integrated flux matrix,
    ``zgrid`` is the [NZ] redshift grid, and ``output`` is a preallocated
    [NFILT, NTEMP] array that is filled in place and returned.
    """
    shape = tempfilt.shape
    nfilt, ntemp, nz = shape[0], shape[1], shape[2]
    for iz in range(nz - 1):
        dz = zgrid[iz + 1] - zgrid[iz]
        # Weight of the lower grid point; in (0, 1] iff zi falls in this bin.
        w_lo = 1 - (zi - zgrid[iz]) / dz
        if 0 < w_lo <= 1:
            w_hi = 1 - (zgrid[iz + 1] - zi) / dz
            for ifilt in range(nfilt):
                for itemp in range(ntemp):
                    output[ifilt, itemp] = (tempfilt[ifilt, itemp, iz] * w_lo
                                            + tempfilt[ifilt, itemp, iz + 1] * w_hi)
            break
    return output
def anova_table(aov):
    """Add effect size measures to a statsmodels ANOVA table.

    Statsmodels' ANOVA table does not provide any effect size measures to
    tell whether statistical significance is meaningful, so eta-squared and
    omega-squared are computed here.  Omega-squared is considered the better
    measure since it accounts for the model's degrees of freedom.

    The input table is modified in place; the effect-size columns for the
    residual row are left as NaN.

    Fix: the residual mean square was read with ``aov['mean_sq'][-1]``,
    whose positional fallback on a label index was removed in pandas 2.x;
    ``.iloc[-1]`` is used instead.
    """
    aov['mean_sq'] = aov['sum_sq'] / aov['df']
    total_ss = aov['sum_sq'].sum()
    # Residual mean square lives in the last row of the table.
    residual_ms = aov['mean_sq'].iloc[-1]
    # Effect sizes are defined for the factor rows only (all but the last).
    aov['eta_sq'] = aov[:-1]['sum_sq'] / total_ss
    aov['omega_sq'] = ((aov[:-1]['sum_sq'] - aov[:-1]['df'] * residual_ms)
                       / (total_ss + residual_ms))
    cols = ['sum_sq', 'df', 'mean_sq', 'F', 'PR(>F)', 'eta_sq', 'omega_sq']
    return aov[cols]
def _id_to_element_type(player_id, players):
    """Convert a player's ID to their element type (1, 2, 3 or 4).

    :param player_id: A player's ID.
    :type player_id: int
    :param players: List of all players in the Fantasy Premier League.
    :type players: list
    :return: The player's element type.
    :rtype: int
    """
    matching = (p for p in players if p["id"] == player_id)
    return next(matching)["element_type"]
def weighted_mean(dataset1, dataset2,
                  weight1=1., weight2=5.):
    """Return the weighted average of two data items."""
    total_weight = weight1 + weight2
    weighted_sum = weight1 * dataset1 + weight2 * dataset2
    return weighted_sum / total_weight
def _get_mode(series):
    """Return the first mode value of *series*, or None if it is empty."""
    modes = series.mode()
    if modes.empty:
        return None
    return modes.iloc[0]
import math
def fib(n):
    """Return the n-th Fibonacci number, computed iteratively.

    Raises ValueError if n < 1 or n is not an exact integer.
    """
    if n < 1:
        raise ValueError("expected integer")
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    prev, curr = 1, 1
    for _ in range(2, n + 1):
        prev, curr = curr, prev + curr
    return prev
import re
def __clean_sponsored_by_override(sponsored_by_override):
    """
    Return an empty string for 'sponsored_by_override' if the value in AdZerk
    is set to "blank" or "empty" (case-insensitive, after stripping whitespace).

    @type sponsored_by_override: str
    """
    cleaned = sponsored_by_override.strip()
    return re.sub(r'^(blank|empty)$', '', cleaned, flags=re.IGNORECASE)
from typing import Awaitable
from typing import Any
import asyncio
def waitEvent(emitter, event_name: str) -> Awaitable[Any]:
    """
    Return a future that resolves with the event's details the first time
    *event_name* is emitted from *emitter*.

    :param emitter: emitter to attach the one-shot callback to
    :param event_name: name of the event that triggers the callback
    :return: Awaitable[Any]
    """
    future = asyncio.get_event_loop().create_future()

    def _resolve(arg=None):
        # One-shot: 'once' guarantees this runs at most a single time.
        future.set_result(arg)

    emitter.once(event_name, _resolve)
    return future
def _parse_package(line):
    """
    Parse an exact package specification from a single line of a Gemfile.lock.

    The Gemfile.lock format uses two spaces for each level of indentation.
    The lines of interest are nested underneath the `GEM.specs` section
    (the concrete gems required and their exact versions); they have four
    leading spaces and consist of the package name and exact version in
    parenthesis:

    > gem-name (gem-version)

    Returns a dict with two fields:

    > { "name": "gem-name", "version": "gem-version" }

    If the line does not match that format, `None` is returned.
    """
    # Concrete gem lines are exactly four spaces deep followed by a name.
    # The length guard also fixes an IndexError the original raised on
    # blank/short all-whitespace lines (e.g. "    ").
    if len(line) < 5 or not line[:4].isspace():
        return None
    suffix = line[4:]
    # A fifth leading space means a deeper nesting level (a dependency).
    if suffix[0].isspace():
        return None
    version_start = suffix.find(" (")
    if version_start < 0:
        return None
    return {
        "name": suffix[:version_start],
        # Skip " (" and drop the trailing ")".
        "version": suffix[version_start + 2:-1],
    }
def DeltaX_difference(Xs, agents):
    """Compute X(t-1) - X(t) for each X in Xs under the agents' TD step."""
    step = agents.TDstep
    return [step(state) - state for state in Xs]
def get_length(self):
    """Return the current number of elements in the Array."""
    return len(self)
from typing import Union
from typing import Collection
def _round(value: Union[Collection[float], float], places: int = 6):
    """Round a value, or recursively every value in a Collection, to a set
    precision.

    Raises ValueError for values that cannot be rounded.  Strings and bytes
    are rejected explicitly: they are Collections, so the original code
    recursed on them forever (RecursionError) instead of raising.
    """
    if isinstance(value, (float, int)):
        return round(value, places)
    if isinstance(value, (str, bytes)):
        raise ValueError(f"Cannot round value of type {type(value)}")
    if isinstance(value, Collection):
        # Collections are always returned as lists, matching the original.
        return [_round(v, places) for v in value]
    raise ValueError(f"Cannot round value of type {type(value)}")
def get_only_child(node):
    """
    Return the single child of *node* (children are stored under integer
    keys), or None when the node has zero or more than one child.
    """
    children = [node[key] for key in node if isinstance(key, int)]
    return children[0] if len(children) == 1 else None
def isAnagram(string1, string2):
    """Check whether two strings are anagrams of each other.

    An anagram is a word or phrase formed by rearranging the letters of a
    different word or phrase.  Spaces and letter case are ignored.

    @param string1 The first word or phrase
    @param string2 The second word or phrase
    @return True if the strings are anagrams, False otherwise
    """
    def normalized(phrase):
        # Strip spaces, lowercase, and sort the remaining characters.
        return sorted(phrase.replace(" ", "").lower())

    return normalized(string1) == normalized(string2)
import json
def json_to_list(filename):
    """Parse a JSON file and return the resulting Python object.

    Fixes a resource leak: the original called ``json.load(open(filename))``
    and never closed the file handle.
    """
    with open(filename) as f:
        return json.load(f)
from typing import Union
def multiple_round(num: float, multiple: Union[float, int]) -> Union[float, int]:
    """Round *num* to the nearest multiple of *multiple*.

    Args:
        num: Value to round.
        multiple: Multiple-of to round towards.
    """
    steps = round(num / multiple)
    return steps * multiple
def pks(qset):
    """Return the sorted list of primary keys for a QuerySet's results."""
    return sorted(qset.values_list('pk', flat=True))
def get_tuple_in_list(list_of_tuples, key):
    """
    Return the first (key, value) tuple whose key matches, or None.
    """
    return next(((k, v) for k, v in list_of_tuples if k == key), None)
def tokenize_text(text, tokenizer):
    """Tokenize *text* with the given tokenizer and return the token ids.

    Capped at 512 tokens to stay within the model's maximum sequence
    length; special tokens are included.
    """
    return tokenizer.encode(text, add_special_tokens=True, max_length=512)
def _recall(tp, fn):
    """Recall = tp / (tp + fn), defined as 1 when there are no false negatives."""
    if fn == 0:
        # No positives were missed, so recall is perfect by definition.
        return 1
    return tp / (tp + fn)
def convert_period_into_pandas_freq(period_str):
    """Convert a period string into the frequency format used by pandas.

    Args:
        period_str (string): period using m, h and d for minutes, hours
            and days, e.g. "15m", "4h", "1d".

    Returns:
        string: pandas frequency string: 1m -> 1min, 1h -> 1H, 1d -> 1D.
    """
    digits = ''.join(ch for ch in period_str if ch.isdigit())
    unit = ''.join(ch for ch in period_str if ch.isalpha())
    if unit == 'm':
        # Minutes need the full "min" suffix; "M" would mean month-end.
        return digits + 'min'
    return period_str.upper()
def sort_vias_by_row(layout_area, row_height, vias):
    """
    Bucket vias into rows by y-coordinate and sort each row by x-coordinate.

    :param layout_area: a list [x, y] that stores the area of the layout
    :param row_height: height of one placement row
    :param vias: a list of vias; each via's first element is its (x, y) point
    :return: a list of rows, each containing the vias in that row sorted by x.
    """
    row_count = layout_area[1] // row_height + 1
    rows = [[] for _ in range(row_count)]
    for via in vias:
        # via[0] is the (x, y) location; y determines the row bucket.
        rows[via[0][1] // row_height].append(via)
    for row in rows:
        row.sort(key=lambda v: v[0][0])
    return rows
import re
def ExtractRegex(pat, str):
    """Return the first captured group of the regex, or None when the
    pattern does not match or has no capture group."""
    match = re.search(pat, str, re.DOTALL)
    if match is None:
        return None
    try:
        return match.group(1)
    except IndexError:
        # Pattern matched but defined no capture group.
        return None
def should_retry_command(run_command_result):
    """Return True when the command producing this output should be retried.

    Retry on transient network failures:
    - "No route to host"
    - "Connection refused"

    Takes the (out, err, retcode) tuple produced by run_command().
    """
    transient_errors = ("No route to host", "Connection refused")
    error_text = ''.join(run_command_result[1])
    return any(message in error_text for message in transient_errors)
def calculate_tax_for_bracket(
    income: float, tax_bracket: int, next_bracket: int, tax_rate: float
):
    """Calculate the tax owed for a single bracket.

    :param income: the income to tax.
    :param tax_bracket: the bracket's lower threshold being checked.
    :param next_bracket: the next bracket boundary; only income above it is
        taxed here, and it becomes the remaining income for later brackets.
    :param tax_rate: the tax rate for this bracket.
    :return: (taxed amount, remaining income to pass to the next bracket).
    """
    # Income below this bracket's threshold owes nothing here.
    if income < tax_bracket:
        return 0, income
    taxable_income = income - next_bracket
    return taxable_income * tax_rate, next_bracket
import torch
def one_hot(index, n_cat, dropColumn=False):
    """
    One-hot encode a (n_samples, 1) index tensor into (n_samples, n_cat).

    Optionally drops the first column of the encoding to prevent
    colinearity among the predictors — only needed when the values of the
    inferred parameters matter during inference.
    """
    encoded = torch.zeros(index.size(0), n_cat, device=index.device)
    encoded.scatter_(1, index.type(torch.long), 1)
    encoded = encoded.type(torch.float32)
    if dropColumn:
        return encoded[:, 1:]
    return encoded
def is_const(obj):
    """Return True if *obj* looks like a named constant: a non-empty string
    whose first character is alphabetic and which is entirely uppercase.

    Fixes: the original raised IndexError on the empty string and returned
    None (instead of False) for None input.
    """
    if not isinstance(obj, str) or not obj:
        return False
    return obj[0].isalpha() and obj.upper() == obj
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.