content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import re
import functools
def rename_column_headers(df, pattern,
                          repl):
    """Rename dataframe column headers via regex substitution.

    Args:
        df: A pandas dataframe.
        pattern: Regex pattern to replace.
        repl: Replacement string to insert where old string was.

    Returns:
        Dataframe with renamed columns.
    """
    # `df.rename` calls the mapper once per column label.
    substitute = functools.partial(re.sub, pattern, repl)
    return df.rename(columns=substitute)
import os
def is_open_dcos():
    """Determine if the tests are being run against open DC/OS. This is presently done by
    checking the envvar DCOS_ENTERPRISE (missing or "true" means enterprise)."""
    enterprise_flag = os.environ.get("DCOS_ENTERPRISE", "true")
    return enterprise_flag.lower() != "true"
def way_roy(routing, start, end):
    """Return the route from the start to the end as a list for the Roy-Warshall algorithm"""
    path = []
    node = start
    # Walk successor pointers from the routing matrix until we hit the target.
    while node != end:
        path.append(node)
        node = routing[node][end]
    path.append(end)
    return path
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
    """get the feature names and tf-idf score of top n items (topn <= 0 keeps all)"""
    selected = sorted_items if topn <= 0 else sorted_items[:topn]
    return dict((feature_names[i], score) for i, score in selected)
import argparse
def add_s3_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
    """
    Add the command line options for the AWS credentials.

    :param parser: parser to extend (mutated in place)
    :return: the same parser, for chaining
    """
    options = (
        ("--aws_profile",
         "The AWS profile to use for `.aws/credentials` or for env vars"),
        ("--s3_path",
         "Full S3 dir path to use (e.g., `s3://alphamatic-data/foobar/`), "
         "overriding any other setting"),
    )
    for flag, help_text in options:
        parser.add_argument(flag, action="store", type=str, default=None,
                            help=help_text)
    return parser
def format_program_client_stats(row, prefix):
    """
    Given a program in the facility DF (specified by string prefix),
    format the client stats (gender, pets, ADA, EMS calls/visits).

    Parameters:
    ===========
    row: pandas.Series
        The row of the df to format
    prefix: str
        The prefix for all the stats entries (e.g., 'trailers_', 'isolation_', etc)

    Returns
    =======
    An HTML string of the formatted client stats.
    """
    def _stat(name):
        # All stats columns share the program prefix.
        return row[prefix + name]

    men = _stat("MALE") + _stat("TRANSGENDER_F_TO_M")
    women = _stat("FEMALE") + _stat("TRANSGENDER_M_TO_F")
    nonbinary = _stat("DECLINED") + _stat("OTHER") + _stat("UNDEFINED")
    entries = [
        ("Women", women),
        ("Men", men),
        ("Nonbinary/other/declined", nonbinary),
        ("Pets", _stat("totalPets")),
        ("Clients with ADA needs", _stat("totalAda")),
        ("EMS calls (last 24 hours)", _stat("EMS_CALL")),
        ("EMS visits (last 24 hours)", _stat("EMS_VISIT")),
    ]
    paragraph = (
        '<p style="margin-top:2px; margin-bottom: 2px; margin-left: 16px">\n'
        '      {label}: {value}\n'
        '    </p>'
    )
    body = "\n    ".join(paragraph.format(label=label, value=value)
                         for label, value in entries)
    return f"\n    {body}\n    "
import functools
def lookup_with_default(fnc):
    """
    Wraps ConfigParser lookups, catching KeyError and providing defaults.

    :arg fnc: Function to be decorated.
    """
    @functools.wraps(fnc)
    def wrapper(self, name, *args, **kwargs):
        try:
            return fnc(self, name)
        except KeyError:
            # Only a keyword `default` suppresses the lookup failure.
            if 'default' not in kwargs:
                raise
            return kwargs['default']
    return wrapper
def s2ms(data):
    """
    Convert each point's leading timestamp from seconds to milliseconds.

    :param data: iterable of (timestamp, value...) sequences
    :return: list of lists with the first element multiplied by 1000
    """
    return [[point[0] * 1000, *point[1:]] for point in data]
def observer(*fields):
    """ Observer decorator

    The `observer` decorator takes `*args` which represent django
    field names that should be observed for mutations.

    The `ObserverMixin` is responsible for monitoring the fields
    for mutation & acting on it but the decorator takes the list
    of fields to observe & adds them to the wrapped function as
    a private `_observed_fields` property.
    """
    def observer_wrapper(func):
        """ Add the hidden property with the fields to observe """
        # FIX: the failure message used to claim the required prefix was
        # "_observer_" while the actual check is for "_observe_".
        assert func.__name__.startswith('_observe_'), \
            'Observed method names must begin with "_observe_" not %s' % func.__name__
        # pylint: disable=protected-access
        func._observed_fields = fields
        return func
    return observer_wrapper
from typing import Tuple
def time_units(total_seconds: int) -> Tuple[int, int, int]:
    """Convert a given number of seconds to hours, minutes and seconds.

    Parameters
    ----------
    total_seconds : int
        Total number of seconds to convert.

    Returns
    -------
    int, int, int
        Three integers: the seconds, minutes and hours of the conversion
        (note the order: seconds first).
    """
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return seconds, minutes, hours
def input_ecg_ch_name():
    """Prompt on stdin for the ECG channel name and return it (str)."""
    prompt = "[IN] Input the ECG channel name:\n>>> "
    return input(prompt)
def row_count(filename):
    """ Counts the data rows in a given file (header row excluded). """
    with open(filename, 'r') as handle:
        total = sum(1 for _ in handle)
    # Remove header row from count; an empty file stays at 0.
    return max(total - 1, 0)
def get_subword_for_word0(word, n1=4, n2=5, include_self=False):
    """Extract prefixes and suffixes of length n1..n2 (capped at len(word))."""
    length = len(word)
    lo, hi = min(n1, length), min(n2, length)
    prefixes = [word[:k] for k in range(lo, hi + 1)]
    suffixes = [word[-k:] for k in range(lo, hi + 1)]
    subwords = prefixes + suffixes
    if include_self:
        subwords.append(word)
    return subwords
def sample_array_str(self):
    """
    Create a human readable string for a SampleArray.

    :param self: object exposing `name`, `container_type` and `contents`
    :return: str
    """
    return (f"SampleArray(name={self.name}, "
            f"container_type={self.container_type}, "
            f"contents={self.contents})")
from typing import Any
def ensure_field(dictlike: dict, fieldname: str) -> Any:
    """Ensure the required field is found in the data structure.

    Raises UserWarning when the field is absent; returns its value otherwise.
    """
    missing = object()  # sentinel: distinguishes "absent" from a stored None
    found = dictlike.get(fieldname, missing)
    if found is missing:
        raise UserWarning('{!r} is a required field'.format(fieldname))
    return found
import argparse
def parse_args():
    """Parse the user command line arguments; return (product, quiet)."""
    parser = argparse.ArgumentParser(description='Calculates the overall profits from limit orders on a product in Coinbase Pro.\nExample:\npython profits_calculator.py ETH-USD')
    parser.add_argument('product', type=str,
                        help='Cryptocurrency product to calculate profits for, e.g. ETH-USD, BTC-USD, MATIC-USD')
    parser.add_argument('--quiet', '-q', action='store_true',
                        help='Flag. If set, runs code in quiet mode.')
    namespace = parser.parse_args()
    # Unpack the namespace into plain values (callers don't use namespaces).
    if not namespace.quiet:
        print(f"product = {namespace.product}")
        print(f"quiet = {namespace.quiet}")
    return namespace.product, namespace.quiet
def delta_t_formula_morrison_and_stephenson_2004(tt, fraction=0.0):
    """∆T long-term parabola from Morrison and Stephenson, 2004."""
    # Julian centuries before/after 1820 (2385800.5 is the epoch's JD).
    centuries = (tt - 2385800.5 + fraction) / 36525.0
    return 32.0 * centuries * centuries - 20.0
import torch
def prepare_gt(input_img, gt_bboxes, gt_classes):
    """
    Normalize ground-truth boxes for an image.

    args:
        - input_img: image array of shape HxWxC (NOTE(review): the original
          docstring said "PIL image", but `.shape` unpacking implies a
          numpy-like array — confirm with callers)
        - gt_bboxes - list of bounding boxes in (x, y, w, h) pixel format
        - gt_classes - list of category ids
    return:
        gt[0] = tensor of bboxes of objects in image scaled [0,1], in (CENTER, w, h) format
        gt[1] = tensor of class ids in image
    """
    boxes = torch.FloatTensor(gt_bboxes)
    labels = torch.IntTensor(gt_classes)
    height, width, _ = input_img.shape
    for i in range(boxes.size(0)):
        x, y, w, h = boxes[i].clone()  # clone: avoid aliasing while writing back
        boxes[i, 0] = (x + w / 2) / width
        boxes[i, 1] = (y + h / 2) / height
        boxes[i, 2] = w / width
        boxes[i, 3] = h / height
    return [boxes, labels]
def iter_to_dict_frequency(iterable):
    """
    Creates a dictionary from an iterable whose values are the counts of
    each unique item.

    Uses collections.Counter instead of the hand-rolled setdefault loop;
    the plain-dict return type is preserved.
    """
    from collections import Counter  # stdlib; local import keeps the block self-contained
    return dict(Counter(iterable))
def login_response_fixture():
    """Define a fixture to return a successful login response."""
    response = {"token": "abcd1234"}
    return response
def parsedcommand(obj):
    """
    Decorator to flag documented expression commands available to users.
    Used with the usage() method.
    """
    setattr(obj, 'parsedcommand', True)
    return obj
import six
def is_urlsource(sourcedata):
    """Whether sourcedata is of url kind (an http(s) URL string).

    NOTE: `six.string_types` is simply `(str,)` on Python 3, so `str` is
    equivalent and drops the third-party `six` dependency.
    """
    return isinstance(sourcedata, str) and sourcedata.startswith('http')
def arcsinh(x):
    """
    [Definition] symbolic inverse hyperbolic sine of x
    [Category] inverse hyperbolic functions
    domain of definition: R
    """
    return f'arcsinh({x})'
import re
def match_sample_in_data_frame(df, regex, column):
    """Use ``regex`` to parse out information from the ``column`` column of the ``df`` DataFrame.

    Each named group in ``regex`` becomes a new column inserted directly after
    ``column``; rows that do not match get ``None``.

    NOTE(review): ``df.insert`` mutates ``df`` in place, and the same frame is
    also returned.

    Return an augmented DataFrame.
    """
    if not df.shape[0]:
        return df  # short-circuit empty
    # Get shortcut to column with query/filenames.
    col_query = df.loc[:, column]
    # Obtain list of new column names, sorted by occurence, must use dict for that.
    names = {}
    for query in col_query:
        m = re.match(regex, query)
        if m:
            for key in m.groupdict():
                names[key] = True
    names = list(names.keys())
    # Build new column values (one list per new column, aligned with rows).
    columns = {n: [] for n in names}
    for query in col_query:
        m = re.match(regex, query)
        if m:
            for name in names:
                columns[name].append(m.groupdict().get(name))
        else:
            # Non-matching rows get None for every new column.
            for name in names:
                columns[name].append(None)
    # Insert new columns into df, directly after the source column.
    idx = df.columns.get_loc(column)
    for i, (key, column) in enumerate(columns.items()):
        df.insert(idx + i + 1, key, column)
    return df
def _parsed_to_dict(fmhdata, nested_fmh_section):
    """Converts the raw construct data to our standard dictionary format.

    Args:
        fmhdata: parsed construct container for a font archive.
        nested_fmh_section: key of a wrapper section to unwrap first, or a
            falsy value when ``fmhdata`` is already the payload.

    Returns:
        dict with keys ``'fmh3_type'`` (decoded signature string) and
        ``'fonts'`` (list of per-font dicts with parser bookkeeping removed).
    """
    if nested_fmh_section:
        # Unwrap the nested section to reach the actual payload.
        magic_str = fmhdata[nested_fmh_section]['section_outer']['section']['signature'].decode('ascii')
        fmhdata = fmhdata[nested_fmh_section]['section_outer']['section']['data']
    else:
        magic_str = fmhdata['signature'].decode('ascii')
    fonts = []
    for font in fmhdata['fonts']:
        tmp = dict(font['data'])
        # Drop construct-internal bookkeeping entries.
        del tmp['_io']
        del tmp['chars_count']
        del tmp['chars_pointer']
        tmp['chars'] = [dict(char) for char in tmp['chars']]
        for char in tmp['chars']:
            del char['_io']
        fonts += [tmp]
    return {'fmh3_type': magic_str, 'fonts': fonts}
def convert_nothing(x):
    """Identity converter: return the argument unchanged (useful as a no-op)."""
    return x
import requests
def get_registered_mars(management_url: str) -> dict:
    """
    View all registered mar files.

    Parameters:
        management_url (str): TorchServe management url
    Return:
        registered_mars (dict)
    """
    # Normalise the base URL so we never produce a double slash.
    endpoint = f"{management_url.rstrip('/')}/models"
    return requests.get(endpoint).json()
import os
def create_if_not_exist(path):
    """
    An alias to `os.makedirs(path, exist_ok=True)` that also returns the path.
    Useful in expressions such as:

    >>> path = create_if_not_exist(os.path.join(dir, subdir, anotherdir))  # doctest: +SKIP

    :param path: path to create
    :return: the path itself
    """
    os.makedirs(path, exist_ok=True)
    return path
import traceback
import os
def getFileAndLine(back=0):
    """
    Return a 3-ple consisting of
    (a) the basename of the file (or other source) responsible for the stack frame
        that called this function, and
    (b) the line number in said file at which said call occurred.
    (c) the method from which the call was made

    Arguments
        back - report the frame an integer number of frames either farther (back>0) or
               less far (back<0) into the call stack than the one that called this
               function. (default: 0)
               This is useful for implementing a wrapper around this function -- instead
               of reporting the (probably useless) file/line of the call to this
               function, back=1 means report the file/line of the call to the wrapper.
    """
    callStack = traceback.extract_stack()
    # Frame -1 is this frame. Frame -2 is the calling frame. Frame -3 would be 1
    # farther than the calling frame.
    framesFromEnd = 2 + back
    frameOfInterest = callStack[-framesFromEnd]
    # Frame summary indices: [0]=filename, [1]=lineno, [2]=function name.
    basename = os.path.basename(frameOfInterest[0])
    if basename == "__init__.py":
        # "__init__.py" is uninformative; report the package directory instead.
        basename = os.path.basename(os.path.dirname(frameOfInterest[0])) + "/"
    return (basename, frameOfInterest[1], frameOfInterest[2])
import ast
def string_to_dict(string):
    """Function to convert json strings to dictionary.

    NOTE(review): this uses ast.literal_eval, which parses *Python* literals,
    not strict JSON — JSON `true`/`false`/`null` will raise, while Python
    `True`/`False`/`None` work. Confirm whether inputs are repr() strings or
    real JSON before relying on this.
    """
    return ast.literal_eval(string)
def process_lstn_activation(df):
    """
    Calculates the score improvement normalized by voltage for each patient.

    :param df: dataframe with columns 'Patient', 'Motor score (on stim)',
        'Motor score (off stim)' and 'Voltage [V]'
    :returns: improvement score normalized by voltage for both the sides
        in each patient (a pandas Series, ordered by patient)
    """
    ordered = df.sort_values(['Patient'])
    # Off/on ratio measures improvement; dividing by voltage normalizes it.
    ratio = ordered['Motor score (off stim)'] / ordered['Motor score (on stim)']
    return ratio / ordered['Voltage [V]']
def get_adverse_outcome_lookahead_label_key(time_window_hours: int) -> str:
    """Returns the lookahead label key for the provided time window in hours."""
    return "adverse_outcome_within_{}h".format(time_window_hours)
def googleplus_html_scope(value):
    """
    This is meant to be used as attribute to html / body or other tags to
    define schema.org type.
    """
    return f' itemscope itemtype="http://schema.org/{value}" '
import random
def d6():
    """Return a uniformly random integer in [1, 6], simulating one roll of a six-sided die."""
    return random.randint(1, 6)
def kill_min_items(classes):
    """Kill all references to min_items=1.

    NOTE! This changes all constr list to normal List.
    Mutates each class's ``lines`` in place and returns the same list.
    """
    for cls in classes:
        for idx, line in enumerate(cls.lines):
            cls.lines[idx] = line.replace(', min_items=1', '')
    return classes
def _chunks(l, chunk_size):
"""return chunks of chunk_size for list l
>>> list(chunks([0, 1, 2, 3, 4], 2))
[[0, 1], [2, 3], [4]]
"""
return (l[i:i + chunk_size] for i in range(0, len(l), chunk_size)) | 2d45088536810dc0f52b38a3d2c2b86414daf862 | 33,080 |
def write_params_to_args(params, args, override=False):
    """ Join params into args and return args.

    Dunder-prefixed keys are skipped; existing attributes are kept unless
    ``override`` is set.
    """
    assert isinstance(params, dict), 'params must be a dict'
    for key, value in params.items():
        if key.startswith('__'):
            continue
        if override or not hasattr(args, key):
            print('adding %s: %s' % (key, str(value)))
            setattr(args, key, value)
    return args
def find_closest_arg(xs, x, approach="both", ordered=False):
    """
    Find the index of a value in `xs` that is closest to `x`.

    `approach` can take values ``'top'``, ``'bottom'`` or ``'both'`` and denotes from which side should array elements approach `x`
    (meaning that the found array element should be ``>x``, ``<x`` or just the closest one).
    If there are no elements lying on the desired side of `x` (e.g. ``approach=='top'`` and all elements of `xs` are less than `x`), the function returns ``None``.
    if ``ordered==True``, then `xs` is assumed to be in ascending or descending order, and binary search is implemented (works only for 1D arrays).
    if there are recurring elements, return any of them.

    NOTE(review): the unordered branch relies on numpy-style elementwise
    arithmetic (`xs-x`, `.any()`, `.argmin()`) — `xs` is presumably a numpy
    array or compatible; confirm with callers.
    """
    if not (approach in ["top", "bottom", "both"]):
        raise ValueError("unrecognized approaching mode: {0}".format(approach))
    # Delegate to the object's own implementation when it provides one.
    try:
        return xs.find_closest_arg(x, approach=approach, ordered=ordered)
    except AttributeError:
        pass
    if not ordered:
        # Unordered: work on the elementwise difference array.
        diff_array = xs - x
        if approach == "top":
            # Only elements >= x qualify; mask the rest with a value above the max
            # so argmin can never pick them.
            threshold = diff_array >= 0
            if threshold.any():
                diff_array = diff_array * threshold + (diff_array.max() + 1.) * (~threshold)
                return diff_array.argmin()
            else:
                return None
        elif approach == "bottom":
            # Mirror image: only elements <= x qualify.
            threshold = diff_array <= 0
            if threshold.any():
                diff_array = diff_array * threshold + (diff_array.min() - 1.) * (~threshold)
                return diff_array.argmax()
            else:
                return None
        else:
            return abs(diff_array).argmin()
    else:
        if xs.ndim != 1:
            raise ValueError("ordered method is only applicable to 1D arrays")
        if len(xs) == 0:
            return None
        lb, hb = 0, len(xs) - 1
        if xs[0] > xs[-1]:  # must be reverse ordered
            # Recurse on the reversed array and translate the index back.
            arg_rev = find_closest_arg(xs[::-1], x, approach=approach, ordered=True)
            return len(xs) - 1 - arg_rev if arg_rev is not None else arg_rev
        if xs[lb] > x:
            # x lies below the smallest element.
            if approach == "bottom":
                return None
            else:
                return lb
        if xs[hb] < x:
            # x lies above the largest element.
            if approach == "top":
                return None
            else:
                return hb
        # Binary search maintaining the invariant xs[lb] <= x <= xs[hb].
        while hb - lb > 1:
            i = (lb + hb) // 2
            el = xs[i]
            if el < x:
                lb = i
            elif el > x:
                hb = i
            else:
                return i
        if approach == "top":
            return hb
        elif approach == "bottom":
            return lb
        else:
            # Both neighbors are valid; pick the closer one.
            if abs(xs[lb] - x) < abs(xs[hb] - x):
                return lb
            else:
                return hb
import os
def resource_filename(filename):
    """Returns the absolute path to a test resource (under the module's data/ dir)."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', filename)
import hashlib
def hash_color(input_string):
    """
    Derives a hex color code from the MD5 hash of an input string.

    Accepts str (encoded as UTF-8) or bytes. FIX: the original passed a str
    straight to hashlib.md5(), which raises TypeError on Python 3 — md5
    requires a bytes-like object.

    Returns the resulting hex color code as a string (6 hex digits, no '#').
    """
    if isinstance(input_string, str):
        input_string = input_string.encode('utf-8')
    digest_chars = list(hashlib.md5(input_string).hexdigest())
    color = ""
    while len(color) < 6:
        # Take hex digits alternately from the front and the back of the digest.
        color += digest_chars.pop(0)
        color += digest_chars.pop(-1)
    return color
def build_collectd_rpms(log, host, target_cpu, packages_dir,
                        collectd_src_dir, tarball_fpath, distro_number,
                        collectd_version):
    """
    Build Collectd RPMs on a host.

    Args:
        log: project logger exposing cl_error/cl_info.
        host: remote host handle exposing sh_run() and sh_hostname.
        target_cpu: CPU architecture string (selects the RPMS subdirectory).
        packages_dir: destination directory for the built RPMs.
        collectd_src_dir: unpacked Collectd source tree on the host.
        tarball_fpath: source tarball copied into SOURCES.
        distro_number: RHEL/CentOS major version used for the %dist tag.
        collectd_version: version string passed to the spec's "rev" define.

    Returns:
        0 on success, -1 on any command failure.
    """
    # Create the rpmbuild working tree and stage the source tarball.
    # NOTE(review): "mkdir {BUILD,...}" relies on shell brace expansion —
    # confirm the remote shell is bash-compatible.
    command = ("cd %s && mkdir {BUILD,RPMS,SOURCES,SRPMS} && "
               "cp %s SOURCES" %
               (collectd_src_dir, tarball_fpath))
    retval = host.sh_run(log, command)
    if retval.cr_exit_status:
        log.cl_error("failed to run command [%s] on host [%s], "
                     "ret = [%d], stdout = [%s], stderr = [%s]",
                     command,
                     host.sh_hostname,
                     retval.cr_exit_status,
                     retval.cr_stdout,
                     retval.cr_stderr)
        return -1
    # Build the RPMs with the project's fixed plugin selection; _topdir points
    # rpmbuild at the tree created above.
    command = ('cd %s && '
               'rpmbuild -ba --with write_tsdb --with nfs --without java '
               '--without amqp --without gmond --without nut --without pinba '
               '--without ping --without varnish --without dpdkstat '
               '--without turbostat --without redis --without write_redis '
               '--without gps --without lvm --without modbus --without mysql '
               '--without ime '
               '--define "_topdir %s" '
               '--define="rev %s" '
               '--define="dist .el%s" '
               'contrib/redhat/collectd.spec' %
               (collectd_src_dir, collectd_src_dir,
                collectd_version, distro_number))
    log.cl_info("running command [%s] on host [%s]",
                command, host.sh_hostname)
    retval = host.sh_run(log, command)
    if retval.cr_exit_status:
        log.cl_error("failed to run command [%s] on host [%s], "
                     "ret = [%d], stdout = [%s], stderr = [%s]",
                     command,
                     host.sh_hostname,
                     retval.cr_exit_status,
                     retval.cr_stdout,
                     retval.cr_stderr)
        return -1
    # Move the arch-specific RPMs into the shared packages directory.
    generated_collectd_rpm_dir = ("%s/RPMS/%s" %
                                  (collectd_src_dir, target_cpu))
    command = ("mv %s/* %s" %
               (generated_collectd_rpm_dir, packages_dir))
    retval = host.sh_run(log, command)
    if retval.cr_exit_status:
        log.cl_error("failed to run command [%s] on host [%s], "
                     "ret = [%d], stdout = [%s], stderr = [%s]",
                     command,
                     host.sh_hostname,
                     retval.cr_exit_status,
                     retval.cr_stdout,
                     retval.cr_stderr)
        return -1
    return 0
import argparse
def initArgs():
    """Initialize argparse arguments and return the parsed namespace."""
    parser = argparse.ArgumentParser()
    # Boolean flags share action/default; declare them in one place.
    for flag, text in (("--calc_streak", 'Calculate streak'),
                       ("--calc_period", 'Calculate periodogram'),
                       ("--plot", 'Plot normalized spectrum')):
        parser.add_argument(flag, action="store_true", default=False, help=text)
    parser.add_argument("--nevents", type=int, default=10,
                        help='Number of events to average')
    parser.add_argument("--exp", type=str, default='cxilr6716',
                        help='Experiment')
    parser.add_argument("--run", type=int,
                        help='Run')
    parser.add_argument("--instrument", type=str, default='cxi',
                        help='Instrument')
    parser.add_argument("--pvbase", type=str, default='CXI:SC1:DIFFRACT',
                        help='pvbase')
    parser.add_argument("--alias", type=str, default='DscCsPad',
                        help='detector alias')
    return parser.parse_args()
def check_args(args):
    """Check values of parameters. Returns None, or an error message."""
    problems = []
    if not 0 <= args.min_netmask <= 32:
        problems.append("\n Option 'min_netmask' must be an integer between 0 and 32")
    if not 0.0 <= args.network_discovery_timeout <= 60.0:
        problems.append("\n Option 'network_timeout' must be between 0.0 and 60s")
    if not 1 <= args.network_discovery_threads <= 32000:
        problems.append("\n Option 'threads' must be between 1 and 32000")
    if not problems:
        return None
    return "".join(problems)
def fixedsplit(text, separator=None, maxsplit=-1):
    """Split a string and return a fixed number of parts (maxsplit + 1),
    padding with empty strings when fewer parts are found."""
    pieces = text.split(separator, maxsplit)
    shortfall = (maxsplit + 1) - len(pieces)
    if shortfall > 0:
        pieces.extend([""] * shortfall)
    return pieces
def ara2kan(ara):
    """
    Converts the specified number (0 <= ara < 100) to the Kanji number text.
    """
    assert 0 <= ara < 100
    digits = '零一二三四五六七八九十'
    tens_digit = digits[ara // 10] if ara >= 20 else ''   # leading multiplier, only from 20 up
    tens_marker = '十' if ara >= 10 else ''               # the "ten" character itself
    ones = digits[ara % 10] if ara < 10 or ara % 10 else ''  # ones digit; '零' only for 0 itself
    return tens_digit + tens_marker + ones
def get_keypress_event_trigger_key(event: str) -> int:
    """Find the key number which triggers a particular keypress event;
    return -1 when no "e.which == " comparison is present."""
    marker = "e.which == "
    idx = event.find(marker)
    if idx == -1:
        return -1
    start = idx + len(marker)
    end = event.find(")", start)
    return int(event[start:end])
def generate_color_brew(n):
    """
    Generate an evenly distributed range of hue values in the HSV colour scale.

    Arguments:
        n -- an int with the number of hue values you want to obtain

    Returns:
        a python list with those hue values, as fractions in [0, 1)
    """
    hue_step = 360 / float(n)
    return [index * hue_step / 360.0 for index in range(n)]
import torch
def downsample(vox_in, n, use_max=True):
    """
    Downsample a 3-d tensor n times.

    Inputs:
      - vox_in (Tensor): HxWxD tensor
      - n (int): number of times to downsample each dimension
      - use_max (bool): use maximum value when downsampling. If set to False
        the mean value is used.

    Output:
      - vox_out (Tensor): (H/n)x(W/n)x(D/n) tensor
    """
    reduce_fn = torch.max if use_max else torch.mean
    out_y = vox_in.size(0) // n
    out_x = vox_in.size(1) // n
    out_z = vox_in.size(2) // n
    vox_out = torch.zeros((out_y, out_x, out_z))
    for y in range(out_y):
        for x in range(out_x):
            for z in range(out_z):
                # Reduce each n*n*n block to a single value.
                block = vox_in[y * n:(y + 1) * n,
                               x * n:(x + 1) * n,
                               z * n:(z + 1) * n]
                vox_out[y, x, z] = reduce_fn(block)
    return vox_out
from typing import List
import requests
from bs4 import BeautifulSoup
def _get_db_results_by_query(
    query: str, db: str, sort: str, retmax: int = 100
) -> List[str]:
    """Searches an NCBI database by a query and returns resulting document ids.

    Args:
        query: search term; spaces are converted to '+' for the URL.
        db: target database, either "pubmed" or "pmc".
        sort: E-utilities sort mode (e.g. "relevance").
        retmax: maximum number of ids to request; None uses the server default.

    Raises:
        ValueError: if ``db`` is not "pubmed" or "pmc".

    NOTE: When searching pubmed using sort=relevance, results differ slightly
    from results obtained using the web search interface sorted by "Best
    Match". The returned results from the programmatic search still appear to
    be sorted by some form of relevance, just not the same as the one used in
    the GUI...
    """
    if db not in ["pubmed", "pmc"]:
        raise ValueError
    query = query.replace(" ", "+")
    if retmax is not None:
        response = requests.get(
            f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db={db}&"
            f"term={query}&sort={sort}&retmax={retmax}"
        )
    else:
        response = requests.get(
            f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db={db}&"
            f"term={query}&sort={sort}"
        )
    # Parse the XML reply; each <id> element holds one document id.
    soup = BeautifulSoup(response.content, features="lxml")
    return [id.text for id in soup.find_all("id")]
def add_others_to_robots(robots, others_list):
    """Add others to robots set (in place) and return the set."""
    robots.update(others_list)
    return robots
import stat
def get_world_bits():
    """
    Helper function for get_permissions() and change_permissions().
    Same as get_owner_bits() except for "other" (world) permissions.

    Returns (world_mask, world_bits) where world_bits maps both compact
    ('r', 'rw', ...) and ls-style ('r--', 'rw-', ...) keys to stat flags.
    """
    r, w, x = stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH
    world_bits = {
        'r': r, 'w': w, 'x': x,
        'rw': r | w, 'rx': r | x, 'wx': w | x,
        'rwx': stat.S_IRWXO,
        '---': 0,
        'r--': r, '-w-': w, '--x': x,
        'rw-': r | w, 'r-x': r | x, '-wx': w | x,
    }
    return stat.S_IRWXO, world_bits
from typing import List
import shlex
import difflib
def diff_commands(cmd1: str, cmd2: str) -> str:
    """Return a unified diff of two shell commands."""
    paired_flags = ("-o", "-iquote", "-isystem", "-MF")

    def diffable(cmd: str) -> List[str]:
        # Glue each paired flag to its argument so the diff treats them as one token.
        tokens = iter(shlex.split(cmd))
        pretty = []
        for tok in tokens:
            if tok in paired_flags:
                pretty.append(tok + " " + next(tokens))
            else:
                pretty.append(tok)
        return pretty

    diff = difflib.unified_diff(diffable(cmd1), diffable(cmd2), n=999)
    # Drop the two "---"/"+++" file-header lines.
    return "\n".join(tuple(diff)[2:])
def calculateRPKM(countsfile, avg):
    """Calculates the RPKM values for a sample.

    RPKM = read_counts/(cluster_length * sum(read_counts)) * 10^9

    parameters
    ----------
    countsfile
        file containing the counts: tab-separated lines of
        (cluster, length, mapped reads, unmapped reads); lines containing
        '*' are skipped
    avg
        when the string "True", background ("BG") clusters additionally get
        their read count divided by the NR=<n> suffix encoded in the
        cluster name

    returns
    ----------
    (RPKM, RPKM_avg) = dictionaries containing RPKM counts per cluster
    """
    sum_reads = 0
    read_counts = {}
    read_counts_avg = {}
    cluster_lengths = {}
    with open(countsfile, "r") as f:
        for line in f:
            if "*" not in line:
                line = line.strip()
                cluster, length, nreads, nnoreads = line.split("\t")
                nreads = float(nreads)
                if avg == "True" and "BG" in cluster:
                    read_counts[cluster] = nreads
                    # Cluster names end with "--...NR=<n>": average over n members.
                    NR = int(cluster.split("--")[-1].split("=")[-1])
                    nreads = nreads / NR
                    read_counts_avg[cluster] = nreads
                else:
                    read_counts[cluster] = nreads
                    read_counts_avg[cluster] = nreads
                cluster_lengths[cluster] = float(length)
                # NOTE: after the BG branch, sum_reads accumulates the *averaged*
                # count for BG clusters.
                sum_reads += nreads
    RPKM = {}
    RPKM_avg = {}
    for key in read_counts:
        try:
            RPKM[key] = read_counts[key] / (sum_reads * cluster_lengths[key]) * 1000000000
            RPKM_avg[key] = read_counts_avg[key] / (sum_reads * cluster_lengths[key]) * 1000000000
        except(ZeroDivisionError):
            # Zero total reads or zero-length cluster: define RPKM as 0.
            RPKM[key] = 0
            RPKM_avg[key] = 0
    return (RPKM, RPKM_avg)
import os
def exec_butler_tmp_dir(exec_butler_dir, job_name, tmp_dirname):
    """Construct the job-specific path for the non-shared copy of the
    execution butler repo."""
    parent = os.path.dirname(exec_butler_dir)
    return os.path.join(parent, tmp_dirname, job_name)
from sys import modules
import inspect
def get_rewards():
    """Rewards defined within the module.

    Uses module introspection to collect the functions defined in this
    module (imported functions are excluded).

    Returns:
    -------
    * names: tuple(<str>)
        Names of the functions that implement reward computation
    * objects: tuple(<function>)
        Function objects matching `names`, in the same order
    """
    current_module = modules[__name__]
    members = inspect.getmembers(current_module, inspect.isfunction)
    # Keep only functions actually defined in this module.
    local_funcs = [(name, fn) for name, fn in members
                   if inspect.getmodule(fn) is current_module]
    if not local_funcs:
        return (), ()
    names, funcs = zip(*local_funcs)
    return tuple(names), tuple(funcs)
def ClampValue(input, min, max):
    """
    Clamp Value to min/max.

    :param input: Input
    :param min: Minimum Value
    :param max: Maximum Value
    :return: Clamped Output

    NOTE: parameter names shadow builtins; kept for interface compatibility.
    """
    if input < min:
        return min
    if input > max:
        return max
    return input
from typing import Dict
from typing import List
def _create_default_qubit_map(n_qubits: int) -> Dict[int, List[int]]:
"""Creates a qubit map that maps each qubit to itself."""
qubit_map = {}
for i in range(n_qubits):
qubit_map[i] = [i, 1]
return qubit_map | a12e9d2ed183748b3e92e24385de648a90a51bcb | 33,113 |
def get_all_relation_fields(model):
    """Return every relation field declared on the (Django) model."""
    fields = model._meta.get_fields()
    return [field for field in fields if field.is_relation]
import os
import zipfile
def storage_to_zip(path, outfile=None):
    """Create a ZIP archive from a data storage hierarchy.

    The contents of the data storage hierarchy are all placed in the archive,
    with the top-level folder in the archive being the data storage root folder
    itself. That is, all paths within the ZIP file are relative to the dataset
    root folder.

    FIX: the original wrote every file with ``arcname=os.path.join(datadir, f)``,
    flattening subdirectories (losing structure and allowing name collisions)
    despite the stated "relative path from data root" intent. Files now keep
    their path relative to the dataset root.

    Parameters
    ----------
    path : str
        Path to the root of the dataset.
    outfile : str, optional
        Name of the ZIP file to create. If not specified, the file is created
        in the same directory as the data root with the same name as the
        dataset root directory (with ".zip" added).

    Returns
    -------
    outfile : str
        The name of the ZIP file created.
    """
    datapath, datadir = os.path.split(path)
    if outfile is None:
        # absolute path to parent of data root + dataset name + .zip
        outfile = os.path.join(datapath, datadir + '.zip')
    with zipfile.ZipFile(outfile, 'w') as zipf:
        for root, dirs, files in os.walk(path):
            for f in files:
                full = os.path.join(root, f)
                # write as *relative* path from data root, preserving subdirs
                rel = os.path.relpath(full, path)
                zipf.write(full, arcname=os.path.join(datadir, rel))
    return outfile
import re
def tokenize(sentence):
    """
    Simple English tokenizer: split on runs of non-word characters, keeping
    non-whitespace separators (punctuation) as tokens.

    FIX: the original pattern '(\\W+)?' could match the empty string; since
    Python 3.7 re.split() splits on empty matches, which broke words into
    single characters and yielded None group entries (crashing on .strip()).
    The unnecessary optional '?' is removed and the pattern made raw.

    :param sentence: input string
    :return: list of non-empty, stripped tokens
    """
    return [tok.strip() for tok in re.split(r'(\W+)', sentence) if tok.strip()]
import torch
def get_available_devices():
    """
    Get all the available devices for training neural networks.

    :return: list of available devices — always 'cpu'; plus 'cuda' and
        per-index 'cuda:N' entries when CUDA is available
    """
    devices = ['cpu']
    if torch.cuda.is_available():
        devices += ['cuda'] + [f'cuda:{i}' for i in range(torch.cuda.device_count())]
    return devices
def get_lettercase_permutation(word):
    """Return every letter-case variant of `word`.

    O(2^n) worst case time and space complexities (n = number of letters).
    """
    variants = ['']
    for ch in word:
        if ch.isalpha():
            # Fork each existing prefix into a lower- and an upper-case branch.
            variants = [prefix + case for prefix in variants
                        for case in (ch.lower(), ch.upper())]
        else:
            variants = [prefix + ch for prefix in variants]
    return variants
def check_proposition():
    """Create a test fixture to compare propositions."""
    def _compare(actual, test):
        """Assert that *actual* matches the expected proposition *test*."""
        assert actual.keys() == test.keys()
        assert actual['id'].startswith('proposition:')
        assert actual['type'] == test['type']
        # only therapeutic response propositions carry an 'object' field
        if test['type'] == 'therapeutic_response_proposition':
            assert actual['object'] == test['object']
        else:
            assert 'object' not in actual.keys()
        for field in ('predicate', 'subject', 'object_qualifier'):
            assert actual[field] == test[field]
    return _compare
from pathlib import Path
import tempfile
import atexit
import shutil
def create_temp_dir(remove_at_exit: bool = True) -> Path:
    """Create a temporary directory.

    Args:
        remove_at_exit (bool, optional): If True, delete directory when
            application exits. Defaults to True.

    Returns:
        Path: The path to the created directory.
    """
    new_dir = Path(tempfile.mkdtemp())
    if remove_at_exit:
        # errors ignored so a manually-deleted dir does not fail at exit
        atexit.register(shutil.rmtree, new_dir, ignore_errors=True)
    return new_dir
import math
def projection(xs, ys, zs):
    """Project 3-D coordinates onto a 2-D plane.

    The first point's coordinates act as the normal vector that defines
    two successive in-plane rotations; the rotated x/y lists are returned.
    """
    def rotate(us, vs):
        """Rotate 2-D coordinates so the first point lands on the u-axis."""
        nu, nv = us[0], vs[0]  # normal-vector components
        length = math.sqrt(math.pow(nu, 2) + math.pow(nv, 2))
        cosa, sina = nu / length, -nv / length  # rotation angle terms
        rotated_u, rotated_v = [], []
        for u, v in zip(us, vs):
            rotated_u.append(u * cosa - v * sina)
            rotated_v.append(u * sina + v * cosa)
        return rotated_u, rotated_v
    zs, xs = rotate(zs, xs)
    zs, ys = rotate(zs, ys)
    return xs, ys
import requests
def URL_is_reachable(url, expected_response=200):
    """== Verifies URL Passed in Returns a Given Response Code ==
    - Pass in URL, and optionally an expected response code (if something other than ``200`` is expected).
    - Returns either ``True`` or ``False``.
    == Calling ==
    | *Args* | ``url`` (str) | Fully-qualified URL (including protocol). |
    | *Args* | ``expected_response`` (int) | _Optional_ return code if other than ``200``. |
    | *Returns* | ``boolean`` | Either True or False. |
    | *Raises* | exception | Returns ``False`` on exception. |
    === Example in Robot ===
    | ``${is_reachable} =  URL is Reachable  https://www.google.com``
    """
    try:
        # compare directly instead of the verbose if/else-return-literal form
        return requests.get(url).status_code == expected_response
    except requests.RequestException:
        # catch only network-level failures (DNS, timeout, connection);
        # the old bare `except:` also swallowed KeyboardInterrupt/SystemExit
        return False
import types
def listToStrValue(value):
    """
    Flattens list to a string value

    >>> listToStrValue([1,2,3])
    '1, 2, 3'
    """
    if isinstance(value, (set, tuple, types.GeneratorType)):
        value = list(value)
    if isinstance(value, list):
        # Slice off exactly one bracket from each end.  The previous
        # lstrip('[')/rstrip(']') stripped *runs* of brackets, mangling
        # nested lists such as [[1], [2]] into '1], [2'.
        retVal = str(value)[1:-1]
    else:
        retVal = value
    return retVal
def is_isogram(text):
    """Check whether a string is an isogram (no repeated letters).

    :param text: string to test (case-insensitive)
    :return: True if the string is purely alphabetic with no repeated
        letters; False otherwise (including empty or non-alpha input)
    """
    text = text.lower()
    # set(text) collapses duplicates, so the lengths match only when every
    # letter is unique; isalpha() is False for '' and non-letter characters,
    # which preserves the original nested if/else behavior.
    return text.isalpha() and len(set(text)) == len(text)
import binascii
def parse_object_id(value: str) -> bytes:
    """
    Parse an object ID as a 40-byte hexadecimal string, and return a 20-byte
    binary value.
    """
    try:
        decoded = binascii.unhexlify(value)
    except ValueError:  # binascii.Error is a ValueError subclass
        decoded = None
    if decoded is None or len(decoded) != 20:
        raise ValueError("blob ID must be a 40-byte hexadecimal value")
    return decoded
def set_selectors_and_props():
    """Set specific visual attributes for RB Style table.

    Each selector targets an HTML element and carries its CSS properties.
    """
    caption_props = [
        ('color', 'black'),
        ('background-color', 'white'),
        ('font-size', '16px'),
        ('text-align', 'center'),
        ('font-weight', 'bold'),
    ]
    header_props = [('color', 'black')]
    return [
        dict(selector='caption', props=caption_props),
        dict(selector='th', props=header_props),
    ]
def get_pos(tokenlist, pos):
    """Return (word, index) pairs whose POS tag starts with *pos*.

    Parameters
    ----------
    tokenlist : iterable of (word, pos_tag) pairs
    pos : str
        Part-of-speech prefix to match (e.g. 'NN', 'VB').

    Returns
    -------
    list of (word, original_index) tuples for the matching tokens.
    """
    matching_pos_words = []
    # enumerate() replaces the hand-rolled counter; catching only
    # AttributeError (a tag without .startswith) instead of a bare except
    # avoids hiding unrelated bugs while keeping the diagnostic print.
    for i, (word, pos_tag) in enumerate(tokenlist):
        try:
            if pos_tag.startswith(pos):
                matching_pos_words.append((word, i))
        except AttributeError:
            print('error', word, pos_tag)
    return matching_pos_words
def reverse(xs):
    """Return *xs* reversed, preserving its type.

    Works for lists, tuples and other sequences whose constructor accepts
    an iterable.  Strings (and bytes-likes) are sliced instead, because
    e.g. ``str(reversed(s))`` yields the iterator's repr rather than the
    reversed text.
    """
    if isinstance(xs, (str, bytes, bytearray)):
        return xs[::-1]
    return type(xs)(reversed(xs))
import os
from datetime import datetime
def file_creation_date(file_path):
    """Gets the datetime a file was created - only works on Windows OS

    Args:
        file_path (string): Absolute path of the file

    Raises:
        FileNotFoundError: If *file_path* does not point to an existing file.

    Returns:
        datetime: create datetime of file
    """
    # explicit check replaces the old `assert`, which is silently stripped
    # when Python runs with -O
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"No such file: {file_path!r}")
    unix_timestamp = os.path.getctime(file_path)
    return datetime.fromtimestamp(unix_timestamp)
import subprocess
def get_kernel_name_for_dnndk(model):
    """Parse the kernel name out of the given elf file for DNNDK runtime.

    This method will take elf file as input, read its information, and
    return the model name.

    Parameters
    ----------
    model : str
        The name of the ML model binary. Can be absolute or relative path.
    """
    # NOTE(review): shell=True with an interpolated path is injection-prone;
    # consider shlex.quote(model) if the path can come from untrusted input.
    cmd = 'readelf {} -s --wide | grep ' \
          '"0000000000000000 0 FILE LOCAL DEFAULT ABS "'.format(model)
    line = subprocess.check_output(cmd, shell=True).decode()
    kernel = line.split()[-1]
    # str.lstrip/rstrip strip *character sets*, not fixed affixes, so the
    # old lstrip("dpu_")/rstrip(".s") mangled names that merely begin or
    # end with those characters; strip the exact prefix/suffix instead.
    if kernel.startswith('dpu_'):
        kernel = kernel[len('dpu_'):]
    if kernel.endswith('.s'):
        kernel = kernel[:-len('.s')]
    return kernel[:-len('_0')] if kernel.endswith('_0') else kernel
import os
def is_special_dir(dirname):
    """Determine if a directory name is a special directory ('.' or '..')."""
    return dirname in (os.curdir, os.pardir)
def is_int(n):
    """Returns True if 'n' is an integer.

    Args:
        n (anything): The variable to check.

    Returns:
        bool: True if int() accepts it (so floats also qualify).
    """
    try:
        int(n)
    except (ValueError, TypeError):
        return False
    return True
def get_name(n,digitStructBbox,digitStructName,digit_structure):
    """
    Gets the name of an image.

    Dereferences the n-th entry of ``digitStructName`` inside
    ``digit_structure`` and decodes each stored character code
    (``chr(c[0])``) into the filename string.

    NOTE(review): ``digitStructBbox`` is accepted but unused here.
    NOTE(review): ``.value`` looks like the deprecated h5py dataset
    accessor -- confirm the h5py version in use.
    """
    # each c is presumably a one-element array holding a character code
    # (SVHN digitStruct layout) -- TODO confirm the dataset schema
    return ''.join([chr(c[0]) for c in digit_structure[digitStructName[n][0]].value])
import re
def tokenize(message):
    """Convert a message into the set of distinct lowercase words."""
    return set(re.findall("[a-z0-9]+", message.lower()))
def _full_best_response_policy(br_infoset_dict):
"""Turns a dictionary of best response action selections into a full policy.
Args:
br_infoset_dict: A dictionary mapping information state to a best response
action.
Returns:
A function `state` -> list of (action, prob)
"""
def wrap(state):
infostate_key = state.information_state_string(state.current_player())
br_action = br_infoset_dict[infostate_key]
ap_list = []
for action in state.legal_actions():
ap_list.append((action, 1.0 if action == br_action else 0.0))
return ap_list
return wrap | d1e2f5869fb3607aaffe05ff3446568f4665bbda | 33,139 |
def GetPortsForHost(ip):
    """Returns ports in range 20000-30000

    The ports returned are deterministic by ip; expect the same ports
    per ip and potentially different ports for different ips.
    Only the last three bytes of the ip influence the result.
    """
    parts = [int(i) for i in ip.split('.')]
    offset = 20000 + parts[-1] * 7
    interval = (13 + parts[-2] + parts[-3]) % 42
    # The modulo can yield 0 (e.g. when parts[-2] + parts[-3] == 29), and
    # range() rejects a zero step with ValueError -- fall back to step 1.
    if interval == 0:
        interval = 1
    return range(offset, 30000, interval)[:10]
from typing import Optional
def _validate_and_convert_float(value: float) -> Optional[float]:
"""
Validate that a value is a float, or a number that can be converted to a float.
If the value is valid, this method will return the value as float. Otherwise, this
method returns None
:param value: Value to validate and convert
:return: Value as float if value is valid, None otherwise
"""
valid = True
if not isinstance(value, (float, int)):
valid = False
if isinstance(value, bool):
valid = False
if valid:
return float(value)
return None | 14911cf27623e6ffebc4d93b36761884beb73cfa | 33,141 |
import torch
def IntTensor(values, device='cuda:0'):
    """
    Returns a Tensor of type torch.int containing the given values

    Parameters
    ----------
    values : list
        the values of the tensor
    device : str
        the device to store the tensor to

    Returns
    -------
    Tensor
        an integer precision tensor
    """
    int_dtype = torch.int
    return torch.tensor(values, device=device, dtype=int_dtype)
def strip(string, option="B", char=" "):
    """returns string with leading or trailing characters or both
    removed, based on the option you specify. The following are valid
    options. (Only the capitalized letter is needed; all characters
    following it are ignored.)
    Both
        removes both leading and trailing characters from string.
        This is the default.
    Leading
        removes leading characters from string.
    Trailing
        removes trailing characters from string.
    The third argument, char, specifies the character to be removed,
    and the default is a blank. With rexx, if you specify char, it
    must be exactly one character long. With this function, all
    characters specified in the char string are considered for
    removal from string.
    Here are some examples:
    strip('  ab c  ')           ->  'ab c'
    strip('  ab c  ','L')       ->  'ab c  '
    strip('  ab c  ','t')       ->  '  ab c'
    strip('12.7000',char='0')   ->  '12.7'
    strip('0012.700',char='0')  ->  '12.7'
    """
    # option[:1] tolerates an empty option string (option[0] raised
    # IndexError instead of the documented ValueError); upper() replaces
    # the duplicated upper/lower-case comparisons.
    key = option[:1].upper()
    if key == "B":
        return string.strip(char)
    elif key == "L":
        return string.lstrip(char)
    elif key == "T":
        return string.rstrip(char)
    raise ValueError("option=" + repr(option) + " is not valid")
import pathlib
def get_inputs(
    sub,
    data_dir,
    analysis_dir,
    overwrite,
    interactive,
    pyprep_rng,
    ica_rng,
    low_cutoff,
    high_cutoff,
    downsample_freq,
    t_min_max_epochs,
    recompute_faster,
    rdm_size,
    do_plot,
):
    """Parse inputs in case script is run from command line.

    See Also
    --------
    parse_overwrite
    """
    # directory arguments arrive as strings (or falsy) -- normalize to
    # pathlib.Path, or None when not provided
    return {
        "sub": sub,
        "data_dir": pathlib.Path(data_dir) if data_dir else None,
        "analysis_dir": pathlib.Path(analysis_dir) if analysis_dir else None,
        "overwrite": overwrite,
        "interactive": interactive,
        "pyprep_rng": pyprep_rng,
        "ica_rng": ica_rng,
        "low_cutoff": low_cutoff,
        "high_cutoff": high_cutoff,
        "downsample_freq": downsample_freq,
        "t_min_max_epochs": t_min_max_epochs,
        "recompute_faster": recompute_faster,
        "rdm_size": rdm_size,
        "do_plot": do_plot,
    }
def prod2( *args ):
    """
    >>> prod2( 1, 2, 3, 4 )
    24
    >>> prod2(*range(1, 10))
    362880
    """
    result = 1
    for factor in args:
        result = result * factor
    return result
from typing import Callable
import math
def _geometric_binary_search(
func: Callable[[float], float],
target: float,
iterations: int = 24,
reverse: bool = False
) -> float:
"""Perform a binary search using geometric centers.
Do a binary search to find the value ``n`` that makes the function ``func``
return ``target`` when ``n`` is used as the argument. By default, it is
assumed that smaller values of ``n`` will cause ``func`` to produce smaller
outputs. If smaller values of ``n`` produce larger outputs, set ``reverse``
to True.
This implementation of binary search uses the geometric mean instead of the
arithmetic mean to determine the center of the search space. This is
because the values that are being searched are weighted towards zero.
:param func: A Callable which accepts a float and returns a float. This
must be a one-to-one function.
:param target: A float representing the target output which we are trying
to make func produce.
:param iterations: An integer representing the number of iterations to run
the binary search. The default of 24 should be sufficient for most
applications.
:param reverse: A bool representing the relationship between the input and
output values of func.
:return: A float representing value n which makes the function func produce
target when called as its argument.
"""
lower_bound = 2 ** -iterations
upper_bound = 2 ** iterations
assert lower_bound <= upper_bound
for _ in range(iterations):
guess = math.sqrt(lower_bound * upper_bound)
answer = func(guess)
if (not reverse and answer > target) or (reverse and answer < target):
upper_bound = guess
else:
lower_bound = guess
return math.sqrt(lower_bound * upper_bound) | fa61dd9f1129474d43915f47344005a7eda0aa96 | 33,147 |
def convert_index(idx, m, n):
    """
    Convert 1D index into 2D

    :param idx: 1D index
    :type idx: int
    :param m: number of rows (unused; kept for interface compatibility)
    :type m: int
    :param n: number of columns
    :type n: int
    :return: 2D index as (row, column)
    :rtype: tuple[int]
    """
    return divmod(idx, n)
import yaml
def configuration(configuration_path):
    """Load our configuration from a YAML file and return the parsed data."""
    with open(configuration_path) as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
def moving_average(x, n, type='simple'):
    """ compute an n period moving average.

    type is 'simple' | 'exponential'
    """
    if type == 'simple':
        return x.rolling(window=n, center=False).mean()
    # any other type value falls through to the exponential average
    return x.ewm(span=n).mean()
def divide(value1: int, value2: int) -> float:
    """
    Used to divide the number of cards to check that nothing was lost.
    Handles division by 0 by returning 0, which is the reciprocal.
    """
    if value1 == value2:  # also covers 0/0
        return 1.0
    if value2 == 0:
        return 0.
    return value1 / float(value2)
import os
def get_user_config_dir():
    """Return the path to the user configuration directory.

    Honors $XDG_CONFIG_HOME, falling back to ~/.config.
    """
    fallback = os.path.expanduser('~/.config')
    return os.environ.get('XDG_CONFIG_HOME', fallback)
def typed_property(name, expected_type):
    """Common function used to creating arguments with forced type

    :param name: name of attribute
    :param expected_type: expected type of attribute value
    :return: property attribute
    """
    storage_name = '_' + name

    def getter(self):
        return getattr(self, storage_name)

    def setter(self, value):
        if not isinstance(value, expected_type):
            raise TypeError('Expected {}'.format(expected_type))
        setattr(self, storage_name, value)

    return property(getter, setter)
def dimension(value, arg):
    """
    Dimension integers

    If value, append arg, otherwise output nothing
    """
    return "{} {}".format(value, arg) if value else ""
import os
def read_version():
    """Reads the application build version"""
    version_filename = "VERSION"
    # guard clause: missing version file means "no version"
    if not os.path.exists(version_filename):
        return ""
    with open(version_filename, "r") as file:
        return file.read().strip()
def extract_sub_name(subreddit: str) -> str:
    """Extract the name of the sub without prefix."""
    # check the longer prefix first so "/r/x" is not left as "r/x"
    for prefix in ("/r/", "r/"):
        if subreddit.startswith(prefix):
            return subreddit[len(prefix):]
    return subreddit
def calculate_accuracy_overall(actual_labels, predicted_labels):
    """
    Calculate accuracy percentage for all labels (classes).

    Returns 0.0 for empty input instead of raising ZeroDivisionError.
    """
    if not actual_labels:
        return 0.0
    # zip pairs the sequences directly, replacing index arithmetic
    correct = sum(1 for a, p in zip(actual_labels, predicted_labels) if a == p)
    return correct / len(actual_labels) * 100.0
def compare_values(value1, value2, relative, absolute):
    """
    Compare two values with respect to a relative and an absolute deviation.
    :param value1: First value
    :param value2: Second value
    :param relative: Relative deviation (0..1)
    :param absolute: Absolute deviation (e.g. 1, -5.7, 100)
    :return: True if value1 is within valid deviation of value2, False if not
    """
    smaller, larger = sorted((value1, value2))
    # shrink the larger value by the relative fraction, then by the absolute
    # allowance; the pair is "close" if that undercuts the smaller value
    return larger * (1 - relative) - absolute < smaller
import os
import sys
def naming(local_data_csv):
    """Make sure file exists, then split it into directory and filename."""
    # guard clause: bail out of the program if the path is not a file
    if not os.path.isfile(local_data_csv):
        sys.exit(
            "Please verify file path and name. Include extension .csv"
        )
    return os.path.split(local_data_csv)
def iterate_squarings(x, powers_to_calculate):
    """
    Repeatedly square x.

    The values in "powers_to_calculate" (any iterable of non-negative
    ints) are returned as a dict mapping power -> x squared that many
    times.
    """
    results = {}
    reached = 0
    for target in sorted(powers_to_calculate):
        # square as many additional times as needed to reach this power
        for _ in range(target - reached):
            x = pow(x, 2)
        results[target] = x
        reached = target
    return results
def _remove_user_ip(module, oneandone_conn, user_id, user_ip):
"""
"""
try:
user = oneandone_conn.remove_user_ip(
user_id=user_id,
ip=user_ip)
return user
except Exception as e:
module.fail_json(msg=str(e)) | 2e1902d9c368408cc3ea1cffcf69037170b99cd5 | 33,167 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.