content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import click
def d_reduce_options(f):
    """Attach the shared dimensionality-reduction CLI options to *f*.

    Adds ``--axes``, ``--dimension/-d`` and ``--scale/--no-scale`` click
    options, then returns the decorated command function.
    """
    decorators = [
        click.option('--axes', nargs=2, type=click.Tuple([int, int]),
                     help='Plot the projection along which projection axes.',
                     default=[0, 1]),
        click.option('--dimension', '-d',
                     help='Number of the dimensions to keep in the output XYZ file.',
                     default=10),
        click.option('--scale/--no-scale',
                     help='Standard scaling of the coordinates.',
                     default=True),
    ]
    # Apply in the same order the original chained assignments used.
    for decorate in decorators:
        f = decorate(f)
    return f
def normalize(D, value=1):
    """Normalize the coefficients of ``D`` to a maximum magnitude.

    Parameters
    ----------
    D : dict or subclass of dict.
        Maps keys to numeric coefficient values.
    value : float (optional, defaults to 1).
        Every coefficient value will be scaled such that the coefficient
        with the maximum magnitude becomes ``+/- value``.

    Return
    ------
    res : same as type(D).
        ``D`` but with coefficients that are normalized to be within
        ``+/- value``.  An empty ``D`` yields an empty result, and an
        all-zero ``D`` is returned as an unscaled copy (normalization is
        undefined in that case).

    Examples
    --------
    >>> from qubovert.utils import normalize
    >>> d = {(0, 1): 1, (1, 2, 'x'): 4}
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): 1.0}
    >>> d = {(0, 1): 1, (1, 2, 'x'): -4}
    >>> print(normalize(d))
    {(0, 1): 0.25, (1, 2, 'x'): -1.0}
    """
    res = type(D)()
    if not D:
        # Previously max() raised ValueError on an empty mapping.
        return res
    largest = max(abs(v) for v in D.values())
    if not largest:
        # All coefficients are zero: scaling would divide by zero, so
        # return an unmodified copy instead.
        res.update(D)
        return res
    mult = value / largest
    for k, v in D.items():
        res[k] = mult * v
    return res
def precond_is_classifier(iterable=None, program=None):
    """Ensure that a program can do classification.

    Parameters
    ----------
    iterable : unused; kept for signature compatibility with the other
        precondition checks that share this calling convention.
    program : object whose class name identifies the estimator.

    Returns
    -------
    bool : True when *program* is one of the known classifier types.
    """
    # Membership test replaces the redundant ``return True ... else False``.
    return program.__class__.__name__ in ('SGDClassifier', 'LogisticRegression')
def comment_lines_ocaml(text, start='(* ', end=' *)'):
    """
    Build an OCaml comment line from input text.

    Parameters
    ----------
    text : str
        Text to comment out.
    start : str
        Character to open the multi-line comment.
    end : str
        Character to close the multi-line comment.
    """
    # The original ``('\n').join(text.split('\n'))`` was an identity
    # transformation, so the text is wrapped directly.
    return start + text + end
def test_cached_properties_are_cached(
    PropertyRegistry,  # noqa: N803
    CachedPropertyRegistry,
    PropertyParamRegistry,
    CachedPropertyParamRegistry,
):
    """Cached properties are truly cached.

    The four arguments are pytest fixtures (presumably registry classes
    differing in property kind and parameter passing — confirm against the
    fixture definitions).  The test checks that all four expose the
    registered member with equal values, but only the "cached" variants
    return the *same object* on repeated access.
    """
    # Register the same function as a member of all four registries.
    @PropertyRegistry.registry()
    @CachedPropertyRegistry.registry()
    @PropertyParamRegistry.registry()
    @CachedPropertyParamRegistry.registry()
    def member(pos=None, kwparam=None):
        return [pos, kwparam]  # Lists are different each call
    property_host = PropertyRegistry()
    cached_property_host = CachedPropertyRegistry()
    property_param_host = PropertyParamRegistry()
    cached_property_param_host = CachedPropertyParamRegistry()
    # The properties and cached properties work: plain registries pass the
    # host positionally, "Param" registries pass it as the keyword argument.
    assert property_host.registry.member == [property_host, None]
    assert cached_property_host.registry.member == [cached_property_host, None]
    assert property_param_host.registry.member == [None, property_param_host]
    assert cached_property_param_host.registry.member == [
        None,
        cached_property_param_host,
    ]
    # The properties and cached properties return equal values on each access
    assert property_host.registry.member == property_host.registry.member
    assert cached_property_host.registry.member == cached_property_host.registry.member
    assert property_param_host.registry.member == property_param_host.registry.member
    assert (
        cached_property_param_host.registry.member
        == cached_property_param_host.registry.member
    )
    # Only the cached properties return the same value (identity!) every time;
    # uncached ones build a fresh list per access.
    assert property_host.registry.member is not property_host.registry.member
    assert cached_property_host.registry.member is cached_property_host.registry.member
    assert (
        property_param_host.registry.member is not property_param_host.registry.member
    )
    assert (
        cached_property_param_host.registry.member
        is cached_property_param_host.registry.member
    )
def calculate_delta_time_series(times, valid_dt):
    """
    calculate_delta_time_series

    This function calculates the differences between all the elements of a
    timeseries and compares the differences with a valid time difference.

    True is returned if all the differences are valid; i.e. equal to the
    valid time difference argument.
    False is returned if any of the differences fail to match the valid
    time difference argument.

    :param times: List of times
    :param valid_dt: Valid time difference; either a single scalar value or
        a list of allowed values
    :return: tuple (boolean [True for success], message string)
    """
    return_msg = ""
    # The docstring has always promised scalar support, but ``in`` on a
    # scalar raises TypeError -- wrap non-iterables into a one-element list.
    try:
        iter(valid_dt)
        allowed = valid_dt
    except TypeError:
        allowed = [valid_dt]
    for i in range(len(times) - 1):
        t_diff = times[i + 1] - times[i]
        if t_diff not in allowed:
            return_msg = "Time difference {} is irregular or not in allowed values {}".format(t_diff, valid_dt)
            return False, return_msg
    return True, return_msg
from typing import Dict
from typing import Callable
def dict_subset(adict: Dict, predicate: Callable):
    """Build the sub-dict of ``adict`` whose items satisfy ``predicate``.

    The filter is called as ``predicate(key, val) -> bool``.
    """
    return dict(item for item in adict.items() if predicate(*item))
from typing import Union
import imaplib
def delete_mails_by_recipient(
    mailbox: Union[imaplib.IMAP4_SSL, imaplib.IMAP4],
    recipient_mail: str,
    expunge=False,
    sanity_check=100,
) -> int:
    """
    Delete all mail to recipient_mail in IMAP mailbox.

    Searches the Inbox for messages whose TO or CC header matches
    ``recipient_mail``, flags them as deleted, and (optionally) expunges.

    :param mailbox: an already-authenticated IMAP connection
    :param recipient_mail: address to match in TO/CC
    :param expunge: when True, permanently remove the flagged messages and
        close the mailbox
    :param sanity_check: hard upper bound on how many messages may be
        deleted in one call (guards against an overly broad match)
    :return: number of messages that were flagged for deletion
    """
    assert recipient_mail
    # NOTE(review): select/search status codes are ignored here; only the
    # store/expunge statuses are asserted below.
    status, count = mailbox.select("Inbox")
    # find all messages to recipient mail (TO or CC header)
    status, [msg_ids] = mailbox.search(
        None, '(OR TO "{recipient}" CC "{recipient}")'.format(recipient=recipient_mail)
    )
    # search returns one space-separated bytes blob of message ids
    msg_ids = [x.strip() for x in msg_ids.decode("utf-8").split(" ") if x.strip()]
    message_count = len(msg_ids)
    if message_count == 0:
        return message_count
    # Sanity check amount of messages that will be deleted
    assert message_count < sanity_check
    # Mark as deleted
    status, response = mailbox.store(",".join(msg_ids), "+FLAGS", "(\\Deleted)")
    assert status == "OK"
    if expunge:
        # Expunge to really delete; note the mailbox is only closed on this
        # branch -- without expunge the Inbox stays selected.
        status, response = mailbox.expunge()
        assert status == "OK"
        mailbox.close()
    return message_count
from typing import List
def asciiRowToStrings(filename: str) -> List[str]:
    """Extract and returns a list of strings from the first row of the file.

    Args:
        filename: path of the text file to read.

    Returns:
        list of string tokens from the first line; commas are treated as
        additional whitespace separators.
    """
    # ``with`` guarantees the handle is closed even if readline() raises;
    # the original leaked the handle on an exception.
    with open(filename, "r") as f:
        line = f.readline()
    # Replacing unconditionally is equivalent to the original's guarded
    # replace -- str.replace is a no-op when ',' is absent.
    return line.replace(",", " ").split()
import math
def frac_bin(f, n=32):
    """Return the first ``n`` bits of the fractional part of float ``f``."""
    fractional = f - math.floor(f)   # keep only the part after the point
    shifted = fractional * 2 ** n    # move n fractional bits left of the point
    return int(shifted)              # truncate whatever fraction remains
def calculate_boundbox(list_coordinates):
    """
    Axis-aligned bounding box of a set of 2-D points.

    Coordinates are inverted: x indexes columns, y indexes rows.

    :param list_coordinates: points of the form ``[[x1, y1], [x2, y2], ...]``;
        accepts a numpy array or, now, any array-like (e.g. a plain list,
        which the original docstring promised but the code did not support)
    :return: top-left point ``(x, y)`` and width ``w`` and height ``h``
    """
    # Local import keeps the module importable when numpy is absent and no
    # top-level import exists in this file.
    import numpy
    pts = numpy.asarray(list_coordinates)
    x = int(pts[:, 0].min())
    y = int(pts[:, 1].min())
    w = int(pts[:, 0].max()) - x
    h = int(pts[:, 1].max()) - y
    return x, y, w, h
def default():
    """Return the default health-check payload."""
    payload = "healthcheck"
    return payload
def log(x):
    """
    Approximate math.log (base e) via the identity
    ln(x) = lim n->inf  n * (x**(1/n) - 1).
    No doctests needed.
    """
    steps = 1e10
    nth_root = x ** (1 / steps)
    return steps * (nth_root - 1)
def multiply(c, d):
    """
    Return the product of two numbers.

    Here is an example reference: astropy_
    """
    product = c * d
    return product
def gen_rotation_list(num):
    """Yield every digit rotation of ``num`` (excluding ``num`` itself)."""
    digits = str(num)
    for cut in range(1, len(digits)):
        yield int(digits[cut:] + digits[:cut])
from itertools import islice, tee
def pairwise(iterable, offset=1):
    """
    Return successive pairs from iterable.
    """
    left, right = tee(iterable)
    shifted = islice(right, offset, None)
    return zip(left, shifted)
def top_class_auc(df):
    """Out of all the predictive scores across all models, CHOOSE the best model prediction score for each treatment dose"""
    # Assumes df has at least the columns 'class', 'values', 'id_name',
    # 'model' -- TODO confirm against the caller building this frame.
    # Per-class column-wise maxima; agg(['max']) adds a second column level.
    df_max_class = df.groupby(['class']).agg(['max'])
    # Drop the 'max' level so the original column names remain.
    df_max_class.columns = df_max_class.columns.droplevel(1)
    df_max_class.rename_axis(None, axis=0, inplace = True)
    # Turn the (former) group index back into a regular 'class' column.
    df_max_class = df_max_class.reset_index().rename(columns={"index": "class"})
    # Rank classes by their best score, highest first.
    df_cls_top_auc = df_max_class.sort_values(by='values', ascending = False)
    df_cls_top_auc.reset_index(drop=True, inplace = True)
    # The per-class max of these label columns is meaningless, so drop them.
    df_cls_top_auc.drop(['id_name', 'model'], axis = 1, inplace = True)
    return df_cls_top_auc
def int_type(value):
    """Integer value routing."""
    incremented = value + 1
    print(incremented)  # side effect kept: echo the incremented value
    return "correct"
import argparse
import ast
def add_com_train_args(
    parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
    """Add arguments specific to COM training.

    Args:
        parser (argparse.ArgumentParser): Command line argument parser.

    Returns:
        argparse.ArgumentParser: Parser with added arguments.
    """
    # (flag, dest, extra keyword arguments) triples, registered in order.
    # ast.literal_eval turns the "True"/"False" command-line strings into
    # real booleans for the augment-* toggles.
    arg_specs = [
        ("--com-train-dir", "com_train_dir", dict(
            help="Training directory for COM network.")),
        ("--com-finetune-weights", "com_finetune_weights", dict(
            help="Initial weights to use for COM finetuning.")),
        ("--augment-shift", "augment_shift", dict(
            type=ast.literal_eval,
            help="If True, shift all images in each sample of the training set by a random value during training.")),
        ("--augment-zoom", "augment_zoom", dict(
            type=ast.literal_eval,
            help="If True, zoom all images in each sample of the training set by a random value during training.")),
        ("--augment-shear", "augment_shear", dict(
            type=ast.literal_eval,
            help="If True, shear all images in each sample of the training set by a random value during training.")),
        ("--augment-rotation", "augment_rotation", dict(
            type=ast.literal_eval,
            help="If True, rotate all images in each sample of the training set by a random value during training.")),
        ("--augment-shear-val", "augment_shear_val", dict(
            type=int,
            help="If shear augmentation is True, chooses random shear angle in degrees in [-augment_shear_val, augment_shear_val]")),
        ("--augment-zoom-val", "augment_zoom_val", dict(
            type=float,
            help="If zoom augmentation is True, chooses random zoom factor in [1-augment_zoom_val, 1+augment_zoom_val]")),
        ("--augment-shift-val", "augment_shift_val", dict(
            type=float,
            help="If shift augmentation is True, chooses random offset for rows and columns in [im_size*augment_shift_val, im_size*augment_shift_val]. So augment_shift_val is a fraction of the image size (must be in range [0,1])")),
    ]
    for flag, dest, kwargs in arg_specs:
        parser.add_argument(flag, dest=dest, **kwargs)
    return parser
import os
import json
def load_images_data(image_json_file):
    """
    Load an image dictionary from a JSON file.

    :param image_json_file: path to the JSON file.
    :return: the decoded dict, or ``False`` when the file does not exist
        (the False return is kept for backward compatibility with callers).
    """
    if not os.path.exists(image_json_file):
        return False
    with open(image_json_file, 'r') as f:
        # json.load replaces the original json.loads(''.join(f.readlines())).
        return json.load(f)
def _get_time_signature(e):
"""
Get the time signature and return
Returns
-------
tuple (mumber, number) OR None
the tuple is (beats, beat_type). `beats` is the numerator,
`beat_type` is the denominator of the key signature fraction.
"""
if e.find('time/beats') is not None and e.find('time/beat-type') is not None:
beats = int(e.find('time/beats').text)
beat_type = int(e.find('time/beat-type').text)
return (beats, beat_type)
else:
return None | bf1ba2b885ed55c7793fa3a355eadbad1d287987 | 43,553 |
def is_observed_custom_module(module):
    """Tell whether *module* has been marked as an observed custom module."""
    # getattr with a False default is equivalent to the original
    # ``hasattr(...) and module._is_observed_custom_module`` chain.
    return getattr(module, '_is_observed_custom_module', False)
def _temp_pad(F, x, padding=1, zeros=True):
    """
    Pads a 3D input along temporal axis by repeating edges or zeros

    Args:
        F: array-operations module (presumably mxnet.nd or mxnet.sym --
            confirm; only F.concat is used from it, slicing/repeat are
            methods on x)
        x: dim 5 b,t,c,w,h
        padding: the number of dim to add on each side
        zeros: pad with zeros?
    Returns: padded x
    """
    # Take one-frame slices from each temporal end.
    first = x.slice_axis(axis=1, begin=0, end=1) # symbol compatible indexing
    last = x.slice_axis(axis=1, begin=-1, end=None)
    if zeros:
        # Multiplying by 0 keeps shape/dtype while zeroing the pad frames.
        first = first * 0
        last = last * 0
    if padding > 1:
        # Widen each pad to `padding` frames.
        first = first.repeat(repeats=padding, axis=1)
        last = last.repeat(repeats=padding, axis=1)
    x = F.concat(first, x, dim=1)
    x = F.concat(x, last, dim=1)
    return x
def infinite(smaj, smin, bpa):
    """
    If the beam is not correctly fitted by AWimager, one or more parameters
    will be recorded as infinite.

    :param smaj: Semi-major axis (arbitrary units)
    :param smin: Semi-minor axis
    :param bpa: Postion angle
    """
    # Note: only +inf is checked, matching the original or-chain.
    return any(v == float('inf') for v in (smaj, smin, bpa))
def secondsToTime(seconds):
    """Convert seconds to a weeks/days/hours/minutes/seconds string"""
    parts = []
    # Successively peel off seconds, minutes, hours and days; what is left
    # in ``seconds`` after the loop is the number of whole weeks.
    for unit_size in (60, 60, 24, 7):
        seconds, remainder = divmod(seconds, unit_size)
        parts.append(remainder)
    secs, mins, hours, days = parts
    weeks = seconds
    return "%d Weeks %d Days %d Hours %d Minutes %d Seconds" % (weeks, days, hours, mins, secs)
from typing import OrderedDict
def compare_headers(hdr2width_old, hdr2width_new, colwidthmax):
    """Compare headers.

    Reports (prints) headings present in only one of the two mappings and
    returns the common headings, in the old ordering, with each width capped
    at ``colwidthmax``.
    """
    old_keys = set(hdr2width_old)
    new_keys = set(hdr2width_new)
    only_old = old_keys - new_keys
    only_new = new_keys - old_keys
    if only_old:
        print("Columns in old but not new:", only_old)
    if only_new:
        print("Columns in new but not old:", only_new)
    # Keep common headings in their original (old) order
    hdr2width = OrderedDict()
    for name in hdr2width_old:
        if name in hdr2width_new:
            hdr2width[name] = min(hdr2width_old[name], colwidthmax)
    return hdr2width
def clip_paths(paths, bounds):
    """Return the paths whose bounding boxes overlap *bounds*."""
    overlapping = []
    for path in paths:
        if path.bounds().overlap(bounds):
            overlapping.append(path)
    return overlapping
def remove_unrelated_domains(subdomains, domains):
    """
    Drop hostnames that fall outside the target scope.

    A hostname is kept only when it ends with one of the provided target
    domains; e.g. with scope example.it, example.com is removed.

    Args:
        subdomains -- the hostnames found so far
        domains -- the list of input target domains
    Returns:
        subdomains -- the list of subdomains strictly respecting the scope
    """
    in_scope_suffixes = tuple(domains)
    return [host for host in subdomains if host.endswith(in_scope_suffixes)]
import os
def get_local_versioning(directory):
    """Get versioning.py from local repository clone.

    :param directory: root of the repository clone.
    :return: the file contents as a string, or None when the file is absent.
    """
    filename = os.path.join(directory, 'versioning', 'versioning.py')
    if not os.path.isfile(filename):
        return None
    # ``with`` closes the handle; the original leaked an open file object.
    with open(filename) as fh:
        return fh.read()
def zerocase(case):
    """Check if the binary string is all zeroes"""
    return int(case, 2) == 0
def scalar( relation ):
    """ Returns the Python value in the first column of the first row,
        or None when the relation is empty.
    """
    sentinel = object()
    first_row = next(iter(relation), sentinel)
    if first_row is sentinel:
        return None
    return first_row[0]
def parts_to_uri(base_uri, uri_parts):
    """
    Converts uri parts to valid uri.
    Example: /memebers, ['profile', 'view'] => /memembers/profile/view
    """
    segments = [str(part).rstrip('/') for part in [base_uri] + uri_parts]
    return "/".join(segments)
def create_ids(data):
    """Generate IDs for the species in the dataset, by combining:
    - original species name (column 0)
    - reference (column 2)
    - trait (column 3)
    - latitude (column 7)
    - longitude (column 8)"""
    ids = {}
    for row in data:
        species, reference, trait = row[0], row[2], row[3]
        lat, lon = row[7], row[8]
        key = "".join([species, reference, trait, str(lat), str(lon)])
        ids[key] = [species, reference, trait, lat, lon]
    return ids
def find_outliers(serie, cutoff_margin):
    """
    Find outliers in a series based on percentiles.

    Parameters
    ----------
    :serie: array_like
        Input array or object that can be converted to an array.
    :cutoff_margin: fraction cut at each edge (e.g. 0.05 flags values below
        the 5th or above the 95th percentile)

    Returns
    -------
    :otlrs: array of booleans
    """
    upper_edge = serie.quantile(1 - cutoff_margin)
    lower_edge = serie.quantile(cutoff_margin)
    return (serie > upper_edge) | (serie < lower_edge)
from functools import reduce
def mac_aton(s):
    """Convert a Mac address to an integer.

    :param s: six ':'-separated hex octets, e.g. ``'aa:bb:cc:dd:ee:ff'``.
    :raises ValueError: if the string is not exactly six octets in 0..255
        (the original silently accepted out-of-range or surplus octets and
        produced a wrong integer).
    """
    try:
        octets = [int(part, 16) for part in s.split(':')]
        if len(octets) != 6 or not all(0 <= o <= 0xFF for o in octets):
            raise ValueError
        mac = 0
        for octet in octets:
            mac = (mac << 8) | octet
    except ValueError:
        raise ValueError('illegal Mac: {0}'.format(s))
    return mac
import argparse
def make_parser():
    """ Create a parser to parse arguments """
    parser = argparse.ArgumentParser(description="")
    arg_specs = (
        (("--log_name", "-n"), "Name of log to create. If multiple logs, log number will be appended to this name."),
        (("--log_level", "-l"), ""),
        (("--log_id", "-i"), "This is used to differentiate multiple logs."),
    )
    for flags, help_text in arg_specs:
        parser.add_argument(*flags, help=help_text)
    return parser
from typing import Optional
from pathlib import Path
def get_resource(filename: str, path: Optional[str] = None) -> str:
    """A utility method to get the absolute path to a resource in the test suite.

    Args:
        filename: the name of the file to get.
        path: an optional path relative to the root of the test suite.

    Returns:
        The absolute path of the file.
    """
    base = Path(__file__).parent
    if path is not None:
        base = base / Path(path)
    return str(base / filename)
def functional_distribution(word):
    """
    Given a word, return a map from cryptic functions to relative probabilities.
    Currently a stub: the word is ignored and fixed weights are returned.
    """
    stub_weights = {
        'syn': .4,
        'null': .2,
        'sub': .3,
        'ana': .3,
    }
    return stub_weights
import re
def columnMatcher(patterns, cols, needed=None, verbose=False):
    """Infer BQ expressions to extract required columns.

    ``patterns`` maps a target variable name to a list of match specs; each
    spec is either a plain column name (exact match) or a ``(regex,
    template)`` pair where the template is formatted with ``{c}`` bound to
    the first column matching the regex.  Returns {var: expression-or-None};
    unmatched variables are reported with a warning and mapped to None.
    When ``needed`` is given, only those variables are resolved.
    """
    def matchHelper(var, matches, cols, verbose):
        # Try each spec in order; the first hit wins.
        for m in matches:
            try:
                r, v = m
            except ValueError: # shortcut, single item match
                if verbose:
                    print('Simple:', m)
                if m in cols:
                    return {var:m}
                continue
            if verbose:
                print ("Re:", r, v)
            for c in cols:
                if re.search(r, c):
                    # Substitute the matched column name into the template.
                    return {var:v.format(c=c)}
        print("Warning no mapping for", var)
        return {var:None}
    res={}
    for var, matches in patterns.items():
        if needed and var not in needed:
            continue
        if verbose:
            print ('VAR:', var, matches)
        res.update(matchHelper(var, matches, cols, verbose))
    return res
def pre_check_state(s,N,args):
    """ Imposes that a bit with 1 must be preceded and followed by 0,
    i.e. a particle on a given site must have empty neighboring sites
    (periodic boundary conditions).
    #
    Works only for lattices of up to N=32 sites (otherwise, change mask)
    #
    """
    mask = (0xffffffff >> (32 - N)) # works for lattices of up to 32 sites
    # rotate the bit string left and right by one site (periodic)
    rot_left = ((s << 1) & mask) | ((s >> (N - 1)) & mask)
    rot_right = ((s >> 1) & mask) | ((s << (N - 1)) & mask)
    # valid iff no occupied site has an occupied neighbour
    neighbours = rot_left | rot_right
    return (neighbours & s) == 0
def check_input(prompt, assertion=None, default=None):
    """Get input from cmdline, ensuring that it passes the given assertion.

    assertion: a function that if given a value will return None if the check
    should pass, otherwise returning a helpful error message as a string.
    (Returning True also counts as failure, printed without a message.)
    default: value used when the user enters an empty line; it is also
    appended to the prompt for display.
    """
    if default is not None:
        prompt += " [default=%s]: " % str(default)
    # Loop until a value passes the assertion (or no assertion is given).
    while True:
        value = input(prompt).strip()
        if value == "" and default is not None:
            value = default
        if assertion is not None:
            check = assertion(value)
            if check is not None:
                error_msg = '\tInvalid input'
                # Non-bool results carry an explanation to show the user.
                if not isinstance(check, bool):
                    error_msg += ': ' + str(check)
                print(error_msg)
                continue
        return value
import os
import time
def convert_time(epoch_time, time_format="%s", delta=1):
    """
    Converts GMT epoch time to the specified time format.
    Also rounds off to the nearest minute, hour, day when valid delta value is passed

    NOTE(review): this mutates the process-wide TZ environment variable and
    calls time.tzset(), which is Unix-only and affects every subsequent
    time call in the process; the default "%s" strftime directive is also
    platform-specific (glibc). Confirm whether callers rely on this.

    :param epoch_time: epoch time to be converted (string or number)
    :param time_format: expected output time format
                        example: "%Y-%m-%d %H:%M:%S", "%s", ...
    :param delta: in seconds to be rounded off.
                  example: 300, 1800, 3600...
    :rtype: time
    example:
        >>> epoch_time = "1541399309.143"
        >>> convert_time(epoch_time)
        '1541399309'
        >>> convert_time(epoch_time, time_format="%Y-%m-%d %H:%M:%S %Z")
        '2018-11-05 06:28:29 UTC'
        >>> convert_time(epoch_time, time_format="%Y-%m-%d %H:%M:%S %Z", delta=300)
        '2018-11-05 06:25:00 UTC'
        >>> convert_time(epoch_time, delta=300)
        '1541399100'
    """
    # Force UTC so the formatted output is timezone-stable.
    os.environ["TZ"] = "UTC"
    time.tzset()
    # reset delta if unexpected value
    if delta <= 0:
        delta = 1
    # round off the epoch to specified delta (floor to a delta boundary)
    epoch_rounded = float(epoch_time) - (float(epoch_time) % delta)
    # return in GMT format
    return time.strftime(time_format, time.gmtime(float(epoch_rounded)))
def single_value_columns(df):
    """Return a dict mapping each single-valued column of ``df`` to its value.

    Note: despite the original phrasing ("a list"), the result is a dict
    ``{column_name: unique_value}`` covering every column with exactly one
    distinct value.
    """
    return { col: df[col].iloc[0] for col in df if df[col].nunique() == 1 }
def quantile_bin_column(self, column_name, num_bins=None, bin_column_name=None):
    r"""
    Classify column into groups with the same frequency (equal-depth binning).

    Group rows of data based on the value in a single column and add a label
    to identify grouping.  Equal depth binning attempts to label rows such
    that each bin contains the same number of elements.

    For :math:`n` bins of a column :math:`C` of length :math:`m`, the bin
    number is determined by :math:`\lceil n * \frac{f(C)}{m} \rceil`, where
    :math:`f` is a tie-adjusted ranking function over values of :math:`C`.
    If there are multiples of the same value in :math:`C`, then their
    tie-adjusted rank is the average of their ordered rank values.

    Notes
    -----
    ``num_bins`` is an upper bound: elements with an identical value must
    belong to the same bin, so the data may dictate fewer bins than
    requested (e.g. a column with only 2 distinct values yields 2 bins at
    most).

    Parameters
    ----------
    :param column_name: (str) The column whose values are to be binned.
    :param num_bins: (Optional[int]) The maximum number of quantiles.
        Default is the Square-root choice :math:`\lfloor \sqrt{m} \rfloor`,
        where :math:`m` is the number of rows.
    :param bin_column_name: (Optional[str]) The name for the new column
        holding the grouping labels.  Default is <column_name>_binned.
    :return: (List[float]) A list containing the edges of each bin.

    Examples
    --------
    Binning an 11-row Fibonacci column 'a' into (at most) 5 bins adds a
    label column and returns the cutoffs::

        cutoffs = my_frame.quantile_bin_column('a', 5, 'aEDBinned')
        # cutoffs == [1.0, 2.0, 5.0, 13.0, 34.0, 89.0]
    """
    # Delegates to the Scala backend: Python Optionals are converted to
    # Scala Options, and the returned Scala seq of cutoffs back to a list.
    return self._tc.jutils.convert.from_scala_seq(self._scala.quantileBinColumn(column_name,
                                                                          self._tc.jutils.convert.to_scala_option(num_bins),
                                                                          self._tc.jutils.convert.to_scala_option(bin_column_name)))
def close_connection(connection) -> bool:
    """Function to close Database connection.

    Args:
        connection (mysql.connector.connection_cext):
            The argument received is a MySQL connection (or a falsy value).

    Returns:
        bool: True when the connection was closed, False when no
        connection object was supplied.
    """
    if not connection:
        return False
    connection.close()
    return True
import math
def project_gdf(gdf, to_crs=None, to_latlong=False):
    """
    Project a GeoDataFrame from its current CRS to another.

    If to_crs is None, project to the UTM CRS for the UTM zone in which the
    GeoDataFrame's centroid lies. Otherwise project to the CRS defined by
    to_crs. The simple UTM zone calculation in this function works well for
    most latitudes, but may not work for some extreme northern locations like
    Svalbard or far northern Norway.

    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        the GeoDataFrame to be projected
    to_crs : dict or string or pyproj.CRS
        if None, project to UTM zone in which gdf's centroid lies, otherwise
        project to this CRS
    to_latlong : bool
        if True, project to EPSG:4326 (WGS84 lat/long) and ignore to_crs
    Returns
    -------
    gdf_proj : geopandas.GeoDataFrame
        the projected GeoDataFrame
    """
    if gdf.crs is None or len(gdf) < 1:
        raise ValueError("GeoDataFrame must have a valid CRS and cannot be empty")
    # if to_latlong is True, project the gdf to latlong
    if to_latlong:
        gdf_proj = gdf.to_crs({"init": "epsg:4326"})
        # utils.log(f"Projected GeoDataFrame to {settings.default_crs}")
    # else if to_crs was passed-in, project gdf to this CRS
    elif to_crs is not None:
        gdf_proj = gdf.to_crs(to_crs)
        # utils.log(f"Projected GeoDataFrame to {to_crs}")
    # otherwise, automatically project the gdf to UTM
    else:
        # if CRS.from_user_input(gdf.crs).is_projected:
        #     raise ValueError("Geometry must be unprojected to calculate UTM zone")
        # calculate longitude of centroid of union of all geometries in gdf
        avg_lng = gdf["geometry"].unary_union.centroid.x
        # calculate UTM zone from avg longitude to define CRS to project to;
        # zones are 6 degrees wide starting at -180.
        utm_zone = int(math.floor((avg_lng + 180) / 6.0) + 1)
        utm_crs = (
            f"+proj=utm +zone={utm_zone} +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
        )
        # project the GeoDataFrame to the UTM CRS
        gdf_proj = gdf.to_crs(utm_crs)
        # utils.log(f"Projected GeoDataFrame to {gdf_proj.crs}")
    return gdf_proj
import numpy
import pandas
def scan_preprocessing_loess_impute(df):
    """
    LOESS imputer.

    Fill missing 'Area' responses with the column mean (MAR assumption) so
    the LOESS procedure sees no NaNs.  When the whole column is missing the
    mean itself is NaN, in which case zeros are substituted (those rows are
    dropped later).

    Parameters
    ----------
    df : dataframe
        Dataframe with potentially missing responses in its 'Area' column.

    Returns
    -------
    df : dataframe
        Dataframe with no missing responses.
    """
    # Mean imputation for LOESS, not raw data
    area = df['Area'].copy()
    missing = pandas.isna(area)
    area[missing] = numpy.nanmean(area)
    # All-missing column: nanmean was NaN, so fall back to zeros.
    if pandas.isna(area).any():
        area = area.fillna(0)
    df['Area'] = area.copy()
    return df
def list_experiment_names(exp_file_path):
    """Retrieve experiment names from the given file.

    :param exp_file_path: path to a text file with one experiment name per line.
    :return: list of names with trailing newlines stripped.
    """
    # The original routed the file through an inner generator and then a
    # list comprehension; a single pass inside ``with`` is equivalent.
    with open(exp_file_path, "r") as ef:
        return [line.rstrip("\n") for line in ef]
def legend_positions(df, y, scaling):
    """
    Calculate position of labels to the right in plot...

    Each label starts at its series' last y value; colliding labels are then
    pushed apart until every pair is at least ``scaling`` apart.

    :param df: dataframe with one column per series
    :param y: iterable of column names to place labels for
    :param scaling: minimum allowed vertical distance between two labels
    :return: dict mapping column name -> label y position
    """
    positions = {}
    for column in y:
        positions[column] = df[column].values[-1] - 0.005

    def push(dpush):
        """Push colliding label pairs apart by ``dpush`` and report whether
        any collision was found.

        BUGFIX: the original unconditionally returned True, so the caller's
        ``while True`` loop never terminated.
        """
        collisions = 0
        for column1, value1 in positions.items():
            for column2, value2 in positions.items():
                if column1 != column2:
                    dist = abs(value1 - value2)
                    if dist < scaling:
                        collisions += 1
                        if value1 < value2:
                            positions[column1] -= dpush
                            positions[column2] += dpush
                        else:
                            positions[column1] += dpush
                            positions[column2] -= dpush
        return collisions > 0

    dpush = .001
    pushings = 0
    while True:
        # After 1000 fruitless rounds, push harder to guarantee progress.
        if pushings == 1000:
            dpush *= 10
            pushings = 0
        pushed = push(dpush)
        if not pushed:
            break
        pushings += 1
    return positions
from datetime import datetime
def sTimeUnitString( ismilli=False, abbr=True ):
    """OpendTect-like time stamp

    Parameters:
      * ismilli (bool, optional): Include microseconds (default is False)
      * abbr (bool, optional): Abbreviated day/month names (default is True)

    Returns:
      * str: Time stamp string formatted like done by OpendTect, e.g.
        'Mon 20 Apr 2020, 13:59:54' or, with ismilli,
        'Mon 20 Apr 2020, 13:59:54.001245'
    """
    day_month = "%a %d %b" if abbr else "%A %d %B"
    fmt = day_month + " %Y, %X"
    if ismilli:
        fmt += ".%f"
    return datetime.now().strftime(fmt)
def calc_Gunning_fog(n_psyl, n_words, n_sent):
    """Gunning fog metric for English text.

    Combines average sentence length with the percentage of polysyllabic
    ("hard") words, weighted by 0.4.
    """
    avg_sentence_len = float(n_words) / n_sent
    pct_hard_words = 100 * (float(n_psyl) / n_words)
    return 0.4 * (avg_sentence_len + pct_hard_words)
def _uniform_probability(action_spec):
"""Helper function for returning probabilities of equivalent distributions."""
# Equivalent of what a tfp.distribution.Categorical would return.
if action_spec.dtype.is_integer:
return 1. / (action_spec.maximum - action_spec.minimum + 1)
# Equivalent of what a tfp.distribution.Uniform would return.
return 1. / (action_spec.maximum - action_spec.minimum) | f5f0d68e710661bd9f6806b60ae9d695a1e78ccb | 43,601 |
from typing import Mapping
from typing import Any
from typing import List
import re
def get_int_regex_matches(pattern: str, state: Mapping[str, Any]) -> List[int]:
    """Matches a pattern with an integer capture group against state keys."""
    found = set()
    for key in state:
        match = re.match(pattern, key)
        if match is not None:
            found.add(int(match.group(1)))
    return sorted(found)
def longest_line_length(file_name):
    """
    This function takes a string file_name and returns the length of the
    longest line in the file (newline characters included), or None when
    the file is empty.
    """
    with open(file_name) as file:
        # Stream line-by-line instead of materialising readlines().
        length = max((len(line) for line in file), default=0)
    return length if length > 0 else None
from typing import Tuple
from typing import List
from typing import Set
def _get_repeating_chars_count(box_id: str) -> Tuple[int, int]:
"""Returns a tuple (twice_repeated, thrice_repeated)
twice_repeated, thrice_repeated - 1 - atleast 1 character is repeated twice exactly
- 0 No character is repeated exactly twice in the id
Arguments:
box_id {String} -- Box id containing lowercase alphabets only
Returns:
[Tuple(int, int)]
"""
counting_bucket: List[int] = [0] * 26
char_code_start: int = ord("a")
for letter in box_id:
counting_bucket[(ord(letter) - char_code_start)] += 1
unique_char_counts: Set[int] = set(counting_bucket)
return int(2 in unique_char_counts), int(3 in unique_char_counts) | af4ac5f5b972e69d591691cf3176cf48661a30c2 | 43,605 |
import torch
def binary_accuracy(y_pred, y_true):
    """Compute the fraction of correct argmax predictions for one batch."""
    predictions = torch.argmax(y_pred, dim=-1)
    matches = (predictions == y_true).float()
    return matches.sum() / len(matches)
def star(table):
    """
    Return the list of all columns in a table, much like ``*`` in SQL::

        select(*star(employee), where=employee.department == 'Finance')

    returns all of the columns from the *employee* table for the Finance
    department.

    :type table: :class:`hustle.Table`
    :param table: the table to extract the column names from
    """
    columns = table._columns
    return [columns[name] for name in table._field_names]
def valide(seq):
    """
    Return True when the DNA sequence *seq* contains only A, C, G and T.
    """
    return all(base in "ACGT" for base in seq)
import numpy
def transformData(data):
    """
    Append 18 derived columns to *data* (which must carry dec in column 1
    and RA in column 2).  With a 3-column input the appended columns are:
        3  sin(dec),   4  cos(dec),   5  tan(dec),
        6  sin(RA),    7  cos(RA),    8  tan(RA),
        9  dec*RA,    10  sin(dec*RA), 11 cos(dec*RA), 12 tan(dec*RA),
       13  dec/RA,    14  sin(dec/RA), 15 cos(dec/RA), 16 tan(dec/RA),
       17  RA/dec,    18  sin(RA/dec), 19 cos(RA/dec), 20 tan(RA/dec)

    Rewritten to build every derived column from the (unchanged) dec/RA
    columns and concatenate once, instead of 18 copy-pasted stanzas each
    reallocating the whole array.
    """
    dec = data[:, 1]
    ra = data[:, 2]
    dec_ra = dec * ra
    dec_over_ra = dec / ra
    ra_over_dec = ra / dec
    # Order matters: it defines the output column layout documented above.
    derived = [
        numpy.sin(dec), numpy.cos(dec), numpy.tan(dec),
        numpy.sin(ra), numpy.cos(ra), numpy.tan(ra),
        dec_ra, numpy.sin(dec_ra), numpy.cos(dec_ra), numpy.tan(dec_ra),
        dec_over_ra, numpy.sin(dec_over_ra), numpy.cos(dec_over_ra),
        numpy.tan(dec_over_ra),
        ra_over_dec, numpy.sin(ra_over_dec), numpy.cos(ra_over_dec),
        numpy.tan(ra_over_dec),
    ]
    columns = [data] + [col.reshape(-1, 1) for col in derived]
    return numpy.concatenate(columns, axis=1)
def get_refresh_rate():
    """
    Prompt the user for the refresh rate (in seconds) until a valid
    integer is entered.

    Output:
        --> integer, nb of seconds between refresh

    Fixed: the original recursed on every invalid input, which raises
    RecursionError after enough bad entries; a loop is unbounded-safe.
    """
    while True:
        refresh_rate = input("Enter the refresh rate (in seconds): ")
        print("")
        print("****************************************************************")
        try:
            return int(refresh_rate)
        except ValueError:
            print("Incorrect input. Please try again.")
def _safe_lin_pred(X, coef):
"""Compute the linear predictor taking care if intercept is present."""
if coef.size == X.shape[1] + 1:
return X @ coef[1:] + coef[0]
else:
return X @ coef | f74eba444cf03681ec41ea2314c0457b321a4344 | 43,614 |
from typing import List
from pathlib import Path
def open_article(filename: str) -> List[str]:
    """Load a plain-text article stored under the ``local`` directory.

    Args:
        filename (str): article file name inside ``local``

    Returns:
        List[str]: the article's lines, newlines preserved
    """
    article_path = Path("local", filename)
    with open(article_path, "r") as handle:
        return list(handle)
def has_data_been_validated(flags):
    """Return True (elementwise for a series) when *flags* exceeds 1,
    i.e. the data has been validated."""
    validated = flags > 1
    return validated
import requests
def get_job_id(
    domain,
    org_key,
    headers,
    hostname="*",
    process="*",
    window="10h",
    start="0",
    end="0",
):
    """
    Submit the initial Carbon Black Cloud process search and return the
    job id of that query.

    :param domain: base API domain
    :param org_key: organisation key inserted into the endpoint path
    :param headers: HTTP headers (authentication) for the request
    :param hostname: device name to filter on; "*" means all devices
    :param process: process name to query for; "*" means all processes
    :param window: trailing time window used when no explicit start is given
    :param start: explicit range start ("0" means "use the window")
    :param end: explicit range end, paired with *start*
    :return: the ``job_id`` from the API response (None if absent)

    Fixed: the two near-identical payload dicts were collapsed into one;
    the only difference was the ``criteria`` device filter.
    """
    url = f"{domain}/api/investigate/v2/orgs/{org_key}/processes/search_jobs"
    if start != "0":
        time_range = {"end": end, "start": start}
    else:
        time_range = {"window": "-" + window}
    query_payload = {
        "query": "process_name:" + process,
        "fields": [
            "device_name",
            "process_start_time",
            "process_cmdline",
            "process_name",
            "process_pid",
            "parent_pid",
        ],
        "sort": [{"field": "device_timestamp", "order": "asc"}],
        "start": 0,
        "rows": 10000,
        "time_range": time_range,
    }
    # Only restrict by device when a specific hostname was requested.
    if hostname != "*":
        query_payload["criteria"] = {"device_name": [hostname]}
    print("")
    response = requests.post(url, headers=headers, json=query_payload).json()
    job_id = response.get("job_id")
    print("Query sent to Carbon Black Cloud")
    return job_id
def _match_specie_group(file_metadata):
"""
Classifies the virus taxonomy group from NCBI based
structure to broadly used Baltimore Classification
Based on https://en.wikipedia.org/wiki/Virus#Classification
Baltimore Classification:
I: dsDNA viruses (e.g. Adenoviruses, Herpesviruses, Poxviruses)
II: ssDNA viruses (+ strand or "sense") DNA (e.g. Parvoviruses)
III: dsRNA viruses (e.g. Reoviruses)
IV: (+)ssRNA viruses (+ strand or sense) RNA (e.g. Picornaviruses, Togaviruses)
V: (−)ssRNA viruses (− strand or antisense) RNA (e.g. Orthomyxoviruses, Rhabdoviruses)
VI: ssRNA-RT viruses (+ strand or sense) RNA with DNA intermediate in life-cycle (e.g. Retroviruses)
VII: dsDNA-RT viruses DNA with RNA intermediate in life-cycle (e.g. Hepadnaviruses)
"""
# NCBI based taxonomy
# https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Undef&id=10239&lvl=3&lin=f&keep=1&srchmode=1&unlock
groups_patterns = [
'; dsDNA viruses, no RNA stage; ',
'; ssDNA viruses; ',
'; dsRNA viruses; ',
'; ssRNA positive-strand viruses, no DNA stage; ',
'; ssRNA negative-strand viruses; ',
# '; ', # no clear match with VI from taxonomy
# '; ' # no clear match with VII
]
groups = [
'dsDNA',
'ssDNA',
'dsRNA',
'(+)ssRNA',
'(-)ssRNA',
'ssRNA-RT',
'dsDNA-RT'
]
for pattern in groups_patterns:
if pattern in file_metadata:
return groups[groups_patterns.index(pattern)]
return 'unclassified' | 36273c7678321acbea64dbd710fb2fbd40729a0a | 43,619 |
def tolist_if_not(arr):
    """
    Convert `arr` to list if it is not already.
    >>> import numpy as np
    >>> tolist_if_not([0])
    [0]
    >>> tolist_if_not(np.arange(1))
    [0]
    """
    if hasattr(arr, "tolist"):
        return arr.tolist()
    return arr
def _safe_hasattr(obj, attr):
"""Workaround unreliable hasattr() availability on sqlalchemy objects"""
try:
object.__getattribute__(obj, attr)
return True
except AttributeError:
return False | d726f83115431b39aebfde341cc1ea055117c8c0 | 43,621 |
def user_input_coords(string):
    """Prompt with *string* until the user enters exactly two integer
    coordinates separated by whitespace, then return them as [x, y].

    Note: loops forever until valid input arrives; negative numbers are
    rejected because str.isdigit() is False for strings like "-1".
    """
    while True: # makes sure the input has only two integer coordinates
        inp = input(string)
        if len([i for i in inp.split() if i.isdigit()]) == 2 and len(inp.split()) == 2:
            return list(map(int, inp.split()))
def softmax_bias(p, slope, bias):
    """
    Symmetric softmax with bias, applied elementwise (binary only).

    Cannot use too small or large bias (roughly < 1e-3 or > 1 - 1e-3).

    :param p: between 0 and 1.
    :param slope: arbitrary real value. 1 gives identity mapping, 0 always 0.5.
    :param bias: between 1e-3 and 1 - 1e-3. Giving p=bias returns 0.5.
    :return: transformed probability.
    :type p: torch.FloatTensor
    :type slope: torch.FloatTensor
    :type bias: torch.FloatTensor
    :rtype: torch.FloatTensor
    """
    # Gain term chosen so that p == bias maps to exactly 0.5.
    gain = (1. - bias) ** slope
    gain = gain / (bias ** slope + gain)
    # Biased sigmoid-style renormalisation of p.
    out = gain * p ** slope
    out = out / (out + (1. - gain) * (1. - p) ** slope)
    return out
def default_str_tester(s: str) -> bool:
    """
    Default test whether s is plain data rather than a URL or file name.

    A string counts as vanilla data when it is blank or contains any
    whitespace (CR, LF, tab, space) or quote character; otherwise the
    caller may try to treat it as a file name.

    :param s: string to test
    :return: True if this is a vanilla string, otherwise try to treat it as a file name
    """
    specials = ('\r', '\n', '\t', ' ', '"', "'")
    return not s.strip() or any(ch in s for ch in specials)
def index_to_angle(i):
    """ Takes an index into a LIDAR scan array and returns the associated
    angle, in degrees.

    Fixed: the original computed -135.0 + (i / 1081.0) * 0.25, which maps
    every index to roughly -135 degrees.  A 1081-beam scan at 0.25 degrees
    per step spans 270 degrees, so index i sits at -135 + i * 0.25
    (i = 0 -> -135, i = 1080 -> +135). """
    return -135.0 + i * 0.25
import socket
def getInterfaceAddress(peer_ip_address):
    """Tries to find the ip address of the interface that connects to a
    given host.

    Opens a UDP socket "connected" to the peer (no packet is actually
    sent) and reads the local address the kernel selected.

    Fixed: the socket is now closed even when connect() raises, so a bad
    peer address no longer leaks the file descriptor.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((peer_ip_address, 53))  # 53=dns
        return s.getsockname()[0]
    finally:
        s.close()
def count_tracks_in_album_from(df, decade_year):
    """Count tracks on albums published during the given decade.

    *df* must have `release_year` and `track_id` columns; *decade_year*
    is the decade's first year (e.g. 1990 covers 1990-1999).
    """
    decade = range(decade_year, decade_year + 10)
    in_decade = df["release_year"].isin(decade)
    return df.loc[in_decade, "track_id"].count()
def remove_implications(ast):
    """
    @brief Removes implications in an AST, rewriting (a -> b) as
           ((non a) ou b) at every depth.
    @param ast The ast (nested tuples of operator + operands; leaves are
               returned untouched)
    @return another AST

    Fixed: the original recursed into the operands of non-'->' nodes but
    then returned the *original* subtree, so implications nested under
    other connectives were never removed.  Tuple guards were added so
    string leaves of length 2 or 3 are no longer unpacked by mistake,
    and unary nodes such as ('non', expr) are now traversed too.
    """
    if isinstance(ast, tuple) and len(ast) == 3:
        op, lhs, rhs = ast
        lhs = remove_implications(lhs)
        rhs = remove_implications(rhs)
        if op == '->':
            # a -> b  ===  (non a) ou b
            return ('ou', ('non', lhs), rhs)
        # Propagate the rewritten operands (this was the bug).
        return (op, lhs, rhs)
    if isinstance(ast, tuple) and len(ast) == 2:
        # Unary node, e.g. ('non', expr): recurse into the operand.
        op, operand = ast
        return (op, remove_implications(operand))
    return ast
import binascii
def hex_to_bytes(value: str) -> list:
    """
    Decode a hex-encoded string into a list of byte values (0-255).

    Parameters
    ----------
    value : str
        hex encoded string
    Returns : list
    -------
    bytes array of hex value
    """
    decoded = binascii.unhexlify(value)
    return [byte for byte in decoded]
def F_hydro(R):
    """
    Fraction of all free-free emissions due to hydrogen.

    R is the ratio of the number of heliums to the number of hydrogens,
    which is approximately 0.08.
    """
    return 1 - R
def apply_filters(genes, records, db_info, filters):
    """Apply predefined and additional filters.

    Args:
        genes: list of Gene objects
        records: list of records as Variant objects
        db_info: database configuration as Config object (unused here)
        filters: list of filters as Filter objects

    Returns:
        The records that pass the quality filter and every user filter.
        As a side effect, genes matching a passing record are flagged
        via ``set_status("HC LoF found")``.
    """
    passed = []
    try:
        for record in records:
            # Drop records that fail the quality filter.
            if record.filter != "PASS":
                continue
            # Drop records rejected by any user-supplied filter.
            if not all(flt.apply(record) for flt in filters):
                continue
            passed.append(record)
            for gene in genes:
                if gene.name == record.gene_name:
                    gene.set_status("HC LoF found")
    except Exception as error:
        print(error)
        raise
    return passed
def ParseIssueNumber(issueCode):
    """Extract the issue number from a five-digit issue code.

    The first three digits are the zero-padded issue number, the fourth
    digit identifies variant covers (starting at 1) and the fifth digit
    seems to always be 1.  Returns None for codes that are not exactly
    five characters long.
    """
    if len(issueCode) != 5:
        return None
    return issueCode[:3].lstrip("0")
import torch
def _read_response_manual_input(self, additional_text):
    """
    Read a response provided by the user as manual input via prompt.

    :param additional_text (str): additional text to display in prompt when sourcing user input;
        to be used in case a datapoint is to be re-sampled
    :return (torch.Tensor): 1-by-k tensor of the comma-separated floats
        entered by the user, created on ``self.device`` with ``self.dtype``
    """
    # read candidate datapoint; the prompt echoes the current iteration
    # counter kept in self.model["covars_proposed_iter"]
    response_candidate_string = input(
        "ITERATION "
        + str(self.model["covars_proposed_iter"])
        + additional_text
        + " - Please provide response: "
    )
    # assumes only a single response provided (i.e. not providing input on multiple parameters and weighing);
    # a malformed entry (non-numeric token) will raise ValueError from float()
    response_candidate_float_tensor = torch.tensor(
        [[float(z) for z in response_candidate_string.split(",")]],
        device=self.device,
        dtype=self.dtype,
    )
    return response_candidate_float_tensor
def get_ranking07():
    """
    Return the ranking with ID 07 as (alternative, score) pairs,
    ordered from best to worst score.
    """
    alternatives = ["a2", "a1", "a3", "a6", "a4", "a5"]
    scores = [0.644440, 0.623018, 0.593228, 0.591963, 0.543750, 0.540097]
    return list(zip(alternatives, scores))
def dual(x, y):
    """Return 3 * x**3 * y**3, evaluated term by term."""
    value = 3 * x * x * x * y * y * y
    return value
def findMaxAverage(self, nums, k):  # fixed: original O(n*k) re-summation timed out
    """
    Return the maximum average of any contiguous subarray of length k.

    Uses a sliding window so each element is added and removed exactly
    once (O(n)), replacing the original sum(nums[i:i+k]) per position
    (O(n*k)) which exceeded the time limit.  Assumes len(nums) >= k.

    :type nums: List[int]
    :type k: int
    :rtype: float
    """
    window = sum(nums[:k])
    best = window
    for i in range(k, len(nums)):
        # Slide the window one step right: add nums[i], drop nums[i-k].
        window += nums[i] - nums[i - k]
        best = max(best, window)
    return best / k
from typing import List
def get_grid() -> List[List[int]]:
    """ Returns the 9x9 grid that the SudokuBoard
    is based off of (0 marks an empty cell) """
    layout = (
        "016508402"
        "520004000"
        "007600031"
        "063010080"
        "904863005"
        "050000600"
        "100900200"
        "602300804"
        "045000010"
    )
    return [[int(cell) for cell in layout[row * 9:(row + 1) * 9]]
            for row in range(9)]
def calculate_profit_pod(location, destination):
    """ Calculate net profit by good (for one pod) between the location
    planet and the destination planet.

    Returns a list of single-element lists, each holding the profit as a
    string formatted with 2 decimals ('0.00' when the good is untradable).

    NOTE(review): price_slip[key] appears to be 0 for unavailable goods and
    otherwise an indexable sequence where [0] is the sale price, [1] the
    purchase price and [2] some other quantity -- TODO confirm against the
    planet model.
    """
    _profit = []
    for key in destination.price_slip.keys():
        # Only trade goods priced on both planets with non-zero buy-side data.
        if location.price_slip[key] != 0 and destination.price_slip[key] != 0 and location.price_slip[key][1] != 0 and location.price_slip[key][2] != 0:
            # profit = destination sale price minus purchase cost at location
            benefit = destination.price_slip[key][0] - location.price_slip[key][1]
            _profit.append([f'{benefit:.2f}'])
        else:
            # good cannot be traded on this route
            _profit.append([f'0.00'])
    return _profit
def iterit(*args, **kwargs):
    """
    This takes some input (int, string, list, iterable, whatever) and
    makes sure it is an iterable, making it a single item list if not.
    Importantly, it does rational things with strings.
    You can pass it more than one item. Cast is optional.
        def foo(offsets=10):
            offsets = iterit(offsets, cast=int)
            for f in offsets:
                print("Value %s" % (10 + f))
    >>> foo()
    Value 20
    >>> foo(3)
    Value 13
    >>> foo([1,2])
    Value 11
    Value 12
    >>> foo('3')
    Value 13
    >>> foo(('3', 4))
    Value 13
    Value 14
    Also useful this way:
        foo, bar = iterit(foo, bar)

    Fixed for Python 3: the original relied on ``map(None, ...)`` (py2
    identity map, a TypeError in py3) and on strings lacking __iter__
    (py3 strings are iterable, which would have split them into chars).
    """
    if len(args) > 1:
        return [iterit(arg, **kwargs) for arg in args]
    value = args[0]
    # Strings/bytes are iterable in py3 but must stay atomic values.
    if isinstance(value, (str, bytes)) or not hasattr(value, '__iter__'):
        items = [value]
    else:
        items = value
    cast = kwargs.get('cast')
    if cast is None:
        return list(items)
    return [cast(item) for item in items]
import struct
def readShort(f):
    """Read an unsigned 2-byte value (native byte order) from file f."""
    raw = f.read(2)
    (value,) = struct.unpack("H", raw)
    return value
from typing import Union
from typing import List
from typing import Dict
from typing import OrderedDict
def sort_callbacks_by_order(
    callbacks: Union[List, Dict, OrderedDict]
) -> OrderedDict:
    """Build an ordered mapping of callbacks sorted by ``callback.order``.

    Args:
        callbacks: either a list of callbacks or a (possibly ordered) dict

    Returns:
        OrderedDict of callbacks sorted by their ``order`` attribute;
        list inputs are re-keyed by their sorted position.

    Raises:
        TypeError: if `callbacks` is not None, dict, OrderedDict or list
    """
    if callbacks is None:
        return OrderedDict()
    if isinstance(callbacks, (dict, OrderedDict)):
        pairs = sorted(callbacks.items(), key=lambda kv: kv[1].order)
        return OrderedDict(pairs)
    if isinstance(callbacks, list):
        ordered = sorted(callbacks, key=lambda cb: cb.order)
        return OrderedDict(enumerate(ordered))
    raise TypeError(
        f"Callbacks must be either Dict/OrderedDict or list, "
        f"got {type(callbacks)}"
    )
def _make_env_vars(runlevel):
"""
makes standard env vars to pass to the scripts when they are run. the script
must be expecting them have a "--env=XXX=xxx" entry for each
"""
env_vars = dict()
env_vars['NEST_RUNLEVEL'] = runlevel.get_runlevel_name()
return env_vars | 384db73cd53d24878fb6fe0b07bb1507a8086127 | 43,651 |
def sign(value):
    """
    Returns an integer that indicates the sign of a number.

    Parameters:
        value (int): The number to get the sign of

    Returns:
        -1 if the number is negative, 0 if the number is 0, or 1
        if the number is positive.
    """
    # Boolean subtraction yields exactly -1, 0 or 1 as an int.
    return (value > 0) - (value < 0)
import socket
import struct
def build_ip_header(src_ip, dst_ip):
    """Build a valid 20-byte IPv4 header (no options) for a UDP packet.

    Parameters:
    - src_ip: A string with a valid IP address which will be used as
        SRC IP (may be spoofed)
    - dst_ip: A string with a valid IP address where the packets will be
        sent to

    Total length and checksum are packed as 0 so the kernel fills in the
    correct values.  Field layout follows RFC 791: version/IHL, TOS,
    total length, identification, flags/fragment offset, TTL, protocol,
    checksum, source address, destination address.
    """
    version_ihl = (4 << 4) | 5          # IPv4, 5 x 32-bit header words
    tos = 0
    total_length = 0                    # kernel will fill the correct total length
    identification = 0xbeef             # Id of this packet
    flags_fragment = 0
    ttl = 255
    protocol = socket.IPPROTO_UDP
    checksum = 0                        # kernel will fill the correct checksum
    src = socket.inet_aton(src_ip)
    dst = socket.inet_aton(dst_ip)
    # '!' packs in network (big-endian) byte order; see
    # http://docs.python.org/2/library/struct.html#format-characters
    return struct.pack('!BBHHHBBH4s4s',
                       version_ihl, tos, total_length, identification,
                       flags_fragment, ttl, protocol, checksum, src, dst)
def get_default_freesolv_task_names():
    """Return the default FreeSolv task names (the measured 'expt' value)."""
    return ["expt"]
def repr_object(o):
    """
    Represent an object for testing purposes.

    Parameters
    ----------
    o
        Object to represent.

    Returns
    -------
    result : str
        ``repr(o)`` for simple scalar types and None; otherwise the
        object's type string prefixed with "!".
    """
    simple_types = (str, bytes, int, float, type)
    if o is None or isinstance(o, simple_types):
        return repr(o)
    return "!" + str(type(o))
import shlex
def _lexcmds(cmds):
"""spit pipeline specification into arguments"""
if isinstance(cmds, str):
return shlex.split(cmds)
else:
return [shlex.split(cmd) if isinstance(cmd, str) else cmd for cmd in cmds] | 44102762f46e097caae8b9a1ef5f788897d8c9aa | 43,657 |
def ifb(bites):
    """
    ifb is a wrapper for int.from_bytes with big-endian byte order.
    """
    return int.from_bytes(bites, "big")
def build_container_sas_uri(storage_acc_url: str, container: str, sas: str) -> str:
    """
    Create a container SAS URL in the format {account-url}/{container}?{SAS}.

    Note that this method is not responsible for generating the SAS token.

    :param storage_acc_url: Base URL to the storage account
    :param container: Name of the container in the storage account
    :param sas: Generated SAS token
    """
    return f"{storage_acc_url}/{container}?{sas}"
def _to_signed32(n):
"""Converts an integer to signed 32-bit format."""
n = n & 0xffffffff
return (n ^ 0x80000000) - 0x80000000 | b5562063cc0467222f3d972eca9305dddbe4e05e | 43,664 |
def generate_method_bindings(obj):
    """Function to create the function calls, which contain calls to the godot apis.

    Emits Cython source text: one ``godot_method_bind`` pointer declaration
    per method of *obj*, plus an ``init_method_bindings_<Name>()`` function
    that resolves each pointer through ``api_core``.

    obj is a mapping with a "name" key (class name) and a "methods" list
    of mappings each carrying a "name" key.
    """
    result = "\n##################################Generated method bindings#########################################\n"
    # One bind pointer for the class itself plus one per method.
    result += f"cdef godot_method_bind *bind_{obj['name']}\n"
    for method in obj["methods"]:
        result += f"cdef godot_method_bind *bind_{obj['name'].lower()}_{method['name']}\n"
    # Emit the initialiser that resolves all bind pointers at runtime.
    result += f"cdef init_method_bindings_{obj['name']}():\n"
    result += f'    bind_{obj["name"]} = api_core.godot_method_bind_get_method("Object", "_get")\n'
    for method in obj["methods"]:
        result += f"    global bind_{obj['name'].lower()}_{method['name']}\n"
    for method in obj["methods"]:
        # NOTE: the f-string expression spans two source lines here so the
        # generated output stays on a single line.
        result += f"""    bind_{obj['name'].lower()}_{method['name']} = api_core.godot_method_bind_get_method('{
            obj['name']}', '{method['name']}')\n"""
    return result
def test(when): # this is another custom comment
    """
    this is a test docstring for test
    """
    # NOTE(review): `when` and tester2 are unused; this looks like a
    # formatter/comment-preservation fixture -- confirm before removing.
    tester1 = {"test1": 1, "test2": 2}
    tester2 = {
        "test1": 1,
        "test2": 2,
    }
    return tester1
def define_chains(pdb_id, sabdab_chains_df):
    """Return the heavy, light and antigen chain ids for *pdb_id*.

    Looks up the first row of *sabdab_chains_df* whose ``pdb`` column
    equals *pdb_id*; positional columns 1-3 are taken as the H chain,
    L chain and antigen chain(s).  Multi-chain antigens are encoded as
    a " | "-separated string.
    """
    row = sabdab_chains_df[sabdab_chains_df["pdb"] == pdb_id].values[0]
    heavy = row[1]
    light = row[2]
    antigen = row[3]
    # A single character means one antigen chain; longer strings hold
    # several chain ids separated by " | ".
    if len(antigen) > 1:
        antigen_chains = antigen.split(" | ")
    else:
        antigen_chains = list(antigen)
    return heavy, light, antigen_chains
from typing import Tuple
import gc
import torch
def find_tensor_by_shape(target_shape: Tuple, only_param: bool = True) -> bool:
    """Find a tensor from the heap.

    Scans every object tracked by the garbage collector and reports
    whether any tensor (or Parameter) has exactly *target_shape*.

    Args:
        target_shape (tuple):
            Tensor shape to locate.
        only_param (bool):
            Only match Parameter type (e.g. for weights).

    Returns:
        (bool):
            Return True if found.
    """
    for obj in gc.get_objects():
        try:
            # Only need to check parameter type objects if asked.
            # isinstance is far cheaper than formatting str(type(obj)) for
            # every heap object and also matches Parameter subclasses.
            if only_param and not isinstance(obj, torch.nn.Parameter):
                continue
            if torch.is_tensor(obj) or (hasattr(obj, "data") and torch.is_tensor(obj.data)):
                if obj.shape == target_shape:
                    return True
        except Exception:
            # Arbitrary heap objects may raise from attribute access or
            # comparison; skip them.
            pass
    return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.