content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def average_children(df):
    """Return the mean of the ``children`` column, rounded to 2 decimal places."""
    mean_children = df["children"].mean()
    return round(mean_children, 2)
def version2float(s):
    """Convert a version string to a float (major.minor) suitable for sorting."""
    # Keep only the first version of a range: '4.1.1-4.5.5' -> '4.1.1'
    first_version = s.split('-')[0]
    # Truncate to at most major.minor: '4.1.1' -> '4.1'
    major_minor = '.'.join(first_version.split('.')[:2])
    # Strip residual non-numeric characters (e.g. a leading 'v')
    cleaned = ''.join(ch for ch in major_minor if ch.isdigit() or ch == '.')
    return float(cleaned)
def get_false_positives_for_given_class(class_lists, class_c):
    """Collect items from every class list except ``class_c``.

    Anything attributed to a different class counts as a false positive
    for ``class_c``; the per-class lists are concatenated in order.
    """
    false_positives = []
    for idx, predictions in enumerate(class_lists):
        if idx != class_c:
            false_positives.extend(predictions)
    return false_positives
def interval_pos(test_x, x0, x1):
    """Signed distance from ``test_x`` to the interval [x0, x1] in 1D.

    Negative below the interval, positive above it, 0.0 inside.

    :param test_x: point to classify
    :param x0: interval lower bound
    :param x1: interval upper bound (must satisfy x1 >= x0)
    :return: signed offset, or 0.0 when inside the interval
    """
    assert x1 >= x0
    if test_x < x0:
        return test_x - x0
    return test_x - x1 if test_x > x1 else 0.0
def get_predecessor(cx, graph, default=None):
    """Return the single predecessor of ``cx`` in a construction graph.

    Construction graphs guarantee at most one predecessor per node; when
    ``cx`` has none, ``default`` is returned instead.
    """
    predecessors = iter(graph.pred[cx])
    return next(predecessors, default)
import re
def expand_skips(conf, letters=None):
    """
    Compute the list of all pins to skip given ``conf["skip_pins"]``, whose
    entries may be specific pins ("A4", 7) or ranges ("A1-3", "A-C4", "1-5").

    :param conf: dict-like configuration; reads the "skip_pins" key.
    :param letters: optional expanded list of BGA pin letters in use,
        required to resolve letter ranges such as "A-C4".
    :return: list of pin name strings.
    :raises ValueError: for an unparseable specifier, or a letter range
        whose endpoints are not in ``letters``.
    """
    if letters is None:
        letters = []
    skips = conf.get("skip_pins", [])
    out = []
    for skip in skips:
        skip = str(skip)
        if "-" not in skip:
            # plain pin name, no range to expand
            out.append(skip)
            continue
        match = re.search(r"^([A-Z]+(?:-[A-Z]+)?)?([0-9]+(?:-[0-9]+)?)$", skip)
        if not match:
            raise ValueError("Unknown skip specifier {}".format(skip))
        let, num = match.groups()
        if "-" in num:
            num_start, num_stop = [int(x) for x in num.split("-")]
            nums = list(range(num_start, num_stop + 1))
        else:
            nums = [num]
        # BUG FIX: `let` is None for purely numeric ranges like "1-5";
        # the original crashed with TypeError on `"-" in None`.
        if let is not None and "-" in let:
            let_start, let_stop = let.split("-")
            let_start_idx = letters.index(let_start)
            let_stop_idx = letters.index(let_stop)
            lets = letters[let_start_idx:let_stop_idx + 1]
        else:
            lets = [let or ""]
        for let in lets:
            for num in nums:
                out.append(let + str(num))
    return out
def pkg_as_json(pkg):
    """Summarize a package object as a dict of name / ensure / platform."""
    return {
        'name': pkg.name,
        'ensure': pkg.evr,
        'platform': pkg.arch,
    }
def fuse_key_value(key, value, key_padding_mask, value_padding_mask, fusing):
    """
    Fuse key representation and value representation
    Args:
        key:
            :math:`(K, N, E)` where N is the batch size, K is the key number, E is the embedding size.
        value:
            :math:`(L, K, N, E)` where L is the value length, N is the batch size, K is the key number,
            E is the embedding size.
        key_padding_mask:
            :math:`(N, K)` where N is the batch size, K is the key number.
        value_padding_mask:
            :math:`(N, K, L)` where N is the batch size, K is the key number, L is the value length size.
        fusing: fusing type, either 'max-pool-value' or 'expand-key'
    Returns:
        - output: fused representation for key-value pair, plus the matching padding mask
    Raises:
        NotImplementedError: for any other ``fusing`` value.
    """
    if fusing == 'max-pool-value':
        # Collapse the value length dimension L by max-pooling, giving (K, N, E)
        # so it can be added to key element-wise.
        value, _ = value.max(dim=0)
        return key + value, key_padding_mask
    elif fusing == 'expand-key':
        # Broadcast the key over the value length dimension:
        # (1, K, N, E) + (L, K, N, E) -> (L, K, N, E).
        key = key.unsqueeze(0)
        return key + value, value_padding_mask
    else:
        raise NotImplementedError
import base64
def encode_str_to_base_64(str_to_encode):
    """
    Encode a string as URL-safe base64.
    :param str_to_encode: str to encode (encoded as UTF-8 first)
    :return: base64-encoded ASCII str
    """
    utf8_bytes = str_to_encode.encode("UTF-8")
    return base64.urlsafe_b64encode(utf8_bytes).decode("ascii")
def children_are_mapping(trans, elem):
    """Treat the child elements of ``elem`` as key/value pairs.

    The (normalized) tag name is the key; the value is the inner text for
    leaf children, or the translated sub-structure for nested children.
    Duplicate tag names overwrite earlier entries.

    NOTE(review): assumes ``trans`` provides ``tagnorm`` and ``from_elem``
    (project translator API) — confirm against the caller.
    """
    res = {}
    for i in elem:
        key = trans.tagnorm(i)
        # len(element) counts child elements; truthy means it nests further
        if len(i):
            value = trans.from_elem(i)
        else:
            value = i.text
        res[key] = value
    return res
def get_axdr_length(data: bytearray):
    """
    Consume and return an A-XDR length prefix from the front of ``data``.

    A first byte below 0x80 is the length itself. Otherwise its low 7 bits
    give how many following bytes encode the length, big-endian. All bytes
    read are removed from ``data`` in place.
    """
    first = data.pop(0)
    if not (first & 0b10000000):
        # High bit clear: single-byte length.
        return first
    n_length_bytes = first & 0b01111111
    raw = bytearray(data.pop(0) for _ in range(n_length_bytes))
    return int.from_bytes(raw, "big")
import time
def code1(arg):
    """Original code path: print a marker, sleep ``arg`` seconds, return ``arg``."""
    print('Code 1')
    time.sleep(arg)
    return arg
import socket
def _get_alias_records(hostname):
    """Return all IPv4 A records for a given hostname
    """
    # gethostbyname_ex returns (canonical_name, alias_list, ip_address_list);
    # index [2] is the list of IPv4 address strings.
    return socket.gethostbyname_ex(hostname)[2]
def doctest(doc: str) -> str:
    """Wrap doctest sessions in markdown ```python code fences.

    Runs of lines starting with ">>> " are fenced; everything else is
    passed through unchanged.
    """
    out = []
    inside = False
    lines = doc.splitlines()
    last = len(lines) - 1
    for idx, line in enumerate(lines):
        is_code = line.startswith(">>> ")
        if is_code and not inside:
            out.append("```python")
            inside = True
        elif not is_code and inside:
            out.append("```")
            inside = False
        out.append(line)
        # close an open fence when the document ends on a doctest line
        if is_code and idx == last:
            out.append("```")
            inside = False
    return '\n'.join(out)
def parse_humanip(humanip_str):
    """
    Parse a humanip-like string into its parts.

    Example: "v1/en:muscle-abandon-abandon-access" yields
    {'version': 1, 'language': 'en',
     'words': ['muscle', 'abandon', 'abandon', 'access']}
    """
    meta, words_str = humanip_str.split(":")
    version_str, language = meta.split("/")
    return {
        'version': int(version_str.strip("v")),
        'language': language,
        'words': words_str.split("-"),
    }
import re
def get_api_name(header_file_path):
    """Get the API name from a header file path.

    Args:
        header_file_path: path to the header file.
    Returns:
        The stem of the ".h" file, or "" when the path does not match
        the ".../<name>.h" pattern.
    """
    match = re.search(r".*/(.*)\.h", header_file_path)
    return match.group(1) if match else ""
def pretty_print_rows(data, prepend=False):
    """Format row-wise data (a list of lists) into aligned text columns.

    Each field is padded with spaces so that columns line up; fields are
    left-justified by default.

    Arguments:
      data: row-wise data as a list of lists
      prepend: (optional) if True, right-justify instead (padding is
        added before each value).
    """
    # Column widths: widest string representation seen in each column.
    widths = []
    for row in data:
        for col, cell in enumerate(row):
            cell_width = len(str(cell))
            if col < len(widths):
                widths[col] = max(widths[col], cell_width)
            else:
                widths.append(cell_width)
    lines = []
    for row in data:
        fields = []
        for cell, col_width in zip((str(c) for c in row), widths):
            if prepend:
                fields.append(cell.rjust(col_width))
            else:
                fields.append(cell.ljust(col_width))
        lines.append(' '.join(fields))
    return '\n'.join(lines)
def _basis_key(t):
    """
    Return a key for the basis element of the Askey-Wilson algebra
    indexed by ``t``.
    EXAMPLES::
        sage: from sage.algebras.askey_wilson import _basis_key
        sage: I = algebras.AskeyWilson(QQ).indices()
        sage: _basis_key(I((0,2,3,1,2,5)))
        (13, (0, 2, 3, 1, 2, 5))
    """
    # Sort primarily by total degree (sum of exponents), then by the raw
    # exponent tuple for a deterministic tie-break.
    return (sum(t), t.value)
def int_to_hex_string(integer):
    """Convert an integer to an upper-case hex string zero-padded to at
    least 4 characters, e.g. 255 -> '00FF'.

    BUG FIX: the old ``hex(integer)[2:]`` slicing produced garbage for
    negative inputs (e.g. -255 -> 'XFF'); ``format`` is sign-aware and
    yields '-0FF' instead.
    """
    return format(integer, 'X').zfill(4)
def RetryOnIOError(exception):
    """Returns True if 'exception' is an IOError.

    Note: in Python 3, ``IOError`` is an alias of ``OSError``, so any
    OSError subclass matches too.
    """
    return isinstance(exception, IOError)
def _install_name(f):
    """Returns the install name for a dylib on macOS.

    NOTE(review): simply returns the file's ``short_path`` — presumably a
    Bazel File object; confirm callers expect a workspace-relative path.
    """
    return f.short_path
def are_broadcastable(*shapes):
    """
    Check whether an arbitrary list of array shapes are broadcastable.

    :Parameters:
    *shapes: tuple or list
        A set of array shapes.
    :Returns:
    broadcastable: bool
        True if all the shapes are broadcastable, False otherwise.
    """
    if len(shapes) < 2:
        # A single shape (or none) is trivially broadcastable.
        return True
    # Walk trailing dimensions together; each group is compatible when,
    # ignoring 1s, at most one distinct size remains.
    reversed_shapes = [shape[::-1] for shape in shapes]
    return all(len(set(dims) - {1}) <= 1 for dims in zip(*reversed_shapes))
def _append_trailing_slash(s): # pylint: disable=invalid-name
"""
Return ``s`` with a trailing ``"/"`` appended.
If ``s`` already ends with a trailing ``"/"`` it'll be returned unmodified.
"""
if not s.endswith("/"):
s = s + "/"
return s | 6b53b7310e99c141c07ac6bbf2ac7837cf986d67 | 120,032 |
from datetime import datetime
import json
def init_run(engine, runner='Havasi', start_ts=None, status='prepared',
             outcome='not_run', comment=None, plot_dict=None, profiler=None):
    """Initialize the `run` table with basic info.
    Parameters
    ----------
    engine : sqlalchemy engine with psycopg2 driver
        For managing connection to the DB.
    runner : str, optional
        Person's name/identifier who created(executed) the data(process).
    start_ts : datetime.datetime, optional
        Timezone-less datetime object.
        If omitted, .now() will be used.
    status : str, optional
        One of the following strings:
        | 'prepared' (default) | 'run' | 'error'
    outcome : str, optional
        One of the following strings:
        | 'not_run' (default) | 'optimum' | 'optimum_not_found' | 'error'
    comment : str, optional
        Any text based comment. (No length limit.)
    plot_dict : dict, optional
        Dictionary returned by the rivus.io.plot.fig3d function.
    profiler : pandas.Series, optional
        Series containing profiled process name and execution time pairs.
        Execution time is measured in *seconds*
    Returns
    -------
    int
        run_id of the initialized run row in the DB.
    """
    if start_ts is None:
        start_ts = datetime.now()
    if profiler is not None:
        # Persist the profiling Series as JSON text.
        profiler = profiler.to_json()
    if plot_dict is not None:
        plot = json.dumps(plot_dict)
    else:
        plot = None
    run_id = None
    # Use the raw DBAPI connection so the psycopg2 cursor and explicit
    # commit are available (needed for the RETURNING clause).
    connection = engine.raw_connection()
    try:
        with connection.cursor() as curs:
            curs.execute("""
                INSERT INTO run (runner, start_ts, status, outcome, comment,
                                 plot, profiler)
                VALUES (%s, %s, %s, %s, %s, %s, %s)
                RETURNING run_id;
                """, (runner, start_ts, status, outcome, comment, plot,
                      profiler))
            run_id = curs.fetchone()[0]
        connection.commit()
    finally:
        # Always release the raw connection, even on failure.
        connection.close()
    return run_id
def transform_count(n_examples):
    """Return the number of transformations to apply per example in a class.

    Smaller classes get more pitch and dynamic-range-compression (DRC)
    transformations so the dataset ends up balanced.

    NOTE(review): the boundaries are 999/4999/9999 rather than
    1000/5000/10000, so e.g. n_examples == 999 falls in the (2, 1)
    bucket — confirm this is intended.

    Args:
        n_examples (int): The number of examples in the class.
    Returns:
        tuple: ``(n_pitches, n_drc)``.
    """
    # (exclusive upper bound, (n_pitches, n_drc)), checked in order
    buckets = (
        (500, (8, 3)),
        (999, (5, 2)),
        (4999, (2, 1)),
        (9999, (2, 0)),
    )
    for upper, counts in buckets:
        if n_examples < upper:
            return counts
    return (0, 0)
def runge_kutta_variables(storage, noise, noise2, tau):
    """
    This is a function that accepts a storage and noise objects and returns the
    pre-requisite variables for a runge-kutta integration step in a dict
    that can be unraveled to correctly feed into runge_kutta_step.
    PARAMETERS
    ----------
    1. storage : HopsStorage object
                 an instantiation of HopsStorage associated with the trajectory
    2. noise : HopsNoise object
               an instantiation of HopsNoise associated with the trajectory
    3. noise2 : HopsNoise object
                an instantiation of HopsNoise associated with the trajectory
    4. tau : float
             the time step
    RETURNS
    -------
    1. variables : dict
                   a dictionary of variables needed for Runge Kutta
    """
    phi = storage.phi
    z_mem = storage.z_mem
    t = storage.t
    # Sample both noise sources at the three RK4 evaluation times:
    # the interval start, midpoint, and end.
    z_rnd = noise.get_noise([t, t + tau * 0.5, t + tau])
    z_rnd2 = noise2.get_noise([t, t + tau * 0.5, t + tau])
    return {"phi": phi, "z_mem": z_mem, "z_rnd": z_rnd, "z_rnd2": z_rnd2, "tau": tau}
def get_title(movie_path):
    """
    Return the movie title for a path: the second-to-last '/'-separated
    component, truncated at its first '.'.
    """
    parent_dir = movie_path.split('/')[-2]
    return parent_dir.split('.')[0]
def store_is_remote(store):
    """Determine whether a given store is remote.

    A store is considered remote when its locator starts with "http"
    (which also matches "https").
    """
    return store.startswith("http")
def rename(client, name, new_name, file_=None):
    """Rename a feature.
    Args:
        client (obj):
            creopyson Client.
        name (str|int):
            Feature name (str) or Feature ID (int).
        new_name (str):
            New name for the feature.
        `file_` (str, optional):
            File name.
            Defaults is the currently active model.
    Returns:
        None
    Raises:
        TypeError: if ``name`` is neither str nor int.
    """
    data = {"new_name": new_name}
    if file_ is not None:
        data["file"] = file_
    else:
        # No file given: fall back to the currently active model, if any.
        active_file = client.file_get_active()
        if active_file:
            data["file"] = active_file["file"]
    # The creoson API takes either a feature name or a numeric feature id,
    # under different keys.
    if isinstance(name, (str)):
        data["name"] = name
    elif isinstance(name, (int)):
        data["feat_id"] = name
    else:
        raise TypeError("name must be str or int")
    return client._creoson_post("feature", "rename", data)
def count_nonref_reads(record_sample):
    """Count reads supporting all non-reference alleles (sum of AD[1:]).

    Returns 0 when the AD field is a single scalar rather than a list.
    """
    depths = record_sample['AD']
    try:
        return sum(depths[1:])
    except TypeError:
        # AD was a single value, not a list: no non-reference reads.
        return 0
def parameter_of_point_on_line(a, b, point):
    """
    Get the parameter of a point on a line. For this function to give a
    correct result it is important that the provided point already lies on the
    line. The :func:`closest_point_on_line` can be used to get that point on the
    line.
    :param OpenMaya.MVector a:
    :param OpenMaya.MVector b:
    :param OpenMaya.MVector point:
    :return: Parameter of the point on the line
    :rtype: float

    NOTE(review): the parameter is a ratio of vector lengths, so it is
    always >= 0 even for points "behind" a, and a == b causes a
    division by zero — confirm callers guarantee distinct endpoints.
    """
    # get vectors
    ap = point - a
    ab = b - a
    # parameter = |a->point| / |a->b|; 0 at a, 1 at b
    parameter = ap.length() / ab.length()
    return parameter
import copy
def get_population_health_state(state):
    """Return a deep copy of the list mapping person index to health state.

    A copy is returned so callers cannot mutate the simulation state.
    """
    return copy.deepcopy(state.health_states)
def set_diag(x, new_diag):
    """Set the diagonal along the last two axes, in place.
    Parameters
    ----------
    x : array-like, shape=[..., dim, dim]
        Array whose trailing square matrices get their diagonal replaced.
    new_diag : array-like, shape=[dim]
        Values to set on the diagonal.
    Returns
    -------
    x : array-like
        The same array, modified in place (returned for convenience).
    Notes
    -----
    This mimics tensorflow.linalg.set_diag(x, new_diag), when new_diag is a
    1-D array, but modifies x instead of creating a copy.
    """
    arr_shape = x.shape
    # Fancy-index the (i, i) positions of the last two axes in one shot.
    x[..., range(arr_shape[-2]), range(arr_shape[-1])] = new_diag
    return x
import logging
import lzma
import pickle
def import_go_tree(import_location):
    """
    Load the processed GO tree written by create_go_tree.py.

    :type import_location: str
    :param import_location: path of the LZMA-compressed, pickled object.
    :rtype: dict[str, GoTerm]
    :return: the deserialised GO-term dictionary.
    """
    logging.info("Decompressing and importing GO dictionary from %s ...", import_location)
    with lzma.open(import_location, "rb") as compressed:
        return pickle.load(compressed)
def abstract(cls):
    """
    Class decorator that makes ``cls`` abstract: its __init__ is replaced
    with one raising NotImplementedError. Subclasses that don't override
    __init__ remain effectively abstract as well.
    """
    def _abstract_init(self):
        # type(self) so the error names the concrete (sub)class being built
        raise NotImplementedError(
            "Can't instantiate abstract class '{}'.".format(
                type(self).__name__
            )
        )
    cls.__init__ = _abstract_init
    return cls
def TestMaxValue(df, col, val):
    """
    Validate that the maximum of dataframe column ``col`` equals ``val``.

    Prints a diagnostic either way and returns True/False.
    """
    print(("Validating max of %s == %.2f" % (col, val)))
    observed_max = df[col].sort_values(ascending=False).iloc[0]
    if observed_max != val:
        print(("Maximum value %.2f != %.2f" % (observed_max, val)))
        return False
    return True
def calculate_evaluation_metrics(confusion_matrix):
    """
    Calculates the evaluation metrics of the model.
    :param confusion_matrix: dict with 'TP', 'FP', 'FN' counts.
    :return: dictionary with 'precision', 'recall' and 'f1_score'.

    NOTE(review): missing keys default to 1 (not 0) via ``.get(..., 1)``.
    That conveniently avoids division by zero but skews every metric when
    a count is absent — confirm this is intended.
    """
    metrics = dict()
    metrics['precision'] = confusion_matrix.get('TP', 1) / (
        confusion_matrix.get('TP', 1) + confusion_matrix.get('FP', 1))
    metrics['recall'] = confusion_matrix.get('TP', 1) / (
        confusion_matrix.get('TP', 1) + confusion_matrix.get('FN', 1))
    # harmonic mean of precision and recall
    metrics['f1_score'] = 2 * metrics['precision'] * metrics['recall'] / (metrics['precision'] + metrics['recall'])
    return metrics
import copy
def removeDebuff(state, remDebuff):
    """Return a deep copy of ``state`` with the named debuff(s) removed.

    ``remDebuff`` is a container of debuff names. The input state is not
    mutated; enemy debuffs and the pending timeline actions that would
    have expired them are both filtered out.
    """
    newState = copy.deepcopy(state)
    # Removes the debuff from the list of debuffs
    newState['enemy']['debuff'] = [ d for d in newState['enemy']['debuff'] if d['name'] not in remDebuff ]
    # Removes the potential remaining actions to make the debuff fall down
    # (an action matches if its payload equals a removeDebuff marker for
    # one of the removed names)
    newState['timeline']['nextActions'] = [ na for na in newState['timeline']['nextActions'] if na[1] not in [ { 'type': 'removeDebuff', 'name': dn } for dn in remDebuff ] ]
    return newState
def int_to_bytes(n, minlen=0): # helper function
""" int/long to bytes (little-endian byte order).
Note: built-in int.to_bytes() method could be used in Python 3.
"""
nbits = n.bit_length() + (1 if n < 0 else 0) # plus one for any sign bit
nbytes = (nbits+7) // 8 # number of whole bytes
ba = bytearray()
for _ in range(nbytes):
ba.append(n & 0xff)
n >>= 8
if minlen > 0 and len(ba) < minlen: # zero pad?
ba.extend([0] * (minlen-len(ba)))
return ba | dab22da35530448167544e1954c0ebf7d25ceba5 | 120,095 |
def is_strictly_forces_https(domain):
    """
    A domain "Strictly Forces HTTPS" when at least one HTTPS endpoint is
    live AND both HTTP endpoints are either down or redirect immediately
    to an HTTPS URI.

    This differs from "Defaults to HTTPS":
    * an HTTP redirect may land on HTTPS at another domain, as long as
      it is immediate;
    * a domain with an invalid cert can still be enforcing HTTPS.
    """
    def http_unused(endpoint):
        # An HTTP endpoint is "unused" if it's down or bounces straight
        # to HTTPS.
        return (not endpoint.live) or endpoint.redirect_immediately_to_https

    https_live = domain.https.live or domain.httpswww.live
    return https_live and http_unused(domain.http) and http_unused(domain.httpwww)
def get_hk_variable_names(my_df):
    """
    Return the names of all variables in a housekeeping file.
    Parameters
    ----------
    my_df: ACT Dataset
        The dataset to get the variable names from.
    Returns
    -------
    var_names: list
        The names of each variable in the file.
    """
    # list() over the keys view replaces the redundant identity
    # comprehension `[s for s in keys()]`.
    return list(my_df.variables.keys())
import torch
def diff_single(y, x, device):
    """Computes the derivative of a single y w.r.t. x.

    NOTE(review): grad_outputs of ones sums the gradient over y's
    components; create_graph/retain_graph keep the graph alive so
    higher-order derivatives can be taken afterwards.
    """
    yp = torch.autograd.grad(
        y,
        x,
        grad_outputs=torch.ones_like(y, device=device),
        create_graph=True,
        retain_graph=True,
    )[0]
    # swap the first two dimensions of the gradient before returning
    return yp.transpose(0, 1)
def perform_biopsy_site_type_filtering(maf_df, cli_df, filter_type):
    """Keep only samples in clinical_df in which the sample's Biopsy_Site_Type matches
    the specified filter.
    Notes
    -----
    - If filter_type is "PRIMARY_ONLY", then keep only samples whose
      Biopsy_Site_Type is "Primary"; both the MAF and clinical frames are
      filtered to that sample set.
    - Any other filter_type raises.
    """
    if filter_type == "PRIMARY_ONLY":
        # sample barcodes whose clinical record says the biopsy is Primary
        keep_samples = set(cli_df[cli_df['Biopsy_Site_Type'] == "Primary"]['Tumor_Sample_Barcode'].unique())
        cli_df = cli_df[cli_df['Tumor_Sample_Barcode'].isin(keep_samples)]
        maf_df = maf_df[maf_df['Tumor_Sample_Barcode'].isin(keep_samples)]
    else:
        raise Exception("Invalid filter_type: %s" % filter_type)
    return maf_df, cli_df
def explode(df):
    """
    Take a DataFrame and return the triple
    (df.index, df.columns, df.values).
    """
    return df.index, df.columns, df.values
def abbrv_num(num):
    """Abbreviate a large number with single-letter suffix notation.

    Divides by 1000 until below that threshold, then formats with the
    matching suffix and two decimals (e.g. 1500 -> '1.50K'). Numbers
    below 1000 are returned via plain str().

    Arguments:
        num (int): (large) number to format
    Return:
        Abbreviated, rounded string representation
    """
    suffixes = ['', 'K', 'M', 'B', 'T', 'Q']
    magnitude = 0
    while abs(num) >= 1000:
        num /= 1000.0
        magnitude += 1
    if magnitude == 0:
        return '%s' % (num)
    return '%.2f%s' % (num, suffixes[magnitude])
import collections
def _unique_sort(iterable):
"""Returns unique elements in the iterable, in order of appearance"""
d = collections.OrderedDict()
for i in iterable:
d[i] = None
return list(d.keys()) | d5594083cca02916abb67648f7cc0c69bca1c3ea | 120,115 |
def frequency_encoder(df, categorical_name, drop=False, new_col_name=None):
    """
    Frequency-encode a categorical column into a new column.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataset (the new column is added to it in place).
    categorical_name : string
        Name of the column to encode.
    drop : boolean
        Whether to drop the original column after encoding.
    new_col_name : string
        Name of the new column; defaults to 'freq_<categorical_name>'.
    Returns
    -------
    pandas.DataFrame
        Dataframe with the frequency-encoded column.
    """
    if new_col_name is None:
        new_col_name = 'freq_' + categorical_name
    # Relative frequency of each category value.
    freq = df[categorical_name].value_counts() / df.shape[0]
    # PERF FIX: one vectorized map replaces the original per-category
    # `.loc` scan, which was O(n_rows * n_categories). NaN categories
    # stay NaN in the new column, matching the old behavior.
    df[new_col_name] = df[categorical_name].map(freq)
    if drop:
        df = df.drop(categorical_name, axis=1)
    return df
def _check_need_broadcast(shape1, shape2):
"""Returns True if broadcast is necessary for batchmatmul."""
return shape1[:-2] != shape2[:-2] | 23ec8d359c0285d7bb523ccf523c2950be9acf71 | 120,128 |
def countOf(a, b):
    """Return the number of times b occurs in a (compared with ==)."""
    return sum(1 for element in a if element == b)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """
    Return a triplet from ``arr`` whose sum equals ``target``, in sorted
    order, or (0, 0, 0) when none exists. Sorts ``arr`` in place, then
    runs a two-pointer scan for each anchor element.
    >>> triplet_sum2([13, 29, 7, 23, 5], 35)
    (5, 7, 23)
    >>> triplet_sum2([6, 47, 27, 1, 15], 11)
    (0, 0, 0)
    """
    arr.sort()
    for anchor in range(len(arr) - 1):
        lo, hi = anchor + 1, len(arr) - 1
        while lo < hi:
            total = arr[anchor] + arr[lo] + arr[hi]
            if total == target:
                return (arr[anchor], arr[lo], arr[hi])
            if total < target:
                lo += 1
            else:
                hi -= 1
    return (0, 0, 0)
def recall_pos(y, y_pred):
    """Recall of the positive (+1) class: true positives / all positives."""
    positives = (y == 1)
    true_positives = (y_pred[positives] == y[positives]).sum()
    return true_positives / float(positives.sum())
def binary_search(list: list, target: str):
    """Binary search.

    Args:
        list (list): sorted list (NB: parameter name shadows the builtin)
        target (str): search target
    Returns:
        Index of ``target`` in ``list``, or None when absent.
    """
    lo, hi = 0, len(list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = list[mid]
        if candidate == target:
            return mid
        if candidate > target:
            hi = mid - 1
        else:
            lo = mid + 1
    return None
def read_n5(ds, skel_id):
    """ Read skeleton stored in custom n5-based format
    The skeleton data is stored via varlen chunks: each chunk contains
    the data for one skeleton and stores:
    (n_skel_points, coord_z_0, coord_y_0, coord_x_0, ..., coord_z_n, coord_y_n, coord_x_n,
     n_edges, edge_0_u, edge_0_v, ..., edge_n_u, edge_n_v)
    Arguments:
        ds [z5py.Dataset]: input dataset
        skel_id [int]: id of the object corresponding to the skeleton
    Returns:
        (nodes, edges) as (n_points, 3) and (n_edges, 2) arrays,
        or (None, None) when the chunk is empty.
    """
    # read data from chunk
    data = ds.read_chunk((skel_id,))
    # check if the chunk is empty
    if data is None:
        return None, None
    # read number of points and coordinates (z, y, x triples)
    n_points = data[0]
    offset = 1
    coord_len = int(3 * n_points)
    nodes = data[offset:offset+coord_len].reshape((n_points, 3))
    offset += coord_len
    # read number of edges and the (u, v) node-index pairs
    n_edges = data[offset]
    offset += 1
    edge_len = int(2 * n_edges)
    # sanity check: the chunk must be fully consumed
    assert len(data) == offset + edge_len, "%i, %i" % (len(data), offset + edge_len)
    edges = data[offset:offset+edge_len].reshape((n_edges, 2))
    return nodes, edges
def sequence_name(table_name):
    """
    Generate the standard primary-key sequence name for ``table_name``.
    :param str table_name: table name
    :return: sequence name
    :rtype: str
    """
    return f'{table_name}_id_seq'
def name_to_slack_color(color_name):
    """Map a hipchat color name to the corresponding slack color.

    Args:
        color_name (str): name of color (case-insensitive)
    Returns:
        str: slack color name, or '' for unknown colors
    """
    mapping = {
        'green': 'good',
        'yellow': 'warning',
        'red': 'danger',
    }
    return mapping.get(color_name.lower(), '')
def _AppendIf(container, condition, value):
    """Appends to a list if a condition evaluates to truth.

    Returns the (untouched) condition value so the call can be chained
    in boolean expressions.
    """
    if condition:
        container.append(value)
    return condition
def get_reason(r):
    """Get reason, why an http request failed.
    r is response as returned by requests library.

    Prefers the JSON body's error.message; falls back to the first few
    lines of the raw text, then to the HTTP reason phrase.
    """
    try:
        return r.json().get("error").get("message")
    except (ValueError, TypeError):
        # ValueError: body is not JSON; TypeError: no "error" object.
        # Fall back to a short excerpt of the raw response text.
        maxlen = 200
        maxlines = 3
        lines = r.text[:maxlen].splitlines()[:maxlines]
        reason = "\n".join(lines).strip()
        if len(reason):
            return reason
        else:
            return r.reason
def conll2dict(conll, iter_id=None, agent=None, mode='train', doc=None, epoch_done=False):
    """
    Read a conll file and package its contents with metadata.
    Args:
        conll: path to the conll file
        iter_id: number of operations
        agent: agent name
        mode: train/valid mode
        doc: document name
        epoch_done: flag
    Returns: dict { 'iter_id': iter_id,
                    'id': agent,
                    'epoch_done': epoch_done,
                    'mode': mode,
                    'doc_name': doc,
                    'conll_str': s }
    """
    # FIX: the original called f.close() inside the `with` block
    # (redundant) and built the dict in two steps.
    with open(conll, 'r', encoding='utf8') as f:
        conll_str = f.read()
    return {'iter_id': iter_id,
            'id': agent,
            'epoch_done': epoch_done,
            'mode': mode,
            'doc_name': doc,
            'conll_str': conll_str}
def _jsonify_action(name, description_dict):
""" Remove all the extra cruft and dispatch fields,
and create one dict describing the named action / operator. """
short_description = {"internal_name": name}
for data in ["arguments", "cost", "user_name", "description", "short_description"]:
if data in description_dict:
short_description[data] = description_dict[data]
return short_description | 0877ed8b81c85068b40ac79dd69736ce0374c80a | 120,155 |
def estimate_transmission_latency(
        input_size: float, transmission_speed: float
) -> float:
    """Estimate transmission latency for a given data size and transmission speed.

    NOTE(review): raises ZeroDivisionError when transmission_speed is 0 —
    confirm callers guarantee a positive speed. Units are whatever the
    caller uses, as long as size/speed are consistent.
    """
    return input_size / transmission_speed
import requests
def get_prep(file_name):
    """Gets a JSON file from the PREP, saves it and extracts required data to a dict.
    Parameters
    ----------
    file_name : str
        The name of the file used for the log.
    Returns
    -------
    dict
        A dictionary containing all the data we will require for the plot and the table.
    """
    # Spoof a desktop-browser User-Agent; the endpoint may reject bare clients.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0"}
    url = "https://rp18.ine.mx/assets/JSON/PRESIDENTE/NACIONAL/Presidente_NACIONAL.json"
    with requests.get(url, headers=headers) as response:
        # Save a copy for future analysis.
        with open("./raw_data/{}.json".format(file_name), "w", encoding="utf-8") as temp_file:
            temp_file.write(response.text)
        candidates = list()
        percentages = list()
        votes = list()
        colors = list()
        json_data = response.json()
        for item in json_data["votosCandidatoPartidoCoalicion"]:
            name = item["nombreCandidatoPropietario"]
            # Special party codes get readable display names:
            # "CNR" = unregistered candidates, "VN" = null votes.
            if item["siglasPartido"] == "CNR":
                name = "Candidatos No Registrados"
            if item["siglasPartido"] == "VN":
                name = "Nulos"
            color = "#{}".format(item["colorPartido"])
            colors.append(color)
            candidates.append(name)
            percentages.append(float(item["porcentaje"]))
            votes.append(int(item["total"]))
        return dict({"candidates": candidates, "percentages": percentages,
                     "votes": votes, "colors": colors, "totalVotos": json_data["totalVotos"],
                     "actasCapturadas": json_data["actasCapturadas"]["porcentaje"]})
def convert_longitude(lon):
    """Convert ``lon`` from [-180, 180] form to [0, 360] form."""
    return 360.0 + lon if lon < 0 else lon
def parse_user_id(user_id):
    """
    Normalize a user_id: strip possible ``<@...>`` wrapping and upper-case it.
    """
    if user_id.startswith("<@") and user_id.endswith(">"):
        user_id = user_id[2:-1]
    return user_id.upper()
def cpu_medium_roll_points(total, new_points):
    """Add the round score to the total game score (the computer banks).

    Prints a message announcing the computer stopped its round.
    """
    print("computer chose to stop round!")
    return total + new_points
def one(*args, **kwargs):
    """ Constant function: returns 1 regardless of any arguments. """
    return 1
def expand_bbox_lbub(bbox_dims, length: int = 256):
    """
    Symmetrically widen one axis of a bounding box to span ``length``.

    Args:
        bbox_dims (list): [lower bound, upper bound]
        length (int): target span for the bounds
    Returns:
        The original list when it already spans >= length, else a new
        [lower, upper] list. When the required growth is odd, the extra
        pixel lands on the lower bound (matching the integer arithmetic
        of the original implementation).
    """
    span = bbox_dims[1] - bbox_dims[0]
    if span >= length:
        return bbox_dims
    grow = length - span
    half = grow // 2
    if grow % 2 == 0:
        return [bbox_dims[0] - half, bbox_dims[1] + half]
    return [bbox_dims[0] - half - 1, bbox_dims[1] + half]
def centerM(coor, maxM):
    """
    Center vector coor in M axis.
    :param coor: coordinate of vector from S=0 to M center
    :param maxM: value representing end of mobile axis
    :return: M centered coordinate (int() truncates toward zero)
    """
    return int(coor - maxM / 2.0)
def statistical_filter(pcl_cloud, meank=50, mul_thresh=1.0):
    """
    Remove statistical outliers from a PCL point cloud.

    meank: number of neighbours (k) used for the mean-distance estimate.
    mul_thresh: standard-deviation multiplier threshold; points whose
    distance exceeds mul_thresh standard deviations of the mean distance
    are treated as outliers.
    """
    outlier_filter = pcl_cloud.make_statistical_outlier_filter()
    outlier_filter.set_mean_k(meank)
    outlier_filter.set_std_dev_mul_thresh(mul_thresh)
    # negative=False keeps the inliers rather than returning the outliers
    outlier_filter.set_negative(False)
    return outlier_filter.filter()
def format_output(decryption, verbose):
    """Render a decryption result; verbose mode appends key and score."""
    plaintext = ''.join(decryption.plaintext)
    if not verbose:
        return plaintext
    return "{0}\nkey: {1}\nscore: {2}".format(plaintext, decryption.key, decryption.score)
import time
def timeit(f):
    """Decorator that prints how long each call to ``f`` takes."""
    def timed(*args, **kw):
        started = time.time()
        result = f(*args, **kw)
        elapsed = time.time() - started
        # %r on args/kw so the printed call is unambiguous
        print('func:%r args:[%r, %r] took: %2.4f sec' %
              (f.__name__, args, kw, elapsed))
        return result
    return timed
def escape_invis_chars(content):
    """Escape invisible/control characters.

    Encodes *content* to ASCII with the ``escape-invis`` error handler and
    decodes the bytes back to ``str``.

    NOTE(review): "escape-invis" is not a built-in codecs error handler;
    it must be registered elsewhere (``codecs.register_error``) before this
    is called, otherwise ``LookupError`` is raised — confirm.
    """
    return content.encode("ascii", "escape-invis").decode("utf-8")
def _merge(left, right):
"""
Merges two sorted list, left and right, into a combined sorted result
:param left: A sorted list
:param right: A sorted list
:return: One combined sorted list
"""
# the sorted left + right will be stored in result
result = []
leftIndex, rightIndex = 0, 0
# loop through until either the left or right list is exhausted
while leftIndex < len(left) and rightIndex < len(right):
# Edited by Pavan and Vinayak
if left[leftIndex].sum >= right[rightIndex].sum:
result.append(left[leftIndex])
leftIndex += 1
else:
result.append(right[rightIndex])
rightIndex += 1
# take the un-exhausted list and extend the remainder onto the result
if leftIndex < len(left):
result.extend(left[leftIndex:])
elif rightIndex < len(right):
result.extend(right[rightIndex:])
return result | d41464ef3e1700f3160ef7e0ebfd9fcbf617b1d2 | 120,212 |
def priming(temp, beer_vol, co2):
    """
    Weight of priming (table) sugar needed to carbonate a batch of beer.

    Beer temperature should be the temperature the beer has been at the
    longest. Formula from
    http://www.straighttothepint.com/priming-sugar-calculator/:
    PS = 15.195 * Vbeer * (VCO2 - 3.0378 + 0.050062*T - 0.00026555*T^2)

    :arg temp: temperature of the beer in Fahrenheit
    :arg beer_vol: volume of beer to prime, US gallons
    :arg co2: desired volumes of CO2
    :returns: the amount of table sugar required
    """
    co2_deficit = co2 - 3.0378 + (0.050062 * temp) - (0.00026555 * (temp ** 2))
    return 15.195 * beer_vol * co2_deficit
import imghdr
def check_image(file_path, content = None):
    """
    Tests the image data contained in the file named by filename, and returns a string describing the image type.

    :param file_path: filepath
    :param content: optional bytes already read from the file; when given,
        these bytes are inspected instead of re-reading ``file_path``
    :return: image type str or None
    """
    # NOTE(review): ``imghdr`` is deprecated since Python 3.11 and removed
    # in 3.13 (PEP 594) — confirm the project's minimum Python version.
    return imghdr.what(file_path, content)
def urlmaker_sec(queryDic):
    """
    Build the URL for a full-text search on SEC.gov (EDGAR).

    Parameters
    ----------
    queryDic : dict
        searchText (str): Company name to be searched (Default: '*')
        formType (str): Type of the document to be retrieved (Default: '1')
        sic (str): SIC code for the companies to be searched (Default: '*')
        cik (str): CIK code for the company; leading zeros are stripped (Default: '*')
        startDate (str): Start date of the produced results (YYYYMMDD) (Default: '*')
        endDate (str): End date of the produced results (YYYYMMDD) (Default: '*')
        sortOrder (str): 'Date' (ascending) or 'ReverseDate' (descending) (Default: 'Date')

    Returns
    -------
    str
        URL to be searched on the SEC website
    """
    search_text = queryDic.get('searchText', '*')
    form_type = queryDic.get('formType', '1')
    sic = queryDic.get('sic', '*')
    cik = queryDic['cik'].lstrip('0') if 'cik' in queryDic else '*'
    start_date = queryDic.get('startDate', '*')
    end_date = queryDic.get('endDate', '*')
    sort_order = queryDic.get('sortOrder', 'Date')
    return (
        "https://searchwww.sec.gov/EDGARFSClient/jsp/EDGAR_MainAccess.jsp"
        "?search_text={}&sort={}&formType=Form{}&isAdv=true&stemming=true"
        "&numResults=100&fromDate={}&toDate={}&queryCik={}&querySic={}"
        "&numResults=100"
    ).format(search_text, sort_order, form_type, start_date, end_date, cik, sic)
def make_datetime_naive(any_datetime):
    """
    Strip timezone info from a datetime, returning a naive copy.

    Needed because Frappe is not storing timezone-aware datetimes in MySQL.
    """
    return any_datetime.replace(tzinfo=None)
import hashlib
def generate_content_hash(content):
    """Return the hex sha1 digest of a text string (UTF-8 encoded)."""
    return hashlib.sha1(content.encode('utf-8')).hexdigest()
import re
import tempfile
import shutil
def replace_inplace(file_name, search, replace):
    """Regex search/replace a file in place, line by line.

    Substituted content is written to a temporary file first and then
    moved over the original.

    :param file_name: path of the file to rewrite
    :param search: regex pattern, applied to each line separately
    :param replace: replacement text (may use backreferences)
    :return: total number of substitutions performed
    """
    pattern = re.compile(search)
    total_subs = 0
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as out_file:
        temp_path = out_file.name
        with open(file_name, "r") as in_file:
            for line in in_file:
                new_line, n_subs = pattern.subn(replace, line)
                total_subs += n_subs
                out_file.write(new_line)
    shutil.move(temp_path, file_name)
    return total_subs
from typing import Any
def nestedAttributeFromString(root: Any, loc: str) -> Any:
    """Resolve a dotted attribute path against *root*. Example::
    >>> nestedAttributeFromString(parent_object, 'foo.bar.spam.bacon')
    returns the object found at parent_object.foo.bar.spam.bacon.
    """
    target = root
    for attr_name in loc.split('.'):
        target = getattr(target, attr_name)
    return target
import io
import json
def collect_data(*paths):
    """
    Collect the set of entity-name relations occurring in given samples.

    :param paths: Paths of json files containing samples.
    :return: Set of (name, name) relation tuples, stored in both orders.
    """
    # Load every sample from every input file into one flat list.
    samples = []
    for path in paths:
        with io.open(path, 'r', encoding='utf-8') as f:
            samples += json.load(f)
    # For each interaction, pair every alias of one participant with
    # every alias of the other, in both directions.
    relations = set()
    for sample in samples:
        entities = sample['entities']
        for interaction in sample['interactions']:
            i, j = interaction['participants']
            for name_a in entities[i]['names']:
                for name_b in entities[j]['names']:
                    relations.update({(name_a, name_b), (name_b, name_a)})
    return relations
from datetime import datetime
import pytz
def datetime_to_utc(timestamp: str,
                    timezone: str,
                    timestamp_format: str = '%Y-%m-%d %H:%M:%S') -> str:
    """Convert a timestamp in the given named timezone to a UTC time string.

    The trailing UTC offset ('+00:00') is stripped from the result.
    """
    naive = datetime.strptime(timestamp, timestamp_format)
    aware = pytz.timezone(timezone).localize(naive)
    return str(aware.astimezone(pytz.utc))[:-6]
import torch
def condat_vu_iteration(iterate_p, iterate_d, y, operator,
                        prior, sigma, tau, lambd, inpainting=False):
    """
    Perform one Condat-Vu primal-dual iteration.

    Parameters
    ----------
    iterate_p : torch.Tensor
        Current primal iterate
    iterate_d : torch.Tensor
        Current dual iterate
    y : torch.Tensor
        Data
    operator : torch.Tensor
        Measurement matrix (an element-wise mask when ``inpainting`` is True)
    prior : torch.Tensor
        Prior (analysis) operator
    sigma : torch.Tensor, shape (1)
        Dual step size
    tau : torch.Tensor, shape (1)
        Primal step size
    lambd : float
        Regularization parameter
    inpainting : bool
        Use element-wise masking instead of a matrix product for the data term.

    Returns
    -------
    out_p : torch.Tensor
        Next primal iterate
    out_d : torch.Tensor
        Next dual iterate
    """
    # Primal step: gradient descent on data term plus the dual coupling.
    if inpainting:
        data_grad = operator * (iterate_p - y)
    else:
        data_grad = torch.matmul(
            operator.t(), torch.matmul(operator, iterate_p) - y
        )
    coupling_grad = torch.matmul(prior.t(), iterate_d)
    out_p = iterate_p - tau * (data_grad + coupling_grad)
    # Dual step: gradient ascent using the over-relaxed primal
    # (2*out_p - iterate_p), then rescale so |out_d| never exceeds lambd.
    dual_grad = torch.matmul(prior, 2 * out_p - iterate_p)
    stepped = iterate_d + sigma * dual_grad
    out_d = stepped / torch.max(torch.ones_like(stepped),
                                torch.abs(stepped) / lambd)
    return out_p, out_d
def main_stream_time_lists(main_dataframe):
    """Return the 'created' column of the main dataframe as a list of
    int unix timestamps."""
    return [int(ts) for ts in list(main_dataframe.created)]
def length_last_word(s):
    """
    Given a string s consisting of upper/lower-case alphabets and space
    characters ' ', return the length of the last word in the string.
    """
    trimmed = s.strip()
    # everything after the last space (whole string when there is none);
    # rfind returns -1 in the no-space case, which yields len(trimmed)
    return len(trimmed) - trimmed.rfind(' ') - 1
def count_deck(deck: dict) -> int:
    """Total the 'count' field across every card entry in the deck."""
    return sum(entry['count'] for entry in deck.values())
def block_name(block_id):
    """Return the scope name for the network block `block_id`."""
    return f'progressive_gan_block_{block_id}'
def get_row(A, i):
    """Return row i of matrix A."""
    return A[i]
def create_response(status: str, message: str) -> dict:
    """
    Build a standard response object.

    Args:
        status (str): Status of the API/Method called
        message (str): Human-friendly message for the response

    Returns:
        dict: Response object with 'status' and 'message' keys
    """
    return {'status': status, 'message': message}
def CType(obj, interface):
    """Cast a COM object to `interface` via QueryInterface.

    :param obj: an object exposing a comtypes-style ``QueryInterface``
    :param interface: the comtypes interface to cast to
    :return: the comtypes POINTER for `interface`, or None if the cast
        fails for any reason
    """
    try:
        return obj.QueryInterface(interface)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any QueryInterface failure yields None.
        return None
def replaceAll(text, replace_dict):
    """
    Apply every key -> value substitution in ``replace_dict`` to ``text``,
    in dict iteration order (later replacements see earlier results).
    """
    for old, new in replace_dict.items():
        text = text.replace(old, new)
    return text
def _get_stripped_keywords(keywords: str) -> str:
"""Returns a valid database search keywords string.
Args:
keywords: The search keywords.
Returns:
A valid database search keywords string.
"""
keywords = " ".join(keywords.split())
keywords = keywords.replace(" ", ":* & ") + ":*"
return keywords | c4cf629d1f95c6995bcc8d5f9ca00cf9ac36e55d | 120,258 |
def replace_none_with_empty_iter(iterator):
    """Return `iterator` unchanged, or an empty list when it is None.

    Makes it cleaner to iterate over results from functions that return
    either an iterator or None.

    Parameters
    ----------
    iterator: None or some object

    Returns
    -------
    The original `iterator`, or an empty list when it was None.
    """
    return [] if iterator is None else iterator
from typing import MutableSequence
from typing import Any
import random
def mut_polynomial_bounded(individual: MutableSequence[Any], eta: float, low: float, up: float, mut_pb: float) -> MutableSequence[Any]:
    """Apply the polynomial bounded mutation of Deb et al. (NSGA-II) in place.

    Mutations are applied directly on `individual`, which is then returned.
    Inspired by the DEAP library
    (https://github.com/DEAP/deap/blob/master/deap/tools/mutation.py).

    Parameters
    ----------
    :param individual
        The individual to mutate.
    :param eta: float
        Crowding degree of the mutation: a high eta yields mutants close
        to the parent, a small eta yields larger perturbations.
    :param low: float
        Lower bound of the search domain.
    :param up: float
        Upper bound of the search domain.
    :param mut_pb: float
        Probability for each item of `individual` to be mutated.
    """
    for idx in range(len(individual)):
        # each gene mutates independently with probability mut_pb
        if random.random() >= mut_pb:
            continue
        gene = individual[idx]
        delta_lo = (gene - low) / (up - low)
        delta_hi = (up - gene) / (up - low)
        u = random.random()
        inv_power = 1. / (eta + 1.)
        if u < 0.5:
            base = 1. - delta_lo
            val = 2. * u + (1. - 2. * u) * base**(eta + 1.)
            delta_q = val**inv_power - 1.
        else:
            base = 1. - delta_hi
            val = 2. * (1. - u) + 2. * (u - 0.5) * base**(eta + 1.)
            delta_q = 1. - val**inv_power
        gene += delta_q * (up - low)
        # clamp the mutated gene back into [low, up]
        individual[idx] = min(max(gene, low), up)
    return individual
def drop_columns(df, lst):
    """
    Drop the given columns from a Pandas DataFrame, in place.

    :param df: DataFrame that will be altered
    :param lst: a list of strings, denoting columns in the DataFrame
    :return: the same DataFrame, with the specified columns removed
    """
    df.drop(columns=lst, inplace=True)
    return df
from typing import List
import statistics
def median_absolute_deviation(data: List):
    """
    Compute the sample `median absolute deviation (MAD)
    <https://en.wikipedia.org/wiki/Median_absolute_deviation/>`_, defined
    as median(abs(data - median(data))).

    Parameters
    ----------
    data: List
        List of data points.

    Returns
    -------
    mad: float
        The median absolute deviation of the data.
    """
    center = statistics.median(data)
    return statistics.median(abs(point - center) for point in data)
def multiply_speed(clip, factor=None, final_duration=None):
    """Return a clip playing the current clip at a speed multiplied by ``factor``.

    Instead of ``factor`` one can pass the desired ``final_duration`` and the
    factor is computed from it. The same effect is applied to the clip's
    audio and mask, if any.
    """
    if final_duration:
        factor = 1.0 * clip.duration / final_duration
    sped_up = clip.time_transform(lambda t: factor * t, apply_to=["mask", "audio"])
    if clip.duration is None:
        return sped_up
    return sped_up.with_duration(1.0 * clip.duration / factor)
import socket
def create_send_sock() -> socket.SocketType:
    """
    Create a raw AF_INET sending socket; IP_HDRINCL is enabled so the
    caller must supply the IP header itself.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
    return sock
def splitpop(string, delimeter):
    """
    Split off the last delimiter-separated field of a string, returning
    the string without that field, and the field itself. When the
    delimiter does not occur, the head is the empty string.

    >>> splitpop('hello.world.test', '.')
    ('hello.world', 'test')
    """
    head, _, tail = string.rpartition(delimeter)
    return head, tail
def colour(percent):
    """Return the ANSI colour code for the given percentage."""
    # (threshold, code) pairs checked in descending order: red, yellow
    for limit, code in ((80, 31), (60, 33)):
        if percent > limit:
            return code
    return 32  # green
def to_boto3_tags(tagdict):
    """
    Convert a tag dict to the [{'Key': ..., 'Value': ...}] list format
    boto3 functions expect, skipping keys that contain 'aws:'.
    """
    tags = []
    for key, value in tagdict.items():
        if 'aws:' in key:
            continue
        tags.append({'Key': key, 'Value': value})
    return tags
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.