| content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
|---|---|---|
def get_table_item(table, pk_value, sk_value):
"""
    Return the item read by its composite primary key (pk + sk),
    or an empty list if no matching item exists.
"""
    response = table.get_item(Key={"pk": pk_value, "sk": sk_value})
    return response.get("Item", [])
|
e06656011041fba8bdf95e9f11ab285fae7429a3
| 514,809
|
def _query_for_log(query: bytes) -> str:
"""
    Takes a query as returned by psycopg2 and converts it into a nicely
    loggable format: a single string with newlines and extra spaces collapsed
:param query: Query ran by psycopg2
:return: Cleaned up string representing the query
"""
return ' '.join(query.decode().replace('\n', ' ').split())
|
047480bb6ad41621fb7f92ffa6fedfb03cfa4f6b
| 32,962
|
def centeroidOfTriangle(pa, pb, pc):
"""
    Given 3 points that are the corners of a triangle, this code
    calculates and returns the center of the triangle.
    The exact type of center computed here is called the "centroid".
    It is the intersection point of the medians, the lines connecting
    each corner to the midpoint of its opposite edge.
    Two of these medians are enough to get the intersection point.
https://en.wikipedia.org/wiki/Centroid
https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Given_the_equations_of_the_lines
"""
    # get the midpoints of the opposing edges (am is the midpoint of edge pb-pc)
am = [(pb[0] + pc[0]) / 2, (pb[1] + pc[1]) / 2]
bm = [(pa[0] + pc[0]) / 2, (pa[1] + pc[1]) / 2]
denominator = (pa[0] - am[0]) * (pb[1] - bm[1]) - \
(pa[1] - am[1]) * (pb[0] - bm[0])
if denominator != 0:
x = ((pa[0] * am[1] - pa[1] * am[0]) * (pb[0] - bm[0]) -
(pa[0] - am[0]) * (pb[0] * bm[1] - pb[1] * bm[0])) / denominator
y = ((pa[0] * am[1] - pa[1] * am[0]) * (pb[1] - bm[1]) -
(pa[1] - am[1]) * (pb[0] * bm[1] - pb[1] * bm[0])) / denominator
else:
print(f"cant find center for {pa}, {pb}, {pc}")
x = pa[0]
y = pa[1]
return [x, y]
|
ab21975df0b87dfdbaa459c80fb7d01fdc319e2e
| 183,552
|
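Because the centroid is simply the arithmetic mean of the three vertices, the same point can be computed directly; a minimal equivalent sketch (hypothetical helper name):

def centroid_of_triangle_simple(pa, pb, pc):
    # The centroid is the average of the three vertex coordinates.
    return [(pa[0] + pb[0] + pc[0]) / 3, (pa[1] + pb[1] + pc[1]) / 3]

# Both approaches agree, e.g. for the triangle (0, 0), (6, 0), (0, 6):
# centroid_of_triangle_simple((0, 0), (6, 0), (0, 6))  ->  [2.0, 2.0]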
from functools import reduce
from operator import or_
def variables(formula):
"""
set of variables in formula
>>> variables([1,(1,-2),(3,4),(-3,-1)])
{1, 2, 3, 4}
"""
    return reduce(or_, map(lambda x: set(map(abs, x)), formula[1:]))
|
9d2a8328264a658c3dabc3b7cd79e4937bb09053
| 347,705
|
def trash_file(drive_service, file_id):
"""
    Move a file to the bin on Google Drive.
"""
body = {"trashed": True}
try:
updated_file = drive_service.files().update(fileId=file_id, body=body).execute()
print(f"Moved old backup file to bin.")
return updated_file
except Exception:
print(f"!!! did not find old bkp file with id {file_id}")
|
f9a2a331a74cdb4050dc6bb788fc398d7db90ec1
| 672,214
|
def generate_hand(deck):
"""Generate a random hand from the available cards"""
    return [deck.pop() for _ in range(7)]
|
56750b380c7c69c78298224d2b9c44ad26ed6a47
| 179,168
|
def gc_content(dna_seq):
"""Returns GC content of a DNA strand as a percentage"""
return round((dna_seq.count("C") + dna_seq.count("G")) / len(dna_seq) * 100)
|
99bbb34cc5793447d848751b242c6a00bb3e8dd2
| 193,073
|
def dist_mapper(dist, package):
"""
    Add download_url from the dist tag, if present. Typically only present in
composer.lock
"""
url = dist.get('url')
if not url:
return package
package.download_url = url
return package
|
d204104c379c482fae5c5fac5a3c27df9b44dbd5
| 427,658
|
import torch
def get_knn_inds(pdist, k=20, remove=False):
"""Get k nearest neighbour index based on the pairwise_distance.
Args:
pdist (torch.Tensor): tensor (batch_size, num_nodes, num_nodes)
k (int): the number of nearest neighbour
remove (bool): whether to remove itself
Returns:
knn_inds (torch.Tensor): (batch_size, num_nodes, k)
"""
    if remove:
        # sorted=True guarantees the smallest value (the self-distance)
        # comes first, so it can be dropped reliably
        _, knn_inds = torch.topk(pdist, k + 1, largest=False, sorted=True)
        return knn_inds[..., 1:]
else:
_, knn_inds = torch.topk(pdist, k, largest=False, sorted=False)
return knn_inds
|
d6c70a3a4e4f7959fa2f11c02eb23a27422b616b
| 163,245
|
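A minimal usage sketch with assumed random data; torch.cdist produces the pairwise-distance tensor the function expects, and the zero self-distances make remove=True drop each point itself:

import torch

points = torch.rand(4, 100, 2)                 # (batch_size, num_nodes, 2)
pdist = torch.cdist(points, points)            # (batch_size, num_nodes, num_nodes)
knn = get_knn_inds(pdist, k=8, remove=True)    # (4, 100, 8)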
import typing
def get_args(t: type) -> typing.Tuple[type, ...]:
"""
Get the arguments from a collection type (e.g. ``typing.List[int]``) as a
``tuple``.
:param t: the collection type.
:return: a ``tuple`` containing types.
"""
args_ = getattr(t, '__args__', tuple()) or tuple()
    args = tuple(attr for attr in args_
                 if not isinstance(attr, typing.TypeVar))
return args
|
35262366558d27fe1fb8b8d07fb6ff0069501bc9
| 518,649
|
from typing import List
def matrix_reshape(mat: List[List[int]], r: int, c: int) -> List[List[int]]:
"""
Reshapes a given matrix into a new one with a different size r x c keeping its original data
@param mat: input matrix
@param r: new row
@param c: new column
@return: reshaped matrix
"""
if len(mat) * len(mat[0]) != r * c:
return mat
values = (val for row in mat for val in row)
return [[next(values) for _ in range(c)] for _ in range(r)]
|
f4a760aa37601a3a7cf7010fbf17dd1a5969df44
| 381,309
|
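A quick usage sketch:

matrix_reshape([[1, 2], [3, 4]], 1, 4)   # [[1, 2, 3, 4]]
matrix_reshape([[1, 2], [3, 4]], 3, 3)   # [[1, 2], [3, 4]]  (incompatible size, returned unchanged)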
import six
def byte(integer):
"""
Convert integer to a one-character byte-string
Similar to chr(), except in Python 3 produce bytes() not str()
Same as pack_integer(integer, num_bytes=1)
"""
return six.binary_type(bytearray((integer,)))
|
15ed5b914c93536f2d1c15cb00a5f8ed83fa496f
| 493,909
|
import re
def replaceTags(value, data_record):
"""
As efficiently as possible, replace tags in input string with corresponding values in
data_record dictionary.
The idea is to iterate through all of the elements of the data_record, and replace each
instance of a bracketed key in the input string with the associated value indicated by
the data record.
This function will be used a lot, so it's important to make it as efficient as possible.
Args:
value (str): The string with tags to replace
data_record (dict): A dict containing the tags to replace, and what to replace them with
Returns:
str: `value` with the tags replaced
Examples:
>>> data_record = {"a": "AAA", "b": "BBB"}
>>> input_value = "aye [a] and bee [b]"
>>> replaceTags(input_value, data_record)
'aye AAA and bee BBB'
"""
prog = re.compile(r"\[([a-zA-Z_][a-zA-Z0-9_\(\)]*)\]")
matches = prog.finditer(value)
for match in matches:
if match.group(1) in data_record:
value = value.replace(match.group(0), data_record[match.group(1)])
return value
|
88e1e2d6c279eb50a615e9a6648fa5d08f525f18
| 676,977
|
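Since each str.replace call rescans the whole string, records with many tags might be served better by a single re.sub pass; a sketch of that alternative (not the original implementation):

import re

_TAG_RE = re.compile(r"\[([a-zA-Z_][a-zA-Z0-9_\(\)]*)\]")

def replace_tags_single_pass(value, data_record):
    # Substitute every known bracketed tag in one scan; unknown tags stay as-is.
    return _TAG_RE.sub(lambda m: data_record.get(m.group(1), m.group(0)), value)

# replace_tags_single_pass("aye [a] and bee [b]", {"a": "AAA", "b": "BBB"})
# -> 'aye AAA and bee BBB'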
def d2_parse_output(predictor_output):
"""
Returns valuable information from Detectron2 predictor output.
Args:
        predictor_output (dictionary) : Output from Detectron2 COCO Predictor.
    Returns:
        pred_masks (np.array) : Each predicted binary mask.
        mask_scores (np.array) : Score of each mask.
        pred_classes (np.array) : Integers representing the predicted classes.
        pred_bboxes (np.array) : Predicted bounding boxes.
"""
instances = predictor_output["instances"]
pred_bboxes = instances.pred_boxes.tensor.cpu().numpy()
pred_classes = instances.pred_classes.cpu().numpy()
pred_masks = instances.pred_masks.cpu().numpy()
mask_scores = instances.scores.cpu().numpy()
return pred_masks, mask_scores, pred_classes, pred_bboxes
|
7941eb6b63312524a63d49624ac024a9d3faa0d2
| 298,286
|
import csv
def precip_table_etl_cnrccep(
ws_precip,
rainfall_adjustment=1
):
"""
Extract, Transform, and Load data from a Cornell Northeast Regional
Climate Center Extreme Precipitation estimates csv into an array
Output: 1D array containing 24-hour duration estimate for frequencies 1,2,5,10,25,50,100,200 years. Example:
[5.207, 6.096, 7.5438, 8.8646, 10.9982, 12.9286, 15.2146, 17.907, 22.1996]
"""
    precips = []
    # Open the precipitation data csv and read all the precipitations out.
    with open(ws_precip) as g:
        input_precip = csv.reader(g)
        # Skip the first 10 rows of the csv, which are assorted header information.
        for _ in range(10):
            next(g)
        k = 1
        for row in input_precip:
            # Grab data from the column containing the 24-hour estimate
            P = float(row[10])
            # Convert to cm and adjust for future rainfall conditions
            # (if rainfall_adjustment is > 1)
            precips.append(P * 2.54 * rainfall_adjustment)
            if k > 8:
                break
            k = k + 1
    return precips
|
ed1fef33ad36b0a5c1405242ef491b327e5d5911
| 674,437
|
def _print(*args):
"""Print an object in the stdout."""
return print(*args)
|
c1b25a1bb6c75fbff7c23b826b48918af95351bf
| 223,074
|
def get_next_dev_version(current_version, version_increment_strategy, incremental_release=False):
"""
Returns the next development version to use. The development version
always ends with "-SNAPSHOT".
If incremental_release is False:
Increments and returns current_version using the specified
version_increment_strategy.
If incremental_release is True:
Returns the current version (because it hasn't been released yet)
"""
if current_version is None:
return None
if incremental_release:
next_version = current_version
else:
next_version = version_increment_strategy(current_version)
if not next_version.endswith("-SNAPSHOT"):
next_version += "-SNAPSHOT"
return next_version
|
9742efce3ba62ec5beb733bc9f3d33aece8326dd
| 467,950
|
def format_in_columns(lst, max_columns):
"""
Format a list containing strings to a string containing the items
in columns.
"""
lst = [str(_m) for _m in lst]
col_len = max([len(_m) for _m in lst]) + 2
ncols = 80//col_len
if ncols > max_columns:
ncols = max_columns
if ncols <= 0:
ncols = 1
if len(lst) % ncols == 0:
nrows = len(lst)//ncols
else:
nrows = 1 + len(lst)//ncols
fmt = ' %%-%ds ' % (col_len-2)
lines = []
for n in range(nrows):
lines.append("".join([fmt % x for x in lst[n::nrows]]))
return "\n".join(lines)
|
015c05caf2d4f8540adbdc530e178dd86d208133
| 348,141
|
def paths_to_edges(paths, repeat=False):
"""Chops a list of paths into its edges.
Parameters
----------
paths: list of tuples
list of the paths.
repeat: bool (False)
include edges repeats?
Returns
-------
edges: list
List of edges
"""
edges = []
for path in paths:
edges += [(path[i], path[i+1]) for i in range(len(path)-1)]
# Return a list of edges with repeats
if repeat:
return edges
# Else remove repeats
else:
return list(set(edges))
|
c3c9e0d5874acdf3a1b648a4009d05213469ce11
| 155,582
|
def get_dict_diff(d1, d2):
"""Get added/removed/changed keys between two dicts.
Each key in the return value is a list, which is the namespaced key that
was affected.
Returns:
3-tuple:
- list of added keys;
        - list of removed keys;
- list of changed keys.
"""
def _diff(d1_, d2_, namespace):
added = []
removed = []
changed = []
for k1, v1 in d1_.items():
if k1 not in d2_:
removed.append(namespace + [k1])
else:
v2 = d2_[k1]
if v2 != v1:
if isinstance(v1, dict) and isinstance(v2, dict):
namespace_ = namespace + [k1]
added_, removed_, changed_ = _diff(v1, v2, namespace_)
added.extend(added_)
removed.extend(removed_)
changed.extend(changed_)
else:
changed.append(namespace + [k1])
for k2 in d2_.keys():
if k2 not in d1_:
added.append(namespace + [k2])
return added, removed, changed
return _diff(d1, d2, [])
|
f111be20eda2271d43272fd9958edfcc43e15546
| 457,762
|
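A quick usage sketch:

added, removed, changed = get_dict_diff(
    {"a": 1, "b": {"c": 2}},
    {"b": {"c": 3, "d": 4}})
# added == [['b', 'd']], removed == [['a']], changed == [['b', 'c']]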
def long_repeat(line: str) -> int:
"""
    Length of the longest substring that consists of the same repeated character.
"""
if len(line) == 0:
return 0
if len(set(list(line))) == 1:
return len(line)
ans = 1
count = 1
for i in range(len(line)-1):
if line[i] == line[i+1]:
count += 1
else:
if count > ans:
ans = count
count = 1
    # The final run of characters is never compared inside the loop,
    # so account for it here.
    return max(ans, count)
|
a7a3bcf3025fc1aa01832b2c441332a915239c87
| 265,858
|
import re
def _get_metadata_from_filename_by_regex(filename, metadata_regexp):
"""
    Tries to read the metadata from the filename based on the given regex.
    This requires the use of symbolic group names in the pattern.
The part to read the metadata from the filename based on a regular
expression is taken from Pelican - pelican/readers.py
"""
match = re.match(metadata_regexp, filename)
meta = {}
if match:
# .items() for py3k compat.
for key, value in match.groupdict().items():
meta[key.lower()] = value # metadata must be lowercase
return meta
|
5189f9adcee3f58d890356b7afce22098e28813a
| 183,607
|
def create_search_group(recordset = None, field = None, values = None,
criteria_operator = "EqualTo"):
"""
Create a Coalesce search group/filter object (the value of "group" in a
`Coalesce search request object
<https://github.com/InCadence/coalesce/wiki/REST-API#search-query-data-format>`_)
that combines all entered values for a given field, either including or
excluding (depending on the value of "criteria_operator") all records
matching any of the supplied values.
    :param recordset: the (:class:`str`) recordset of the field to be
        searched on
:param field: the (:class:`str`) field to be searched on
:param values: a :class:`list` or list-like of values to search for.
The values themselves should be of simple, JSON-serializable types
(e.g., strings, numbers, Boolean).
:param criteria_operator: "EqualTo", if the search is to *include*
a record matching *any* element of "values", or "NotEqualTo", if
the search is to *exclude all* records matching *any* element of
"values".
:returns: a Coalesce search group as a :class:`dict`, or ``None`` if
"values" is empty
"""
# Check for valid input for "recordset" and "field".
if not isinstance(recordset, str):
raise TypeError("Please supply a recordset as a string.")
if not isinstance(field, str):
raise TypeError("Please supply a field to search on, as a string.")
# Check for valid input for "values". This includes a check to make sure
# we got an iterable of values, not just a single one.
values_error_msg = "Please supply a list or list-like of values to " + \
"search for."
if not values:
raise TypeError(values_error_msg)
elif isinstance(values, str):
raise TypeError(values_error_msg)
else:
try:
values[0]
        except Exception:
raise TypeError(values_error_msg)
    # Check for valid input for "criteria_operator".
if criteria_operator == "EqualTo":
group_operator = "OR"
elif criteria_operator == "NotEqualTo":
group_operator = "AND"
else:
raise ValueError('The value of "criteria_operator" must be either ' +
'"EqualTo" or "NotEqualTo".')
# Create the criteria list.
criteria = []
for value in values:
criteria_set = {"recordset": recordset, "field": field,
"operator": criteria_operator, "value": value,
"matchCase": False}
criteria.append(criteria_set)
# Create the search group and return it, or return None if no values
# were supplied.
if len(criteria) > 0:
group = {"operator": group_operator, "criteria": criteria}
else:
group = None
return group
|
b55e37f2df255b19e8e12566031cf7f784ff094b
| 629,414
|
def interpolate_line(x1, y1, x2, y2):
"""
This functions accepts two points (passed in as four arguments)
and returns the function of the line which passes through the points.
Args:
x1 (float): x-value of point 1
y1 (float): y-value of point 1
x2 (float): x-value of point 2
y2 (float): y-value of point 2
Returns:
callable: the function of the line
"""
if x1 == x2:
raise ValueError("x1 and x2 must be different values")
    slope = (y2 - y1) / (x2 - x1)
    def f(x):
        return slope * (x - x1) + y1
return f
|
71b26e50fb21f22333df7b20ddacf7bc376789cc
| 33,222
|
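A quick usage sketch:

line = interpolate_line(0.0, 0.0, 2.0, 4.0)  # passes through (0, 0) and (2, 4)
line(1.0)   # 2.0
line(3.0)   # 6.0 (extrapolates beyond the given points)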
def _get_date_middle(date_a, date_b, position=0.5):
"""Linearly interpolates two given timestamps."""
return date_a + (date_b - date_a) * position
|
08d2d3958b38357446d8dd456f34bd5a795fffe1
| 527,274
|
def _models_delete_all_function_name(model):
"""Returns the name of the function to delete all models from the database"""
return '{}_delete_all'.format(model.get_table_name())
|
cae69394299f7dd79e95253740481c0455b4facd
| 193,820
|
import re
def format_docstring(docstring):
"""
Strips whitespace from docstrings (both on the ends, and in the middle, replacing
all sequential occurrences of whitespace with a single space).
"""
if not docstring:
return ''
return re.sub(r'\s+', ' ', docstring).strip()
|
0dfb6e144ce543805344788a733d413f50363ca6
| 180,468
|
def prettyprint_file_size(size_b: int) -> str:
"""
Format a filesize in terms of bytes, KB, MB, GB, whatever is most appropriate.
:param size_b: int size in bytes
:return: string
"""
if size_b < 1024:
# bytes
ret = "%d B" % size_b
elif size_b < 1024*1024:
# kilobytes
s = size_b / 1024
ret = "{:.2f} KB".format(s)
elif size_b < 1024*1024*1024:
# megabytes
s = size_b / (1024*1024)
ret = "{:.2f} MB".format(s)
else:
# gigabytes
s = size_b / (1024*1024*1024)
ret = "{:.2f} GB".format(s)
return ret
|
0b1fb5760dc260a2deda6b79eb21ed72af20bcee
| 398,754
|
def quick_deepcopy(obj):
"""Deep copy an object consisting of dicts, lists, and primitives.
This is faster than Python's `copy.deepcopy` because it doesn't
do bookkeeping to avoid duplicating objects in a cyclic graph.
This is intended to work fine for data deserialized from JSON,
but won't work for everything.
"""
if isinstance(obj, dict):
obj = {k: quick_deepcopy(v) for k, v in obj.items()}
elif isinstance(obj, list):
obj = [quick_deepcopy(v) for v in obj]
return obj
|
1a18f9b6ee1e25b2ccab718685b31ab0fbbc6682
| 343,140
|
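A quick sketch showing that nested containers are duplicated (note that types other than dict and list, e.g. tuples, are returned as-is):

original = {"a": [1, 2, {"b": 3}]}
duplicate = quick_deepcopy(original)
duplicate["a"][2]["b"] = 99
original["a"][2]["b"]   # still 3: nested dicts and lists were copied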
import torch
def D_power_bias_X(graph, X, power, coeff, bias):
"""Y = (coeff*D + bias*I)^{power} X"""
degs = graph.ndata["deg"]
degs = coeff * degs + bias
norm = torch.pow(degs, power)
Y = X * norm.view(X.size(0), 1)
return Y
|
bf1df377d672c7010be012fe126f033c31d63b67
| 286,410
|
from datetime import datetime
def diff_datewise(left, right, left_parse=None, right_parse=None):
"""
Parameters
left : a list of datetime strings or objects
right : a list of datetime strings or objects
left_parse : if left contains datetimes, None; else a strptime format
right_parse : if right contains datetimes, None; else a strptime format
Returns
A tuple of two sets:
[0] : the datetime objects in left but not right
[1] : the datetime objects in right but not left
"""
if left_parse:
left_set = set([
datetime.strptime(l.strip(), left_parse)
for l in left if len(l.strip())
])
else:
left_set = set(left)
if right_parse:
right_set = set([
datetime.strptime(r.strip(), right_parse)
for r in right if len(r.strip())
])
else:
right_set = set(right)
return (left_set - right_set, right_set - left_set)
|
fc9f4e07be98969729c87cfe649dee8f0a621485
| 387,888
|
def getTitlePerks() -> list:
"""
Get the list of perks to look for in the job description
:return: list - list of perks
"""
perkList = ["Why you'll like working here", 'Benefits',
"The top five reasons our team members joined us (according to them)",
"What’s in it for you?", "Some benefits", "Here’s what you’ll get if you join", "What you get",
"What We Offer", "benefits and perks",
"Why should I choose", "What we offer", "We offer", "Our Perks", "What You’ll Get", "Perks",
"What we offe"]
return perkList
|
6dae31ce2ed55fab41ad495067ff0cdca4372bb0
| 628,237
|
from typing import List
def count_failed_students(student_scores: List[int]) -> int:
"""
:param student_scores: list of integer student scores.
:return: integer count of student scores at or below 40.
"""
return len(list(filter(lambda x: x <= 40, student_scores)))
|
c3fb0cc4fecb946a1850465f342e3530012826c1
| 629,675
|
from typing import Union
import json
def load_json_from_file(path: str) -> Union[dict, list, str, int]:
"""Load JSON from specified path"""
with open(path, 'r', encoding='utf-8') as infile:
return json.load(infile)
|
c8ba755c62ea4ab6fe74b4571034967ce610afdf
| 30,626
|
import glob
def find_file(filename):
"""
    This helper function checks whether a .txt file with the given name
    exists in the current working directory.
    """
    return filename in glob.glob("*.txt")
|
42895e66e258ba960c890f871be8c261aec02852
| 705,802
|
import copy
def rescale_with_ratios(info, width_ratio, height_ratio):
"""
Rescale heatmaps sides given its info and ratios.
:param info: dict, heatmaps info
:param width_ratio: float, ratio by which width will be multiplied
:param height_ratio: float, ratio by which height will be multiplied
:return: dict, heatmaps info rescaled (copy)
"""
info = copy.deepcopy(info)
if width_ratio == 1 and height_ratio == 1:
return info
info['img_width'] = round(width_ratio * info['img_width'])
info['img_height'] = round(height_ratio * info['img_height'])
for cwh in info['cwh_list']:
cwh['width'] = round(width_ratio * cwh['width'])
cwh['height'] = round(height_ratio * cwh['height'])
cwh['center'] = (cwh['center'][0] * width_ratio, cwh['center'][1] * height_ratio)
return info
|
f9b00f7cdadf1fc819d85d593a5edb8aa16ec40e
| 473,025
|
def denormalize(data, norm_params, method='zscore'):
"""
Reverse normalization time series
:param data: normalized time series
:param norm_params: tuple with params mean, std, max, min
:param method: zscore or minmax
:return: time series in original scale
"""
assert method in ['zscore', 'minmax', None]
if method == 'zscore':
return (data * norm_params['std']) + norm_params['mean']
elif method == 'minmax':
        # invert (x - min) / (max - min): multiply by the range, then add the minimum
        return data * (norm_params['max'] - norm_params['min']) + norm_params['min']
elif method is None:
return data
|
6ade12499b2f89639b128585290d676dfef8dee7
| 550,894
|
import random
def random_tuple(t1_max, t2_max):
"""Return a random tuple between (0, 0) and (t1_max, t2_max)."""
return (int(random.random()*t1_max), int(random.random()*t2_max))
|
526a00850e2b3c0e91b28f08b30eb1ec908e8b4f
| 347,249
|
def mafIsEmpty(maf, headers):
""" take a path to a maf file and a list of possible header strings.
an "empty" maf in this context will contain only a header line.
"""
    with open(maf) as f:
        s = f.read()
for h in headers:
if s == h:
return True
return False
|
c793c9f167013ae6876a514108ce51f7047c3231
| 245,613
|
async def unlock(command, fps):
"""Unlocks the FPS."""
result = await fps.unlock()
if result:
command.info(locked=False)
return command.finish(text="FPS unlocked")
else:
return command.fail(error="failed to unlock FPS")
|
5021268813eae46afc19eb5632b24e6cbff71479
| 268,997
|
def get_ta(tr, n_slices):
""" Get slice timing. """
return tr - tr/float(n_slices)
|
c1cba31153cd44ac6b72235b876da352c37eb7e2
| 687,175
|
def get_expected_number_of_faces(off_file):
"""
Finds the expected number of faces in an OFF file. Used to check this matches the number of items in a pixel
mapping list.
:param off_file: The OFF file.
:return: The number of faces in the OFF file.
"""
for line in off_file.split("\n")[1:]:
if line[0] != "#":
return int(line.split()[1])
|
8486fa165f43d924c6dd17b2670d75f8091256d1
| 49,323
|
def length_of_last_word(s):
"""
Returns the length of last word in the given string
:param s: given string
:type s: str
:return: the length of last word
:rtype: int
"""
return len(s.strip().split(' ')[-1])
|
d9c0a998a7f12f5e0bfa7aef207e822010e46061
| 158,148
|
import yaml
def _read_yaml(fpath, loader=yaml.Loader):
"""
Returns contents of YAML file as parsed by `yaml.load`.
:param fpath: file path
:param loader: Loader class.
:return: contents of YAML file
"""
with open(fpath, 'r') as reader:
contents = yaml.load(reader, Loader=loader)
return contents
|
dbd4417272e8d57613772d52da890c5f9e7b666b
| 281,484
|
def find_empty_cells(gr):
"""
Collects all the (i,j) free couples of the grid
:param gr: 2D list
:return: list
"""
l = list()
    for i in range(9):
        for j in range(9):
            if gr[i][j] == 0:
                l.append([i, j])
return l
|
32a20594785223f981774eef26a259e131e1f7ce
| 207,009
|
def middle(a):
"""Returns a (list) without the first and last element"""
return a[1:-1]
|
112854e009afaf6080363f3f1b3df944b4739ede
| 20,922
|
import inspect
def get_fields_and_properties(cls):
"""
Return all fields and @property methods for a model.
"""
fields = [field.name for field in cls._meta.get_fields(include_parents=False)]
    try:
        properties = [
            method[0]
            for method in inspect.getmembers(cls, lambda o: isinstance(o, property))
        ]
    except BaseException:
        properties = []
return fields + properties
|
d7f4bdb52194181a97bdb28805b4ab73403b618d
| 487,787
|
def split_verses_refs(ref, total=0):
""" If there are references to multiple verses, split them
"Romans 8:1-3" -> ["Romans 8:1", "Romans 8:2", "Romans 8:3"]
>>> split_verses_refs("Romans 8:1-3")
["Romans 8:1", "Romans 8:2", "Romans 8:3"]
>>> split_verses_refs("Book 2:4-7")
["Book 2:4", "Book 2:5", "Book 2:6", "Book 2:7"]
>>> split_verses_refs("Book 3", 5)
["Book 3:1", "Book 3:2", "Book 3:3", "Book 3:4", "Book 3:5"]
"""
start_verse = 1
if ":" in ref:
split_refs = ref.split(":")
book_and_chapter = split_refs[0]
verse_refs = split_refs[1].split("–")
start_verse = int(verse_refs[0])
end_verse = int(verse_refs[1])
else:
book_and_chapter = ref
end_verse = total
full_refs = []
for i in range(start_verse, end_verse + 1):
full_refs.append(f"{book_and_chapter}:{i}")
return full_refs
|
a6a64662fc8d2a46f37ab7e42c2c9ec77fbda162
| 424,177
|
def get_syntax(command, fn, command_prefix):
"""
Read back the syntax argument provided in a command's wrapper. Return it
in a printable format.
:param command: Command being called.
:param fn: Function which the command is wrapped around.
:param command_prefix: Prefix used for commands in chat.
:return: String. Syntax details of the target command.
"""
return "Syntax: {}{} {!s}".format(
command_prefix,
command,
fn.syntax)
|
2780b883bc067805744baa9163ba3ad7786490c3
| 72,534
|
def list_range(x):
"""
Returns the range of a list.
"""
return max(x) - min(x)
|
43afbd92537f6d1e044c6ecafabcb82b0a823056
| 120,713
|
def dict_factory(cursor, row):
"""Convert database row objects to a dictionary.
This is useful for building dictionaries which
are then used to render a template.
Note that this would be inefficient for large queries.
"""
output = {}
for idx, col in enumerate(cursor.description):
output[col[0]] = row[idx]
return output
|
6cbacf38db778f3aeb559c84860354dfa3296344
| 580,907
|
def get_seq_diff(seq_tuple):
"""Returns the difference between two TCP sequence numbers."""
(seq_min, seq_max) = seq_tuple
if None in (seq_min, seq_max) or 0 in (seq_min, seq_max):
return None
# Seq wrap-around
diff = seq_max - seq_min
if diff < 0:
diff += 2 ** 32
return diff
|
cdb18eedcf563e245a1f42cdd4cfb7131b073acf
| 606,478
|
import hashlib
def valid_proof(block_string, new_proof):
"""
    Validates the proof: does hash(block_string, new_proof) contain 3
    leading zeroes? Returns True if the proof is valid.
:param block_string: <string> The stringified block to use to
check in combination with `proof`
:param proof: <int?> The value that when combined with the
stringified previous block results in a hash that has the
correct number of leading zeroes.
:return: True if the resulting hash is a valid proof, False otherwise
"""
guess = f'{block_string}{new_proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:3] == "000"
|
6c1f2c79d114ebca1da75324be2db0f7d330b253
| 491,708
|
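A minimal proof-of-work search built on this validator, as a sketch:

def find_proof(block_string):
    # Try successive integers until the combined hash gains the "000" prefix.
    proof = 0
    while not valid_proof(block_string, proof):
        proof += 1
    return proof

# find_proof("example-block") returns the first integer whose sha256
# hex digest of block_string + proof starts with "000"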
from pathlib import Path
def package_path(package):
"""Returns the location of the passed package.
Arguments:
package: A package object.
Returns:
An absolute Path object pointing to the package's location.
"""
return Path(package.__path__[0])
|
81df072ce85f9f39b39fe5c15dc28ca267c1f892
| 155,913
|
def unpack_question_dimid(dimension_id):
"""Decompose the dimension id into unit, lesson and question id.
Returns:
A tuple unit_id, lesson_id, question_id.
unit_id and question_id are strings. lesson_id can be a string or
None.
"""
unit_id, lesson_id, question_id = dimension_id.split(':')
if lesson_id == 'None':
lesson_id = None
return unit_id, lesson_id, question_id
|
4133ec1ce5cd986b64c9af096931b9f2bf8cb123
| 18,046
|
def time_diff_in_ms(start, end):
""" Returns the time difference end-start in ms.
"""
return (end-start)*1000
|
fa1ebb92da81eafb117d357866bafbbb70f18f0f
| 474,574
|
from pathlib import Path
def fixture_fixtures_dir() -> Path:
"""Return the path to the fixtures directory"""
return Path("tests/fixtures/")
|
509dab97a066fb286bdd8311fadf3b7f0d62b7f2
| 124,610
|
def rstrip_lines(text: str) -> str:
"""Remove trailing whitespace from each line in the text."""
return '\n'.join(line.rstrip() for line in text.split('\n'))
|
ddd3bb5fca65c921f33e67d4c3d7e03b26227cc7
| 369,727
|
import re
def validate_maintenance_window(window):
"""Validate PreferredMaintenanceWindow for DBInstance"""
days = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
day_re = r'[A-Z]{1}[a-z]{2}'
hour = r'[01]?[0-9]|2[0-3]'
minute = r'[0-5][0-9]'
r = ("(?P<start_day>%s):(?P<start_hour>%s):(?P<start_minute>%s)-"
"(?P<end_day>%s):(?P<end_hour>%s):(?P<end_minute>%s)") % (day_re,
hour,
minute,
day_re,
hour,
minute)
range_regex = re.compile(r)
m = range_regex.match(window)
if not m:
raise ValueError("DBInstance PreferredMaintenanceWindow must be in "
"the format: ddd:hh24:mi-ddd:hh24:mi")
if m.group('start_day') not in days or m.group('end_day') not in days:
raise ValueError("DBInstance PreferredMaintenanceWindow day part of "
"ranges must be one of: %s" % ", ".join(days))
start_ts = (days.index(m.group('start_day')) * 24 * 60) + \
(int(m.group('start_hour')) * 60) + int(m.group('start_minute'))
end_ts = (days.index(m.group('end_day')) * 24 * 60) + \
(int(m.group('end_hour')) * 60) + int(m.group('end_minute'))
if abs(end_ts - start_ts) < 30:
raise ValueError("DBInstance PreferredMaintenanceWindow must be at "
"least 30 minutes long.")
return window
|
ced6140fa250e157aaa532f7c531abf64e41768e
| 637,860
|
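Example calls, as a quick sketch:

validate_maintenance_window("Mon:02:00-Mon:04:00")   # returned unchanged
validate_maintenance_window("Mon:02:00-Mon:02:15")   # ValueError: under 30 minutes
validate_maintenance_window("02:00-04:00")           # ValueError: wrong format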
def load_tag_dict(filename):
"""
Load a tag dictionary from a file containing one tag
per line.
"""
tag_dict = {}
with open(filename, 'rb') as f:
code = 0
for tag in f:
tag = tag.decode('utf-8').strip()
if tag:
tag_dict[tag] = code
code += 1
return tag_dict
|
d80ebe3eb904f0d86a6661626d91da6f6cce3ae2
| 201,759
|
def flatten_response(response):
"""Helper function to extract only the filenames from the list directory command"""
return [item['filename'] for item in response]
|
2b1463064efd12d635cd6e7ba80374a4d2f36ff4
| 481,102
|
def extractParams(param_str):
"""
    Extracts parameters into a dict from a string.
    Input:
        param_str (str): string of parameters in the format "k1:10;k2:20"
    Returns:
        mydict (dict): dict with format {k1: 10, k2: 20}
"""
#default parameters?
if ',' in param_str:
raise NameError(' , is an invalid character for parameter string.')
if ':' not in param_str:
raise NameError('No parameters found.')
mydict = {}
mystr = param_str.split(';')
for s in mystr:
k,v = s.split(':')
mydict[k.strip()] = v.strip()
return mydict
|
0af01c6b89f1da1428a327ad02c628e916e6074c
| 512,377
|
import base64
def _uri_to_bytes(uri, resolver):
"""
Take a URI string and load it as a
a filename or as base64.
Parameters
--------------
uri : string
Usually a filename or something like:
"data:object/stuff,base64,AABA112A..."
resolver : trimesh.visual.Resolver
A resolver to load referenced assets
Returns
---------------
data : bytes
Loaded data from URI
"""
# see if the URI has base64 data
index = uri.find('base64,')
if index < 0:
# string didn't contain the base64 header
# so return the result from the resolver
return resolver[uri]
# we have a base64 header so strip off
# leading index and then decode into bytes
return base64.b64decode(uri[index + 7:])
|
ec7308430bc5c072709a07e5ca894217db651d11
| 183,019
|
def _get_chunk_info(tsuid, index, md_list, chunk_size):
"""
Get the chunk <index> information for a TSUID split into chunks of <chunk_size> points each
:param tsuid: tsuid to get points from
:type tsuid: str
:param index: the index of the chunk to get
:type index: int
:param md_list: List of metadata
:type md_list: dict
:param chunk_size: the size of the chunk
:type chunk_size: int
:return: information about the chunk (chunk_index, chunk_start_window, chunk_end_window)
:rtype: list
"""
# Number of points
nb_points = int(md_list[tsuid]["qual_nb_points"])
# Timeseries start date
start_date = int(md_list[tsuid]["ikats_start_date"])
# Timeseries end date
end_date = int(md_list[tsuid]["ikats_end_date"])
# Extrapolation of the number of points
delta = int((end_date - start_date) * chunk_size / nb_points)
# Chunk start date
chunk_start = start_date + index * delta
# Chunk end date
chunk_end = min(end_date, chunk_start + delta)
return [index, chunk_start, chunk_end]
|
8c3673a0f90bd137872e8fb36e3344dd0ae09600
| 165,751
|
def padded_list(candidate, reference_list):
"""
    Method creates a `list` that starts with the elements of `candidate`
    and is padded with empty strings ('') up to the length of `reference_list`.
    This operation is useful when trying to construct tables where empty
    strings can represent empty cells.
    :param candidate: a list of strings
    :param reference_list: another list such that `candidate` should match it in
        terms of number of elements
    :return: candidate + [''] * (len(reference_list) - len(candidate))
    """
    if not reference_list:
        raise AttributeError('The reference_list argument must be non-empty.')
pad_length = len(reference_list) - len(candidate)
return candidate + [''] * pad_length
|
ca4a244be02d6ce981c4f97ae87dbb897c6d89fa
| 533,390
|
def flatten(l):
"""Flatten list of lists."""
if all(isinstance(x, list) for x in l):
return [item for sublist in l for item in sublist]
else:
return l
|
c340ec1db11bd07a0de71556d384131d694b602f
| 411,019
|
def get_requirements(req):
"""Load list of dependencies."""
install_requires = []
with open(req) as f:
for line in f:
if not line.startswith("#"):
install_requires.append(line.strip())
return install_requires
|
c3b97188cb0808018f301820f9a66d8e283668c7
| 453,683
|
def modify_idx(*args, idx, dim):
"""
Make an index that slices a specified dimension while keeping the slices
for other dimensions the same.
Parameters
----------
*args : tuple of int or None
constructor arguments for the slice object at target axis
idx : tuple of slice
tuple of slices in the original region of interest
dim : int
target axis
Returns
-------
new_idx : tuple of slice
New tuple of slices with dimension dim substituted by slice(*args)
Can be used to index np.ndarray and torch.Tensor
"""
new_idx = list(idx)
new_idx[dim] = slice(*args)
return tuple(new_idx)
|
09d0553b6281d1c7e5103b14dfde78003f92a554
| 42,523
|
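A quick usage sketch with numpy:

import numpy as np

arr = np.arange(24).reshape(2, 3, 4)
idx = (slice(None), slice(None), slice(None))   # the full region of interest
new_idx = modify_idx(1, 3, idx=idx, dim=2)      # substitute slice(1, 3) on the last axis
arr[new_idx].shape   # (2, 3, 2)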
from pathlib import Path
import zipfile
import gzip
import shutil
def unzip_file(element_path):
"""Checking if the path points to an existing folder or to its .zip format; if only the .zip format exists,
it unzips the folder.
Parameters
----------
element_path: PosixPath
Absolute path of the folder or file of interest.
Returns
-------
Absolute path of the (unzipped) folder of interest.
"""
if element_path.exists():
return element_path
elif Path(str(element_path) + ".zip").exists():
        with zipfile.ZipFile(str(element_path) + ".zip", 'r') as zip_folder:
            zip_folder.extractall(element_path.resolve().parent)
return element_path
elif Path(str(element_path) + ".gz").exists():
with gzip.open(str(element_path) + ".gz", 'rb') as f_in:
with open(str(element_path), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return element_path
else:
raise FileNotFoundError(f"{element_path} does not exist")
|
646133d74f84fb496c3eae51b5516d2d50535b48
| 598,762
|
def global_pct_id( segments ):
"""
Calculated like this:
10bp @ 50% id = 5 matching residues, 10 total residues
10bp @ 80% id = 8 matching residues, 10 total residues
13 matching residues, 20 total residues
---------------------------------------
13 / 20 * 100 = 65%
"""
match_length = 0
identical_residues = 0
for segment in segments:
segment_length = abs(segment['qry_start'] - segment['qry_end']) + 1
match_length += segment_length
matched_residues = segment_length * (segment['pct_id'] / 100)
identical_residues += matched_residues
return (identical_residues / match_length) * 100
|
29dfaea0faa21fbf29d58d5ce7dc67590010a56f
| 493,323
|
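A quick check reproducing the docstring arithmetic, with hypothetical segment dicts:

segments = [
    {'qry_start': 1, 'qry_end': 10, 'pct_id': 50.0},   # 10 bp at 50% -> 5 matches
    {'qry_start': 11, 'qry_end': 20, 'pct_id': 80.0},  # 10 bp at 80% -> 8 matches
]
global_pct_id(segments)   # 13 / 20 * 100 = 65.0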
def unpack_quaternion_msg(msg, stamped=False):
""" Get coordinates from a Quaternion(Stamped) message. """
if stamped:
q = msg.quaternion
else:
q = msg
return q.w, q.x, q.y, q.z
|
8252c24cbabbf32ab096811b4ef93d647d885ad5
| 209,484
|
def make_list(obj):
""" Turn an object into a list if it isn't already """
if isinstance(obj, list):
return obj
else:
return list(obj)
|
3819e1a789ff7d52beda0508cf25d1468c0d83a0
| 235,994
|
import torch
def compensate_masking(X, mask):
"""
Compensate feature vectors after masking values, in a way that the matrix product W @ X would not be affected on average.
If p is the proportion of unmasked (active) elements, X' = X / p = X * feat_dim/num_active
Args:
X: (batch_size, seq_length, feat_dim) torch tensor
mask: (batch_size, seq_length, feat_dim) torch tensor: 0s means mask and predict, 1s: unaffected (active) input
Returns:
(batch_size, seq_length, feat_dim) compensated features
"""
# number of unmasked elements of feature vector for each time step
num_active = torch.sum(mask, dim=-1).unsqueeze(-1) # (batch_size, seq_length, 1)
    # to avoid division by 0, set the minimum to 1 (clamp preserves device and dtype)
    num_active = torch.clamp(num_active, min=1)  # (batch_size, seq_length, 1)
return X.shape[-1] * X / num_active
|
b5e0c4c6908acaac516d2595ba233e6d69b48f03
| 164,599
|
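A minimal usage sketch with an assumed random mask:

import torch

X = torch.rand(2, 5, 8)                       # (batch_size, seq_length, feat_dim)
mask = (torch.rand(2, 5, 8) > 0.3).int()      # 1 = active, 0 = masked
X_comp = compensate_masking(X * mask, mask)   # rescaled so W @ X is unaffected on average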
def estimate_infectious_rate_constant(events,
t_start,
t_end,
kernel_integral,
count_events=None):
"""
    Returns an estimate of the infectious rate for the given events on the
    defined interval. The infectious rate is expected to be constant on the
    given interval.
:param events: array of event tuples containing (event_time, follower_cnt)
:param t_start: time interval start
:param t_end: time interval end
:param kernel_integral: integral function of kernel function
:param count_events: count of observed events in interval (used for time window approach)
:return: estimated value for infectious rate
"""
kernel_int = [
fol_cnt * kernel_integral(t_start - event_time, t_end - event_time)
for event_time, fol_cnt in events
]
if count_events is not None:
return count_events / sum(kernel_int)
else:
return (len(events)) / sum(kernel_int)
|
d1a83bd79988de9467dbc6c952aec35e3d496cd2
| 683,855
|
def proper(s):
"""Strips then capitalizes each word in a string."""
s = s.replace("-", " ").title()
s = s.replace("_", " ").title()
return s
|
78fa0f28bfdb77fdc572eb851f4021daabdc1a2b
| 627,906
|
def crop(img, w, h):
"""
Extract part of size w x h from center of the given image
"""
w_, h_ = img.shape[0:2]
w_off, h_off = (w_ - w) // 2, (h_ - h) // 2
assert w_ >= w and h_ >= h, "cannot crop from {}x{} to {}x{}".format(w_, h_, w, h)
return img[w_off:w_off + w, h_off:h_off + h]
|
8b3eb2c4057eaec7904e58e092bd9d0a7a736552
| 116,682
|
from typing import List
from typing import Callable
def make_random_function(returns: List[float]) -> Callable[[], float]:
"""
Return a function that returns the input values in sequence.
Used to simulate a random function for tests.
Parameters
----------
returns : list
The values to return in sequence.
"""
returns_copy = returns.copy()
def next_random() -> float:
nonlocal returns_copy
if len(returns_copy) == 0:
raise IndexError("Needed more random numbers than were provided")
value_to_return = returns_copy[0]
returns_copy = returns_copy[1:]
return value_to_return
return next_random
|
c10757033815d50b66b2136b35198e389e84236e
| 139,716
|
def unsubscribe(dest):
"""STOMP unsubscribe command.
dest:
        This is the channel we wish to unsubscribe from
Tell the server we no longer wish to receive any
further messages for the given subscription.
"""
return "UNSUBSCRIBE\ndestination:%s\n\n\x00\n" % dest
|
b148ac089a635c82ec702971c5a0da5dfaa09541
| 525,038
|
def agg_var_num(dataframe, group_var, dict_agg, prefix):
"""
Aggregates the numeric values in a dataframe.
This can be used to create features for each instance of the grouping variable.
Parameters
--------
dataframe (dataframe): the dataframe to calculate the statistics on
    group_var (string): the variable by which to group the dataframe
    dict_agg (dict): mapping of column names to aggregation functions
    prefix (string): the prefix used to rename the columns
Return
--------
agg (dataframe):
a dataframe with the statistics aggregated for
all numeric columns. Each instance of the grouping variable will have
some statistics (mean, min, max, sum ...) calculated.
The columns are also renamed to keep track of features created.
"""
# Remove id variables other than grouping variable
for col in dataframe:
if col != group_var and 'SK_ID' in col:
dataframe = dataframe.drop(columns=col)
group_ids = dataframe[group_var]
numeric_df = dataframe.select_dtypes('number')
numeric_df[group_var] = group_ids
# Group by the specified variable and calculate the statistics
agg = numeric_df.groupby(group_var).agg(dict_agg)
    # Append the aggregation suffix (mean, sum, ...) to each column name
agg.columns = ['_'.join(tup).strip().upper()
for tup in agg.columns.values]
    # Prepend the prefix (e.g. bureau_balance) to keep track of the source file
agg.columns = [prefix + '_' + col
if col != group_var else col
for col in agg.columns]
agg.reset_index(inplace=True)
return agg
|
9f5d92d932ce966160825a5c9a9dfd678a162645
| 82,972
|
def splitdate(yyyymmddhh):
"""
yyyy,mm,dd,hh = splitdate(yyyymmddhh)
give an date string (yyyymmddhh) return integers yyyy,mm,dd,hh.
"""
yyyy = int(yyyymmddhh[0:4])
mm = int(yyyymmddhh[4:6])
dd = int(yyyymmddhh[6:8])
hh = int(yyyymmddhh[8:10])
return yyyy,mm,dd,hh
|
30b6394bf679d22a6ee33dab94f5943ba8f38d77
| 600,670
|
def normalize_comment(comment: str) -> str:
"""
Normalize a comment.
It does the following:
* uncheck checked boxes
"""
fixed = comment.replace("[x]", "[ ]")
return fixed
|
f55601b4b72ebc979490943cd388f225837710c8
| 378,516
|
def result(score):
"""
Returns a string indicating the result of a game
https://www.gokgs.com/json/dataTypes.html#score
"""
    if isinstance(score, float):
if score > 0:
out = "Black + " + str(score)
else:
out = "White + " + str(-score)
else:
out = score
return out
|
d3e06bbfddd342a0a26dadf88447e5dbf345de30
| 268,831
|
def PredicateSplit(func, iterable):
"""Splits an iterable into two groups based on a predicate return value.
Arguments:
func: A functor that takes an item as its argument and returns a boolean
value indicating which group the item belongs.
iterable: The collection to split.
Returns:
A tuple containing two lists, the first containing items that func()
returned True for, and the second containing items that func() returned
False for.
"""
trues, falses = [], []
for x in iterable:
(trues if func(x) else falses).append(x)
return trues, falses
|
a2d631baa4b0ba140d74d0b1ba59d010e663ae45
| 579,386
|
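A quick usage sketch:

evens, odds = PredicateSplit(lambda n: n % 2 == 0, range(10))
# evens == [0, 2, 4, 6, 8], odds == [1, 3, 5, 7, 9]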
def str_fmt_width(string,pad,width):
"""
    Utility function for breaking a string on spaces
    while trying to keep lines shorter than width.
    When pad is greater than 1, each line of the formatted
    string is prefixed with that number of spaces and a colon.
"""
base = "\n"
if pad > 1:
base = "\n" +" "*pad +":"
words = string.split()
out = ""
temp = base
for word in words:
test = temp + " " + word
if len(test)>width-pad:
out += test
temp = base
else:
temp = test
if temp != base:
out += temp
return out
|
b1eef845ba58bfee8a75e4a685428dbaf9edd2cd
| 329,165
|
import asyncio as _asyncio
async def timer(seconds=1.0):
"""
Wait a number of seconds. Used with the await keyword like this:
@play.repeat_forever
async def do():
await play.timer(seconds=2)
print('hi')
"""
await _asyncio.sleep(seconds)
return True
|
e6155d7f026d34e4142c4e50f5224b5a3956cb91
| 431,090
|
def _collect_cluster_stats(lane):
"""Retrieve total counts on cluster statistics.
"""
stats = {"Clusters" : 0, "Clusters passed": 0}
for tile in lane.find("Read").findall("Tile"):
stats["Clusters"] += int(tile.find("clusterCountRaw").text)
stats["Clusters passed"] += int(tile.find("clusterCountPF").text)
return stats
|
b3d6906ede391341e291d46f5c168155db2234d2
| 516,241
|
def miniAOD_customizeIsolatedTracksFastSim(process):
"""Switch off dE/dx hit info on fast sim, as it's not available"""
process.isolatedTracks.saveDeDxHitInfo = False
return process
|
b552f353ef2068c0d5d076ae288865382580cf34
| 83,175
|
def get_alignment_pdb_chain(alignment):
"""
Returns a string of the chain id, e.g. 'A', 'B', etc.
:param alignment:
:return:
"""
    # hit_def is already a str in Python 3; encoding it to bytes would break
    # the str split below
    pdb_chain_id = alignment.hit_def.split()[0]
    chain = pdb_chain_id.split('_')[1].upper()
return chain
|
d322ea53606e8d8e5dd61bd50bf0ad44fbb24182
| 661,156
|
import torch
def erase(img, i, j, h, w, v, inplace=False):
""" Erase the input Tensor Image with given value.
Args:
img (Tensor Image): Tensor image of size (C, H, W) to be erased
i (int): i in (i,j) i.e coordinates of the upper left corner.
j (int): j in (i,j) i.e coordinates of the upper left corner.
h (int): Height of the erased region.
w (int): Width of the erased region.
v: Erasing value.
inplace(bool, optional): For in-place operations. By default is set False.
Returns:
Tensor Image: Erased image.
"""
if not isinstance(img, torch.Tensor):
raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))
if not inplace:
img = img.clone()
img[:, i:i + h, j:j + w] = v
return img
|
63a15369db5e1c4186293a64f408105cbb445127
| 471,938
|
def GetLapicSpuriousVectorFields(reg_val):
""" Helper function for DoLapicDump that prints the fields of the
spurious vector register.
Params:
        reg_val: int - the value of the spurious vector register to print
Returns:
string showing the fields
"""
vector = reg_val & 0xff
enabled = (reg_val & 0x100) >> 8
return "[VEC={:3d} ENABLED={:d}]".format(vector, enabled)
|
ae6cfe89a2d0363129d667b478ecab8bb337b9c5
| 339,786
|
def _doktocsr(dok):
"""Converts a sparse matrix to Compressed Sparse Row (CSR) format.
Parameters
==========
A : contains non-zero elements sorted by key (row, column)
JA : JA[i] is the column corresponding to A[i]
IA : IA[i] contains the index in A for the first non-zero element
of row[i]. Thus IA[i+1] - IA[i] gives number of non-zero
elements row[i]. The length of IA is always 1 more than the
number of rows in the matrix.
Examples
========
>>> from sympy.matrices.sparsetools import _doktocsr
>>> from sympy import SparseMatrix, diag
>>> m = SparseMatrix(diag(1, 2, 3))
>>> m[2, 0] = -1
>>> _doktocsr(m)
[[1, 2, -1, 3], [0, 1, 0, 2], [0, 1, 2, 4], [3, 3]]
"""
row, JA, A = [list(i) for i in zip(*dok.row_list())]
IA = [0]*((row[0] if row else 0) + 1)
for i, r in enumerate(row):
IA.extend([i]*(r - row[i - 1])) # if i = 0 nothing is extended
IA.extend([len(A)]*(dok.rows - len(IA) + 1))
shape = [dok.rows, dok.cols]
return [A, JA, IA, shape]
|
82733143a51cb9dcb65d0994f8551953d687aff9
| 430,294
|
def _prime_powers(N):
"""
Find the prime powers dividing ``N``.
In other words, if `N = q_1^(e_1)q_2^(e_2)...q_n^(e_n)`, it returns
`[q_1^(e_1),q_2^(e_2),...,q_n^(e_n)]`.
INPUT:
- ``N`` -- an integer
OUTPUT:
- A list of the prime powers dividing N.
EXAMPLES::
sage: sage.combinat.binary_recurrence_sequences._prime_powers(124656)
[3, 16, 49, 53]
sage: sage.combinat.binary_recurrence_sequences._prime_powers(65537)
[65537]
"""
output = sorted([i ** j for i, j in N.factor()])
return output
|
06b840091571e6201b2063b25f40719e9db8747d
| 462,210
|
def d_binary_crossentropy_loss ( y, y_hat, eps=1e-10 ):
"""
Gradient of the cross-entropy loss for predictions, given the
true values.
# Arguments:
y: a numpy array of true binary labels.
y_hat: a numpy array of predicted labels,
as numbers in open interval (0, 1). must have
the same number of entries as y, but not
necessarily identical shape
eps: a small offset to avoid numerical problems
when predictions are very close to 0 or 1
# Returns:
grad: a numpy array of individual cross-entropy
gradient values for each prediction. will be
the same shape as y_hat irrespective of the
shape of y
"""
y = y.reshape(y_hat.shape)
return (y_hat - y)/(y_hat * (1 - y_hat) + eps)
|
b4460a1ff73c8e48d1570de3990182552ea639f8
| 331,310
|
def index_of_closest_element(ascending_list, datum):
"""Return index of the list element whose value is closest to datum."""
old_delta = abs(ascending_list[0] - datum)
for index, element in enumerate(ascending_list[1:]):
delta = abs(element - datum)
if delta > old_delta:
return index
old_delta = delta
return len(ascending_list) - 1
|
d5afed6d2be18475910d773857bc5742f89f913c
| 340,427
|
def discover_device(module, device):
""" Discover AIX devices."""
cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
if device is not None:
device = "-l %s" % device
else:
device = ''
    changed = True
    msg = ''
    if not module.check_mode:
        # Pass the command as a single string so the optional "-l <device>"
        # flag is split into separate arguments.
        rc, cfgmgr_out, err = module.run_command("%s %s" % (cfgmgr_cmd, device))
        msg = cfgmgr_out
return changed, msg
|
c0e55e49c32ceb7380be92a242bf1a122e13ffa0
| 140,192
|
def read_file(filepath: str) -> str:
"""
Opens the given file to read all the contents into a single value, replacing unicode errors with a '?'.
"""
    with open(filepath, errors="replace") as file:
        return file.read()
|
8f1eab13058db0c8840a394978de80400edeb95b
| 166,536
|
def GCreadlines(fobject, comment='#'):
"""Strip files from comments.
fobject: file object to be read
comment: string indicating comment.
"""
lines = []
for l in fobject.readlines():
# ignore comments and empty lines
l = l.strip()
        pos = l.find(comment)
        if pos > -1:
            l = l[:pos]
        if len(l) == 0:
            continue
lines.append(l)
return lines
|
0947a79273bc4731e86c5711a1c792d91c7531ea
| 611,844
|
import json
def get_rule(arg: str):
"""Fetches `arg` in `./cogs/rules.json`."""
with open("./cogs/rules.json") as f:
config = json.load(f)
ret = config[arg]
return ret
|
9caad76bd3555201d31918e95db023da66992828
| 206,836
|
from typing import List
def parse_input(input_text: str) -> List[int]:
"""Given the contents of an input file, return the list of numbers in it"""
    return [int(word) for word in input_text.split()]
|
4b16f4f8d1a3ff087f88f709b8865180d6174d67
| 309,141
|
def update_parser(parser):
"""Parse the arguments from the CLI and update the parser."""
parser.add_argument(
'--prepro_step',
type=str,
default='preprocessing', #'slicing', 'preprocessing'
help='To execute a preprocessing method')
#this is for allfeatures_preprocessing:
parser.add_argument(
'--train_period',
type=int,
nargs='*',
default=[121,323], #[156, 180], [121,143], # 279],
help='Training Period')
parser.add_argument(
'--valid_period',
type=int,
nargs='*',
default=[324,329], #[181,185], [144,147],
help='Validation Period')
parser.add_argument(
'--test_period',
type=int,
nargs='*',
default= [330, 342], # [186,191], [148, 155],
help='Testing Period')
parser.add_argument(
'--prepro_dir',
type=str,
default='chuncks_random_c1mill',
help='Directory with raw data inside data/raw/ and it will be the output directory inside data/processed/')
parser.add_argument(
'--prepro_chunksize',
type=int,
default=500000,
help='Chunk size to put into the h5 file...')
parser.add_argument(
'--prepro_with_index',
type=bool,
default=True,
help='To keep indexes for each record')
parser.add_argument(
'--ref_norm',
type=bool,
default=True,
help='To execute the normalization over the raw inputs')
#to execute slice_table_sets:
parser.add_argument(
'--slice_input_dir',
type=str,
default='chuncks_random_c1mill',
help='Input data directory')
parser.add_argument(
'--slice_output_dir',
type=str,
nargs='*',
default=['chuncks_random_c1mill_train', 'chuncks_random_c1mill_valid', 'chuncks_random_c1mill_test'],
help='Output data directory. Input and output could be the same per group, it is recommendable different directories...')
parser.add_argument(
'--slice_tag',
type=str,
nargs='*',
default=['train', 'valid', 'test'],
help='features group to be extracted')
parser.add_argument(
'--slice_target_name',
type=str,
nargs='*',
default=['c1mill99-01_train', 'c1mill99-01_valid', 'c1mill99-01_test'],
help='file name root inside output directory')
parser.add_argument(
'--slice_chunksize',
type=int,
default=1000,
help='Chunk size to put into the h5 output files...')
parser.add_argument(
'--slice_target_size',
type=int,
default=36000000,
help='Output file size')
parser.add_argument(
'--slice_with_index',
type=bool,
default=False,
help='To keep indexes for each record')
parser.add_argument(
'--slice_index',
type=int,
default=0,
help='index to label each output file')
return parser.parse_known_args()
|
5cd74d2e50801db7ea7d27a37bca3c224a2b039a
| 647,268
|