| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
|
import numpy as np
def moon_illumination(phase: float) -> float:
"""Calculate the percentage of the moon that is illuminated.
Currently this value increases approximately linearly in time from new moon
to full, and then linearly back down until the next new moon.
Args:
phase: float
The phase angle of the Moon, in degrees.
Returns:
        illumination: float
The percentage of the Moon that is illuminated.
"""
return 100 * (1 - np.abs(phase - 180) / 180)
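# A quick illustrative check of the linear model above (added sketch, not part
# of the original snippet): new moon (0 deg) gives 0%, full moon (180 deg) gives 100%.
for phase in (0.0, 90.0, 180.0, 270.0):
    print(phase, moon_illumination(phase))  # 0.0, 50.0, 100.0, 50.0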
|
c40a3a6cb4de6da1fd64a188c99892afe3d385d7
| 3,645,700
|
def convex_hull_mask_iou(points_uv, im_shape, gt_hull_mask):
"""Computes masks by calculating a convex hull from points. Creates two masks (if possible),
one for the estimated foreground pixels and one for the estimated background pixels.
Args:
points_uv: (2, N) Points in u, v coordinates
im_shape: image shape [image_height, im_width]
gt_hull_mask: mask created by calculating convex hull
Returns:
        best_iou: the best IoU between the computed hull masks and the ground
            truth hull mask
"""
im_height, im_width = im_shape
# Segment the points into background and foreground
if len(set(points_uv[0])) > 1:
thresh = filters.threshold_li(points_uv[0])
pred_seg_1 = points_uv[0] > thresh
pred_seg_2 = points_uv[0] < thresh
segs = [pred_seg_1, pred_seg_2]
else:
# There is only one unique point so a threshold cannot be made
segs = [np.full(points_uv[0].shape, True, dtype=bool)]
mask_list = []
# Loop over both segments since it is uncertain which segment is foreground or background
for seg in segs:
# Obtain the coordinates of the pixels
pred_u = np.int32(points_uv[0][seg])
pred_v = np.int32(points_uv[1][seg])
# Remove duplicate coordinates by forming a set
coords = set(zip(pred_u, pred_v))
# Convex hull calculation requires a numpy array
coords = np.array(list(coords))
# Need at least 3 points to create convex hull
if len(coords) < 3:
continue
# Points must not lie along a single line in order to create convex hull
elif any(np.all(coords == coords[0, :], axis=0)):
continue
else:
hull = ConvexHull(coords)
img = Image.new('L', (im_width, im_height), 0)
vertices = list(zip(coords[hull.vertices, 0], coords[hull.vertices, 1]))
ImageDraw.Draw(img).polygon(vertices, outline=1, fill=1)
mask = np.array(img)
mask_list.append(mask)
best_iou = 0
for mask in mask_list:
iou = evaluation.mask_iou(mask, gt_hull_mask)
if iou > best_iou:
best_iou = iou
return best_iou
|
09cb5cf8f7721a7430ab3825e0e6ddbbb0966be6
| 3,645,701
|
def run(text, base_dir, debug_filename, symbols = set()):
"""Rudimentary resolver for the following preprocessor commands:
// #include <some-file>
(no check for cyclic includes!)
// #ifdef | #if <symbol>
// <contents>
// [ #elif
// <alt-contents> ]*
// [ #else
// <alt-contents> ]
// #endif
"""
out = []
stack = []
lines = text.split('\n')
l_iter = iter(zip(range(1, len(lines)+1),lines))
push_line = None
nline = -1
def error(msg):
raise Exception(msg + ' @ ' + debug_filename + ':' + str(nline))
while True:
try:
nline, line = push_line or next(l_iter)
push_line = None
except StopIteration:
break
match = line_re.match(line)
if match:
skip_branch = False
cmd = match.group(1)
if cmd == 'include':
name = match.group(2).strip('<>"\'')
fpath = os.path.join(base_dir, name)
                print('handling js #include: ' + fpath)
                with open(fpath, 'rt') as inp:
                    out.append(run(inp.read(), os.path.split(fpath)[0], name, symbols))
elif cmd in ['if', 'ifdef', 'ifndef']:
val = eval_conditional(match.group(2), symbols)
if cmd == 'ifndef':
val = not val
print('eval: ' + cmd + ' ' + match.group(2) + ' as ' + str(val))
skip_branch = not val
stack.append(val)
elif cmd in ['else', 'elif']:
if not stack:
error('syntax error, unexpected ' + cmd)
# has been handled before?
if stack[-1]:
skip_branch = True
elif cmd != 'elif' or eval_conditional(match.group(2), symbols):
stack[-1] = True
else:
skip_branch = True
elif cmd == 'endif':
if not stack:
error('syntax error, unexpected endif')
continue
stack.pop()
else:
error('define/ifdef/endif/else currently ignored')
if skip_branch:
# skip everything up to the next elif/else/endif at the same nesting level
nesting = 1
while True:
try:
nline, line = next(l_iter)
match = line_re.match(line)
if match:
done = False
cmd = match.group(1)
if cmd in ['if', 'ifdef']:
nesting += 1
elif cmd == 'endif':
nesting -= 1
if nesting == 0:
done = True
if cmd in ['else', 'elif'] and nesting == 1:
done = True
if done:
push_line = nline, line
break
except StopIteration:
error('syntax error, unexpected EOF')
return
else:
out.append(line)
return '\n'.join(out)
|
de2e06f7cdf93e80eca05b6fef0dd0cab55175e3
| 3,645,702
|
from typing import Callable
from typing import Awaitable
import inspect
def load_callback(module: ModuleType, event: Event) -> Callable[..., Awaitable[None]]:
"""
Load the callback function from the handler module
"""
callback = getattr(module, "handler")
if not inspect.iscoroutinefunction(callback):
raise TypeError(
f"expected 'coroutine function' for 'handler', got {type(callback).__name__!r}"
)
signature = inspect.signature(callback)
params = dict(signature.parameters)
# Construct the model from the callback for manual events
if isinstance(event, ManualEvent):
expect_returns(signature, None, Response, allow_unannotated=True)
event.model = build_model_from_params(params)
# Ensure the signature is passed the same parameters as the event sends
elif isinstance(event, AutomatedEvent):
expect_returns(signature, None, allow_unannotated=True)
# Get the model parameters
model_signature = inspect.signature(event.input_validator)
model_params = dict(model_signature.parameters)
validate_automated_signature(params, model_params)
return callback
|
e961adaae0c7f4ad5abe228ef677b3b61288d531
| 3,645,703
|
import os
import ast
def read_config_key(fname='', existing_dict=None, delim=None):
"""
Read a configuration key.
"""
# Check file existence
    if not os.path.isfile(fname):
logger.error("I tried to read key "+fname+" but it does not exist.")
return(existing_dict)
logger.info("Reading: "+fname)
# Expected Format
expected_words = 3
expected_format = "config_type config_name params_as_dict"
# Open File
infile = open(fname, 'r')
# Initialize the dictionary
if existing_dict is None:
out_dict = {}
else:
out_dict = existing_dict
# Loop over the lines
lines_read = 0
while True:
line = infile.readline()
if len(line) == 0:
break
if skip_line(line, expected_words=expected_words, delim=delim, expected_format=expected_format):
continue
this_type, this_value, this_params = parse_one_line(line, delim=delim)
# Check if the type of entry is new
        if this_type not in out_dict:
out_dict[this_type] = {}
# Initialize a configuration on the first entry - configs can have several lines
if (this_value not in out_dict[this_type].keys()):
out_dict[this_type][this_value] = {}
# Parse the parameters as a literal
try:
this_params_dict = ast.literal_eval(this_params)
except:
logger.error("Could not parse parameters as a dictionary. Line is: ")
logger.error(line)
continue
# Now read in parameters. To do this, define templates for
# expected fields and data types for each type of
# configuration. Check to match these.
if this_type == "array_tag":
expected_params = {
'timebin':'0s',
}
if this_type == "interf_config":
expected_params = {
'array_tags':[],
'res_min_arcsec':0.0,
'res_max_arcsec':0.0,
'res_min_pc':0.0,
'res_max_pc':0.0,
'res_step_factor':1.0,
'res_list':[],
'clean_scales_arcsec':[]
}
if this_type == "feather_config":
expected_params = {
'interf_config':'',
'res_min_arcsec':0.0,
'res_max_arcsec':0.0,
'res_step_factor':1.0,
'res_min_pc':0.0,
'res_max_pc':0.0,
'res_list':[]
}
if this_type == "line_product":
expected_params = {
'line_tag':'',
'channel_kms':0.0,
'statwt_edge_kms':50.0,
'fitorder':0,
'combinespw':False,
'lines_to_flag':[],
}
if this_type == "cont_product":
expected_params = {
'freq_ranges_ghz':[],
'channel_ghz':0.0,
'lines_to_flag':[]
}
# Check configs for expected name and data type
for this_key in this_params_dict.keys():
if this_key not in expected_params.keys():
logger.error('Got an unexpected parameter key. Line is:')
logger.error(line)
continue
if type(this_params_dict[this_key]) != type(expected_params[this_key]):
logger.error('Got an unexpected parameter type for parameter '+str(this_key)+'. Line is:')
logger.error(line)
continue
if this_key in out_dict[this_type][this_value].keys():
logger.debug("Got a repeat parameter definition for "+this_type+" "+this_value)
logger.debug("Parameter "+this_key+" repeats. Using the latest value.")
out_dict[this_type][this_value][this_key] = this_params_dict[this_key]
lines_read += 1
infile.close()
logger.info("Read "+str(lines_read)+" lines into a configuration definition dictionary.")
return(out_dict)
|
87fa9ea607c34fd31b6a53224a8f8c2c5673e1f9
| 3,645,704
|
def molmer_sorensen(theta, N=None, targets=[0, 1]):
"""
Quantum object of a Mølmer–Sørensen gate.
Parameters
----------
theta: float
The duration of the interaction pulse.
N: int
Number of qubits in the system.
target: int
The indices of the target qubits.
Returns
-------
molmer_sorensen_gate: :class:`qutip.Qobj`
Quantum object representation of the Mølmer–Sørensen gate.
"""
if targets != [0, 1] and N is None:
N = 2
if N is not None:
return expand_operator(molmer_sorensen(theta), N, targets=targets)
return Qobj(
[
[np.cos(theta/2.), 0, 0, -1.j*np.sin(theta/2.)],
[0, np.cos(theta/2.), -1.j*np.sin(theta/2.), 0],
[0, -1.j*np.sin(theta/2.), np.cos(theta/2.), 0],
[-1.j*np.sin(theta/2.), 0, 0, np.cos(theta/2.)]
],
dims=[[2, 2], [2, 2]])
|
8b5e7bc221c4f785bd8747a5b04d4a9299ebeefc
| 3,645,705
|
import math
def get_pixel_dist(pixel, red, green, blue):
"""
Returns the color distance between pixel and mean RGB value
Input:
pixel (Pixel): pixel with RGB values to be compared
red (int): average red value across all images
green (int): average green value across all images
blue (int): average blue value across all images
Returns:
dist (int): color distance between red, green, and blue pixel values
"""
color_distance = math.sqrt((pixel.red - red)**2 + (pixel.green - green)**2 + (pixel.blue - blue)**2)
return color_distance
|
9ad0a30090e735daac4c7d470ea40e7d4dc0010f
| 3,645,706
|
def structure_pmu(array: np.ndarray) -> np.ndarray:
"""Helper function to convert 4 column array into structured array
representing 4-momenta of particles.
Parameters
----------
array : numpy ndarray of floats, with shape (num particles, 4)
The 4-momenta of the particles, arranged in columns.
Columns must be in order (x, y, z, e).
See also
--------
    structure_pmu_components : structured array from separate 1d arrays
of momentum components.
Notes
-----
As the data-type of the input needs to be recast, the output is
a copy of the original data, not a view on it. Therefore it uses
additional memory, so later changes to the original will not
affect the returned array, and vice versa.
"""
if array.dtype != _types.pmu:
struc_array = array.astype(_types.pmu[0][1])
struc_array = struc_array.view(dtype=_types.pmu, type=np.ndarray)
struc_pmu = struc_array.copy().squeeze()
else:
struc_pmu = array
return struc_pmu
|
519173b131d4120f940022b567faef018be2f2ed
| 3,645,707
|
import os
def _log_from_checkpoint(args):
"""Infer logging directory from checkpoint file."""
int_dir, checkpoint_name = os.path.split(args.checkpoint)
logdir = os.path.dirname(int_dir)
checkpoint_num = int(checkpoint_name.split('_')[1])
_log_args(logdir, args, modified_iter=checkpoint_num)
return logdir, checkpoint_num
|
d5198c276dce969f9245db520f30ee2d8bd42363
| 3,645,708
|
def url_query_parameter(url, parameter, default=None, keep_blank_values=0):
"""Return the value of a url parameter, given the url and parameter name
General case:
>>> import w3lib.url
>>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "id")
'200'
>>>
Return a default value if the parameter is not found:
>>> w3lib.url.url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault")
'mydefault'
>>>
Returns None if `keep_blank_values` not set or 0 (default):
>>> w3lib.url.url_query_parameter("product.html?id=", "id")
>>>
Returns an empty string if `keep_blank_values` set to 1:
>>> w3lib.url.url_query_parameter("product.html?id=", "id", keep_blank_values=1)
''
>>>
"""
queryparams = parse_qs(
urlsplit(str(url))[3],
keep_blank_values=keep_blank_values
)
return queryparams.get(parameter, [default])[0]
|
d2ed39b6d6054baa9f8be90dfe1e1c8a06e47746
| 3,645,709
|
def read_ground_stations_extended(filename_ground_stations_extended):
"""
Reads ground stations from the input file.
    :param filename_ground_stations_extended: Filename of the extended ground stations file (typically /path/to/ground_stations.txt)
:return: List of ground stations
"""
ground_stations_extended = []
gid = 0
with open(filename_ground_stations_extended, 'r') as f:
for line in f:
split = line.split(',')
if len(split) != 8:
raise ValueError("Extended ground station file has 8 columns: " + line)
if int(split[0]) != gid:
raise ValueError("Ground station id must increment each line")
ground_station_basic = {
"gid": gid,
"name": split[1],
"latitude_degrees_str": split[2],
"longitude_degrees_str": split[3],
"elevation_m_float": float(split[4]),
"cartesian_x": float(split[5]),
"cartesian_y": float(split[6]),
"cartesian_z": float(split[7]),
}
ground_stations_extended.append(ground_station_basic)
gid += 1
return ground_stations_extended
|
2492dc8d5c55f124696aafbec11d74e609c3f397
| 3,645,710
|
import os
import configparser
import ssl
import smtplib
def send_email(destination, code):
"""
Send the validation email.
"""
if 'CLOUD' not in os.environ:
        # If the application is running locally, use config.ini; if not, use environment variables
config = configparser.ConfigParser()
config.read_file(open('config.ini'))
# Sender email and account password
sender = config['SENDER']['from']
password = config['SENDER_PASSWORD']['psw']
else:
sender = os.environ['SENDER']
password = os.environ['SENDER_PASSWORD']
ret = False
try:
text = "Code: {}".format(code)
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (sender, destination, 'Agnes', text)
# TODO Improve the email format. Let it more Readable
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender, password)
server.sendmail(sender, destination, message)
logger.debug('Sending email to {}'.format(destination))
ret = True
except Exception as e:
logger.exception(e, exc_info=False)
finally:
return ret
|
e60c16137088b1ed1670628c59148c68d6cb49d5
| 3,645,711
|
import uuid
def shortPrescID():
"""Create R2 (short format) Prescription ID
Build the prescription ID and add the required checkdigit.
Checkdigit is selected from the PRESCRIPTION_CHECKDIGIT_VALUES constant
"""
_PRESC_CHECKDIGIT_VALUES = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ+'
hexString = str(uuid.uuid1()).replace('-', '').upper()
prescriptionID = hexString[:6] + '-Z' + hexString[6:11] + '-' + hexString[12:17]
prscID = prescriptionID.replace('-', '')
prscIDLength = len(prscID)
runningTotal = 0
for stringPosition in range(prscIDLength):
runningTotal = runningTotal + int(prscID[stringPosition], 36) * (2 ** (prscIDLength - stringPosition))
checkValue = (38 - runningTotal % 37) % 37
checkValue = _PRESC_CHECKDIGIT_VALUES[checkValue]
prescriptionID += checkValue
return prescriptionID
|
db491d3fe299adfbcd6f202eb46bc4669f829613
| 3,645,712
|
def rmse(predictions, verbose=True):
"""Compute RMSE (Root Mean Squared Error).
.. math::
\\text{RMSE} = \\sqrt{\\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \in
\\hat{R}}(r_{ui} - \\hat{r}_{ui})^2}.
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Root Mean Squared Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
"""
if not predictions:
raise ValueError('Prediction list is empty.')
mse = np.mean([float((true_r - est)**2)
for (_, _, true_r, est, _) in predictions])
rmse_ = np.sqrt(mse)
if verbose:
print('RMSE: {0:1.4f}'.format(rmse_))
return rmse_
|
2898f98fba50d71ef20e66b9654e1d539531f17b
| 3,645,713
|
import ast
def get_module_docstring(path):
"""get a .py file docstring, without actually executing the file"""
with open(path) as f:
return ast.get_docstring(ast.parse(f.read()))
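# Illustrative usage (added sketch): point the helper at any .py file, e.g. the
# current script, to print its module docstring without importing/executing it.
if __name__ == "__main__":
    print(get_module_docstring(__file__))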
|
e253372bfb6f65907a5461332d14c414c2370c66
| 3,645,714
|
def get_authenticate_kwargs(oauth_credentials=None, http_=None):
"""Returns a dictionary with keyword arguments for use with discovery
Prioritizes oauth_credentials or a http client provided by the user
If none provided, falls back to default credentials provided by google's command line
utilities. If that also fails, tries using httplib2.Http()
Used by `gcs.GCSClient` and `bigquery.BigQueryClient` to initiate the API Client
"""
if oauth_credentials:
authenticate_kwargs = {
"credentials": oauth_credentials
}
elif http_:
authenticate_kwargs = {
"http": http_
}
else:
# neither http_ or credentials provided
try:
# try default credentials
oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default()
authenticate_kwargs = {
"credentials": oauth_credentials
}
        except oauth2client.client.ApplicationDefaultCredentialsError:
# try http using httplib2
authenticate_kwargs = {
"http": httplib2.Http()
}
return authenticate_kwargs
|
da3cef34a51fe1bc74cb8ce221e9610160f0f176
| 3,645,715
|
import torchvision.transforms as T
from PIL import Image
def get_transforms(size=128, mobilenet=False):
"""
Gets all the torchvision transforms we will be applying to the dataset.
"""
# These are the transformations that we will do to our dataset
# For X transforms, let's do some of the usual suspects and convert to tensor.
# Don't forget to normalize to [0.0, 1.0], FP32
# and don't forget to resize to the same size every time.
x_transforms = [
T.Resize((size, size)),
T.RandomApply([
T.RandomAffine(degrees=20, translate=(0.1, 0.1)),
T.RandomHorizontalFlip(p=0.5),
T.RandomRotation(degrees=(-30, 30)),
T.RandomVerticalFlip(p=0.5),
], p=0.5),
T.ColorJitter(brightness=0.5),
T.ToTensor(), # Converts to FP32 [0.0, 1.0], Tensor type
]
# Pretrained MobileNetV2 requires normalizing like this:
if mobilenet:
x_transforms.append(T.Normalize(mean=MOBILENET_MEAN, std=MOBILENET_STD))
# For Y transforms, we need to make sure that we do the same thing to the ground truth,
# since we are trying to recreate the image.
y_transforms = [
T.Resize((size, size), interpolation=Image.NEAREST), # Make sure we don't corrupt the labels
T.RandomApply([
T.RandomAffine(degrees=20, translate=(0.1, 0.1)),
T.RandomHorizontalFlip(p=0.5),
T.RandomRotation(degrees=(-30, 30)),
T.RandomVerticalFlip(p=0.5),
], p=0.5),
]
return x_transforms, y_transforms
|
ddacbf265ba12ac259ec35ef57798688a3e36f02
| 3,645,716
|
def transform(f, a, b, c, d):
"""
Transform a given function linearly.
    If f is the original function, and a, b, c, and d are the parameters in
    order, then the return value is the function
        F(x) = a*f(c*x + d) + b
"""
return lambda x: a * f(c * x + d) + b
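# Illustrative usage (added sketch): build F(x) = 2*sin(3*x + 1) + 5 from math.sin.
import math
F = transform(math.sin, a=2, b=5, c=3, d=1)
print(F(0.0))               # 2*sin(1) + 5 ~ 6.683
print(2 * math.sin(1) + 5)  # same value, computed directly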
|
a47b3f4f3dc1e3ed5ddb6155bcd67b8297c298ed
| 3,645,717
|
def delete_rules(request):
"""
Deletes the rules with the given primary key.
"""
if request.method == 'POST':
rules_id = strip_tags(request.POST['post_id'])
post = HouseRules.objects.get(pk=rules_id)
post.filepath.delete() # Delete actual file
post.delete()
return redirect('archive-rules')
|
e6be9d39dfe07d17fdb18634b422262917fbe6eb
| 3,645,718
|
def display_word(word, secret_word, word_to_guess):
"""Function to edit the word to display and the word to guess (word to display
is the test word with its colored letter and the word to guess is the word
with spaces in it, for each missing letter).
Args:
word (str): the input word
        secret_word (str): the secret word that the user has to find
word_to_guess (str): the word with spaces for each missing letter
Returns:
        tuple (str, str): the word to display (with colored letters) and the updated word to guess
"""
word_to_display = ""
indexes = []
    # Rebuild the letter-count dictionary for every guessed word; it is
    # decremented as letters are matched so that a letter is not colored
    # yellow more times than it occurs in the secret word.
dictio = letters_dict(secret_word)
# For each letter in the word
for letter_index in range(len(word)):
word_letter = word[letter_index]
# If the letter is the same at the same place in the secret_word
if word_letter==secret_word[letter_index]:
# Colors the letter in green
word_to_display += colored(word_letter, "green")
# Adds the index to a list
indexes.append(letter_index)
dictio[word_letter] -= 1
# If the letter is not the same at the same place in the secret word
# but is in the word anyway
elif word_letter in secret_word:
if dictio[word_letter]>0:
# Colors the letter in yellow and substract 1 to the dictionary
# of letters, if it's not 0
word_to_display += colored(word_letter, "yellow")
dictio[word_letter] -= 1
else:
# If there's 0 for the letter in the dictionary, it's because we
# already encountered them all, so we don't color it
word_to_display += word_letter
else:
word_to_display += word_letter
# Transforms the word to guess as a list, within each letter is one element
word_to_guess_list = list(word_to_guess)
for index in range(len(secret_word)):
if index in indexes:
# If the user have found a letter, replaces the space (_) by it
word_to_guess_list[index] = secret_word[index]
# Reforms the word
word_to_guess = "".join(word_to_guess_list)
return word_to_display, word_to_guess
|
e55ef943d5e3d837ca1698ba1e2e65d9062b16f0
| 3,645,719
|
def get_config_cache(course_pk: 'int') -> dict:
"""Cacheからコンフィグを取得する.存在しない場合,新たにキャッシュを生成して格納後,コンフィグを返す."""
cache_key = f"course-config-{course_pk}"
cached_config = cache.get(cache_key, None)
if cached_config is None:
config = Config.objects.filter(course_id=course_pk).first()
cached_config = set_config_from_instance(config)
return cached_config
|
a155ce5354d8ec00eab0da42c919ac15eac43bb4
| 3,645,720
|
import logging
def log_command(func):
"""
Logging decorator for logging bot commands and info
"""
def log_command(*args, **kwargs):
slack, command, event = args
user = slack.user_info(event["user"])
log_line = 'USER: %s | CHANNEL ID: %s | COMMAND: %s | TEXT: %s'
command_info = log_line % (user["user"]["name"],
event["channel"],
command,
event["text"])
logging.info(command_info)
command = func(*args, **kwargs)
return command
return log_command
|
8ab4f36ff6c01a3799061f532d0c25ec04d725e8
| 3,645,721
|
import os
def expand_home_folder(path):
"""Checks if path starts with ~ and expands it to the actual
home folder."""
if path.startswith("~"):
return os.environ.get('HOME') + path[1:]
return path
|
3746859cc16b77dcfd02c675db81bfe4a195a85f
| 3,645,722
|
import copy
def calc_stats(scores_summ, curr_lines, curr_idx, CI=0.95, ext_test=None,
stats="mean", shuffle=False):
"""
calc_stats(scores_summ, curr_lines, curr_idx)
Calculates statistics on scores from runs with specific analysis criteria
and records them in the summary scores dataframe.
Required args:
- scores_summ (pd DataFrame): DataFrame containing scores summary
- curr_lines (pd DataFrame) : DataFrame lines corresponding to specific
analysis criteria
- curr_idx (int) : Current row in the scores summary
DataFrame
Optional args:
- CI (num) : Confidence interval around which to collect
percentile values
default: 0.95
        - ext_test (str)  : Name of the extra test set, if any (None if none)
default: None
- stats (str) : stats to take, i.e., "mean" or "median"
default: "mean"
        - shuffle (bool)  : If True, data is from shuffled runs, and will be averaged
across runs before taking stats
default: False
Returns:
- scores_summ (pd DataFrame): Updated DataFrame containing scores, as
well as epoch_n, runs_total, runs_nan
summaries
"""
scores_summ = copy.deepcopy(scores_summ)
# score labels to perform statistics on
sc_labs = ["epoch_n"] + logreg_util.get_sc_labs(
True, ext_test_name=ext_test)
# avoids accidental nuisance dropping by pandas
curr_lines["epoch_n"] = curr_lines["epoch_n"].astype(float)
if shuffle: # group runs and take mean or median across
scores_summ.loc[curr_idx, "mouse_n"] = -1
keep_lines = \
[col for col in curr_lines.columns if col in sc_labs] + ["run_n"]
grped_lines = curr_lines[keep_lines].groupby("run_n", as_index=False)
if stats == "mean":
curr_lines = grped_lines.mean() # automatically skips NaNs
elif stats == "median":
curr_lines = grped_lines.median() # automatically skips NaNs
else:
gen_util.accepted_values_error("stats", stats, ["mean", "median"])
# calculate n_runs (without nans and with)
scores_summ.loc[curr_idx, "runs_total"] = len(curr_lines)
scores_summ.loc[curr_idx, "runs_nan"] = curr_lines["epoch_n"].isna().sum()
# percentiles to record
ps, p_names = math_util.get_percentiles(CI)
for sc_lab in sc_labs:
if sc_lab in curr_lines.keys():
cols = []
vals = []
data = curr_lines[sc_lab].astype(float)
for stat in ["mean", "median"]:
cols.extend([stat])
vals.extend(
[math_util.mean_med(data, stats=stat, nanpol="omit")])
for error in ["std", "sem"]:
cols.extend([error])
vals.extend([math_util.error_stat(
data, stats="mean", error=error, nanpol="omit")])
# get 25th and 75th quartiles
cols.extend(["q25", "q75"])
vals.extend(math_util.error_stat(
data, stats="median", error="std", nanpol="omit"))
# get other percentiles (for CI)
cols.extend(p_names)
vals.extend(math_util.error_stat(
data, stats="median", error="std", nanpol="omit", qu=ps))
# get MAD
cols.extend(["mad"])
vals.extend([math_util.error_stat(
data, stats="median", error="sem", nanpol="omit")])
# plug in values
cols = [f"{sc_lab}_{name}" for name in cols]
gen_util.set_df_vals(scores_summ, curr_idx, cols, vals)
return scores_summ
|
ddaa5b5a2c70c25488f572ad894f7aa0bedc7189
| 3,645,723
|
def report_date_time() -> str:
"""Return the report date requested as query parameter."""
report_date_string = dict(bottle.request.query).get("report_date")
return str(report_date_string).replace("Z", "+00:00") if report_date_string else iso_timestamp()
|
391db86e523c55f88c40c1bc8b9fb1ed6f3d97ff
| 3,645,724
|
def assign_colour_label_data(catl):
"""
Assign colour label to data
Parameters
----------
catl: pandas Dataframe
Data catalog
Returns
---------
catl: pandas Dataframe
Data catalog with colour label assigned as new column
"""
logmstar_arr = catl.logmstar.values
u_r_arr = catl.modelu_rcorr.values
colour_label_arr = np.empty(len(catl), dtype='str')
for idx, value in enumerate(logmstar_arr):
# Divisions taken from Moffett et al. 2015 equation 1
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
if value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
if value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
catl['colour_label'] = colour_label_arr
return catl
|
4f56cc4d4ac1ae722deffb92d63d5867a885fb0e
| 3,645,725
|
def get_policy(arn):
"""Get info about a policy."""
client = get_client("iam")
response = client.get_policy(PolicyArn=arn)
return response
|
c56d79bfd8cf744bbe010ad0d5dfbaeaa3d59e76
| 3,645,726
|
import os
def get_file_xml(filename):
"""
:param filename: the filename, without the .xml suffix, in the tests/xml directory
:return: returns the specified file's xml
"""
file = os.path.join(XML_DIR, filename + '.xml')
with open(file, 'r') as f:
xml = f.read()
return xml
|
c08ef33bc12faa6b7194e1e7bac304e9f350bb09
| 3,645,727
|
def _write_roadways(roadway_feature_class, condition):
"""Writes roadway feature class to STAMINA syntax
Arguments:
        roadway_feature_class {String} -- Path to roadway feature class
condition {String} -- Existing, NoBuild, or Build. Determines fields to use from geospatial template
Returns:
[string] -- [roadways]
"""
roadway_count = len([row for row in shapefile.Reader(roadway_feature_class)])
with shapefile.Reader(roadway_feature_class) as roadways:
roadway_string = "2,{}\n".format(roadway_count)
flds = validate_roadway_field(condition)
for row in roadways.shapeRecords():
road = row.record["road_name"]
speed = row.record["speed"]
auto = round(row.record[flds[0]], 0)
medium = round(row.record[flds[1]], 0)
heavy = round(row.record[flds[2]], 0)
roadway_string += "{}\n".format(road)
roadway_string += "CARS {} {}\n".format(auto, speed)
roadway_string += "MT {} {}\n".format(medium, speed)
roadway_string += "HT {} {}\n".format(heavy, speed)
roadway_string += _write_roadway_points(row.shape)
roadway_string += roadway_separator()
return roadway_string
|
0c82db17f3632a2c7023a6437a3f7e8221e667ba
| 3,645,728
|
from .math.backend import BACKENDS
def detect_backends() -> tuple:
"""
Registers all available backends and returns them.
This includes only backends for which the minimal requirements are fulfilled.
Returns:
`tuple` of `phi.math.backend.Backend`
"""
    try:
        from .tf import TF_BACKEND  # importing registers the TensorFlow backend
    except ImportError:
        pass
    try:
        from .torch import TORCH_BACKEND  # importing registers the PyTorch backend
    except ImportError:
        pass
    try:
        from .jax import JAX_BACKEND  # importing registers the JAX backend
    except ImportError:
        pass
return tuple(BACKENDS)
|
4d7fb7c80e8a931a614549539b9e157223602d31
| 3,645,729
|
import random
def mix_audio(word_path=None,
bg_path=None,
word_vol=1.0,
bg_vol=1.0,
sample_time=1.0,
sample_rate=16000):
"""
Read in a wav file and background noise file. Resample and adjust volume as
necessary.
"""
# If no word file is given, just return random background noise
if word_path == None:
waveform = [0] * int(sample_time * sample_rate)
fs = sample_rate
else:
# Open wav file, resample, mix to mono
waveform, fs = librosa.load(word_path, sr=sample_rate, mono=True)
# Pad 0s on the end if not long enough
if len(waveform) < sample_time * sample_rate:
waveform = np.append(waveform, np.zeros(int((sample_time *
sample_rate) - len(waveform))))
# Truncate if too long
waveform = waveform[:int(sample_time * sample_rate)]
# If no background noise is given, just return the waveform
if bg_path == None:
return waveform
# Open background noise file
bg_waveform, fs = librosa.load(bg_path, sr=fs)
# Pick a random starting point in background file
max_end = len(bg_waveform) - int(sample_time * sample_rate)
start_point = random.randint(0, max_end)
end_point = start_point + int(sample_time * sample_rate)
# Mix the two sound samples (and multiply by volume)
waveform = [0.5 * word_vol * i for i in waveform] + \
(0.5 * bg_vol * bg_waveform[start_point:end_point])
return waveform
|
fba93e1f0d13bab4b9a30fe2d849fa3b1cf99927
| 3,645,730
|
def analytical_pulse_width(ekev):
"""
Estimate analytical_pulse_width (FWHM) from radiation energy (assumes symmetrical beam)
:param ekev: radiation energy [keV]
:return sig: Radiation pulse width (FWHM) [m]
"""
sig = np.log((7.4e03/ekev))*6
return sig/1e6
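# Worked number for orientation (added sketch, assumes numpy imported as np):
# at 9.3 keV, log(7400/9.3) * 6 ~ 40.1, so the returned FWHM is ~4.0e-05 m (about 40 um).
print(analytical_pulse_width(9.3))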
|
c56f861d1a83147ff425de7760416e870e1a69d4
| 3,645,731
|
def progress_timeout(progress_bar):
"""
Update the progress of the timer on a timeout tick.
Parameters
----------
progress_bar : ProgressBar
The UI progress bar object
Returns
-------
bool
True if continuing timer, False if done.
"""
global time_remaining, time_total
time_remaining -= 1
new_val = 1 - (time_remaining / time_total)
if new_val >= 1:
progress_bar.pb.set_text("Coffee extraction done.")
play_endsound()
return False
progress_bar.pb.set_fraction(new_val)
progress_bar.pb.set_text("{0:.1f} % Brewed ({1:01d}:{2:02d} Remaining)"
                            .format(new_val * 100, time_remaining // 60, time_remaining % 60))
return True
|
1b7e4976a5d96b2ede671c413ff0a7702603c6d8
| 3,645,732
|
def socket_file(module_name):
"""
Get the absolute path to the socket file for the named module.
"""
module_name = realname(module_name)
return join(sockets_directory(), module_name + '.sock')
|
df92c1a23374296d96c6419f32cdffd55b6564cf
| 3,645,733
|
def postBuild(id: str):
"""Register a new build.
Args:
id: Identifier of Repository for which build is to be registered.
Returns:
build_id: Identifier of Build created.
"""
return register_builds(
id, request.headers["X-Project-Access-Token"], request.json
)
|
13b8aed703e9e5cf2191baaf98583374021fb494
| 3,645,734
|
import json
def submit(g_nocaptcha_response_value, secret_key, remoteip):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
    g_nocaptcha_response_value -- The value of the g-recaptcha-response
                                  field from the form
secret_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
if not (g_nocaptcha_response_value and len(g_nocaptcha_response_value)):
return RecaptchaResponse(
is_valid=False,
error_codes=['incorrect-captcha-sol']
)
params = urlencode({
'secret': want_bytes(secret_key),
'remoteip': want_bytes(remoteip),
'response': want_bytes(g_nocaptcha_response_value),
})
if not PY2:
params = params.encode('utf-8')
req = Request(
url=VERIFY_URL, data=params,
headers={
'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'noReCAPTCHA Python'
}
)
httpresp = urlopen(req)
try:
res = force_text(httpresp.read())
return_values = json.loads(res)
except (ValueError, TypeError):
return RecaptchaResponse(
is_valid=False,
error_codes=['json-read-issue']
)
except:
return RecaptchaResponse(
is_valid=False,
error_codes=['unknown-network-issue']
)
finally:
httpresp.close()
return_code = return_values.get("success", False)
error_codes = return_values.get('error-codes', [])
logger.debug("%s - %s" % (return_code, error_codes))
if return_code is True:
return RecaptchaResponse(is_valid=True)
else:
return RecaptchaResponse(is_valid=False, error_codes=error_codes)
|
5dd56bd898862af8323a049e73e5cc3a9b983b26
| 3,645,735
|
def boundary(shape, n_size, n):
""" Shape boundaries & their neighborhoods
    @param shape 2D_bool_numpy_array: True if pixel in shape
    @param n_size: shape of the neighborhood window
    @param n: function mapping a pixel index to the slice selecting its neighborhood
@return {index: neighborhood}
index: 2D_int_tuple = index of neighborhood center in shape
neighborhood: 2D_bool_numpy_array of size n_size
Boundaries are shape pixels inside the shape having 1 or more 4-neighbors
outside the shape.
"""
return {i: shape[n(i)]
for i in np.ndindex(shape.shape)
if is_boundary_pixel(shape,i,n_size)}
|
619050d3dfff50ccea204538a4cabcd7ef2190ab
| 3,645,736
|
def centered_mols(self, labels, return_trans=False):
"""
Return the molecules translated at the origin with a corresponding cell
Parameters
----------
labels : int or list of ints
The labels of the atoms to select
    return_trans : bool, optional
        If True, also return the translation vector (the negative centroid)
Returns
-------
mol : Mol object
The selected molecules with their centroid at the origin
mod_cell : Mol object
The new confined cell corresponding to the now translated molecules
"""
mol, mod_cell = self.complete_mol(labels)
centro = mol.centroid()
mol.translate(-centro)
mod_cell.translate(-centro)
mod_cell = mod_cell.confined()
if return_trans:
return mol, mod_cell, -centro
else:
return mol, mod_cell
|
858fd2b43f0ac9eaca3db94108f9bec0dbf305c7
| 3,645,737
|
import torch
def binary_accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
"""Computes the accuracy for binary classification"""
with torch.no_grad():
batch_size = target.size(0)
pred = (output >= 0.5).float().t().view(-1)
correct = pred.eq(target.view(-1)).float().sum()
correct.mul_(100.0 / batch_size)
return correct
|
306d7d0e85a617b8b4508f2dfbbbac1f5fb67bc5
| 3,645,738
|
def prepare_config(config):
"""
Prepares a dictionary to be stored as a json.
Converts all numpy arrays to regular arrays
Args:
config: The config with numpy arrays
Returns:
The numpy free config
"""
c = {}
for key, value in config.items():
if isinstance(value, np.ndarray):
value = value.tolist()
c[key] = value
return c
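# Illustrative usage (added sketch): the returned dict can be passed straight
# to json.dumps, which rejects raw ndarrays.
import json
import numpy as np
config = {"weights": np.array([0.1, 0.9]), "name": "demo"}
print(json.dumps(prepare_config(config)))  # {"weights": [0.1, 0.9], "name": "demo"}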
|
4ad31fc20fab7e3f7a7de9f50b0431d8000df029
| 3,645,739
|
import json
def load_config(path='config.json'):
"""
Loads configruation from config.json file.
Returns station mac address, interval, and units for data request
"""
# Open config JSON
with open(path) as f:
# Load JSON file to dictionary
config = json.load(f)
# Return mac address, interval, and units
return (config['station_max_address'], int(config['interval']), config['units'])
|
5522f023ed3293149613dcc2dc007e34d50f3fa8
| 3,645,740
|
import torch
def log_px_z(pred_logits, outcome):
"""
Returns Bernoulli log probability.
:param pred_logits: logits for outcome 1
:param outcome: datapoint
:return: log Bernoulli probability of outcome given logits in pred_logits
"""
pred = pred_logits.view(pred_logits.size(0), -1)
y = outcome.view(outcome.size(0), -1)
return -torch.sum(torch.max(pred, torch.tensor(0., device=pred.device)) - pred * y +
torch.log(1 + torch.exp(-torch.abs(pred))), 1)
|
6369d893cc9bfe5c3f642f819511798d01ae3ae9
| 3,645,741
|
def _sort_rows(matrix, num_rows):
"""Sort matrix rows by the last column.
Args:
matrix: a matrix of values (row,col).
num_rows: (int) number of sorted rows to return from the matrix.
Returns:
Tensor (num_rows, col) of the sorted matrix top K rows.
"""
tmatrix = tf.transpose(a=matrix, perm=[1, 0])
sorted_tmatrix = tf.nn.top_k(tmatrix, num_rows)[0]
return tf.transpose(a=sorted_tmatrix, perm=[1, 0])
|
e9e8fcb6275915e8a42798411c0712eb34bbbfe4
| 3,645,742
|
import functools
def partial_at(func, indices, *args):
"""Partial function application for arguments at given indices."""
@functools.wraps(func)
def wrapper(*fargs, **fkwargs):
nargs = len(args) + len(fargs)
iargs = iter(args)
ifargs = iter(fargs)
posargs = (next((ifargs, iargs)[i in indices]) for i in range(nargs))
# posargs = list( posargs )
# print( 'posargs', posargs )
return func(*posargs, **fkwargs)
return wrapper
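# Illustrative usage (added sketch): pre-fill positions 0 and 2 of a
# three-argument function, leaving only the middle argument to be supplied later.
def _greet(greeting, name, punctuation):
    return greeting + ", " + name + punctuation

hello = partial_at(_greet, {0, 2}, "Hello", "!")
print(hello("Ada"))  # Hello, Ada!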
|
1b45e0bd8baea869d80c6b5963c6063f6b8fbdd4
| 3,645,743
|
import importlib
def try_load_module(module_name):
"""
Import a module by name, print the version info and file name.
Return None on failure.
"""
try:
mod = importlib.import_module(module_name)
        print(green("%s %s:" % (module_name, mod.__version__)), mod.__file__)
return mod
except ImportError:
        print(yellow("Could not find %s" % module_name))
return None
|
527c2fb3dbbb3ef8ee5800f492a727a2d565892d
| 3,645,744
|
def project_image(request, uid):
"""
GET request : return project image
PUT request : change project image
"""
project = Project.objects.filter(uid=uid).first()
imgpath = project.image.path if project.image else get_thumbnail()
if request.method == "PUT":
file_object = request.data.get("file")
imgpath = change_image(obj=project, file_object=file_object)
    data = open(imgpath, "rb").read()
return HttpResponse(content=data, content_type="image/jpeg")
|
f05db1026f41ab15eece1068fe182e0673e798e3
| 3,645,745
|
from typing import Optional
def validate(prefix: str, identifier: str) -> Optional[bool]:
"""Validate the identifier against the prefix's pattern, if it exists.
:param prefix: The prefix in the CURIE
:param identifier: The identifier in the CURIE
:return: Whether this identifier passes validation, after normalization
>>> validate("chebi", "1234")
True
>>> validate("chebi", "CHEBI:12345")
True
>>> validate("chebi", "CHEBI:ABCD")
False
"""
resource = get_resource(prefix)
if resource is None:
return None
return resource.validate_identifier(identifier)
|
bbdc0eef34a03670963354d0cdf6e414eaa2aa8d
| 3,645,746
|
import torch
def laplacian_positional_encoding(g, pos_enc_dim):
"""
Graph positional encoding v/ Laplacian eigenvectors
"""
# Laplacian
A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)
L = sp.eye(g.number_of_nodes()) - N * A * N
# Eigenvectors with scipy
#EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR')
EigVal, EigVec = sp.linalg.eigs(L, k=pos_enc_dim+1, which='SR', tol=1e-2)
EigVec = EigVec[:, EigVal.argsort()] # increasing order
out = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float()
return out
|
69b09e69f37fc870fa36510ef05172f35bfc0093
| 3,645,747
|
async def replace_chain():
""" replaces the current chain with the most recent and longest chain """
blockchain.replace_chain()
blockchain.is_chain_valid(chain=blockchain.chain)
    return {'message': 'chain has been updated and is valid',
'longest chain': blockchain.chain}
|
3ef0797ca582dbd2cb7ab47b09c847a4380215d5
| 3,645,748
|
import copy
def ucb(bufferx,
objective_weights,
regression_models,
param_space,
scalarization_method,
objective_limits,
iteration_number,
model_type,
classification_model=None,
number_of_cpus=0):
"""
Multi-objective ucb acquisition function as detailed in https://arxiv.org/abs/1805.12168.
The mean and variance of the predictions are computed as defined by Hutter et al.: https://arxiv.org/pdf/1211.0906.pdf
:param bufferx: a list of tuples containing the points to predict and scalarize.
:param objective_weights: a list containing the weights for each objective.
:param regression_models: the surrogate models used to evaluate points.
:param param_space: a space object containing the search space.
:param scalarization_method: a string indicating which scalarization method to use.
    :param model_type: a string indicating the type of surrogate model being used.
:param objective_limits: a dictionary with estimated minimum and maximum values for each objective.
:param iteration_number: an integer for the current iteration number, used to compute the beta
:param classification_model: the surrogate model used to evaluate feasibility constraints
:param number_of_cpus: an integer for the number of cpus to be used in parallel.
:return: a list of scalarized values for each point in bufferx.
"""
beta = np.sqrt(0.125*np.log(2*iteration_number + 1))
augmentation_constant = 0.05
prediction_means = {}
prediction_variances = {}
number_of_predictions = len(bufferx)
tmp_objective_limits = copy.deepcopy(objective_limits)
prediction_means, prediction_variances = models.compute_model_mean_and_uncertainty(bufferx, regression_models, model_type, param_space, var=True)
if classification_model != None:
classification_prediction_results = models.model_probabilities(bufferx, classification_model, param_space)
feasible_parameter = param_space.get_feasible_parameter()[0]
true_value_index = classification_model[feasible_parameter].classes_.tolist().index(True)
feasibility_indicator = classification_prediction_results[feasible_parameter][:,true_value_index]
else:
feasibility_indicator = [1]*number_of_predictions # if no classification model is used, then all points are feasible
# Compute scalarization
if (scalarization_method == "linear"):
scalarized_predictions = np.zeros(number_of_predictions)
beta_factor = 0
for objective in regression_models:
scalarized_predictions += objective_weights[objective]*prediction_means[objective]
beta_factor += objective_weights[objective]*prediction_variances[objective]
scalarized_predictions -= beta*np.sqrt(beta_factor)
scalarized_predictions = scalarized_predictions*feasibility_indicator
# The paper does not propose this, I applied their methodology to the original tchebyshev to get the approach below
# Important: since this was not proposed in the paper, their proofs and bounds for the modified_tchebyshev may not be valid here.
elif(scalarization_method == "tchebyshev"):
scalarized_predictions = np.zeros(number_of_predictions)
total_values = np.zeros(number_of_predictions)
for objective in regression_models:
scalarized_values = objective_weights[objective] * np.absolute(prediction_means[objective] - beta*np.sqrt(prediction_variances[objective]))
total_values += scalarized_values
scalarized_predictions = np.maximum(scalarized_values, scalarized_predictions)
scalarized_predictions += augmentation_constant*total_values
scalarized_predictions = scalarized_predictions*feasibility_indicator
elif(scalarization_method == "modified_tchebyshev"):
scalarized_predictions = np.full((number_of_predictions), float("inf"))
reciprocated_weights = reciprocate_weights(objective_weights)
for objective in regression_models:
scalarized_value = reciprocated_weights[objective] * (prediction_means[objective] - beta*np.sqrt(prediction_variances[objective]))
scalarized_predictions = np.minimum(scalarized_value, scalarized_predictions)
scalarized_predictions = scalarized_predictions*feasibility_indicator
scalarized_predictions = -scalarized_predictions # We will minimize later, but we want to maximize instead, so we invert the sign
else:
print("Error: unrecognized scalarization method:", scalarization_method)
raise SystemExit
return scalarized_predictions, tmp_objective_limits
|
4ec8615d979fb9c3ee7539cd5e161ee920bc1c3a
| 3,645,749
|
def np_array_to_binary_vector(np_arr):
""" Converts a NumPy array to the RDKit ExplicitBitVector type. """
binary_vector = DataStructs.ExplicitBitVect(len(np_arr))
binary_vector.SetBitsFromList(np.where(np_arr)[0].tolist())
return binary_vector
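# Illustrative usage (added sketch, assumes RDKit and NumPy are available): a 0/1
# NumPy fingerprint becomes an ExplicitBitVect usable with RDKit similarity functions.
import numpy as np
from rdkit import DataStructs
arr = np.array([1, 0, 1, 1, 0, 0, 1, 0])
bv = np_array_to_binary_vector(arr)
print(bv.GetNumOnBits())                       # 4
print(DataStructs.TanimotoSimilarity(bv, bv))  # 1.0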
|
c1865c47cd1abb71fbb3d3ce1b9a9cc75e87f70a
| 3,645,750
|
def augment_features(data, feature_augmentation):
"""
Augment features for a given data matrix.
:param data: Data matrix.
:param feature_augmentation: Function applied to augment the features.
:return: Augmented data matrix.
"""
if data is not None and feature_augmentation is not None:
if isinstance(feature_augmentation, list):
for augmentation_function in feature_augmentation:
data = augmentation_function(data)
else:
data = feature_augmentation(data)
return data
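# Illustrative usage (added sketch): augmentation functions in a list are applied in order.
import numpy as np
X = np.array([[1.0, 2.0], [3.0, 4.0]])
add_squares = lambda data: np.hstack([data, data ** 2])
add_bias = lambda data: np.hstack([data, np.ones((data.shape[0], 1))])
print(augment_features(X, [add_squares, add_bias]).shape)  # (2, 5)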
|
687a7ff2a4b61131f5d95e1f7d6eb77d75bd6f06
| 3,645,751
|
def _get_data_from_empty_list(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles empty lists. """
fields = get_field_list(fields, schema)
return {'cols': _get_cols(fields, schema), 'rows': []}, 0
|
bd1c219ed2ef738cf403b984cccc4aa4cd96aa2f
| 3,645,752
|
def copy_keys_except(dic, *keys):
"""Return a copy of the dict without the specified items.
"""
ret = dic.copy()
for key in keys:
try:
del ret[key]
except KeyError:
pass
return ret
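# Illustrative usage (added sketch): the source dict is left untouched and
# keys that are absent are simply ignored.
settings = {"host": "localhost", "port": 8080, "debug": True}
print(copy_keys_except(settings, "debug", "missing_key"))  # {'host': 'localhost', 'port': 8080}
print(settings)  # unchanged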
|
b1e57db9dbacbc2a7c502c36082f40598a0f4b90
| 3,645,753
|
import random
import math
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
|
80838328fc9383731e1a853c8dc572228d1a4567
| 3,645,754
|
from typing import Any
async def start_time() -> Any:
"""
Returns the contest start time.
"""
return schemas.Timestamp(timestamp=settings.EVENT_START_TIME)
|
5613f6d8928c1d1ca49677e829617769a3e6f8c3
| 3,645,755
|
def reshape(v, shape):
"""Implement `reshape`."""
return np.reshape(v, shape)
|
249e17a4b503b3434c5ec0d3e14bef1208321e92
| 3,645,756
|
def generate_html_from_module(module):
"""
Extracts a module documentations from a module object into a HTML string
uses a pre-written builtins list in order to exclude built in functions
:param module: Module object type to extract documentation from
:return: String representation of an HTML file
"""
html_content = f"<html><head><title>{module.__name__} Doc</title></head><body><h1>Module {module.__name__}:</h1>"
html_content += f"Function {module.__doc__}"
for function in module.__dict__:
if callable(getattr(module, function)):
html_content += f"<h2>Function {function}:</h2>"
html_content += f"{getattr(module, function).__doc__}"
html_content += f"<h3>Annotations:</h3>"
for annotation in getattr(module, function).__annotations__.keys():
html_content += f"{annotation} <br>"
html_content += "</body></html>"
return html_content
|
3e59931f3716dd3c50dfdda3ba17807b62f04c14
| 3,645,757
|
def _phi(r, order):
"""Coordinate-wise nonlinearity used to define the order of the
interpolation.
See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.
Args:
r: input op
order: interpolation order
Returns:
    phi_k evaluated coordinate-wise on r, for k = order
"""
    # using EPSILON prevents log(0), sqrt(0), etc.
# sqrt(0) is well-defined, but its gradient is not
with tf.name_scope("phi"):
if order == 1:
r = tf.maximum(r, EPSILON)
r = tf.sqrt(r)
return r
elif order == 2:
return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))
elif order == 4:
return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))
elif order % 2 == 0:
r = tf.maximum(r, EPSILON)
return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)
else:
r = tf.maximum(r, EPSILON)
return tf.pow(r, 0.5 * order)
|
b2270f17260e90b995c60b4bc0fb65f49be9c514
| 3,645,758
|
def updated_topology_description(topology_description, server_description):
"""Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
a hello call
Called after attempting (successfully or not) to call hello on the
server at server_description.address. Does not modify topology_description.
"""
address = server_description.address
# These values will be updated, if necessary, to form the new
# TopologyDescription.
topology_type = topology_description.topology_type
set_name = topology_description.replica_set_name
max_set_version = topology_description.max_set_version
max_election_id = topology_description.max_election_id
server_type = server_description.server_type
# Don't mutate the original dict of server descriptions; copy it.
sds = topology_description.server_descriptions()
# Replace this server's description with the new one.
sds[address] = server_description
if topology_type == TOPOLOGY_TYPE.Single:
# Set server type to Unknown if replica set name does not match.
if (set_name is not None and
set_name != server_description.replica_set_name):
error = ConfigurationError(
"client is configured to connect to a replica set named "
"'%s' but this node belongs to a set named '%s'" % (
set_name, server_description.replica_set_name))
sds[address] = server_description.to_unknown(error=error)
# Single type never changes.
return TopologyDescription(
TOPOLOGY_TYPE.Single,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
if topology_type == TOPOLOGY_TYPE.Unknown:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer):
if len(topology_description._topology_settings.seeds) == 1:
topology_type = TOPOLOGY_TYPE.Single
else:
# Remove standalone from Topology when given multiple seeds.
sds.pop(address)
elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
if topology_type == TOPOLOGY_TYPE.Sharded:
if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
sds.pop(address)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type, set_name = _update_rs_no_primary_from_member(
sds, set_name, server_description)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
topology_type = _check_has_primary(sds)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type = _update_rs_with_primary_from_member(
sds, set_name, server_description)
else:
# Server type is Unknown or RSGhost: did we just lose the primary?
topology_type = _check_has_primary(sds)
# Return updated copy.
return TopologyDescription(topology_type,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
|
3fe6f527c8fdb177f608d5130c0ce239aef84c20
| 3,645,759
|
import os
def get_absolute_filepath(filepath: str) -> str:
"""Returns absolute filepath of the file/folder from the given `filepath` (along with the extension, if any)"""
absolute_filepath = os.path.realpath(path=filepath)
return absolute_filepath
|
be8ac0f55ed6fb039a746e0868bbd5ec2b8274ae
| 3,645,760
|
import numpy
def target_mask(image, path, num_grid_corners):
"""
Arguments:
image: grayscale image of shape (N, M)
path: pathlib.Path object for the image
Returns: Boolean mask of shape (N, M), which is True for pixels that
we think are on the calibration target.
"""
ret, corners = get_cached_corners(
image_path=path, gray=image, num_grid_corners=num_grid_corners
)
if ret:
# Take the hull to get the outer 2D shape
hull = ConvexHull(corners.squeeze())
points2d = hull.points[hull.vertices]
# Scale the points outward slightly
scale = 1.3
center = numpy.average(points2d, axis=0)
for i in range(len(points2d)):
points2d[i] = center + scale * (points2d[i] - center)
# Clip to edges, note corners are (axis1, axis0)
points2d[:, 0] = numpy.clip(points2d[:, 0], 0, image.shape[1] - 1)
points2d[:, 1] = numpy.clip(points2d[:, 1], 0, image.shape[0] - 1)
# Make a boolean mask
mask = numpy.zeros(image.shape[:2], dtype=numpy.int32)
# import ipdb; ipdb.set_trace()
mask = cv2.fillPoly(
mask, [points2d.reshape((-1, 1, 2)).astype(numpy.int32)], color=1.0
)
mask = mask.astype(bool)
else:
mask = numpy.ones(image.shape[:2], dtype=bool)
return mask
|
369a37b1cc5a49761413bac0a9b48a275bb76e59
| 3,645,761
|
from pathlib import Path
def read_data(spec: dict) -> (dict, DataFrame):
"""Creates Pandas DataFrame by reading file at path.
Appropriate read_* pandas method will be called based
on the extension of the input file specified."""
path = spec['input']['file']
ext = Path(path).suffix
kwargs = build_kwargs_read(spec, ext)
return spec, read_funcs[ext](path, **kwargs)
|
22e18a0702261c23e322fe03687864d694ecba98
| 3,645,762
|
import textwrap
def bunk_choose(bot, update, user_data):
"""Removes keyboardMarkup sent in previous handler.
Stores the response (for Lectures/Practicals message sent in previous handler) in a ``user_data``
dictionary with the key `"stype"`.
``user_data`` is a user relative dictionary which holds data between different handlers/functions
in a ConversationHandler.
Selects the appropriate table (Lecture or Practical) based on ``stype`` value.
Checks if records exist in the table for a user and sends a warning message or proceeds
to list names of all subjects in the table.
Passes control to :py:func:`bunk_input`
:param bot: Telegram Bot object
:type bot: telegram.bot.Bot
:param update: Telegram Update object
:type update: telegram.update.Update
:param user_data: User data dictionary
:type user_data: dict
:return: ConversationHandler.END if no records else INPUT
:rtype: int
"""
user_data['type'] = update.message.text
chat_id = update.message.chat_id
stype = user_data['type']
reply_markup = ReplyKeyboardRemove()
reply_text = "{}\nChoose `Cancel` to exit.".format(stype)
bot.sendMessage(chat_id=chat_id, text=reply_text, reply_markup=reply_markup, parse_mode='markdown')
if stype == "Lectures":
subject_data = Lecture.query.filter(Lecture.chatID == chat_id).all()
else:
subject_data = Practical.query.filter(Practical.chatID == chat_id).all()
if not subject_data: #If list is empty
messageContent = textwrap.dedent("""
No records found!
Please use /attendance to pull your attendance from the website first.
""")
bot.sendMessage(chat_id=chat_id, text=messageContent)
return ConversationHandler.END
messageContent = ""
for digit, subject in enumerate(subject_data):
subject_name = subject.name
messageContent += "{digit}. {subject_name}\n".format(digit=digit+1, subject_name=subject_name)
keyboard = build_menu(subject_data, 3, footer_buttons='Cancel')
reply_markup = ReplyKeyboardMarkup(keyboard)
user_data['reply_markup'] = reply_markup
bot.sendMessage(chat_id=chat_id, text=messageContent, reply_markup=reply_markup)
return INPUT
|
11d1249ec1953cc38be80470a21ba95b694c1ed5
| 3,645,763
|
import os
def schema_instance():
"""JSONSchema schema instance."""
schema_instance = JsonSchema(
schema=LOADED_SCHEMA_DATA,
filename="dns.yml",
root=os.path.join(FIXTURES_DIR, "schema", "schemas"),
)
return schema_instance
|
7c9b2453e9db531d38fcfe5852a3df49d6839cfa
| 3,645,764
|
def module_of(obj):
"""Return the Module given object is contained within.
"""
if isinstance(obj, Module):
return obj
elif isinstance(obj, (Function, Class)):
return obj.module
elif isinstance(obj, Method):
return module_of(obj.klass)
elif isinstance(obj, TestCase):
return module_of(obj.parent)
else:
raise TypeError("Don't know how to find the module of %r" % obj)
|
02c69c72d46e8448f7cdf41e18582508b431e4e7
| 3,645,765
|
def day(date, atmos=atmos):
"""
    Returns a dataframe of daily aggregated data.

    Parameters
    ----------
    date: str
        Format yyyy/mm/dd
"""
path = f"{get_day_folder_path(date)}{date.replace('/','')}_daily_agg.csv.gz"
return load_agg(path, atmos)
|
84f56d078b0aec9605a261ed26656d4771e0eb11
| 3,645,766
|
async def find_deck_position(hcapi: OT3API, mount: OT3Mount) -> float:
"""
Find the true position of the deck in this mount's frame of reference.
The deck nominal position in deck coordinates is 0 (that's part of the
definition of deck coordinates) but if we have not yet calibrated a
particular tool on a particular mount, then the z deck coordinate that
will cause a collision is not 0. This routine finds that value.
"""
z_offset_settings = hcapi.config.calibration.z_offset
await hcapi.home_z()
here = await hcapi.gantry_position(mount)
z_prep_point = Point(*z_offset_settings.point)
above_point = z_prep_point._replace(z=here.z)
await hcapi.move_to(mount, above_point)
deck_z = await hcapi.capacitive_probe(
mount, OT3Axis.by_mount(mount), z_prep_point.z, z_offset_settings.pass_settings
)
LOG.info(f"autocalibration: found deck at {deck_z}")
await hcapi.move_to(mount, z_prep_point + Point(0, 0, CAL_TRANSIT_HEIGHT))
return deck_z
|
f75c3d066c367853036adc1de138755a2a1ee29b
| 3,645,767
|
import os
import glob
import json
def load_measure_defs(measure_ids=None):
"""Load measure definitions from JSON files.
Since the lpzomnibus measure depends on other LP measures having already been
calculated, it is important that the measures are returned in alphabetical order.
(This is a bit of a hack...)
"""
measures = []
errors = []
glob_path = os.path.join(settings.MEASURE_DEFINITIONS_PATH, "*.json")
for path in sorted(glob.glob(glob_path)):
measure_id = os.path.basename(path).split(".")[0]
with open(path) as f:
try:
measure_def = json.load(f)
except ValueError as e:
# Add the measure_id to the exception
errors.append("* {}: {}".format(measure_id, e.args[0]))
continue
if measure_ids is None:
if "skip" in measure_def:
continue
else:
if measure_id not in measure_ids:
continue
measure_def["id"] = measure_id
measures.append(measure_def)
if errors:
raise ValueError("Problems parsing JSON:\n" + "\n".join(errors))
return measures
|
4ff5eefceff2b5357c3b4c81f9de61148cff1745
| 3,645,768
|
import logging
import os
def setup_logger(name, warninglevel=logging.WARNING, logfilepath=path_to_log,
logformat='%(asctime)s %(levelname)s - %(name)-6s - %(message)s'):
"""Basic setup function to create a standard logging config. Default output
is to file in /tmp/dir."""
logfile=os.path.join(logfilepath,'magpy.log')
    # Check file permission/existence
if not os.path.isfile(logfile):
pass
else:
if os.access(logfile, os.W_OK):
pass
else:
for count in range (1,100):
logfile=os.path.join(logfilepath,'magpy{:02}.log'.format(count))
value = os.access(logfile, os.W_OK)
if value or not os.path.isfile(logfile):
count = 100
break
try:
logging.basicConfig(filename=logfile,
filemode='w',
format=logformat,
level=logging.INFO)
    except Exception:
logging.basicConfig(format=logformat,
level=logging.INFO)
logger = logging.getLogger(name)
# Define a Handler which writes "setLevel" messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(warninglevel)
logger.addHandler(console)
return logger
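# Example use (illustrative): `path_to_log` above is a module-level default defined
# elsewhere, so an explicit log directory is passed here instead.
if __name__ == "__main__":
    log = setup_logger("magpy.example", logfilepath="/tmp")
    log.info("written to the log file only")
    log.warning("also echoed to stderr via the console handler")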
|
87a00b5f0fd57e79e60cbf0847f86e35274aa9be
| 3,645,769
|
import random
def create_symbol_id(path_to_db: str) -> str:
"""
When creating a new symbol, need to ensure
that ID is not already used in the Physics Derivation Graph
Args:
path_to_db: filename of the SQL database containing
a JSON entry that returns a nested dictionary
Returns:
proposed_symbol_id
Raises:
>>> create_symbol_id("pdg.db")
"""
# trace_id = str(random.randint(1000000, 9999999))
# logger.info("[trace start " + trace_id + "]")
dat = clib.read_db(path_to_db)
symbol_ids_in_use = list(dat["symbols"].keys())
found_valid_id = False
loop_count = 0
while not found_valid_id:
loop_count += 1
proposed_symbol_id = str(random.randint(1000, 9999)) # 4 digits
if proposed_symbol_id not in symbol_ids_in_use:
found_valid_id = True
if loop_count > 100000:
logger.error("too many; this seems unlikely")
raise Exception("this seems unlikely")
# logger.info("[trace end " + trace_id + "]")
return proposed_symbol_id
|
1394f7f348abd43ee1cbb4fed8119fda39341028
| 3,645,770
|
import re
def get_file_name(content_disposition: str, ) -> str:
"""Content-Disposition has the filename between the `"`. get it.
Args:
content_disposition: the content disposition from download header
Returns:
the file name
"""
if match := re.search(r'"(.*?)"', content_disposition):
file_name = match.group(1)
else:
file_name = demisto.uniqueFile()
return file_name
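# Quick check (illustrative): the demisto fallback above is only reached when the
# header carries no quoted filename.
if __name__ == "__main__":
    header = 'attachment; filename="report 2021.pdf"'
    assert get_file_name(header) == "report 2021.pdf"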
|
f81c8ee80d341bf62b970565c062db348324905f
| 3,645,771
|
def get_object(node):
""" Parse rebaron AtomTrailers node into Python object (taken from ongoing conversion object)
Works for object and local scope """
if len(node) > 1 and (node[0].value == 'self' or node[0].value == 'self_next'):
var_t = super_getattr(convert_obj, str(node))
else:
# get the SOURCE function (where call is going on) from datamodel
def_parent = node.parent
while not isinstance(def_parent, DefNode):
def_parent = def_parent.parent
source_func_name = f'self.{def_parent.name}'
source_func_obj = super_getattr(convert_obj, str(source_func_name))
func_locals = source_func_obj.get_local_types()
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
struct = Struct(**func_locals)
var_t = super_getattr(struct, str(node), is_local=True)
return var_t
|
9b09dfaae08768e1544e676e93bcf5ce718c67d5
| 3,645,772
|
import numpy as np
def read_label_from_txt(label_path):
    """Read object labels from a KITTI-style txt file."""
    bounding_box = []
    with open(label_path, "r") as f:
        labels = f.read().split("\n")
    for label in labels:
        if not label:
            continue
        label = label.split(" ")
        if label[0] == "DontCare":
            continue
        if label[0] in ("Car", "Van"):  # or "Truck"
            bounding_box.append(label[8:15])
    if bounding_box:
        data = np.array(bounding_box, dtype=np.float32)
        return data[:, 3:6], data[:, :3], data[:, 6]
    else:
        return None, None, None
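# Illustrative round trip with a tiny KITTI-style label file; the numbers below
# are made up and only demonstrate the expected 15-column layout.
if __name__ == "__main__":
    import os
    import tempfile
    lines = (
        "Car 0.00 0 -1.58 587.0 173.3 614.1 200.1 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59\n"
        "DontCare -1 -1 -10 0 0 0 0 -1 -1 -1 -1000 -1000 -1000 -10\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write(lines)
    places, sizes, rotations = read_label_from_txt(tmp.name)
    print(places, sizes, rotations)  # location (x, y, z), dimensions (h, w, l), yaw
    os.remove(tmp.name)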
|
dd6158af3531ec003b57c4fa47282957c3cb72ea
| 3,645,773
|
from typing import Union
def _load_recipe(module, baked: bool = False) -> Union[BakedRecipe, Recipe]:
    """Load a Queenbee recipe from a Python package.
    Usually you should not be using this function directly. Use the ``load``
    function instead.
    args:
        module: Python module object for a Queenbee Recipe.
        baked: Set to True to return a BakedRecipe instead of a Recipe.
    returns:
        Recipe - A Queenbee recipe. It will be a baked recipe if baked is set to True.
"""
qb_info = module.__pollination__
package_name = module.__name__
    # load entry-point DAG
    main_dag_entry = qb_info.get('entry_point', None)
assert main_dag_entry, \
        f'{package_name} __pollination__ info is missing the entry_point key.'
main_dag = main_dag_entry()
# get metadata
metadata = _get_meta_data(module, 'recipe')
_dependencies = main_dag._dependencies
# create a queenbee Recipe object
# load dags
qb_dag = main_dag.queenbee
qb_dag.name = 'main'
dags = [qb_dag] + [dag.queenbee for dag in _dependencies['dag']]
# add dependencies
repo = _init_repo()
plugins = [
Dependency(
kind=DependencyKind.plugin, name=plugin['name'], tag=plugin['tag'],
source=repo.as_uri()
) for plugin in _dependencies['plugin']
]
recipes = [
Dependency(
kind=DependencyKind.recipe, name=recipe['name'], tag=recipe['tag'],
source=repo.as_uri()
) for recipe in _dependencies['recipe']
]
recipe = Recipe(metadata=metadata, dependencies=plugins + recipes, flow=dags)
if baked:
package_recipe_dependencies(recipe)
rf = RepositoryReference(
name='pollination-dsl', path='file:///' + repo.as_posix()
)
config = Config(repositories=[rf])
recipe = BakedRecipe.from_recipe(recipe=recipe, config=config)
return recipe
|
d0c400a234a777438418c4eb605723ea67509077
| 3,645,774
|
def compute_coeffs(shape, Aref, alfa):
"""Computes the lift and drag coefficients of the given shape at the given
angle of attack using the given reference area"""
alfa_vect = np.array([-np.sin(alfa),0,-np.cos(alfa)])
Fvect = np.array([0,0,0]) #Force coefficient vector
for panel in shape:
panel.alfa = np.arcsin(np.dot(alfa_vect,-panel.N)/ \
(np.linalg.norm(alfa_vect)*np.linalg.norm(panel.N)))
panel_Cpvect = (panel.A/Aref) * (2*np.sin(panel.alfa)**2) * (-panel.N/np.linalg.norm(panel.N))
Fvect = Fvect + panel_Cpvect
CN = -Fvect[0]#np.dot(Fvect,np.array([-1,0,0]))
CA = -Fvect[2]#np.dot(Fvect,np.array([0,0,-1]))
CL = CN * np.cos(alfa) - CA * np.sin(alfa)
CD = CA * np.cos(alfa) + CN * np.sin(alfa)
#return CA, CN
return CL, CD
|
48cd922a460c56961cf2ebc2a3ecfc121622fe26
| 3,645,775
|
import numpy as np
def draw_pitch(axis, rotate=False):
"""
Plots the lines of a soccer pitch using matplotlib.
Arguments
---------
axis : matplotlib.axes._subplots.AxesSubplot
        - matplotlib axis object on which to draw the pitch
rotate : bool
- if set to True, pitch is horizontal,
default to False
    Returns
    -------
    axis : matplotlib.axes._subplots.AxesSubplot
        - the axis with the pitch lines drawn on it
"""
line_width = 4
alpha = 0.5
r = 10
line_coords = [[[0, 0], [0, 120]], [[0, 80], [120, 120]],
[[80, 80], [120, 0]], [[0, 80], [0, 0]],
[[0, 80], [60, 60]], [[18, 18], [0, 18]],
[[18, 62], [18, 18]], [[62, 62], [0, 18]],
[[30, 30], [0, 6]], [[30, 50], [6, 6]], [[50, 50], [0, 6]],
[[18, 18], [120, 102]], [[18, 62], [102, 102]],
[[62, 62], [102, 120]], [[30, 30], [120, 114]],
[[30, 50], [114, 114]], [[50, 50], [120, 114]]]
if not rotate:
for lines in line_coords:
axis.plot(lines[0], lines[1], color='grey',
linewidth=line_width, alpha=alpha)
theta1 = np.linspace(0, 2*np.pi, 100)
theta2 = np.linspace(0.65, 2.47, 100)
theta3 = np.linspace(3.8, 5.6, 100)
x1 = r*np.cos(theta1) + 40
x2 = r*np.sin(theta1) + 60
x3 = r*np.cos(theta2) + 40
x4 = r*np.sin(theta2) + 12
x5 = r*np.cos(theta3) + 40
x6 = r*np.sin(theta3) + 108
axis.plot(x1, x2, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x3, x4, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x5, x6, color='grey', linewidth=line_width,
alpha=alpha)
else:
for lines in line_coords:
axis.plot([-(lines[1][0]-40) + 80, -(lines[1][1]-40) + 80],
[lines[0][0], lines[0][1]], color='grey',
linewidth=line_width, alpha=alpha)
theta1 = np.linspace(0, 2*np.pi, 100)
theta2 = np.linspace(5.4, 7.2, 100)
theta3 = np.linspace(2.2, 4, 100)
x1 = r*np.cos(theta1) + 60
x2 = r*np.sin(theta1) + 40
x3 = r*np.cos(theta2) + 12
x4 = r*np.sin(theta2) + 40
x5 = r*np.cos(theta3) + 108
x6 = r*np.sin(theta3) + 40
axis.plot(x1, x2, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x3, x4, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x5, x6, color='grey', linewidth=line_width,
alpha=alpha)
return axis
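# Illustrative rendering of both orientations (requires matplotlib).
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig, (ax_v, ax_h) = plt.subplots(1, 2, figsize=(12, 8))
    draw_pitch(ax_v)               # vertical pitch
    draw_pitch(ax_h, rotate=True)  # horizontal pitch
    for ax in (ax_v, ax_h):
        ax.set_aspect("equal")
        ax.axis("off")
    plt.show()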
|
24a5b0de75ed70f6ce37afab8e04cf03afaa4651
| 3,645,776
|
import click
def file(filename, searchspec=None, searchtype=None, list_images=False,
sort_by=None, fields=None):
"""Examine images from a local file."""
if not list_images:
if searchtype is None or searchspec is None:
raise click.BadParameter(
'SEARCHTYPE and SEARCHSPEC must be specified when not listing '
'images')
try:
images = sources.read_images_file(filename)
except Exception as e:
abort(e)
if list_images:
return _list_images(images)
_process_images(searchtype, images, searchspec, sort_by, fields)
|
6162ac7553f04225a6cad0fe262f2cc97dad39a2
| 3,645,777
|
from numbers import Real
import numpy as np
def clip_to_norm(array, clip):
"""Clips the examples of a 2-dimensional array to a given maximum norm.
Parameters
----------
array : np.ndarray
Array to be clipped. After clipping, all examples have a 2-norm of at most `clip`.
clip : float
Norm at which to clip each example
Returns
-------
array : np.ndarray
The clipped array.
"""
if not isinstance(array, np.ndarray):
raise TypeError(f"Input array must be a numpy array, got {type(array)}.")
if array.ndim != 2:
raise ValueError(f"input array must be 2-dimensional, got {array.ndim} dimensions.")
if not isinstance(clip, Real):
raise TypeError(f"Clip value must be numeric, got {type(clip)}.")
if clip <= 0:
raise ValueError(f"Clip value must be strictly positive, got {clip}.")
norms = np.linalg.norm(array, axis=1) / clip
norms[norms < 1] = 1
return array / norms[:, np.newaxis]
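# Quick demonstration: rows whose 2-norm exceeds the clip value are scaled down,
# rows already within the bound are left untouched.
if __name__ == "__main__":
    x = np.array([[3.0, 4.0],   # norm 5 -> scaled down to norm 2
                  [0.6, 0.8]])  # norm 1 -> unchanged
    clipped = clip_to_norm(x, 2.0)
    print(np.linalg.norm(clipped, axis=1))  # [2. 1.]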
|
e7dca2cf9f129736ebc5f7909cb4fed41a4c7996
| 3,645,778
|
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
|
b8c8d6fb3ebb8784d10250a42526b31e185e9b7a
| 3,645,779
|
def _batch_sum(F, loss, batch_axis):
"""Return sum on the specified batch axis, not keeping the axis"""
if is_np_array():
axes = list(range(loss.ndim))
del axes[batch_axis]
return F.np.sum(loss, axis=axes)
else:
return F.sum(loss, axis=batch_axis, exclude=True)
|
71bad3e21c1905e81b0e40b5be08ebc0426e1ca7
| 3,645,780
|
def tree_cons(a, tree: Pytree) -> Pytree:
"""
Prepend ``a`` in all tuples of the given tree.
"""
return jax.tree_map(
lambda x: _OpaqueSequence((a,) + tuple(x)),
tree,
is_leaf=lambda x: isinstance(x, tuple),
)
|
1d82ca0a7b49d8e803fe8771f4f6b697826f5e27
| 3,645,781
|
from datetime import datetime
def add(moment: datetime) -> datetime:
"""Add one gigasecond to a given date and time."""
return moment + GIGASECOND
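# Illustrative check; GIGASECOND is assumed to be defined elsewhere in the module
# as timedelta(seconds=10**9).
if __name__ == "__main__":
    print(add(datetime(2011, 4, 25)))  # 2043-01-01 01:46:40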
|
87e009e408088d6c91ceb409408608947c0b9fd3
| 3,645,782
|
import os
def get_test_hooks(test_files, cfg, tracer=None):
"""Returns a list of test hooks from a given list of test modules."""
results = []
dirs = set(map(os.path.dirname, test_files))
for dir in list(dirs):
if os.path.basename(dir) == 'ftests':
dirs.add(os.path.join(os.path.dirname(dir), 'tests'))
dirs = list(dirs)
dirs.sort()
for dir in dirs:
filename = os.path.join(dir, 'checks.py')
if os.path.exists(filename):
module = import_module(filename, cfg, tracer=tracer)
if tracer is not None:
hooks = tracer.runfunc(module.test_hooks)
else:
hooks = module.test_hooks()
results.extend(hooks)
return results
|
eafe4a15a9410b600813588280ef48188b16738c
| 3,645,783
|
def convert_to_short_log(log_level, message):
"""Convert a log message to its shorter format.
:param log_level: enum - 'LogLevel.<level>' e.g. 'LogLevel.Error'
:param message: str - log message
    :return: str - the short-format log message, e.g. '5:some message'
"""
return f'{LogLevelInt[log_level.name].value}:{message}'
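# Illustrative only: the LogLevel / LogLevelInt enums live elsewhere in the
# project; minimal hypothetical stand-ins are used here to show the mapping.
if __name__ == "__main__":
    from enum import Enum

    class LogLevel(Enum):
        Error = "Error"

    class LogLevelInt(Enum):
        Error = 5

    print(convert_to_short_log(LogLevel.Error, "disk full"))  # prints "5:disk full"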
|
0d4c20ac4ec809dbb58494ef928586c95da88fbb
| 3,645,784
|
import zipfile
import io
import pathlib
import warnings
import numpy as np
def load_map(path, callback=None, meta_override=None):
"""Load a set of zipped csv AFM workshop data
If you are recording quantitative force-maps (i.e. multiple
curves on an x-y-grid) with AFM workshop setups, then you
might have realized that you get *multiple* .csv files (one
file per indentation) instead of *one* file that contains all
the data (as you might be accustomed to from other
manufacturers). Since afmformats expects one file per
measurement, it would not be straight forward to obtain a
properly enumerated quantitative imaging group.
This function offers a workaround - it loads a zip archive
    created from the .csv files.
The files are structured like this::
Force-Distance Curve
File Format: 3
Date: Wednesday, August 1, 2018
Time: 1:07:47 PM
Mode: Mapping
Point: 16
X, um: 27.250000
Y, um: 27.250000
Extend Z-Sense(nm),Extend T-B(V),Retract Z-Sense(nm),Retract T-B(V)
13777.9288,0.6875,14167.9288,1.0917
13778.9288,0.6874,14166.9288,1.0722
13779.9288,0.6876,14165.9288,1.0693
13780.9288,0.6877,14164.9288,1.0824
13781.9288,0.6875,14163.9288,1.0989
...
Please make sure that the ``Point`` is enumerated from 1
onwards (and matches the alphanumerical order of the files in
the archive) and that ``Mode`` is ``Mapping``. The ``X`` and
``Y`` coordinates can be used by e.g. PyJibe to display QMap
data on a grid.
Parameters
----------
path: str or pathlib.Path
path to zip file containing AFM workshop .csv files
callback: callable
function for progress tracking; must accept a float in
[0, 1] as an argument.
meta_override: dict
if specified, contains key-value pairs of metadata that
are used when loading the files
(see :data:`afmformats.meta.META_FIELDS`)
"""
datasets = []
with zipfile.ZipFile(path) as arc:
names = sorted(arc.namelist())
for ii, name in enumerate(names):
with arc.open(name, "r") as fd:
tfd = io.TextIOWrapper(fd, encoding="utf-8")
dd = load_csv(
tfd,
# recurse into callback with None as default
callback=lambda x: callback((ii + x) / len(names))
if callback is not None else None,
meta_override=meta_override,
mode="mapping")
dd[0]["metadata"]["path"] = pathlib.Path(path)
cur_enum = dd[0]["metadata"]["enum"]
if cur_enum != ii + 1:
warnings.warn("Dataset 'Point' enumeration mismatch for "
f"'{name}' in '{path}' (expected {ii + 1}, "
f"got {cur_enum})!",
AFMWorkshopFormatWarning)
datasets += dd
# Populate missing grid metadata
xvals = list(set([ad["metadata"]["position x"] for ad in datasets]))
yvals = list(set([ad["metadata"]["position y"] for ad in datasets]))
mdgrid = {
"grid center x": np.mean(xvals),
"grid center y": np.mean(yvals),
"grid shape x": len(xvals),
"grid shape y": len(yvals),
# grid size in um includes boundaries of pixels
"grid size x": np.ptp(xvals)*(1 + 1/(len(xvals)-1)),
"grid size y": np.ptp(yvals)*(1 + 1/(len(yvals)-1)),
}
# Update with new metadata (note that grid index x/y is populated via
# MetaData._autocomplete_grid_metadata)
[ad["metadata"].update(mdgrid) for ad in datasets]
return datasets
|
bdf98f10a5decfc9a4dbd4b9cc90c75c7c95e76d
| 3,645,785
|
def format_len(x):
"""
>>> format_len('abc')
3
>>> format_len(('(', ('(', 'def', ')'), 'yz', ')'))
11
"""
if not isinstance(x, (list, tuple)): return len(x)
if len(x) > 3: sep_len = 2 * (len(x) - 3)
else: sep_len = 0
return sum(map(format_len, x)) + sep_len
|
723afb58bfed0cfb7fbd25a12b86b257bf8b40df
| 3,645,786
|
import os
import platform
def build_user_agent():
"""Build the charmcraft's user agent."""
if any(key.startswith(prefix) for prefix in TESTING_ENV_PREFIXES for key in os.environ.keys()):
testing = " (testing) "
else:
testing = " "
os_platform = "{0.system}/{0.release} ({0.machine})".format(utils.get_os_platform())
return "charmcraft/{}{}{} python/{}".format(
__version__, testing, os_platform, platform.python_version()
)
|
dcb036451388a900eb9f3d684167652247c720b4
| 3,645,787
|
def set_doi_ark(page_number, records_per_page, sort_on, doi_ark_value):
"""
Retrieve all metadata records for admin view. Retrieval is done
via POST because we must pass a session id so that the user is
authenticated.
Access control is done here. A user can modify only their own records
    because their session_id is sent with the request.
"""
username = _authenticate_admin_from_session(request)
#pageNumber is 0 based index. Need first page to start at 0 for math for setting arrayLowerBound and arrayUpperBound.
try:
if username:
if request.method == 'POST':
#need to do input sanitization on all these values! Separating variables so outside does not have direct access to
#database query.
sort_by = validate_admin_sort_by(sort_on)
record_list = Metadata.objects(__raw__={'published':'pending'}).order_by(sort_by)
arrayLowerBound = int(page_number) * int(records_per_page)
arrayUpperBound = int(page_number) * int(records_per_page) + int(records_per_page)
                # Only return array elements between indices. Don't want to return all possible values
#and overload browser with too much data. This is a version of 'pagination.'
return jsonify(dict(results=record_list[arrayLowerBound:arrayUpperBound], num_entries=(len(record_list)/int(records_per_page))))
else:
return Response('Bad or missing session id.', status=401)
    except Exception:
return Response('Bad request for records', 400)
|
2b718232463a632dc07f73c1e6e5c3299ff57f18
| 3,645,788
|
import datetime
import argparse
def check_datetime(value):
"""
Check and convert "value" to a datetime object. Value can have multiple formats,
according to the argparse.ArgumentParser doc (defined in :func:`parse_cmd_args`)
Args:
value (str): The input value
Returns:
datetime.datetime: the input value converted to a datetime object
Raises:
argparse.ArgumentTypeError
"""
# Case "now"
if value == 'now':
return datetime.datetime.now()
# Case "+hh" and "-hh"
if value.startswith('+') or value.startswith('-'):
if not value[1:].isdigit():
raise argparse.ArgumentTypeError('"%s": format admitted "[+|-]nn" (e.g +24)' % value)
hours = int(value)
return datetime.datetime.now() + datetime.timedelta(0, hours * 3600)
# Case "%y/%m/%d-%H:%M"
try:
return datetime.datetime.strptime(value, '%y/%m/%d-%H:%M')
except ValueError:
pass
# Case "%y/%m/%d"
try:
return datetime.datetime.strptime(value, '%y/%m/%d')
except ValueError:
pass
raise argparse.ArgumentTypeError(
'"%s": not a valid format (admitted: "now", "[+|-]hh" (e.g. "+24" or "-4") or "yy/mm/dd[-HH:MM]")' % value)
|
7b85a85478f79f6e8ebb2a8663fd8d4860f16d18
| 3,645,789
|
def empiriline(x,p,L):
"""
Use the line L (which is an EmissionLine object) as a template.
The line is shifted, then interpolated, then rescaled, and allowed
to float.
"""
xnew = x - p[1]
yout = sp.zeros(len(xnew))
m = (xnew >= L.wv.min())*(xnew <= L.wv.max() )
ynew,znew = L.interp(xnew[m])
yout[m] = p[0]*ynew + p[2]
return yout
|
a0199a4623e834524711d0e78787d409da29d3ef
| 3,645,790
|
def _get_z_slice_fn(z, data_dir):
"""Get array slice map to be applied to z dimension
Args:
z: String or 1-based index selector for z indexes constructed as any of the following:
- "best": Indicates that z slices should be inferred based on focal quality
- "all": Indicates that a slice for all z-planes should be used
- str or int: A single value will be interpreted as a single index
- tuple: A 2-item or 3-item tuple forming the slice (start, stop[, step]); stop is inclusive
- list: A list of integers will be used as is
data_dir: Data directory necessary to infer 'best' z planes
Returns:
A function with signature (region_index, tile_x, tile_y) -> slice_for_array where slice_for_array
will either be a slice instance or a list of z-indexes (Note: all indexes are 0-based)
"""
if not z:
raise ValueError('Z slice cannot be defined as empty value (given = {})'.format(z))
# Look for keyword strings
if isinstance(z, str) and z == 'best':
map = function_data.get_best_focus_coord_map(data_dir)
return lambda ri, tx, ty: [map[(ri, tx, ty)]]
if isinstance(z, str) and z == 'all':
return lambda ri, tx, ty: slice(None)
# Parse argument as 1-based index list and then convert to 0-based
zi = cli.resolve_index_list_arg(z, zero_based=True)
return lambda ri, tx, ty: zi
|
1982258bb98205fe4552de4db8b4e049e09af4bf
| 3,645,791
|
def bar_data_wrapper(func):
"""Standardizes column names for any bar data"""
def wrapper(*args, **kwargs):
assert Ticker(args[0])
res: pd.DataFrame = func(*args, **kwargs)
return res.rename(columns=COL_NAMES).iterrows()
return wrapper
|
4cfa9614ec430ba53ac3e86dc68c6a84ee3cfbee
| 3,645,792
|
import torch
from torch import Tensor
def rgb_to_grayscale(
image: Tensor, rgb_weights: list[float] = [0.299, 0.587, 0.114]
) -> Tensor:
"""Convert an RGB image to grayscale version of image. Image data is
assumed to be in the range of [0.0, 1.0].
Args:
image (Tensor[B, 3, H, W]):
RGB image to be converted to grayscale.
rgb_weights (list[float]):
Weights that will be applied on each channel (RGB). Sum of the
weights should add up to one.
Returns:
grayscale (Tensor[B, 1, H, W]):
Grayscale version of the image.
"""
    if isinstance(rgb_weights, (list, tuple)):
        # Accept plain Python weights by converting them to a float tensor.
        rgb_weights = torch.FloatTensor(rgb_weights)
    if not isinstance(rgb_weights, Tensor):
        raise TypeError(f"`rgb_weights` must be a `Tensor`. "
                        f"But got: {type(rgb_weights)}.")
if rgb_weights.shape[-1] != 3:
raise ValueError(f"`rgb_weights` must have a shape of [*, 3]. "
f"But got: {rgb_weights.shape}.")
r = image[..., 0:1, :, :]
g = image[..., 1:2, :, :]
b = image[..., 2:3, :, :]
if not torch.is_floating_point(image) and (image.dtype != rgb_weights.dtype):
raise ValueError(f"`image` and `rgb_weights` must have the same dtype. "
f"But got: {image.dtype} and {rgb_weights.dtype}.")
w_r, w_g, w_b = rgb_weights.to(image).unbind()
return w_r * r + w_g * g + w_b * b
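# Quick shape check on a random batch with values already in [0.0, 1.0].
if __name__ == "__main__":
    image = torch.rand(2, 3, 8, 8)
    gray = rgb_to_grayscale(image)
    print(gray.shape)  # torch.Size([2, 1, 8, 8])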
|
d135a92e7189a745b2d9658b2b60a91c238fdbf8
| 3,645,793
|
from typing import Dict
from typing import Union
from datetime import datetime
import logging
def parse(msg: str) -> Dict[str, Union[str, int, float, bool, datetime]]:
"""Parse message from the feed output by dump1090 on port 30003
    A dict is returned with the following SBS-1 message attributes:
messageType : string
transmissionType : sbs1.TransmissionType
sessionID : int
aircraftID : int
icao24 : string
flightID : int
generatedDate : datetime
loggedDate : datetime
callsign : string
altitude : int
groundSpeed : int
track : int
lat : float
lon : float
verticalRate : int
squawk : int
alert : bool
emergency : bool
spi : bool
onGround : bool
None is returned if the message was not valid
A field not present in the parsed message will be set to None. For a
description of the attributes, please see github.com/wiseman/node-sbs1
"""
if msg is None:
return None
sbs1 = {}
parts = msg.lstrip().rstrip().split(',')
try:
# logging.debug("%s %s %s" % (parts[1], parts[4], ",".join(parts[10:])))
sbs1["messageType"] = __parseString(parts, 0)
if sbs1["messageType"] != "MSG":
return None
sbs1["transmissionType"] = __parseInt(parts, 1)
sbs1["sessionID"] = __parseString(parts, 2)
sbs1["aircraftID"] = __parseString(parts, 3)
sbs1["icao24"] = __parseString(parts, 4)
sbs1["flightID"] = __parseString(parts, 5)
sbs1["generatedDate"] = __parseDateTime(parts, 6, 7)
sbs1["loggedDate"] = __parseDateTime(parts, 8, 9)
sbs1["callsign"] = __parseString(parts, 10)
if sbs1["callsign"]:
sbs1["callsign"] = sbs1["callsign"].rstrip()
sbs1["altitude"] = __parseInt(parts, 11)
sbs1["groundSpeed"] = __parseFloat(parts, 12)
sbs1["track"] = __parseFloat(parts, 13)
sbs1["lat"] = __parseFloat(parts, 14)
sbs1["lon"] = __parseFloat(parts, 15)
sbs1["verticalRate"] = __parseInt(parts, 16)
sbs1["squawk"] = __parseInt(parts, 17)
sbs1["alert"] = __parseBool(parts, 18)
sbs1["emergency"] = __parseBool(parts, 19)
sbs1["spi"] = __parseBool(parts, 20)
sbs1["onGround"] = __parseBool(parts, 21)
except IndexError as e:
logging.error("Failed to init sbs1 message from '%s'" % (msg), exc_info=True)
return None
return sbs1
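# Sample dump1090 port-30003 line (illustrative); parsing it relies on the
# module's private __parse* helpers, so the call itself is only shown as a comment.
if __name__ == "__main__":
    sample = ("MSG,3,111,11111,A0C1F3,111111,2021/06/15,12:00:00.000,"
              "2021/06/15,12:00:01.000,UAL123,37000,450,90,37.6188,-122.3756,"
              "0,1200,0,0,0,0")
    # parse(sample) -> {"messageType": "MSG", "transmissionType": 3,
    #                   "callsign": "UAL123", "altitude": 37000, ...}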
|
9fd7d3b15eb08ebf11d77ac98a3ed215c9409cdd
| 3,645,794
|
def _aware_to_agnostic(fr: NDFrame) -> NDFrame:
"""Recalculate values in tz-aware series or dataframe, to get a tz-agnostic one.
(i.e., A to B)."""
if not fr.index.tz:
raise ValueError("``fr`` must be tz-aware.")
idx_out = _idx_after_conversion(fr, None)
# Convert daily or longer.
if stamps.freq_shortest(idx_out.freq, "D") == "D":
        # One-to-one correspondence between the timestamps in input and output frames.
# --> Simply replace the index.
return fr.set_axis(idx_out)
# Convert hourly or shorter.
# There are timestamps in the output that do not exist in the input. In that case,
# repeat the value of the previous hour.
partly = fr.tz_localize(None)
partly = partly[~partly.index.duplicated()] # remove duplicates
def value(ts): # Take value of prev hour if current time not found in the input.
try:
return partly.loc[ts]
except KeyError:
return partly.loc[ts - pd.Timedelta(hours=1)]
return fr.__class__([value(ts) for ts in idx_out], index=idx_out)
|
c97b190828d5364033336b2dcc48d352aafe1132
| 3,645,795
|
import math
import numpy as np
def cumulative_prob_to_value(prob, hp):
"""Convert a value from [0, 1] to a hyperparameter value."""
if isinstance(hp, Fixed):
return hp.value
elif isinstance(hp, Boolean):
return bool(prob >= 0.5)
elif isinstance(hp, Choice):
ele_prob = 1 / len(hp.values)
index = math.floor(prob / ele_prob)
# Can happen when `prob` is very close to 1.
if index == len(hp.values):
index = index - 1
return hp.values[index]
elif isinstance(hp, (Int, Float)):
sampling = hp.sampling or 'linear'
if sampling == 'linear':
value = prob * (hp.max_value - hp.min_value) + hp.min_value
elif sampling == 'log':
value = hp.min_value * math.pow(hp.max_value / hp.min_value, prob)
elif sampling == 'reverse_log':
value = (hp.max_value + hp.min_value -
hp.min_value * math.pow(hp.max_value / hp.min_value, 1 - prob))
else:
raise ValueError('Unrecognized sampling value: {}'.format(sampling))
if hp.step is not None:
values = np.arange(hp.min_value, hp.max_value + 1e-7, step=hp.step)
closest_index = np.abs(values - value).argmin()
value = values[closest_index]
if isinstance(hp, Int):
return int(value)
return value
else:
raise ValueError('Unrecognized HyperParameter type: {}'.format(hp))
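# Worked check of the sampling formulas above (illustrative; the real Int/Float
# hyperparameter classes come from the surrounding library, so the arithmetic is
# reproduced directly instead of constructing hyperparameter objects).
if __name__ == "__main__":
    assert 0.25 * (10.0 - 0.0) + 0.0 == 2.5                           # linear, prob=0.25
    assert abs(1e-4 * math.pow(1e-1 / 1e-4, 0.5) - 3.1623e-3) < 1e-6  # log, prob=0.5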
|
d66e69f4cb580f8d3ce3004c082239717fd2854a
| 3,645,796
|
import podpac
import podpac.datalib # May not be imported by default
import inspect
import numpy as np
import traitlets as tl
def get_ui_node_spec(module=None, category="default"):
"""
Returns a dictionary describing the specifications for each Node in a module.
Parameters
-----------
module: module
The Python module for which the ui specs should be summarized. Only the top-level
classes will be included in the spec. (i.e. no recursive search through submodules)
category: str, optional
Default is "default". Top-level category name for the group of Nodes.
Returns
--------
dict
Dictionary of {category: {Node1: spec_1, Node2: spec2, ...}} describing the specs for each Node.
"""
spec = {}
def get_ui_spec(cls):
filter = []
spec = {"help": cls.__doc__, "module": cls.__module__ + "." + cls.__name__, "attrs": {}}
for attr in dir(cls):
if attr in filter:
continue
attrt = getattr(cls, attr)
if not isinstance(attrt, tl.TraitType):
continue
if "attr" not in attrt.metadata:
continue
type_ = attrt.__class__.__name__
type_extra = str(attrt)
if type_ == "Union":
type_ = [t.__class__.__name__ for t in attrt.trait_types]
type_extra = "Union"
elif type_ == "Instance":
type_ = attrt.klass.__name__
type_extra = attrt.klass
default_val = attrt.default()
if not isinstance(type_extra, str):
type_extra = str(type_extra)
try:
if np.isnan(default_val):
default_val = 'nan'
            except Exception:
pass
if default_val == tl.Undefined:
default_val = None
spec["attrs"][attr] = {
"type": type_,
"type_str": type_extra, # May remove this if not needed
"values": getattr(attrt, "values", None),
"default": default_val,
"help": attrt.help,
}
spec.update(getattr(cls, "_ui_spec", {}))
return spec
if module is None:
modcat = zip(
[podpac.data, podpac.algorithm, podpac.compositor, podpac.datalib],
["data", "algorithms", "compositors", "datalib"],
)
for mod, cat in modcat:
spec.update(get_ui_node_spec(mod, cat))
return spec
spec[category] = {}
for obj in dir(module):
ob = getattr(module, obj)
if not inspect.isclass(ob):
continue
if not issubclass(ob, podpac.Node):
continue
spec[category][obj] = get_ui_spec(ob)
return spec
|
17978a9ebb50696990f601f449a0539bcf67c3dd
| 3,645,797
|
def parse_branch_name(branch_name):
"""Split up a branch name of the form 'ocm-X.Y[-mce-M.N].
:param branch_name: A branch name. If of the form [remote/]ocm-X.Y[-mce-M.N] we will parse
it as noted below; otherwise the first return will be False.
:return parsed (bool): True if the branch_name was parseable; False otherwise.
:return remote (str): If parsed and the branch_name contained a remote/ prefix, it is
returned here; otherwise this is the empty string.
:return prefix (str): Two-digit semver prefix of the bundle to be generated. If the branch
name is of the form [remote/]ocm-X.Y, this will be X.Y; if of the form
[remote/]ocm-X.Y-mce-M.N it will be M.N. If not parseable, it will be the empty string.
:return channel (str): The name of the channel in which we'll include the bundle. If the
branch name is of the form [remote/]ocm-X.Y, this will be ocm-X.Y; if of the form
[remote/]ocm-X.Y-mce-M.N it will be mce-M.N. If not parseable, it will be the empty
string.
"""
m = MCE_BRANCH_RE.match(branch_name)
if m:
return True, m.group(1), m.group(2), m.group(3)
m = OCM_BRANCH_RE.match(branch_name)
if m:
return True, m.group(1), m.group(3), m.group(2)
return False, '', '', ''
|
2cb53aef0754c815dd61d4a1b640e12db64fbf83
| 3,645,798
|
import itertools
def symmetric_padding(
arr,
width):
"""
Pad an array using symmetric values.
This is equivalent to `np.pad(mode='symmetric')`, but should be faster.
Also, the `width` parameter is interpreted in a more general way.
Args:
arr (np.ndarray): The input array.
width (int|float|Iterable[int|float]): Size of the padding to use.
This is used with `flyingcircus.base.multi_scale_to_int()`.
The shape of the array is used for the scales.
Returns:
result (np.ndarray): The padded array.
Examples:
>>> arr = arange_nd((2, 3)) + 1
>>> print(arr)
[[1 2 3]
[4 5 6]]
>>> new_arr = symmetric_padding(arr, (1, 2))
>>> print(new_arr)
[[2 1 1 2 3 3 2]
[2 1 1 2 3 3 2]
[5 4 4 5 6 6 5]
[5 4 4 5 6 6 5]]
>>> new_arr = symmetric_padding(arr, ((0, 1), 2))
>>> print(new_arr)
[[2 1 1 2 3 3 2]
[5 4 4 5 6 6 5]
[5 4 4 5 6 6 5]]
>>> new_arr = symmetric_padding(arr, ((1, 0), 2))
>>> print(new_arr)
[[2 1 1 2 3 3 2]
[2 1 1 2 3 3 2]
[5 4 4 5 6 6 5]]
>>> new_arr = symmetric_padding(arr, ((0, 1.0),))
>>> print(new_arr)
[[1 2 3 3 2 1]
[4 5 6 6 5 4]
[4 5 6 6 5 4]
[1 2 3 3 2 1]]
>>> arr = arange_nd((5, 7, 11)) + 1
>>> np.all(symmetric_padding(arr, 17) == np.pad(arr, 17, 'symmetric'))
True
"""
width = fc.base.multi_scale_to_int(width, arr.shape)
if any(any(size for size in sizes) for sizes in width):
shape = tuple(
low + dim + up for dim, (low, up) in zip(arr.shape, width))
result = np.zeros(shape, dtype=arr.dtype)
target_slices = tuple(
tuple(
slice(
max((i - (1 if low % dim else 0)) * dim + low % dim, 0),
min((i + 1 - (1 if low % dim else 0)) * dim + low % dim,
low + dim + up))
for i in range(
fc.base.div_ceil(low, dim) + fc.base.div_ceil(up,
dim) + 1))
for dim, (low, up) in zip(arr.shape, width))
len_target_slices = tuple(len(items) for items in target_slices)
parities = tuple(
fc.base.div_ceil(low, dim) % 2
for dim, (low, up) in zip(arr.shape, width))
for i, target_slicing in enumerate(itertools.product(*target_slices)):
ij = np.unravel_index(i, len_target_slices)
source_slicing = []
for idx, target_slice, parity, dim in \
zip(ij, target_slicing, parities, arr.shape):
step = 1 if idx % 2 == parity else -1
start = stop = None
span = target_slice.stop - target_slice.start
if span != dim:
if target_slice.start == 0:
start = \
(dim - span) if idx % 2 == parity else (span - 1)
else:
stop = \
span if idx % 2 == parity else (dim - span - 1)
source_slicing.append(slice(start, stop, step))
source_slicing = tuple(source_slicing)
result[target_slicing] = arr[source_slicing]
else:
result = arr
return result
|
dd91b4f3641332ecfffa734584fd87293c7169ee
| 3,645,799
|