Dataset schema (one record per source file; ⌀ marks nullable columns):

  hexsha                                      string (40 chars)
  size                                        int64 (3 .. 1.03M)
  ext                                         string (10 classes)
  lang                                        string (1 class)
  max_stars_repo_path                         string (3 .. 972 chars)
  max_stars_repo_name                         string (6 .. 130 chars)
  max_stars_repo_head_hexsha                  string (40 .. 78 chars)
  max_stars_repo_licenses                     list (1 .. 10 items)
  max_stars_count                             int64 (1 .. 191k) ⌀
  max_stars_repo_stars_event_min_datetime     string (24 chars) ⌀
  max_stars_repo_stars_event_max_datetime     string (24 chars) ⌀
  max_issues_repo_path                        string (3 .. 972 chars)
  max_issues_repo_name                        string (6 .. 130 chars)
  max_issues_repo_head_hexsha                 string (40 .. 78 chars)
  max_issues_repo_licenses                    list (1 .. 10 items)
  max_issues_count                            int64 (1 .. 116k) ⌀
  max_issues_repo_issues_event_min_datetime   string (24 chars) ⌀
  max_issues_repo_issues_event_max_datetime   string (24 chars) ⌀
  max_forks_repo_path                         string (3 .. 972 chars)
  max_forks_repo_name                         string (6 .. 130 chars)
  max_forks_repo_head_hexsha                  string (40 .. 78 chars)
  max_forks_repo_licenses                     list (1 .. 10 items)
  max_forks_count                             int64 (1 .. 105k) ⌀
  max_forks_repo_forks_event_min_datetime     string (24 chars) ⌀
  max_forks_repo_forks_event_max_datetime     string (24 chars) ⌀
  content                                     string (3 .. 1.03M chars)
  avg_line_length                             float64 (1.13 .. 941k)
  max_line_length                             int64 (2 .. 941k)
  alphanum_fraction                           float64 (0 .. 1)

Record 1
  hexsha: e4c4a1a9b4555cbf8478e7bf4e73c7328720aa90  |  size: 530  |  ext: py  |  lang: Python
  max_stars:  path=server/server.py  repo=straayke/ClassReport  head=5c4fbb0422142c4784a6eba99bbebe8418444898  licenses=["MIT"]  stars=7  (2018-11-25T12:47:04.000Z .. 2019-08-02T14:14:54.000Z)
  max_issues: path=server/server.py  repo=straayke/ClassReport  head=5c4fbb0422142c4784a6eba99bbebe8418444898  licenses=["MIT"]  issues=3  (2020-09-06T18:23:54.000Z .. 2022-02-12T18:13:28.000Z)
  max_forks:  path=server/server.py  repo=straayke/ClassReport  head=5c4fbb0422142c4784a6eba99bbebe8418444898  licenses=["MIT"]  forks=null
  content:
from flask import Flask, request
from flask import jsonify
import os
from openpose import nb_raised
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/hand-raised', methods=['POST'])
def hand_raised():
file = request.files["file"]
file.save(os.path.join("uploaded.jpg"))
ret, people, points = nb_raised()
hands = {"count":ret, "skeletonCount":people, "positions":points}
response = jsonify(hands)
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
  stats: avg_line_length=23.043478  max_line_length=69  alphanum_fraction=0.69434
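
A minimal client sketch for the record above (assumes the `requests` package is installed, the server is running locally on port 5000 as configured, and that photo.jpg is a placeholder image path; none of this is part of the original record):

    import requests

    # POST an image to the /hand-raised endpoint defined in the record above;
    # the multipart field name "file" matches request.files["file"] on the server.
    with open("photo.jpg", "rb") as f:
        resp = requests.post("http://localhost:5000/hand-raised", files={"file": f})
    print(resp.json())  # e.g. {"count": ..., "skeletonCount": ..., "positions": ...}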

Record 2
  hexsha: 24f38975b2b1d1be7c6b11bf38071662038084ed  |  size: 110  |  ext: py  |  lang: Python
  max_stars:  path=comb/mq/mysql.py  repo=nextoa/comb  head=9bddd6c7366bd71b06d0ad7c28188abec8a874b0  licenses=["MIT"]  stars=null
  max_issues: path=comb/mq/mysql.py  repo=nextoa/comb  head=9bddd6c7366bd71b06d0ad7c28188abec8a874b0  licenses=["MIT"]  issues=2  (2015-06-30T10:59:58.000Z .. 2016-01-14T07:15:15.000Z)
  max_forks:  path=comb/mq/mysql.py  repo=kbonez/comb  head=9bddd6c7366bd71b06d0ad7c28188abec8a874b0  licenses=["MIT"]  forks=1  (2019-11-09T20:34:54.000Z .. 2019-11-09T20:34:54.000Z)
  content:
# -*- coding: utf-8 -*-
def token(handle):
# @todo
pass
def release(handle):
# @todo
pass
  stats: avg_line_length=9.166667  max_line_length=23  alphanum_fraction=0.509091

Record 3
  hexsha: 22d9e7f02b2ae6267e6489c7aae8ca82ff5ab691  |  size: 2,146  |  ext: py  |  lang: Python
  max_stars:  path=fcoclient/api.py  repo=b-77/cloudify-flexiant-plugin  head=72b8bd98995331972a404d4e22c2df415d9d9e9e  licenses=["Apache-2.0"]  stars=null
  max_issues: path=fcoclient/api.py  repo=b-77/cloudify-flexiant-plugin  head=72b8bd98995331972a404d4e22c2df415d9d9e9e  licenses=["Apache-2.0"]  issues=null
  max_forks:  path=fcoclient/api.py  repo=b-77/cloudify-flexiant-plugin  head=72b8bd98995331972a404d4e22c2df415d9d9e9e  licenses=["Apache-2.0"]  forks=null
  content:
# coding=UTF-8
"""Abstraction of FCO API in the form of a Python wrapper."""
import fcoclient.clients as clients
import fcoclient.exceptions as exceptions
import resttypes.endpoints as endpoints
import json
class REST(object):
"""FCO REST API Interface."""
def __init__(self, auth, logger):
"""Initialise FCP API Interface."""
self.client = clients.get_client(auth, logger=logger)
self.logger = logger
self.logger.debug('REST API initialised with auth: %s', auth)
def __getattr__(self, item):
"""Get relevant Endpoint object when accessed."""
def wrapper(*args, **kwargs):
self.logger.debug('REST API endpoint request: %s', item)
return self.query(item, *args, **kwargs)
return wrapper
def query(self, endpoint, parameters=None, data=None, validate=False,
**kwargs):
"""Perform an API query to the given endpoint."""
endpoint = endpoint[0].capitalize() + endpoint[1:]
endpoint = getattr(endpoints, endpoint)(parameters, data, **kwargs)
type_, url = endpoint.endpoint
self.logger.info('%s', endpoint)
payload = endpoint.untype()
self.logger.info('%s', payload)
if not len(payload):
payload = None
self.logger.debug('REST API generated endpoint:\nTYPE: %s\nURL: %s\n'
'DATA: %s', type_, url, payload)
if type_ is endpoints.Verbs.PUT:
fn = self.client.put
elif type_ is endpoints.Verbs.GET:
fn = self.client.get
elif type_ is endpoints.Verbs.POST:
fn = self.client.post
if payload:
payload = json.JSONEncoder().encode(payload)
elif type_ is endpoints.Verbs.DELETE:
fn = self.client.delete
else:
raise exceptions.NonRecoverableError('unsupported API verb')
rv = fn(url, payload)
self.logger.debug('REST API return value: %s', rv)
if validate:
return rv, endpoint.validate_return(rv)
else:
return endpoint.RETURNS.items()[0][1](rv)
  stats: avg_line_length=32.515152  max_line_length=77  alphanum_fraction=0.603448
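
A hypothetical usage sketch for the REST wrapper above: its `__getattr__` turns any attribute access into an endpoint query, so a call such as `rest.listServers(...)` is routed through `query('listServers', ...)`, which capitalises the name and resolves `endpoints.ListServers`. The credentials dict and the endpoint name here are placeholders, not confirmed parts of the fcoclient API:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("fco")
    auth = {"username": "user@example.com", "password": "secret"}  # placeholder credentials

    rest = REST(auth, logger)
    # Any attribute becomes an endpoint call; 'listServers' is illustrative only:
    # result = rest.listServers(parameters={"max": 10})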

Record 4
  hexsha: ab9e1413030083c82cb05a882dd3b761c0a7b44a  |  size: 517  |  ext: py  |  lang: Python
  max_stars:  path=discordbot.py  repo=shiitake08/discordpy-startup  head=becd187061d15b2fc8b9dc12f3e6fbb9e85ab8bd  licenses=["MIT"]  stars=null
  max_issues: path=discordbot.py  repo=shiitake08/discordpy-startup  head=becd187061d15b2fc8b9dc12f3e6fbb9e85ab8bd  licenses=["MIT"]  issues=null
  max_forks:  path=discordbot.py  repo=shiitake08/discordpy-startup  head=becd187061d15b2fc8b9dc12f3e6fbb9e85ab8bd  licenses=["MIT"]  forks=null
  content:
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.command()
async def kamaneko(ctx):
await ctx.send('boko')
bot.run(token)
  stats: avg_line_length=20.68  max_line_length=89  alphanum_fraction=0.72147

Record 5
  hexsha: d281f4fa442827272cc456b77af21d3bcc763f22  |  size: 2,769  |  ext: py  |  lang: Python
  max_stars:  path=examples/src/main/python/ml/simple_text_classification_pipeline.py  repo=liuhb86/spark  head=18f2098433e0bfef9497bacd601fdf098ed03eab  licenses=["Apache-2.0"]  stars=24  (2017-10-11T02:59:45.000Z .. 2021-12-06T05:01:13.000Z)
  max_issues: path=examples/src/main/python/ml/simple_text_classification_pipeline.py  repo=liuhb86/spark  head=18f2098433e0bfef9497bacd601fdf098ed03eab  licenses=["Apache-2.0"]  issues=1  (2022-03-21T18:44:10.000Z .. 2022-03-21T18:44:10.000Z)
  max_forks:  path=examples/src/main/python/ml/simple_text_classification_pipeline.py  repo=liuhb86/spark  head=18f2098433e0bfef9497bacd601fdf098ed03eab  licenses=["Apache-2.0"]  forks=18  (2017-10-26T14:16:10.000Z .. 2022-03-30T02:18:14.000Z)
  content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import Row, SQLContext
"""
A simple text classification pipeline that recognizes "spark" from
input text. This is to show how to create and configure a Spark ML
pipeline in Python. Run with:
bin/spark-submit examples/src/main/python/ml/simple_text_classification_pipeline.py
"""
if __name__ == "__main__":
sc = SparkContext(appName="SimpleTextClassificationPipeline")
sqlCtx = SQLContext(sc)
# Prepare training documents, which are labeled.
LabeledDocument = Row("id", "text", "label")
training = sc.parallelize([(0L, "a b c d e spark", 1.0),
(1L, "b d", 0.0),
(2L, "spark f g h", 1.0),
(3L, "hadoop mapreduce", 0.0)]) \
.map(lambda x: LabeledDocument(*x)).toDF()
    # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=10, regParam=0.01)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
# Fit the pipeline to training documents.
model = pipeline.fit(training)
# Prepare test documents, which are unlabeled.
Document = Row("id", "text")
test = sc.parallelize([(4L, "spark i j k"),
(5L, "l m n"),
(6L, "mapreduce spark"),
(7L, "apache hadoop")]) \
.map(lambda x: Document(*x)).toDF()
# Make predictions on test documents and print columns of interest.
prediction = model.transform(test)
selected = prediction.select("id", "text", "prediction")
for row in selected.collect():
print row
sc.stop()
  stats: avg_line_length=39.557143  max_line_length=92  alphanum_fraction=0.675334

Record 6
  hexsha: 128461083691a6de3b31088e462523e87f8699e6  |  size: 1,567  |  ext: py  |  lang: Python
  max_stars:  path=confusion_test.py  repo=cocoaaa/ml_gesture  head=a23dd7b9d13bbcb5a1ee049a7f1b026f81a4ba9d  licenses=["MIT"]  stars=null
  max_issues: path=confusion_test.py  repo=cocoaaa/ml_gesture  head=a23dd7b9d13bbcb5a1ee049a7f1b026f81a4ba9d  licenses=["MIT"]  issues=null
  max_forks:  path=confusion_test.py  repo=cocoaaa/ml_gesture  head=a23dd7b9d13bbcb5a1ee049a7f1b026f81a4ba9d  licenses=["MIT"]  forks=null
  content:
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 10:12:44 2015
Confusion matrix studies on the Iris data
@author: LLP-admin
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
from sklearn import datasets
df =datasets.load_iris()
data_x = pd.DataFrame( df.data, columns = df.feature_names )
f0 = data_x[ data_x.columns[[0]] ]; f1 = data_x [data_x.columns[1]]
f2 = data_x[ data_x.columns[2] ]
data_y = df.target
x_min = np.min(f0) - 0.5; x_max = np.max(f0) + 0.5;
y_min = np.min(f1) - 0.5; y_max = np.max(f1) + 0.5;
plt.figure(0,figsize = (8,6));
plt.clf()
x = np.arange(0,11,step = 0.1);
y = [el**2 for el in x]
plt.scatter(f0, f1, c = data_y, cmap = plt.cm.Paired)
plt.xlim = [x_min, x_max]; plt.xlabel('sepal length (cm)')
plt.ylim = [y_min, y_max]; plt.ylabel('sepal width (cm) ')
##3d
fig1 = plt.figure(1, figsize = (8,6));
ax1 = Axes3D(fig1)
ax1.scatter(f0, f1, f2, c = data_y, cmap = plt.cm.Paired)
ax1.set_xlabel (data_x.columns[0]); ax1.set_ylabel (data_x.columns[1]); ax1.set_zlabel(data_x.columns[2]);
ax1.set_title('raw data plotted in 3D')
##pca
PCA_data_x = PCA(n_components = 3).fit_transform(data_x)
pc0 = PCA_data_x[:, 0]; pc1 = PCA_data_x[:, 1]; pc2 = PCA_data_x[:, 2];
fig2 = plt.figure(2, figsize = (8,6));
ax2 = Axes3D(fig2);
ax2.scatter(pc0, pc1, pc2, c = data_y, cmap = plt.cm.Paired);
ax2.set_title("First three Principle components");
ax2.set_xlabel('first pc'); ax2.set_ylabel("second pc"); ax2.set_zlabel("third pc")
plt.show()
  stats: avg_line_length=29.566038  max_line_length=106  alphanum_fraction=0.681557

Record 7
  hexsha: b6ca608f06b523d93f3ac2201c9bf0481993de52  |  size: 4,321  |  ext: py  |  lang: Python
  max_stars:  path=examples/image/resnet50.py  repo=vsl9/NeMo  head=4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50  licenses=["Apache-2.0"]  stars=2  (2021-03-04T16:37:46.000Z .. 2021-03-04T16:40:22.000Z)
  max_issues: path=examples/image/resnet50.py  repo=vsl9/NeMo  head=4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50  licenses=["Apache-2.0"]  issues=null
  max_forks:  path=examples/image/resnet50.py  repo=vsl9/NeMo  head=4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50  licenses=["Apache-2.0"]  forks=null
  content:
# Copyright (c) 2019 NVIDIA Corporation
import argparse
import os
import sys
from tensorboardX import SummaryWriter
import nemo
from nemo.backends.pytorch.torchvision.helpers import compute_accuracy, eval_epochs_done_callback, eval_iter_callback
from nemo.utils.lr_policies import SquareAnnealing
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
parser = argparse.ArgumentParser(description='ResNet50 on ImageNet')
parser.add_argument("--local_rank", default=None, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--num_epochs", default=100, type=int)
parser.add_argument("--max_steps", default=None, type=int)
parser.add_argument("--learning_rate", default=0.1, type=float)
parser.add_argument("--weight_decay", default=0.0001, type=float)
parser.add_argument("--momentum", default=0.91, type=float)
parser.add_argument("--num_gpus", default=1, type=int)
parser.add_argument("--data_root", default=None, type=str)
parser.add_argument("--tb_folder", default=None, type=str)
args = parser.parse_args()
if args.local_rank is not None:
device = nemo.core.DeviceType.AllGpu
else:
device = nemo.core.DeviceType.GPU
batch_size = args.batch_size
num_epochs = args.num_epochs
learning_rate = args.learning_rate
max_steps = args.max_steps
weight_decay = args.weight_decay
momentum = args.momentum
num_gpus = args.num_gpus
if args.tb_folder is None:
tb_folder = 'resnet50_fp32'
else:
tb_folder = args.tb_folder
tb_writer = SummaryWriter(tb_folder)
# instantiate Neural Factory with supported backend
neural_factory = nemo.core.NeuralModuleFactory(
backend=nemo.core.Backend.PyTorch,
local_rank=args.local_rank,
# Set this to nemo.core.Optimization.mxprO1
# if you have Volta or Turing GPU
optimization_level=nemo.core.Optimization.mxprO0,
)
resnet = neural_factory.get_module(
name="resnet50", params={"placement": device}, collection="torchvision", pretrained=False,
)
dl_train = neural_factory.get_module(
name="ImageFolderDataLayer",
collection="torchvision",
params={
"batch_size": batch_size,
"input_size": resnet.inputs["x"].axis2type[2].dim,
"shuffle": True,
"path": args.data_root + "train",
# "path": "/mnt/D1/Data/ImageNet/ImageFolder/train",
"placement": device,
},
)
L_train = neural_factory.get_module(name="CrossEntropyLoss", collection="toys", params={"placement": device})
dl_eval = neural_factory.get_module(
name="ImageFolderDataLayer",
collection="torchvision",
params={
"batch_size": batch_size,
"input_size": resnet.inputs["x"].axis2type[2].dim,
"shuffle": False,
"is_eval": True,
"path": args.data_root + "val",
# "path": "/mnt/D1/Data/ImageNet/ImageFolder/val",
# "path": "/raid/okuchaiev/Data/ImageNet/ImageFolder/val",
"placement": device,
},
)
L_eval = neural_factory.get_module(name="CrossEntropyLoss", collection="toys", params={"placement": device})
step_per_epoch = int(len(dl_train) / (batch_size * num_gpus))
images, labels = dl_train()
outputs = resnet(x=images)
train_loss = L_train(predictions=outputs, labels=labels)
e_images, e_labels = dl_eval()
e_outputs = resnet(x=e_images)
e_loss = L_eval(predictions=e_outputs, labels=e_labels)
callback = nemo.core.SimpleLossLoggerCallback(
step_freq=50,
tb_writer=tb_writer,
tensor_list2str=lambda x: str(x[0].item()),
tensor_list2str_evl=lambda x: compute_accuracy(x),
)
callback_eval = nemo.core.EvaluatorCallback(
eval_tensors=[e_loss, e_outputs, e_labels],
user_iter_callback=eval_iter_callback,
user_epochs_done_callback=eval_epochs_done_callback,
eval_step=10000,
tb_writer=tb_writer,
)
# Instantiate an optimizer to perform `train` action
optimizer = neural_factory.get_trainer(
params={
"optimization_params": {
"num_epochs": num_epochs,
"lr": learning_rate,
"max_steps": max_steps,
"weight_decay": weight_decay,
"momentum": momentum,
}
}
)
optimizer.train(
tensors_to_optimize=[train_loss],
tensors_to_evaluate=[outputs, labels],
callbacks=[callback, callback_eval],
lr_policy=SquareAnnealing(num_epochs * step_per_epoch),
)
  stats: avg_line_length=31.540146  max_line_length=117  alphanum_fraction=0.722055

Record 8
  hexsha: 87ae0ffa42dc7530b60e89e4df7abeed9e43a735  |  size: 821  |  ext: py  |  lang: Python
  max_stars:  path=pytorch-SAC/Hyperparameters.py  repo=Fable67/Soft-Actor-Critic-Pytorch  head=034c5cc37904f568773cdf3c25caf1a5d28a6cee  licenses=["MIT"]  stars=4  (2019-05-08T23:18:26.000Z .. 2019-09-05T19:59:47.000Z)
  max_issues: path=pytorch-SAC/Hyperparameters.py  repo=Fable67/Soft-Actor-Critic-Pytorch  head=034c5cc37904f568773cdf3c25caf1a5d28a6cee  licenses=["MIT"]  issues=null
  max_forks:  path=pytorch-SAC/Hyperparameters.py  repo=Fable67/Soft-Actor-Critic-Pytorch  head=034c5cc37904f568773cdf3c25caf1a5d28a6cee  licenses=["MIT"]  forks=1  (2022-01-19T06:47:56.000Z .. 2022-01-19T06:47:56.000Z)
  content:
import torch
from ReplayBuffer import ReplayBuffer
from CombinedReplayBuffer import CombinedReplayBuffer
import torch.optim as optim
from ranger import Ranger
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
POLICY_LEARNING_RATE = 3e-4
Q_LEARNING_RATE = 3e-4
ALPHA_LEARNING_RATE = 3e-4
POLICY_OPTIM = optim.Adam # Ranger
Q_OPTIM = optim.Adam # Ranger
ALPHA_OPTIM = optim.Adam # Ranger
GAMMA = 0.99
TAU = 0.005
LOGSTD_MIN = -20
LOGSTD_MAX = 2
INITIAL_REPLAY_SIZE = 10000
REPLAY_SIZE = 1000000
REPLAY_BUFFER = ReplayBuffer
HIDDEN_SIZE = 256
BATCH_SIZE = 256
NUM_ITERATIONS = 10000000
EVAL_FREQ = 5000
NUM_EVAL_GAMES = 10
SUMMARY_FREQ = 1000
SAVE_FREQ = 500000
MAX_STEPS = 1000
NUM_TRAINS_PER_TRAIN_LOOP = 1000
NUM_EXPL_STEPS_PER_TRAIN_LOOP = 1000
MUNCHAUSEN = False
M_ALPHA = 0.9
M_TAU = 0.03
M_L0 = -1
  stats: avg_line_length=20.02439  max_line_length=55  alphanum_fraction=0.784409
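
A short sketch of how such a constants module is typically consumed (assumes torch is installed and the file above imports as `Hyperparameters`; the one-layer policy network is a placeholder, not part of the original project):

    import torch.nn as nn
    import Hyperparameters as h

    # Placeholder policy network, moved to the device chosen by the config module.
    policy = nn.Linear(8, 2).to(h.DEVICE)
    # Both the optimizer class and the learning rate come from the config:
    policy_opt = h.POLICY_OPTIM(policy.parameters(), lr=h.POLICY_LEARNING_RATE)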

Record 9
  hexsha: eecd58e988f0a937e5c4cc571a3f68a1d15835b6  |  size: 2,057  |  ext: py  |  lang: Python
  max_stars:  path=src/connectedvmware/azext_connectedvmware/vendored_sdks/models/tracked_resource.py  repo=haroonf/azure-cli-extensions  head=61c044d34c224372f186934fa7c9313f1cd3a525  licenses=["MIT"]  stars=207  (2017-11-29T06:59:41.000Z .. 2022-03-31T10:00:53.000Z)
  max_issues: path=src/connectedvmware/azext_connectedvmware/vendored_sdks/models/tracked_resource.py  repo=haroonf/azure-cli-extensions  head=61c044d34c224372f186934fa7c9313f1cd3a525  licenses=["MIT"]  issues=4,061  (2017-10-27T23:19:56.000Z .. 2022-03-31T23:18:30.000Z)
  max_forks:  path=src/connectedvmware/azext_connectedvmware/vendored_sdks/models/tracked_resource.py  repo=haroonf/azure-cli-extensions  head=61c044d34c224372f186934fa7c9313f1cd3a525  licenses=["MIT"]  forks=802  (2017-10-11T17:36:26.000Z .. 2022-03-31T22:24:32.000Z)
  content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top
level resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. E.g.
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs.get('location', None)
  stats: avg_line_length=36.087719  max_line_length=139  alphanum_fraction=0.601361
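
A small sketch of instantiating the generated model above: since `id`, `name`, and `type` are read-only (server-populated), only `tags` and `location` are passed, and the values here are placeholders:

    # location is the only required parameter; "eastus" is an example value.
    resource = TrackedResource(location="eastus", tags={"env": "dev"})
    assert resource.location == "eastus"
    assert resource.tags == {"env": "dev"}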

Record 10
  hexsha: 5a42bc515555091b62a329de7b2e4547c65b0aee  |  size: 703  |  ext: py  |  lang: Python
  max_stars:  path=server/catalog/management/commands/update_mapping.py  repo=icapora/django-elasticsearch  head=4ae00c84b66a927c33eaffcdb86fedb0e100728d  licenses=["MIT"]  stars=null
  max_issues: path=server/catalog/management/commands/update_mapping.py  repo=icapora/django-elasticsearch  head=4ae00c84b66a927c33eaffcdb86fedb0e100728d  licenses=["MIT"]  issues=null
  max_forks:  path=server/catalog/management/commands/update_mapping.py  repo=icapora/django-elasticsearch  head=4ae00c84b66a927c33eaffcdb86fedb0e100728d  licenses=["MIT"]  forks=null
  content:
from django.core.management.base import BaseCommand, CommandError
from elasticsearch_dsl import connections
from catalog.constants import ES_INDEX, ES_MAPPING
class Command(BaseCommand):
help = 'Updates a mapping on an Elasticsearch index.'
def handle(self, *args, **kwargs):
self.stdout.write(f'Updating mapping on "{ES_INDEX}" index...')
connection = connections.get_connection()
if connection.indices.exists(index=ES_INDEX):
connection.indices.put_mapping(index=ES_INDEX, body=ES_MAPPING)
self.stdout.write(f'Updated mapping on "{ES_INDEX}" successfully')
else:
raise CommandError(f'Index "{ES_INDEX}" does not exist')
  stats: avg_line_length=37  max_line_length=78  alphanum_fraction=0.709815
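
Django resolves a management command from its module name, so the command above should be runnable as `python manage.py update_mapping`; it can also be invoked programmatically via `call_command` (a configured Django settings module and a reachable Elasticsearch are assumed):

    from django.core.management import call_command

    # Equivalent to running: python manage.py update_mapping
    call_command("update_mapping")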

Record 11
  hexsha: 76a8732468efb01e91b3adb0789286ba6392b6ad  |  size: 6,281  |  ext: py  |  lang: Python
  max_stars:  path=tests/test_dpkg_evironment.py  repo=adelton/swidGenerator  head=0d0941d261925cc01638e88df748c0f2c4395131  licenses=["MIT"]  stars=13  (2015-10-16T18:28:32.000Z .. 2021-08-29T09:36:08.000Z)
  max_issues: path=tests/test_dpkg_evironment.py  repo=adelton/swidGenerator  head=0d0941d261925cc01638e88df748c0f2c4395131  licenses=["MIT"]  issues=11  (2018-07-03T13:34:08.000Z .. 2019-04-10T10:29:29.000Z)
  max_forks:  path=tests/test_dpkg_evironment.py  repo=adelton/swidGenerator  head=0d0941d261925cc01638e88df748c0f2c4395131  licenses=["MIT"]  forks=12  (2017-02-22T14:51:10.000Z .. 2022-03-23T16:55:20.000Z)
  content:
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import unittest
from swid_generator.command_manager import CommandManager
from tests.fixtures.command_manager_mock import CommandManagerMock
from swid_generator.environments.dpkg_environment import DpkgEnvironment
from swid_generator.package_info import PackageInfo, FileInfo
from swid_generator.environments.common import CommonEnvironment
from mock import patch
class DpkgEnvironmentTests(unittest.TestCase):
def setUp(self):
self.command_manager_run_check_output_patch = patch.object(CommandManager, 'run_command_check_output')
self.command_manager_run_command_patch = patch.object(CommandManager, 'run_command')
self.common_environment_is_file_patch = patch.object(CommonEnvironment, '_is_file')
self.os_path_getsize_patch = patch.object(os.path, 'getsize')
self.command_manager_run_check_output_mock = self.command_manager_run_check_output_patch.start()
self.common_environment_is_file_mock = self.common_environment_is_file_patch.start()
self.os_path_getsize_mock = self.os_path_getsize_patch.start()
self.command_manager_run_command_mock = self.command_manager_run_command_patch.start()
self.command_manager_run_check_output_mock.side_effect = CommandManagerMock.run_command_check_output
self.command_manager_run_command_mock.side_effect = CommandManagerMock.run_command
self.common_environment_is_file_mock.return_value = True
self.os_path_getsize_mock.return_value = 1
self.dpkg_environment = DpkgEnvironment()
def tearDown(self):
self.command_manager_run_check_output_patch.stop()
self.command_manager_run_command_patch.stop()
self.common_environment_is_file_patch.stop()
self.os_path_getsize_patch.stop()
def test_get_package_list(self):
result_list = self.dpkg_environment.get_package_list()
expected_package_list = list()
expected_package_list.append(PackageInfo(package="adduser", version="3.113+nmu3ubuntu4"))
expected_package_list.append(PackageInfo(package="apt", version="1.2.19"))
expected_package_list.append(PackageInfo(package="base-files", version="9.4ubuntu4.4"))
for index, result_package in enumerate(result_list):
print(result_package.package)
print(result_package.version)
assert result_package.package == expected_package_list[index].package
assert result_package.version == expected_package_list[index].version
def test_get_package_arch_list(self):
result_list = self.dpkg_environment.get_package_list({ "dpkg_include_package_arch": True })
expected_package_list = list()
expected_package_list.append(PackageInfo(package="adduser", version="3.113+nmu3ubuntu4.all"))
expected_package_list.append(PackageInfo(package="apt", version="1.2.19.amd64"))
expected_package_list.append(PackageInfo(package="base-files", version="9.4ubuntu4.4.amd64"))
for index, result_package in enumerate(result_list):
print(result_package.package)
print(result_package.version)
assert result_package.package == expected_package_list[index].package
assert result_package.version == expected_package_list[index].version
def test_get_files_for_package(self):
package_info = PackageInfo(package="docker")
result_list = self.dpkg_environment.get_files_for_package(package_info)
expected_file_list = list()
expected_file_list.append(FileInfo("/etc/apt/apt.conf.d/01autoremove"))
expected_file_list.append(FileInfo("/etc/cron.daily/apt-compat"))
expected_file_list.append(FileInfo("/etc/kernel/postinst.d/apt-auto-removal"))
expected_file_list.append(FileInfo("/usr/share/doc/docker"))
expected_file_list.append(FileInfo("/usr/share/doc/docker/changelog.Debian.gz"))
expected_file_list.append(FileInfo("/usr/share/menu"))
expected_file_list.append(FileInfo("/usr/share/menu/docker"))
for index, result_file in enumerate(result_list):
assert result_file.name == expected_file_list[index].name
assert result_file.location == expected_file_list[index].location
if index <= 2: # These are configuration-files
assert result_file.mutable is True
assert result_file.location == expected_file_list[index].location
assert result_file.full_pathname == expected_file_list[index].full_pathname
def test_get_packageinfo_from_packagefile(self):
result_package = self.dpkg_environment.get_packageinfo_from_packagefile("/tmp/docker.pkg")
print(result_package.package)
assert result_package.package == 'docker'
assert result_package.version == '1.0-5'
def test_get_packageinfo_arch_from_packagefile(self):
result_package = self.dpkg_environment.get_packageinfo_from_packagefile("/tmp/docker.pkg", { "dpkg_include_package_arch": True })
print(result_package.package)
assert result_package.package == 'docker'
assert result_package.version == '1.0-5.amd64'
def test_get_files_from_packagefile(self):
all_files = self.dpkg_environment.get_files_from_packagefile("/tmp/docker.pkg")
for f in all_files:
print(f.full_pathname)
expected_file_list = list()
expected_file_list.append(FileInfo("/usr/share/bug/docker-bin/control"))
expected_file_list.append(FileInfo("/usr/share/bug/docker/control"))
expected_file_list.append(FileInfo("/usr/share/doc/docker/README.backtrace"))
expected_file_list.append(FileInfo("/usr/share/man/man8/docker.8.gz"))
expected_file_list.append(FileInfo("/usr/share/man/man8/dockerctl.8.gz"))
for index, result_file in enumerate(all_files):
assert result_file.name == expected_file_list[index].name
assert result_file.location == expected_file_list[index].location
assert result_file.location == expected_file_list[index].location
assert result_file.full_pathname == expected_file_list[index].full_pathname
  stats: avg_line_length=48.689922  max_line_length=137  alphanum_fraction=0.741124

Record 12
  hexsha: 31ada8c9a12ca55750ecb63f23f6f61d3d33db48  |  size: 623  |  ext: py  |  lang: Python
  max_stars:  path=AutoLog/setup.py  repo=gouzil/Learn-DeepFM  head=138971145617ace4c8bb5ff153a2c38723e181f7  licenses=["Apache-2.0"]  stars=1  (2022-02-24T10:20:06.000Z .. 2022-02-24T10:20:06.000Z)
  max_issues: path=AutoLog/setup.py  repo=gouzil/Learn-DeepFM  head=138971145617ace4c8bb5ff153a2c38723e181f7  licenses=["Apache-2.0"]  issues=null
  max_forks:  path=AutoLog/setup.py  repo=gouzil/Learn-DeepFM  head=138971145617ace4c8bb5ff153a2c38723e181f7  licenses=["Apache-2.0"]  forks=1  (2022-02-24T10:20:08.000Z .. 2022-02-24T10:20:08.000Z)
  content:
from setuptools import setup
# python3.7 setup.py bdist_wheel
with open('requirements.txt', encoding="utf-8-sig") as f:
requirements = f.readlines()
setup(name='auto_log',
version='1.0.0',
install_requires=requirements,
license='Apache License 2.0',
keywords='auto log',
description="The AutoLog Contains automatic timing, statistics on CPU memory, GPU memory and other information, since generating logs and other functions.",
url='https://github.com/LDOUBLEV/AutoLog',
author='DoubleV',
author_email='liuvv0203@gmail.com',
packages=['auto_log'],
)
  stats: avg_line_length=32.789474  max_line_length=162  alphanum_fraction=0.682183

Record 13
  hexsha: a7b903257023fa9f3ce89c0dfbfd044f2c12c7e1  |  size: 49,450  |  ext: py  |  lang: Python
  max_stars:  path=nova/api/openstack/compute/volumes.py  repo=bopopescu/nova-token  head=ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2  licenses=["Apache-2.0"]  stars=null
  max_issues: path=nova/api/openstack/compute/volumes.py  repo=bopopescu/nova-token  head=ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2  licenses=["Apache-2.0"]  issues=null
  max_forks:  path=nova/api/openstack/compute/volumes.py  repo=bopopescu/nova-token  head=ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2  licenses=["Apache-2.0"]  forks=2  (2017-07-20T17:31:34.000Z .. 2020-07-24T02:42:19.000Z)
  content (stored in this repository as a token-per-line stream; decoded back to Python source below, ending where the record itself is truncated):
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""The volumes extension."""

from oslo_utils import strutils
from webob import exc

from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import volumes as volumes_schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import objects
from nova import volume

ALIAS = "os-volumes"
authorize = extensions.os_compute_authorizer(ALIAS)
authorize_attach = extensions.os_compute_authorizer('os-volumes-attachments')


def _translate_volume_detail_view(context, vol):
    """Maps keys for volumes details view."""

    d = _translate_volume_summary_view(context, vol)

    # No additional data / lookups at the moment

    return d


def _translate_volume_summary_view(context, vol):
    """Maps keys for volumes summary view."""
    d = {}

    d['id'] = vol['id']
    d['status'] = vol['status']
    d['size'] = vol['size']
    d['availabilityZone'] = vol['availability_zone']
    d['createdAt'] = vol['created_at']

    if vol['attach_status'] == 'attached':
        # NOTE(ildikov): The attachments field in the volume info that
        # Cinder sends is converted to an OrderedDict with the
        # instance_uuid as key to make it easier for the multiattach
        # feature to check the required information. Multiattach will
        # be enable in the Nova API in Newton.
        # The format looks like the following:
        # attachments = {'instance_uuid': {
        #                   'attachment_id': 'attachment_uuid',
        #                   'mountpoint': '/dev/sda/
        #                   }
        #                }
        attachment = vol['attachments'].items()[0]
        d['attachments'] = [_translate_attachment_detail_view(vol['id'],
                                attachment[0],
                                attachment[1].get('mountpoint'))]
    else:
        d['attachments'] = [{}]

    d['displayName'] = vol['display_name']
    d['displayDescription'] = vol['display_description']

    if vol['volume_type_id'] and vol.get('volume_type'):
        d['volumeType'] = vol['volume_type']['name']
    else:
        d['volumeType'] = vol['volume_type_id']

    d['snapshotId'] = vol['snapshot_id']

    if vol.get('volume_metadata'):
        d['metadata'] = vol.get('volume_metadata')
    else:
        d['metadata'] = {}

    return d


class VolumeController(wsgi.Controller):
    """The Volumes API controller for the OpenStack API."""

    def __init__(self):
        self.volume_api = volume.API()
        super(VolumeController, self).__init__()

    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return data about the given volume."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            vol = self.volume_api.get(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())

        return {'volume': _translate_volume_detail_view(context, vol)}

    @wsgi.response(202)
    @extensions.expected_errors(404)
    def delete(self, req, id):
        """Delete a volume."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            self.volume_api.delete(context, id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())

    @extensions.expected_errors(())
    def index(self, req):
        """Returns a summary list of volumes."""
        return self._items(req, entity_maker=_translate_volume_summary_view)

    @extensions.expected_errors(())
    def detail(self, req):
        """Returns a detailed list of volumes."""
        return self._items(req, entity_maker=_translate_volume_detail_view)

    def _items(self, req, entity_maker):
        """Returns a list of volumes, transformed through entity_maker."""
        context = req.environ['nova.context']
        authorize(context)

        volumes = self.volume_api.get_all(context)
        limited_list = common.limited(volumes, req)
        res = [entity_maker(context, vol) for vol in limited_list]
        return {'volumes': res}

    @extensions.expected_errors((400, 403, 404))
    @validation.schema(volumes_schema.create)
    def create(self, req, body):
        """Creates a new volume."""
        context = req.environ['nova.context']
        authorize(context)

        vol = body['volume']

        vol_type = vol.get('volume_type')
        metadata = vol.get('metadata')
        snapshot_id = vol.get('snapshot_id', None)

        if snapshot_id is not None:
            try:
                snapshot = self.volume_api.get_snapshot(context, snapshot_id)
            except exception.SnapshotNotFound as e:
                raise exc.HTTPNotFound(explanation=e.format_message())
        else:
            snapshot = None

        size = vol.get('size', None)
        if size is None and snapshot is not None:
            size = snapshot['volume_size']

        availability_zone = vol.get('availability_zone')

        try:
            new_volume = self.volume_api.create(
                context,
                size,
                vol.get('display_name'),
                vol.get('display_description'),
                snapshot=snapshot,
                volume_type=vol_type,
                metadata=metadata,
                availability_zone=availability_zone
                )
        except exception.InvalidInput as err:
            raise exc.HTTPBadRequest(explanation=err.format_message())
        except exception.OverQuota as err:
            raise exc.HTTPForbidden(explanation=err.format_message())

        # TODO(vish): Instance should be None at db layer instead of
        #             trying to lazy load, but for now we turn it into
        #             a dict to avoid an error.
        retval = _translate_volume_detail_view(context, dict(new_volume))
        result = {'volume': retval}

        location = '%s/%s' % (req.url, new_volume['id'])

        return wsgi.ResponseObject(result, headers=dict(location=location))


def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
    """Maps keys for attachment details view."""

    d = _translate_attachment_summary_view(volume_id,
                                           instance_uuid,
                                           mountpoint)

    # No additional data / lookups at the moment
    return d


def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
    """Maps keys for attachment summary view."""
    d = {}

    # NOTE(justinsb): We use the volume id as the id of the attachment object
    d['id'] = volume_id

    d['volumeId'] = volume_id

    d['serverId'] = instance_uuid
    if mountpoint:
        d['device'] = mountpoint

    return d


def _check_request_version(req, min_version, method, server_id, server_state):
    if not api_version_request.is_supported(req, min_version=min_version):
        exc_inv = exception.InstanceInvalidState(
            attr='vm_state',
            instance_uuid=server_id,
            state=server_state,
            method=method)
        common.raise_http_conflict_for_instance_invalid_state(
            exc_inv,
            method,
            server_id)


class VolumeAttachmentController(wsgi.Controller):
    """The volume attachment API controller for the OpenStack API.

    A child resource of the server. Note that we use the volume id
    as the ID of the attachment (though this is not guaranteed externally)

    """

    def __init__(self):
        self.compute_api = compute.API(skip_policy_check=True)
        self.volume_api = volume.API()
        super(VolumeAttachmentController, self).__init__()

    @extensions.expected_errors(404)
    def index(self, req, server_id):
        """Returns the list of volume attachments for a given instance."""
        context = req.environ['nova.context']
        authorize_attach(context, action='index')
        return self._items(req, server_id,
                           entity_maker=_translate_attachment_summary_view)

    @extensions.expected_errors(404)
    def show(self, req, server_id, id):
        """Return data about the given volume attachment."""
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='show')

        volume_id = id
        instance = common.get_instance(self.compute_api, context, server_id)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)

        if not bdms:
            msg = _("Instance %s is not attached.") % server_id
            raise exc.HTTPNotFound(explanation=msg)

        assigned_mountpoint = None

        for bdm in bdms:
            if bdm.volume_id == volume_id:
                assigned_mountpoint = bdm.device_name
                break

        if assigned_mountpoint is None:
            msg = _("volume_id not found: %s") % volume_id
            raise exc.HTTPNotFound(explanation=msg)

        return {'volumeAttachment': _translate_attachment_detail_view(
            volume_id,
            instance.uuid,
            assigned_mountpoint)}

    @extensions.expected_errors((400, 404, 409))
    @validation.schema(volumes_schema.create_volume_attachment)
    def create(self, req, server_id, body):
        """Attach a volume to an instance."""
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='create')

        volume_id = body['volumeAttachment']['volumeId']
        device = body['volumeAttachment'].get('device')

        instance = common.get_instance(self.compute_api, context, server_id)

        if instance.vm_state in (vm_states.SHELVED,
                                 vm_states.SHELVED_OFFLOADED):
            _check_request_version(req, '2.20', 'attach_volume',
                                   server_id, instance.vm_state)

        try:
            device = self.compute_api.attach_volume(context, instance,
                                                    volume_id, device)
        except exception.InstanceUnknownCell as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'attach_volume', server_id)
        except (exception.InvalidVolume,
                exception.InvalidDevicePath) as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())

        # The attach is async
        attachment = {}
        attachment['id'] = volume_id
        attachment['serverId'] = server_id
        attachment['volumeId'] = volume_id
        attachment['device'] = device

        # NOTE(justinsb): And now, we have a problem...
        # The attach is async, so there's a window in which we don't see
        # the attachment (until the attachment completes). We could also
        # get problems with concurrent requests. I think we need an
        # attachment state, and to write to the DB here, but that's a bigger
        # change.
        # For now, we'll probably have to rely on libraries being smart

        # TODO(justinsb): How do I return "accepted" here?
        return {'volumeAttachment': attachment}

    @wsgi.response(202)
    @extensions.expected_errors((400, 404, 409))
    @validation.schema(volumes_schema.update_volume_attachment)
    def update(self, req, server_id, id, body):
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='update')

        old_volume_id = id
        try:
            old_volume = self.volume_api.get(context, old_volume_id)

            new_volume_id = body['volumeAttachment']['volumeId']
            new_volume = self.volume_api.get(context, new_volume_id)
        except exception.VolumeNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())

        instance = common.get_instance(self.compute_api, context, server_id)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        found = False
        try:
            for bdm in bdms:
                if bdm.volume_id != old_volume_id:
                    continue
                try:
                    self.compute_api.swap_volume(context, instance, old_volume,
                                                 new_volume)
                    found = True
                    break
                except exception.VolumeUnattached:
                    # The volume is not attached. Treat it as NotFound
                    # by falling through.
                    pass
                except exception.InvalidVolume as e:
                    raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'swap_volume', server_id)

        if not found:
            msg = _("The volume was either invalid or not attached to the "
                    "instance.")
            raise exc.HTTPNotFound(explanation=msg)

    @wsgi.response(202)
    @extensions.expected_errors((400, 403, 404, 409))
    def delete(self, req, server_id, id):
        """Detach a volume from an instance."""
        context = req.environ['nova.context']
        authorize(context)
        authorize_attach(context, action='delete')

        volume_id = id

        instance = common.get_instance(self.compute_api, context, server_id)
        if instance
op|'.'
name|'vm_state'
name|'in'
op|'('
name|'vm_states'
op|'.'
name|'SHELVED'
op|','
nl|'\n'
name|'vm_states'
op|'.'
name|'SHELVED_OFFLOADED'
op|')'
op|':'
newline|'\n'
indent|' '
name|'_check_request_version'
op|'('
name|'req'
op|','
string|"'2.20'"
op|','
string|"'detach_volume'"
op|','
nl|'\n'
name|'server_id'
op|','
name|'instance'
op|'.'
name|'vm_state'
op|')'
newline|'\n'
dedent|''
name|'try'
op|':'
newline|'\n'
indent|' '
name|'volume'
op|'='
name|'self'
op|'.'
name|'volume_api'
op|'.'
name|'get'
op|'('
name|'context'
op|','
name|'volume_id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'VolumeNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'bdms'
op|'='
name|'objects'
op|'.'
name|'BlockDeviceMappingList'
op|'.'
name|'get_by_instance_uuid'
op|'('
nl|'\n'
name|'context'
op|','
name|'instance'
op|'.'
name|'uuid'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'bdms'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
name|'_'
op|'('
string|'"Instance %s is not attached."'
op|')'
op|'%'
name|'server_id'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'found'
op|'='
name|'False'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'bdm'
name|'in'
name|'bdms'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'bdm'
op|'.'
name|'volume_id'
op|'!='
name|'volume_id'
op|':'
newline|'\n'
indent|' '
name|'continue'
newline|'\n'
dedent|''
name|'if'
name|'bdm'
op|'.'
name|'is_root'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
name|'_'
op|'('
string|'"Can\'t detach root device volume"'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPForbidden'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
dedent|''
name|'try'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'compute_api'
op|'.'
name|'detach_volume'
op|'('
name|'context'
op|','
name|'instance'
op|','
name|'volume'
op|')'
newline|'\n'
name|'found'
op|'='
name|'True'
newline|'\n'
name|'break'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'VolumeUnattached'
op|':'
newline|'\n'
comment|'# The volume is not attached. Treat it as NotFound'
nl|'\n'
comment|'# by falling through.'
nl|'\n'
indent|' '
name|'pass'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InvalidVolume'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceUnknownCell'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InvalidInput'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceIsLocked'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPConflict'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceInvalidState'
name|'as'
name|'state_error'
op|':'
newline|'\n'
indent|' '
name|'common'
op|'.'
name|'raise_http_conflict_for_instance_invalid_state'
op|'('
name|'state_error'
op|','
nl|'\n'
string|"'detach_volume'"
op|','
name|'server_id'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'not'
name|'found'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
name|'_'
op|'('
string|'"volume_id not found: %s"'
op|')'
op|'%'
name|'volume_id'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_items
dedent|''
dedent|''
name|'def'
name|'_items'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'server_id'
op|','
name|'entity_maker'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns a list of attachments, transformed through entity_maker."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'instance'
op|'='
name|'common'
op|'.'
name|'get_instance'
op|'('
name|'self'
op|'.'
name|'compute_api'
op|','
name|'context'
op|','
name|'server_id'
op|')'
newline|'\n'
nl|'\n'
name|'bdms'
op|'='
name|'objects'
op|'.'
name|'BlockDeviceMappingList'
op|'.'
name|'get_by_instance_uuid'
op|'('
nl|'\n'
name|'context'
op|','
name|'instance'
op|'.'
name|'uuid'
op|')'
newline|'\n'
name|'limited_list'
op|'='
name|'common'
op|'.'
name|'limited'
op|'('
name|'bdms'
op|','
name|'req'
op|')'
newline|'\n'
name|'results'
op|'='
op|'['
op|']'
newline|'\n'
nl|'\n'
name|'for'
name|'bdm'
name|'in'
name|'limited_list'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'bdm'
op|'.'
name|'volume_id'
op|':'
newline|'\n'
indent|' '
name|'results'
op|'.'
name|'append'
op|'('
name|'entity_maker'
op|'('
name|'bdm'
op|'.'
name|'volume_id'
op|','
nl|'\n'
name|'bdm'
op|'.'
name|'instance_uuid'
op|','
nl|'\n'
name|'bdm'
op|'.'
name|'device_name'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
name|'return'
op|'{'
string|"'volumeAttachments'"
op|':'
name|'results'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|_translate_snapshot_detail_view
dedent|''
dedent|''
name|'def'
name|'_translate_snapshot_detail_view'
op|'('
name|'context'
op|','
name|'vol'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Maps keys for snapshots details view."""'
newline|'\n'
nl|'\n'
name|'d'
op|'='
name|'_translate_snapshot_summary_view'
op|'('
name|'context'
op|','
name|'vol'
op|')'
newline|'\n'
nl|'\n'
comment|'# NOTE(gagupta): No additional data / lookups at the moment'
nl|'\n'
name|'return'
name|'d'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|_translate_snapshot_summary_view
dedent|''
name|'def'
name|'_translate_snapshot_summary_view'
op|'('
name|'context'
op|','
name|'vol'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Maps keys for snapshots summary view."""'
newline|'\n'
name|'d'
op|'='
op|'{'
op|'}'
newline|'\n'
nl|'\n'
name|'d'
op|'['
string|"'id'"
op|']'
op|'='
name|'vol'
op|'['
string|"'id'"
op|']'
newline|'\n'
name|'d'
op|'['
string|"'volumeId'"
op|']'
op|'='
name|'vol'
op|'['
string|"'volume_id'"
op|']'
newline|'\n'
name|'d'
op|'['
string|"'status'"
op|']'
op|'='
name|'vol'
op|'['
string|"'status'"
op|']'
newline|'\n'
comment|'# NOTE(gagupta): We map volume_size as the snapshot size'
nl|'\n'
name|'d'
op|'['
string|"'size'"
op|']'
op|'='
name|'vol'
op|'['
string|"'volume_size'"
op|']'
newline|'\n'
name|'d'
op|'['
string|"'createdAt'"
op|']'
op|'='
name|'vol'
op|'['
string|"'created_at'"
op|']'
newline|'\n'
name|'d'
op|'['
string|"'displayName'"
op|']'
op|'='
name|'vol'
op|'['
string|"'display_name'"
op|']'
newline|'\n'
name|'d'
op|'['
string|"'displayDescription'"
op|']'
op|'='
name|'vol'
op|'['
string|"'display_description'"
op|']'
newline|'\n'
name|'return'
name|'d'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|SnapshotController
dedent|''
name|'class'
name|'SnapshotController'
op|'('
name|'wsgi'
op|'.'
name|'Controller'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""The Snapshots API controller for the OpenStack API."""'
newline|'\n'
nl|'\n'
DECL|member|__init__
name|'def'
name|'__init__'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'volume_api'
op|'='
name|'volume'
op|'.'
name|'API'
op|'('
op|')'
newline|'\n'
name|'super'
op|'('
name|'SnapshotController'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
number|'404'
op|')'
newline|'\n'
DECL|member|show
name|'def'
name|'show'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Return data about the given snapshot."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'vol'
op|'='
name|'self'
op|'.'
name|'volume_api'
op|'.'
name|'get_snapshot'
op|'('
name|'context'
op|','
name|'id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'SnapshotNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
op|'{'
string|"'snapshot'"
op|':'
name|'_translate_snapshot_detail_view'
op|'('
name|'context'
op|','
name|'vol'
op|')'
op|'}'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'wsgi'
op|'.'
name|'response'
op|'('
number|'202'
op|')'
newline|'\n'
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
number|'404'
op|')'
newline|'\n'
DECL|member|delete
name|'def'
name|'delete'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Delete a snapshot."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'volume_api'
op|'.'
name|'delete_snapshot'
op|'('
name|'context'
op|','
name|'id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'SnapshotNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
op|'('
op|')'
op|')'
newline|'\n'
DECL|member|index
name|'def'
name|'index'
op|'('
name|'self'
op|','
name|'req'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns a summary list of snapshots."""'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'_items'
op|'('
name|'req'
op|','
name|'entity_maker'
op|'='
name|'_translate_snapshot_summary_view'
op|')'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
op|'('
op|')'
op|')'
newline|'\n'
DECL|member|detail
name|'def'
name|'detail'
op|'('
name|'self'
op|','
name|'req'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns a detailed list of snapshots."""'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'_items'
op|'('
name|'req'
op|','
name|'entity_maker'
op|'='
name|'_translate_snapshot_detail_view'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_items
dedent|''
name|'def'
name|'_items'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'entity_maker'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns a list of snapshots, transformed through entity_maker."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'snapshots'
op|'='
name|'self'
op|'.'
name|'volume_api'
op|'.'
name|'get_all_snapshots'
op|'('
name|'context'
op|')'
newline|'\n'
name|'limited_list'
op|'='
name|'common'
op|'.'
name|'limited'
op|'('
name|'snapshots'
op|','
name|'req'
op|')'
newline|'\n'
name|'res'
op|'='
op|'['
name|'entity_maker'
op|'('
name|'context'
op|','
name|'snapshot'
op|')'
name|'for'
name|'snapshot'
name|'in'
name|'limited_list'
op|']'
newline|'\n'
name|'return'
op|'{'
string|"'snapshots'"
op|':'
name|'res'
op|'}'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
op|'('
number|'400'
op|','
number|'403'
op|')'
op|')'
newline|'\n'
op|'@'
name|'validation'
op|'.'
name|'schema'
op|'('
name|'volumes_schema'
op|'.'
name|'snapshot_create'
op|')'
newline|'\n'
DECL|member|create
name|'def'
name|'create'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'body'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Creates a new snapshot."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'snapshot'
op|'='
name|'body'
op|'['
string|"'snapshot'"
op|']'
newline|'\n'
name|'volume_id'
op|'='
name|'snapshot'
op|'['
string|"'volume_id'"
op|']'
newline|'\n'
nl|'\n'
name|'force'
op|'='
name|'snapshot'
op|'.'
name|'get'
op|'('
string|"'force'"
op|','
name|'False'
op|')'
newline|'\n'
name|'force'
op|'='
name|'strutils'
op|'.'
name|'bool_from_string'
op|'('
name|'force'
op|','
name|'strict'
op|'='
name|'True'
op|')'
newline|'\n'
name|'if'
name|'force'
op|':'
newline|'\n'
indent|' '
name|'create_func'
op|'='
name|'self'
op|'.'
name|'volume_api'
op|'.'
name|'create_snapshot_force'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'create_func'
op|'='
name|'self'
op|'.'
name|'volume_api'
op|'.'
name|'create_snapshot'
newline|'\n'
nl|'\n'
dedent|''
name|'try'
op|':'
newline|'\n'
indent|' '
name|'new_snapshot'
op|'='
name|'create_func'
op|'('
name|'context'
op|','
name|'volume_id'
op|','
nl|'\n'
name|'snapshot'
op|'.'
name|'get'
op|'('
string|"'display_name'"
op|')'
op|','
nl|'\n'
name|'snapshot'
op|'.'
name|'get'
op|'('
string|"'display_description'"
op|')'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'OverQuota'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPForbidden'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'retval'
op|'='
name|'_translate_snapshot_detail_view'
op|'('
name|'context'
op|','
name|'new_snapshot'
op|')'
newline|'\n'
name|'return'
op|'{'
string|"'snapshot'"
op|':'
name|'retval'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|Volumes
dedent|''
dedent|''
name|'class'
name|'Volumes'
op|'('
name|'extensions'
op|'.'
name|'V21APIExtensionBase'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Volumes support."""'
newline|'\n'
nl|'\n'
DECL|variable|name
name|'name'
op|'='
string|'"Volumes"'
newline|'\n'
DECL|variable|alias
name|'alias'
op|'='
name|'ALIAS'
newline|'\n'
DECL|variable|version
name|'version'
op|'='
number|'1'
newline|'\n'
nl|'\n'
DECL|member|get_resources
name|'def'
name|'get_resources'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'resources'
op|'='
op|'['
op|']'
newline|'\n'
nl|'\n'
name|'res'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
nl|'\n'
name|'ALIAS'
op|','
name|'VolumeController'
op|'('
op|')'
op|','
name|'collection_actions'
op|'='
op|'{'
string|"'detail'"
op|':'
string|"'GET'"
op|'}'
op|')'
newline|'\n'
name|'resources'
op|'.'
name|'append'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
name|'res'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
string|"'os-volumes_boot'"
op|','
nl|'\n'
name|'inherits'
op|'='
string|"'servers'"
op|')'
newline|'\n'
name|'resources'
op|'.'
name|'append'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
name|'res'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
string|"'os-volume_attachments'"
op|','
nl|'\n'
name|'VolumeAttachmentController'
op|'('
op|')'
op|','
nl|'\n'
name|'parent'
op|'='
name|'dict'
op|'('
nl|'\n'
name|'member_name'
op|'='
string|"'server'"
op|','
nl|'\n'
name|'collection_name'
op|'='
string|"'servers'"
op|')'
op|')'
newline|'\n'
name|'resources'
op|'.'
name|'append'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
name|'res'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
nl|'\n'
string|"'os-snapshots'"
op|','
name|'SnapshotController'
op|'('
op|')'
op|','
nl|'\n'
name|'collection_actions'
op|'='
op|'{'
string|"'detail'"
op|':'
string|"'GET'"
op|'}'
op|')'
newline|'\n'
name|'resources'
op|'.'
name|'append'
op|'('
name|'res'
op|')'
newline|'\n'
nl|'\n'
name|'return'
name|'resources'
newline|'\n'
nl|'\n'
DECL|member|get_controller_extensions
dedent|''
name|'def'
name|'get_controller_extensions'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.751418
| 229
| 0.596704
|
7e3e8356438dcf119531f901f7966495b18a8b87
| 4,682
|
py
|
Python
|
rl_trainer/episode_serializer/proto/episode_pb2.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | 3
|
2018-08-31T15:04:53.000Z
|
2019-07-13T01:11:10.000Z
|
rl_trainer/episode_serializer/proto/episode_pb2.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | null | null | null |
rl_trainer/episode_serializer/proto/episode_pb2.py
|
Roboy/nips-2018-ai-for-prosthetics
|
acb69f267a0cc852842828edbbfb47d1840c0a17
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: episode.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='episode.proto',
package='rl_trainer',
syntax='proto3',
serialized_pb=_b('\n\repisode.proto\x12\nrl_trainer\"p\n\x0f\x45xperienceTuple\x12\x0f\n\x07state_1\x18\x01 \x03(\x01\x12\x0e\n\x06\x61\x63tion\x18\x02 \x03(\x01\x12\x0e\n\x06reward\x18\x03 \x01(\x01\x12\x0f\n\x07state_2\x18\x04 \x03(\x01\x12\x1b\n\x13state_2_is_terminal\x18\x05 \x01(\x08\"A\n\x07\x45pisode\x12\x36\n\x11\x65xperience_tuples\x18\x01 \x03(\x0b\x32\x1b.rl_trainer.ExperienceTupleb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EXPERIENCETUPLE = _descriptor.Descriptor(
name='ExperienceTuple',
full_name='rl_trainer.ExperienceTuple',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state_1', full_name='rl_trainer.ExperienceTuple.state_1', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='action', full_name='rl_trainer.ExperienceTuple.action', index=1,
number=2, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reward', full_name='rl_trainer.ExperienceTuple.reward', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state_2', full_name='rl_trainer.ExperienceTuple.state_2', index=3,
number=4, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state_2_is_terminal', full_name='rl_trainer.ExperienceTuple.state_2_is_terminal', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=141,
)
_EPISODE = _descriptor.Descriptor(
name='Episode',
full_name='rl_trainer.Episode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='experience_tuples', full_name='rl_trainer.Episode.experience_tuples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=208,
)
_EPISODE.fields_by_name['experience_tuples'].message_type = _EXPERIENCETUPLE
DESCRIPTOR.message_types_by_name['ExperienceTuple'] = _EXPERIENCETUPLE
DESCRIPTOR.message_types_by_name['Episode'] = _EPISODE
ExperienceTuple = _reflection.GeneratedProtocolMessageType('ExperienceTuple', (_message.Message,), dict(
DESCRIPTOR = _EXPERIENCETUPLE,
__module__ = 'episode_pb2'
# @@protoc_insertion_point(class_scope:rl_trainer.ExperienceTuple)
))
_sym_db.RegisterMessage(ExperienceTuple)
Episode = _reflection.GeneratedProtocolMessageType('Episode', (_message.Message,), dict(
DESCRIPTOR = _EPISODE,
__module__ = 'episode_pb2'
# @@protoc_insertion_point(class_scope:rl_trainer.Episode)
))
_sym_db.RegisterMessage(Episode)
# @@protoc_insertion_point(module_scope)
| 33.927536
| 410
| 0.745622
|
ad10d97006a669a48e3800d706022f0230c55e8b
| 5,767
|
py
|
Python
|
install_viewer.py
|
gruzzlymug/ddg-2018
|
76f598f7548ad51b126ec9efb7da0fd0d4a306c2
|
[
"MIT"
] | 1
|
2018-02-11T03:32:22.000Z
|
2018-02-11T03:32:22.000Z
|
install_viewer.py
|
gruzzlymug/ddg-2018
|
76f598f7548ad51b126ec9efb7da0fd0d4a306c2
|
[
"MIT"
] | null | null | null |
install_viewer.py
|
gruzzlymug/ddg-2018
|
76f598f7548ad51b126ec9efb7da0fd0d4a306c2
|
[
"MIT"
] | null | null | null |
import os
import json
import zipfile
import shutil
import sys
from urllib.request import urlretrieve, urlopen
#def downloadFile(url, outFile):
# urlretrieve(url, outFile)
def isNewer(new, original):
    # Compare semantic versions numerically; the original compared the raw
    # string fields and used the minor field in place of patch.
    majorMult, minorMult, patchMult = 10000, 100, 1
    major1, minor1, patch1 = (int(x) for x in original.split('.'))
    major2, minor2, patch2 = (int(x) for x in new.split('.'))
    return (majorMult * major2 + minorMult * minor2 + patchMult * patch2
            > majorMult * major1 + minorMult * minor1 + patchMult * patch1)
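# Example: isNewer("1.2.10", "1.2.3") -> True (10210 > 10203), the intended
# semantic-version ordering, which the old string comparison could not give.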
def downloadProgress(count, blockSize, totalSize):
    if count % 1000 == 0:
        # Multiply by 100: the raw ratio is a fraction, not a percentage.
        percentDone = float(count) * blockSize / totalSize * 100
        print("%4.2f%%" % percentDone, end='\b\b\b\b\b\b', flush=True)
def main():
currentInstallFileName = "viewer_currentInstall.json"
versionAndChangeLogUrl = "http://s3.amazonaws.com/battlecode-2018/viewer/"
versionFileName = "version.txt"
changelogFileName = "changelog.json"
baseUrl = "http://s3.amazonaws.com/battlecode-2018/viewer/"
directory = os.path.dirname(os.path.realpath(__file__))
zipFileName = "viewer_latest.zip"
viewerDirectory = "viewer_latest/"
currentInfoFileLocation = os.path.join(directory, currentInstallFileName)
if os.path.exists(currentInfoFileLocation):
currentInfoFile = open(currentInfoFileLocation)
currentInfo = json.load(currentInfoFile)
currentInfoFile.close()
else:
possibleSystems = [
("1", "Windows (64-bit)", "Win64"),
("2", "Windows (32-bit)", "Win32"),
("3", "Linux (64-bit)", "Linux64"),
("4", "Linux (32-bit)", "Linux32"),
("5", "Mac OS X", "Mac")
]
print("It looks like this is your first time installing the viewer. What system are you using?")
for optionNum, optionName, actualName in possibleSystems:
print("%s) %s" % (optionNum, optionName))
systemInp = input("> ")
        try:
            systemInp = int(systemInp)
            if systemInp <= 0 or systemInp > len(possibleSystems):
                raise ValueError("choice out of range")
            currentInfo = {
                'version': '0.0.0',
                'system': possibleSystems[systemInp - 1][2]
            }
            print("Setup done! You've selected the system %s.\nIf you ever want to change this setup, delete the file %s." % (possibleSystems[systemInp - 1][1], currentInstallFileName))
        except ValueError:
            print("Invalid input. Exiting...")
            sys.exit(1)
versionFileUrl = versionAndChangeLogUrl + versionFileName
latestVersion = urlopen(versionFileUrl).read().decode()
print("Checking for updates...")
if isNewer(latestVersion, currentInfo['version']):
print("There is a newer version available.\nCurrent version is: %s. The new version is %s." % (currentInfo['version'], latestVersion))
shouldDownload = input("Download? (Y/N) > ").lower() == "y"
if shouldDownload:
newestUrl = baseUrl + ("%s/%s.zip" % (latestVersion, currentInfo['system']))
downloadLocation = os.path.join(directory, zipFileName)
if os.path.exists(downloadLocation):
print("Removing previous archive...")
os.remove(downloadLocation)
print("Deleted old archive.")
print("Downloading new client... This could take a while.")
urlretrieve(newestUrl, downloadLocation, downloadProgress)
print()
print("Successfully downloaded files. ")
outputDirectory = os.path.join(directory, viewerDirectory)
if os.path.exists(outputDirectory):
print("Removing previous client")
shutil.rmtree(outputDirectory, True)
print("Successfully removed previous client.")
print("Extracting from archive...")
zip_ref = zipfile.ZipFile(downloadLocation, "r")
zip_ref.extractall(outputDirectory)
zip_ref.close()
print("Extracted fully!")
if os.path.exists(downloadLocation):
print("Cleaning up downloads...")
os.remove(downloadLocation)
print("Cleaned up")
try:
if currentInfo['system'] == 'Linux32':
print("Fixing permissions...You'll need to provide elevated privileges for this to work.")
os.system("sudo chmod 777 viewer_latest/Linux32/battleclient18.x86")
print("Done fixing permissions!")
elif currentInfo['system'] == 'Linux64':
print("Fixing permissions...You'll need to provide elevated privileges for this to work.")
os.system("sudo chmod 777 viewer_latest/Linux64/battleclient18.x86_64")
print("Done fixing permissions!")
if currentInfo['system'] == 'Mac':
print("Fixing permissions...You'll need to provide elevated privileges for this to work.")
os.system("sudo chmod -R 777 viewer_latest/Mac/battleclient18.app")
print("Done fixing permissions!")
            except Exception:
                pass
print("Updating current version number...")
newInfo = {}
newInfo['version'] = latestVersion
newInfo['system'] = currentInfo['system']
currentInfoFile = open(currentInfoFileLocation, "w")
currentInfo = json.dump(newInfo, currentInfoFile)
currentInfoFile.close()
print("All set! The viewer is in: %s" % outputDirectory)
else:
print("Not downloading - your system has not been changed.")
else:
print("No updates!")
if __name__ == "__main__":
main()
| 42.718519
| 184
| 0.601873
|
ac91986cfa31d1b802c5b1e44e1b4aad9d0a55de
| 253
|
py
|
Python
|
app/recipe/serializers.py
|
NicolefAvella/api-maquina
|
2f8301d364a57baf16c92cdff734b9a43b676289
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
NicolefAvella/api-maquina
|
2f8301d364a57baf16c92cdff734b9a43b676289
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
NicolefAvella/api-maquina
|
2f8301d364a57baf16c92cdff734b9a43b676289
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from core.models import Tag
class TagSerializer(serializers.ModelSerializer):
"""Serializer para tag """
class Meta:
model = Tag
        fields = ('id', 'name')
read_only_fields = ('id',)
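# Minimal usage sketch (assumes a saved Tag instance named `tag`):
#   TagSerializer(tag).data  ->  {'id': tag.id, 'name': tag.name}
# `id` is read-only, so it is ignored on create/update input.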
| 21.083333
| 49
| 0.656126
|
499d1be71a050c1d83ce97089f15d280acc05bf2
| 11,654
|
py
|
Python
|
ppcls/modeling/architectures/xception.py
|
vslyu/PaddleClas
|
1b6799cf508ec48a8b76da202f22fb7961f52ee3
|
[
"Apache-2.0"
] | null | null | null |
ppcls/modeling/architectures/xception.py
|
vslyu/PaddleClas
|
1b6799cf508ec48a8b76da202f22fb7961f52ee3
|
[
"Apache-2.0"
] | null | null | null |
ppcls/modeling/architectures/xception.py
|
vslyu/PaddleClas
|
1b6799cf508ec48a8b76da202f22fb7961f52ee3
|
[
"Apache-2.0"
] | null | null | null |
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D
from paddle.nn.initializer import Uniform
import math
__all__ = ['Xception41', 'Xception65', 'Xception71']
class ConvBNLayer(nn.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
bn_name = "bn_" + name
self._batch_norm = BatchNorm(
num_filters,
act=act,
param_attr=ParamAttr(name=bn_name + "_scale"),
bias_attr=ParamAttr(name=bn_name + "_offset"),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class SeparableConv(nn.Layer):
def __init__(self, input_channels, output_channels, stride=1, name=None):
super(SeparableConv, self).__init__()
self._pointwise_conv = ConvBNLayer(
input_channels, output_channels, 1, name=name + "_sep")
self._depthwise_conv = ConvBNLayer(
output_channels,
output_channels,
3,
stride=stride,
groups=output_channels,
name=name + "_dw")
def forward(self, inputs):
x = self._pointwise_conv(inputs)
x = self._depthwise_conv(x)
return x
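# Note: SeparableConv above applies the 1x1 pointwise conv *before* the
# depthwise conv -- the reverse of the textbook depthwise-separable ordering.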
class EntryFlowBottleneckBlock(nn.Layer):
def __init__(self,
input_channels,
output_channels,
stride=2,
name=None,
relu_first=False):
super(EntryFlowBottleneckBlock, self).__init__()
self.relu_first = relu_first
self._short = Conv2D(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=1,
stride=stride,
padding=0,
weight_attr=ParamAttr(name + "_branch1_weights"),
bias_attr=False)
self._conv1 = SeparableConv(
input_channels,
output_channels,
stride=1,
name=name + "_branch2a_weights")
self._conv2 = SeparableConv(
output_channels,
output_channels,
stride=1,
name=name + "_branch2b_weights")
self._pool = MaxPool2D(kernel_size=3, stride=stride, padding=1)
def forward(self, inputs):
conv0 = inputs
short = self._short(inputs)
if self.relu_first:
conv0 = F.relu(conv0)
conv1 = self._conv1(conv0)
conv2 = F.relu(conv1)
conv2 = self._conv2(conv2)
pool = self._pool(conv2)
return paddle.add(x=short, y=pool)
class EntryFlow(nn.Layer):
def __init__(self, block_num=3):
super(EntryFlow, self).__init__()
name = "entry_flow"
self.block_num = block_num
self._conv1 = ConvBNLayer(
3, 32, 3, stride=2, act="relu", name=name + "_conv1")
self._conv2 = ConvBNLayer(32, 64, 3, act="relu", name=name + "_conv2")
if block_num == 3:
self._conv_0 = EntryFlowBottleneckBlock(
64, 128, stride=2, name=name + "_0", relu_first=False)
self._conv_1 = EntryFlowBottleneckBlock(
128, 256, stride=2, name=name + "_1", relu_first=True)
self._conv_2 = EntryFlowBottleneckBlock(
256, 728, stride=2, name=name + "_2", relu_first=True)
elif block_num == 5:
self._conv_0 = EntryFlowBottleneckBlock(
64, 128, stride=2, name=name + "_0", relu_first=False)
self._conv_1 = EntryFlowBottleneckBlock(
128, 256, stride=1, name=name + "_1", relu_first=True)
self._conv_2 = EntryFlowBottleneckBlock(
256, 256, stride=2, name=name + "_2", relu_first=True)
self._conv_3 = EntryFlowBottleneckBlock(
256, 728, stride=1, name=name + "_3", relu_first=True)
self._conv_4 = EntryFlowBottleneckBlock(
728, 728, stride=2, name=name + "_4", relu_first=True)
else:
            raise ValueError("block_num must be 3 or 5 for EntryFlow")
def forward(self, inputs):
x = self._conv1(inputs)
x = self._conv2(x)
if self.block_num == 3:
x = self._conv_0(x)
x = self._conv_1(x)
x = self._conv_2(x)
elif self.block_num == 5:
x = self._conv_0(x)
x = self._conv_1(x)
x = self._conv_2(x)
x = self._conv_3(x)
x = self._conv_4(x)
return x
class MiddleFlowBottleneckBlock(nn.Layer):
def __init__(self, input_channels, output_channels, name):
super(MiddleFlowBottleneckBlock, self).__init__()
self._conv_0 = SeparableConv(
input_channels,
output_channels,
stride=1,
name=name + "_branch2a_weights")
self._conv_1 = SeparableConv(
output_channels,
output_channels,
stride=1,
name=name + "_branch2b_weights")
self._conv_2 = SeparableConv(
output_channels,
output_channels,
stride=1,
name=name + "_branch2c_weights")
def forward(self, inputs):
conv0 = F.relu(inputs)
conv0 = self._conv_0(conv0)
conv1 = F.relu(conv0)
conv1 = self._conv_1(conv1)
conv2 = F.relu(conv1)
conv2 = self._conv_2(conv2)
return paddle.add(x=inputs, y=conv2)
class MiddleFlow(nn.Layer):
def __init__(self, block_num=8):
super(MiddleFlow, self).__init__()
self.block_num = block_num
self._conv_0 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_0")
self._conv_1 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_1")
self._conv_2 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_2")
self._conv_3 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_3")
self._conv_4 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_4")
self._conv_5 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_5")
self._conv_6 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_6")
self._conv_7 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_7")
if block_num == 16:
self._conv_8 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_8")
self._conv_9 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_9")
self._conv_10 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_10")
self._conv_11 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_11")
self._conv_12 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_12")
self._conv_13 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_13")
self._conv_14 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_14")
self._conv_15 = MiddleFlowBottleneckBlock(
728, 728, name="middle_flow_15")
def forward(self, inputs):
x = self._conv_0(inputs)
x = self._conv_1(x)
x = self._conv_2(x)
x = self._conv_3(x)
x = self._conv_4(x)
x = self._conv_5(x)
x = self._conv_6(x)
x = self._conv_7(x)
if self.block_num == 16:
x = self._conv_8(x)
x = self._conv_9(x)
x = self._conv_10(x)
x = self._conv_11(x)
x = self._conv_12(x)
x = self._conv_13(x)
x = self._conv_14(x)
x = self._conv_15(x)
return x
class ExitFlowBottleneckBlock(nn.Layer):
def __init__(self, input_channels, output_channels1, output_channels2,
name):
super(ExitFlowBottleneckBlock, self).__init__()
self._short = Conv2D(
in_channels=input_channels,
out_channels=output_channels2,
kernel_size=1,
stride=2,
padding=0,
weight_attr=ParamAttr(name + "_branch1_weights"),
bias_attr=False)
self._conv_1 = SeparableConv(
input_channels,
output_channels1,
stride=1,
name=name + "_branch2a_weights")
self._conv_2 = SeparableConv(
output_channels1,
output_channels2,
stride=1,
name=name + "_branch2b_weights")
self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
def forward(self, inputs):
short = self._short(inputs)
conv0 = F.relu(inputs)
conv1 = self._conv_1(conv0)
conv2 = F.relu(conv1)
conv2 = self._conv_2(conv2)
pool = self._pool(conv2)
return paddle.add(x=short, y=pool)
class ExitFlow(nn.Layer):
def __init__(self, class_dim):
super(ExitFlow, self).__init__()
name = "exit_flow"
self._conv_0 = ExitFlowBottleneckBlock(
728, 728, 1024, name=name + "_1")
self._conv_1 = SeparableConv(1024, 1536, stride=1, name=name + "_2")
self._conv_2 = SeparableConv(1536, 2048, stride=1, name=name + "_3")
self._pool = AdaptiveAvgPool2D(1)
stdv = 1.0 / math.sqrt(2048 * 1.0)
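        # Classic fan-in uniform init: bound = 1 / sqrt(n_inputs) for the
        # 2048-wide pooled feature vector feeding the classifier.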
self._out = Linear(
2048,
class_dim,
weight_attr=ParamAttr(
name="fc_weights", initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs):
conv0 = self._conv_0(inputs)
conv1 = self._conv_1(conv0)
conv1 = F.relu(conv1)
conv2 = self._conv_2(conv1)
conv2 = F.relu(conv2)
pool = self._pool(conv2)
pool = paddle.flatten(pool, start_axis=1, stop_axis=-1)
out = self._out(pool)
return out
class Xception(nn.Layer):
def __init__(self,
entry_flow_block_num=3,
middle_flow_block_num=8,
class_dim=1000):
super(Xception, self).__init__()
self.entry_flow_block_num = entry_flow_block_num
self.middle_flow_block_num = middle_flow_block_num
self._entry_flow = EntryFlow(entry_flow_block_num)
self._middle_flow = MiddleFlow(middle_flow_block_num)
self._exit_flow = ExitFlow(class_dim)
def forward(self, inputs):
x = self._entry_flow(inputs)
x = self._middle_flow(x)
x = self._exit_flow(x)
return x
def Xception41(**args):
model = Xception(entry_flow_block_num=3, middle_flow_block_num=8, **args)
return model
def Xception65(**args):
model = Xception(entry_flow_block_num=3, middle_flow_block_num=16, **args)
return model
def Xception71(**args):
model = Xception(entry_flow_block_num=5, middle_flow_block_num=16, **args)
return model
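# The 41/65/71 suffixes are the conventional layer counts for these Xception
# variants, realized here purely through entry/middle flow block counts.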
| 33.77971
| 78
| 0.578514
|
8baae63de707231476240997e8146840c6816dce
| 3,912
|
py
|
Python
|
thrift/test/py/ForwardCompatibility.py
|
lucyge/FBThrift
|
2cb49e1c1ee1712416db9cc1f4b833382b04d8cd
|
[
"Apache-2.0"
] | 1
|
2018-02-28T06:45:51.000Z
|
2018-02-28T06:45:51.000Z
|
thrift/test/py/ForwardCompatibility.py
|
lucyge/FBThrift
|
2cb49e1c1ee1712416db9cc1f4b833382b04d8cd
|
[
"Apache-2.0"
] | null | null | null |
thrift/test/py/ForwardCompatibility.py
|
lucyge/FBThrift
|
2cb49e1c1ee1712416db9cc1f4b833382b04d8cd
|
[
"Apache-2.0"
] | 1
|
2018-02-28T06:45:18.000Z
|
2018-02-28T06:45:18.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from thrift.protocol import TBinaryProtocol, \
TCompactProtocol, TSimpleJSONProtocol
from thrift.util import Serializer
from ForwardCompatibility.ForwardCompatibility.ttypes import \
NewStructure, OldStructure, \
NewStructureNested, OldStructureNested
class AbstractTest:
def _serialize(self, obj):
return Serializer.serialize(self.protocol_factory, obj)
def _deserialize(self, objtype, data):
return Serializer.deserialize(self.protocol_factory, data, objtype())
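    # Round-trip helpers: each concrete TestCase below supplies
    # `protocol_factory`, so the same assertions run once per protocol.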
class TestForwardCompatibilityAbstract(AbstractTest):
def assertFeaturesAlmostEqual(self, a, b):
self.assertTrue(abs(a - b) < 1e-3)
def testPrimitiveType(self):
old = OldStructure()
old.features = {}
old.features[1] = 100.1
old.features[217] = 314.5
sOld = self._serialize(old)
new = self._deserialize(NewStructure, sOld)
self.assertFeaturesAlmostEqual(new.features[1], 100.1)
self.assertFeaturesAlmostEqual(new.features[217], 314.5)
sNew = self._serialize(new)
new2 = self._deserialize(NewStructure, sNew)
self.assertFeaturesAlmostEqual(new2.features[1], 100.1)
self.assertFeaturesAlmostEqual(new2.features[217], 314.5)
def testNested(self):
old = OldStructureNested()
old.features = [{}]
old.features[0][1] = 100.1
old.features[0][217] = 314.5
sOld = self._serialize(old)
new = self._deserialize(NewStructureNested, sOld)
self.assertFeaturesAlmostEqual(new.features[0][1], 100.1)
self.assertFeaturesAlmostEqual(new.features[0][217], 314.5)
sNew = self._serialize(new)
new2 = self._deserialize(NewStructureNested, sNew)
self.assertFeaturesAlmostEqual(new2.features[0][1], 100.1)
self.assertFeaturesAlmostEqual(new2.features[0][217], 314.5)
class TestForwardCompatibilityBinary(TestForwardCompatibilityAbstract,
unittest.TestCase):
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class TestForwardCompatibilityCompact(TestForwardCompatibilityAbstract,
unittest.TestCase):
protocol_factory = TCompactProtocol.TCompactProtocolFactory()
class TestForwardCompatibilityBinaryAccelerated(TestForwardCompatibilityAbstract,
unittest.TestCase):
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
class TestForwardCompatibilityCompactAccelerated(TestForwardCompatibilityAbstract,
unittest.TestCase):
protocol_factory = TCompactProtocol.TCompactProtocolAcceleratedFactory()
class TestForwardCompatibilityJSON(TestForwardCompatibilityAbstract,
unittest.TestCase):
protocol_factory = TSimpleJSONProtocol.TSimpleJSONProtocolFactory()
if __name__ == "__main__":
unittest.main()
| 37.980583
| 82
| 0.716002
|
c974364539921c1e2ea1ca130c4cc03e817cf818
| 7,192
|
py
|
Python
|
login_sonicwall.py
|
NathanLundner/Sonic-Wall-Login
|
c57608ddc6fbc9030c184caf459e11892cebb8d3
|
[
"BSD-3-Clause"
] | null | null | null |
login_sonicwall.py
|
NathanLundner/Sonic-Wall-Login
|
c57608ddc6fbc9030c184caf459e11892cebb8d3
|
[
"BSD-3-Clause"
] | null | null | null |
login_sonicwall.py
|
NathanLundner/Sonic-Wall-Login
|
c57608ddc6fbc9030c184caf459e11892cebb8d3
|
[
"BSD-3-Clause"
] | null | null | null |
# from def_funtions import (setup_session, login, persist, update_rem_time,keep_alive)
'''I have modified this code for my school's use. The code did not function as
advertised, so I changed a few functions and how the main executes so that it
provides constant access to Wi-Fi as long as the program is open.
BSD 3-Clause License
Copyright (c) 2017, Shubham Sopan Dighe
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import time
import random
from string import digits
from hashlib import md5
import requests
import urllib3
# Removes insecure https connection error
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Reads file with credentials and processes them for login
file = open("credentials.txt", "r")
cred_unproc = ''
for word in file.readlines():
cred_unproc += word.strip("\n")
file.close()
cred = cred_unproc.split(" ")
UNAME = cred[0]
PASSWORD = cred[1]
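# credentials.txt is expected to hold "username password" on a single line.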
print(f"[+] Logging In as: {UNAME}")
BEAT_INTERVAL = 10
MIN_RELOGIN = 10
DOMAIN = 'https://10.20.51.1/'
def snooze(factor):
    # Sleep in 30-second units (the original constant was misleadingly
    # named ONE_MINUTE despite being half a minute).
    INTERVAL_SECONDS = 30
    time.sleep(INTERVAL_SECONDS * factor)
def generate_cookie():
seed = ''.join(random.choice(digits) for _ in range(16))
value = md5(seed.encode()).hexdigest()
return value
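# generate_cookie hashes 16 random decimal digits with MD5, yielding a
# 32-character hex token; it is an opaque session id, not a security feature.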
def is_logged_in(response):
if "refresh=true" in response.text:
return False
return True
def remaining_time(response):
    # Renamed the local from `time` to avoid shadowing the imported module.
    rem = 0
    pos = response.text.find("remTime=")
    if pos != -1:
        rem = response.text[pos + 8:pos + 11]
        rem = rem.split(';')[0]
    try:
        rem = int(rem)
    except ValueError:
        rem = 0
    return rem
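# Note: the fixed-width [pos+8:pos+11] slice assumes the portal reports the
# remaining time as at most three digits; split(';') trims shorter values.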
def set_cookies(session):
domain = '10.20.51.1'
session.cookies.set(name='SessId', value=generate_cookie().upper(), domain=domain)
session.cookies.set(name='PageSeed', value=generate_cookie(), domain=domain)
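    # These two cookies are what the captive portal checks: 'SessId'
    # (upper-case hex) and 'PageSeed' (lower-case hex), scoped to the gateway.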
def read_credentials():
print("\n[+] Reading credentials ...")
creds = {}
creds['uName'] = [UNAME]
creds['pass'] = [PASSWORD]
return creds
def login(session):
payload = read_credentials()
print("[+] Authenticating with SonicWall ...")
login_attempt = 6
while login_attempt > 0:
t = session.get(DOMAIN + 'auth1.html')
        t = session.post(DOMAIN + 'auth.cgi', data=payload)
session.get(DOMAIN + "loginStatusTop(eng).html")
t = session.post(DOMAIN + "usrHeartbeat.cgi", verify=False)
if is_logged_in(t):
print("[+] Logged in successfully !! :)")
current_time = time.strftime("%H:%M:%S %d-%m-%Y", time.localtime())
print("[+] Login time :- %s " % current_time)
print("[+] (Keep this window open for a persistent connection and minimize it.)")
return True
else:
login_attempt -= 1
print("[-] Login failed !! :( \n")
return False
def persist(session):
logged_in = True
while logged_in:
try:
t = session.post(DOMAIN + "usrHeartbeat.cgi", verify=False)
logged_in = is_logged_in(t)
rem_time = remaining_time(t)
if rem_time <= 30:
print("\n[*] Session will expire soon. Logging in again ...")
set_cookies(session)
logged_in = login(session)
else:
snooze(1)
        except requests.exceptions.ConnectionError:
snooze(1)
print("[-] Seems like something went wrong !!")
print("[-] You have been logged out of SonicWall Wifi portal.")
def setup_session():
s = requests.Session()
http_adapter = requests.adapters.HTTPAdapter(max_retries=6)
https_adapter = requests.adapters.HTTPAdapter(max_retries=6)
s.mount('http://', http_adapter)
s.mount('https://', https_adapter)
s.verify = False
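    # TLS verification is disabled above -- presumably because the SonicWall
    # portal at 10.20.51.1 serves a self-signed certificate.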
set_cookies(s)
return s
def keep_alive(session):
logged_in = True
while logged_in:
try:
t = session.post(DOMAIN + "usrHeartbeat.cgi", verify=False)
logged_in = is_logged_in(t)
if logged_in:
snooze(1)
        except requests.exceptions.ConnectionError:
snooze(1)
print("[+] You have been logged out of Dell SonicWall")
def update_rem_time(session, rem_time):
    # Ask the portal for a specific session length (minutes); clamp to >= 1.
    if rem_time <= 0:
        rem_time = 1
    payload = {'maxSessionTime': rem_time}
    session.post(DOMAIN + 'userSettings.cgi', data=payload)
    session.post(DOMAIN + "usrHeartbeat.cgi", verify=False)
def main():
while True:
try:
            print(
                "[+] Logging in for 85 min. You will be automatically relogged in at the end of the 85 min. To disconnect, exit this window.")
            print(
                "[*] By running this program, you agree that the author will not be held responsible if there is a malfunction (in the middle of a Zoom class).")
session = setup_session()
login_time = 85
if login(session):
try:
if login_time:
update_rem_time(session, login_time)
keep_alive(session)
else:
persist(session)
print("[+] Setting up Session")
except KeyboardInterrupt:
print("\n[-] Exiting ...\n")
exit()
except KeyboardInterrupt:
print("\n[-] Exiting ...\n")
exit()
if __name__ == "__main__":
    main()
| 32.990826
| 158
| 0.631257
|
66dc7694c677d3671bdfe011b1fe3fd503d0fcb7
| 16,885
|
py
|
Python
|
portfolyo/core/pfline/tests/test_interop.py
|
rwijtvliet/portfolyo
|
b22948fbc55264ec5d69824e791ca7ef45c6e49c
|
[
"BSD-3-Clause"
] | null | null | null |
portfolyo/core/pfline/tests/test_interop.py
|
rwijtvliet/portfolyo
|
b22948fbc55264ec5d69824e791ca7ef45c6e49c
|
[
"BSD-3-Clause"
] | null | null | null |
portfolyo/core/pfline/tests/test_interop.py
|
rwijtvliet/portfolyo
|
b22948fbc55264ec5d69824e791ca7ef45c6e49c
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Dict
from pint import DimensionalityError, UndefinedUnitError
from portfolyo.core.pfline import interop as io
from portfolyo.tools.nits import Q_
import pandas as pd
import numpy as np
import pytest
idx1 = pd.date_range("2020", freq="MS", periods=12)
val1 = 100 + 20 * np.random.random(len(idx1))
s1 = pd.Series(val1, idx1)
idx2 = pd.date_range("2020-08", freq="MS", periods=12)
val2 = 200 + 50 * np.random.random(len(idx2))
s2 = pd.Series(val2, idx2)
idx_i = idx1.intersection(idx2).sort_values()
s1_i = s1.loc[idx_i]
s2_i = s2.loc[idx_i]
idx_u = idx1.union(idx2).sort_values()
s1_u = pd.Series((s1.get(i) for i in idx_u), idx_u)
s2_u = pd.Series((s2.get(i) for i in idx_u), idx_u)
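# Fixtures: s1 (Jan-Dec 2020) and s2 (Aug 2020-Jul 2021) overlap on Aug-Dec
# 2020; the *_i series restrict both to the intersection of their indices,
# while the *_u series reindex both onto the union (missing values -> NaN).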
def id_fn(data):
if isinstance(data, Dict):
return str({key: id_fn(val) for key, val in data.items()})
if isinstance(data, pd.Series):
if isinstance(data.index, pd.DatetimeIndex):
return "ts"
else:
return f"series (idx: {''.join(str(i) for i in data.index)})"
if isinstance(data, pd.DataFrame):
return f"df (columns: {''.join(str(c) for c in data.columns)})"
if isinstance(data, io.InOp):
return ""
return str(data)
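# id_fn builds human-readable pytest parameter ids: timeseries render as "ts",
# plain series/frames list their index or columns, and InOp objects are blank.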
@pytest.mark.parametrize(
("data_in", "expected_io", "expected_io2"),
[
# One value
# . unit-agnostic
(
23.0,
io.InOp(agn=23.0),
ValueError,
),
# . unitless
(
Q_(23.0, ""),
io.InOp(nodim=23.0),
ValueError,
),
# . known unit
(
Q_(-120.0, "MW"),
io.InOp(w=-120),
ValueError,
),
(
Q_(120e-3, "GW"),
io.InOp(w=120),
ValueError,
),
(
Q_(432e9, "J/h"),
io.InOp(w=120),
ValueError,
),
(
Q_(90_000.0, "MWh"),
io.InOp(q=90_000),
ValueError,
),
(
Q_(90.0, "GWh"),
io.InOp(q=90_000),
ValueError,
),
(
Q_(50.0, "Eur/MWh"),
io.InOp(p=50),
ValueError,
),
(
Q_(5.0, "ctEur/kWh"),
io.InOp(p=50),
ValueError,
),
(
Q_(4_500_000.0, "Eur"),
io.InOp(r=4_500_000),
ValueError,
),
(
Q_(4.5, "MEur"),
io.InOp(r=4_500_000),
ValueError,
),
# . unknown unit
(
Q_(4.5, "MWh/Eur"),
UndefinedUnitError,
None,
),
# One or several values
# . name but no unit
(
{"nodim": 120.0},
io.InOp(nodim=120),
ValueError,
),
(
pd.Series({"nodim": 120.0}),
io.InOp(nodim=120),
ValueError,
),
(
{"w": 120.0},
io.InOp(w=120),
ValueError,
),
(
pd.Series({"w": 120.0}),
io.InOp(w=120),
ValueError,
),
(
{"q": -90_000.0},
io.InOp(q=-90_000),
ValueError,
),
(
pd.Series({"q": -90_000.0}),
io.InOp(q=-90_000),
ValueError,
),
(
{"p": 50.0},
io.InOp(p=50),
ValueError,
),
(
pd.Series({"p": 50.0}),
io.InOp(p=50),
ValueError,
),
(
{"r": 4.5e6},
io.InOp(r=4_500_000),
ValueError,
),
(
pd.Series({"r": 4.5e6}),
io.InOp(r=4_500_000),
ValueError,
),
(
{"w": 120.0, "q": -90_000},
io.InOp(w=120, q=-90_000),
ValueError,
),
(
pd.Series({"w": 120.0, "q": -90_000}),
io.InOp(w=120.0, q=-90_000),
ValueError,
),
(
{"w": 120.0, "p": 50},
io.InOp(w=120.0, p=50),
ValueError,
),
(
pd.Series({"w": 120.0, "p": 50}),
io.InOp(w=120.0, p=50),
ValueError,
),
(
{"w": 120.0, "p": 50.0, "r": 4.5e6},
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
(
pd.Series({"w": 120.0, "p": 50.0, "r": 4.5e6}),
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
(
{"w": 120.0, "p": 50.0, "r": 4.5e6},
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
(
pd.Series({"w": 120.0, "p": 50.0, "r": 4.5e6}),
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
# . name and correct unit
(
{"p": Q_(50.0, "Eur/MWh")},
io.InOp(p=50),
ValueError,
),
(
pd.Series({"p": Q_(50.0, "Eur/MWh")}),
io.InOp(p=50),
ValueError,
),
(
pd.Series({"p": 50}).astype("pint[Eur/MWh]"),
io.InOp(p=50),
ValueError,
),
(
{"r": Q_(4.5, "MEur")},
io.InOp(r=4_500_000),
ValueError,
),
(
pd.Series({"r": Q_(4.5, "MEur")}),
io.InOp(r=4_500_000),
ValueError,
),
(
pd.Series({"r": 4.5}).astype("pint[MEur]"),
io.InOp(r=4_500_000),
ValueError,
),
(
{"w": 120.0, "q": Q_(-90_000.0, "MWh")},
io.InOp(w=120.0, q=-90_000),
ValueError,
),
(
pd.Series({"w": 120.0, "q": Q_(-90_000.0, "MWh")}),
io.InOp(w=120.0, q=-90_000),
ValueError,
),
(
pd.Series({"w": 120.0, "q": Q_(-90.0, "GWh")}),
io.InOp(w=120.0, q=-90_000),
ValueError,
),
# . unknown name -> KeyError
(
{"z": 28.0},
KeyError,
None,
),
(
pd.Series({"z": 28.0}),
KeyError,
None,
),
(
{"z": Q_(120.0, "MWh")},
KeyError,
None,
),
(
pd.Series({"z": Q_(120.0, "MWh")}),
KeyError,
None,
),
# . mix of know and unknown names -> KeyError
(
{"w": 120.0, "z": 28.0},
KeyError,
None,
),
(
pd.Series({"w": 120.0, "z": 28.0}),
KeyError,
None,
),
(
{"w": 120.0, "p": 50.0, "z": 28.0},
KeyError,
None,
),
(
pd.Series({"w": 120.0, "p": 50.0, "z": 28.0}),
KeyError,
None,
),
# . combination of name with incorrect unit -> error
(
{"w": Q_(90.0, "MWh")},
DimensionalityError,
None,
),
(
pd.Series({"w": Q_(90.0, "MWh")}),
DimensionalityError,
None,
),
(
pd.Series({"w": 90}).astype("pint[MWh]"),
DimensionalityError,
None,
),
(
{"p": Q_(90.0, "MWh")},
DimensionalityError,
None,
),
(
pd.Series({"p": Q_(90.0, "MWh")}),
DimensionalityError,
None,
),
(
{"p": 50.0, "w": Q_(90.0, "MWh")},
DimensionalityError,
None,
),
(
pd.Series({"p": 50.0, "w": Q_(90.0, "MWh")}),
DimensionalityError,
None,
),
# One timeseries
# . unit-agnostic
(
s1,
io.InOp(agn=s1),
io.InOp(agn=s1),
),
# . unitless
# (s1.astype("pint[dimensionless]"), io.InterOp(nodim=s1)), # TODO: fix
# . known unit
(
s1.astype("pint[MW]"),
io.InOp(w=s1),
io.InOp(w=s1),
),
(
(s1 / 1000).astype("pint[GW]"), # series with pint unit
io.InOp(w=s1),
io.InOp(w=s1),
),
(
pd.Series([Q_(v, "MW") for v in val1], idx1), # series of Quantities
io.InOp(w=s1),
io.InOp(w=s1),
),
(
s1.astype("pint[GWh]"),
io.InOp(q=s1 * 1000),
io.InOp(q=s1 * 1000),
),
(
s1.astype("pint[Eur/MWh]"),
io.InOp(p=s1),
io.InOp(p=s1),
),
(
s1.astype("pint[MEur]"),
io.InOp(r=s1 * 1e6),
io.InOp(r=s1 * 1e6),
),
# . unknown unit
(
s1.astype("pint[Wh/MEur]"),
UndefinedUnitError,
None,
),
# One or several timeseries
# . name but no unit
(
{"w": s1},
io.InOp(w=s1),
io.InOp(w=s1),
),
(
pd.DataFrame({"w": s1}),
io.InOp(w=s1),
io.InOp(w=s1),
),
(
{"q": -s1},
io.InOp(q=-s1),
io.InOp(q=-s1),
),
(
pd.DataFrame({"q": -s1}),
io.InOp(q=-s1),
io.InOp(q=-s1),
),
(
{"r": s1},
io.InOp(r=s1),
io.InOp(r=s1),
),
(
pd.DataFrame({"r": s1}),
io.InOp(r=s1),
io.InOp(r=s1),
),
(
{"w": s1, "q": -s2},
io.InOp(w=s1, q=-s2),
io.InOp(w=s1_i, q=-s2_i),
),
(
pd.DataFrame({"w": s1, "q": -s2}),
io.InOp(w=s1_u, q=-s2_u),
io.InOp(w=s1_u, q=-s2_u),
),
(
{"w": s1, "p": s2, "r": s1 * 4},
io.InOp(w=s1, p=s2, r=s1 * 4),
io.InOp(w=s1_i, p=s2_i, r=s1_i * 4),
),
(
pd.DataFrame({"w": s1, "p": s2, "r": s1 * 4}),
io.InOp(w=s1_u, p=s2_u, r=s1_u * 4),
io.InOp(w=s1_u, p=s2_u, r=s1_u * 4),
),
# . name and correct unit
(
{"p": s1.astype("pint[Eur/MWh]")},
io.InOp(p=s1),
io.InOp(p=s1),
),
(
pd.DataFrame({"p": s1.astype("pint[Eur/MWh]")}),
io.InOp(p=s1),
io.InOp(p=s1),
),
(
pd.DataFrame({"p": [Q_(v, "Eur/MWh") for v in val1]}, idx1),
io.InOp(p=s1),
io.InOp(p=s1),
),
(
{"r": s1.astype("pint[MEur]")},
io.InOp(r=s1 * 1e6),
io.InOp(r=s1 * 1e6),
),
(
pd.DataFrame({"r": s1.astype("pint[MEur]")}),
io.InOp(r=s1 * 1e6),
io.InOp(r=s1 * 1e6),
),
(
{"w": s1.astype("pint[MW]"), "q": s2.astype("pint[MWh]")},
io.InOp(w=s1, q=s2),
io.InOp(w=s1_i, q=s2_i),
),
(
pd.DataFrame({"w": s1.astype("pint[MW]"), "q": s2.astype("pint[MWh]")}),
io.InOp(w=s1_u, q=s2_u),
io.InOp(w=s1_u, q=s2_u),
),
(
{"w": s1.astype("pint[MW]"), "q": s2.astype("pint[GWh]")},
io.InOp(w=s1, q=s2 * 1000),
io.InOp(w=s1_i, q=s2_i * 1000),
),
(
pd.DataFrame({"w": s1.astype("pint[MW]"), "q": s2.astype("pint[GWh]")}),
io.InOp(w=s1_u, q=s2_u * 1000),
io.InOp(w=s1_u, q=s2_u * 1000),
),
# . unknown name -> KeyError
(
{"z": s1},
KeyError,
None,
),
(
pd.DataFrame({"z": s1}),
KeyError,
None,
),
(
{"z": s1.astype("pint[MW]")},
KeyError,
None,
),
(
pd.DataFrame({"z": s1.astype("pint[MW]")}),
KeyError,
None,
),
        # . mix of known and unknown names -> KeyError
(
{"w": s1, "z": s2},
KeyError,
None,
),
(
pd.DataFrame({"w": s1, "z": s2}),
KeyError,
None,
),
(
{"w": s1, "p": s2 * 10, "z": s2},
KeyError,
None,
),
(
pd.DataFrame({"w": s1, "p": s2 * 10, "z": s2}),
KeyError,
None,
),
(
pd.DataFrame({"w": s2.astype("pint[GW]"), "p": s2 * 10, "z": s2}),
KeyError,
None,
),
# . combination of name with incorrect unit -> error
(
{"w": s1.astype("pint[MWh]")},
DimensionalityError,
None,
),
(
pd.DataFrame({"w": s1.astype("pint[MWh]")}),
DimensionalityError,
None,
),
(
{"p": s1.astype("pint[MWh]")},
DimensionalityError,
None,
),
(
pd.DataFrame({"p": s1.astype("pint[MWh]")}),
DimensionalityError,
None,
),
(
{"p": s2, "w": s1.astype("pint[MWh]")},
DimensionalityError,
None,
),
(
pd.DataFrame({"p": s2, "w": s1.astype("pint[MWh]")}),
DimensionalityError,
None,
),
# Combinations of value(s) and timeseries.
# . name but no unit
(
{"w": s1, "p": 50.0},
io.InOp(w=s1, p=50),
io.InOp(w=s1, p=pd.Series(50, idx1)),
),
(
{"q": -s1, "p": 50.0, "r": s2},
io.InOp(q=-s1, p=50, r=s2),
io.InOp(q=-s1_i, r=s2_i, p=pd.Series(50, idx_i)),
),
# . name and correct unit
(
{"w": s1.astype("pint[MW]"), "p": 50.0},
io.InOp(w=s1, p=50),
io.InOp(w=s1, p=pd.Series(50, idx1)),
),
(
{"w": s1.astype("pint[MW]"), "q": s2.astype("pint[MWh ]"), "p": 50},
io.InOp(w=s1, q=s2, p=50),
io.InOp(w=s1_i, q=s2_i, p=pd.Series(50, idx_i)),
),
(
{"r": s1.astype("pint[MEur]"), "p": 50.0, "q": 90_000},
io.InOp(r=s1 * 1e6, p=50, q=90_000),
io.InOp(r=s1 * 1e6, p=pd.Series(50, idx1), q=pd.Series(90_000, idx1)),
),
# . unknown name -> KeyError
(
{"z": s1, "xy": 50},
KeyError,
None,
),
# . mix of know and unknown names -> KeyError
(
{"z": s1, "p": 50.0},
KeyError,
None,
),
(
{"z": s1.astype("pint[MW]"), "p": s2},
KeyError,
None,
),
(
{"w": s1.astype("pint[GW]"), "z": 28},
KeyError,
None,
),
(
{"w": s1, "p": s2 * 10, "z": 50},
KeyError,
None,
),
        # ( # exclude: not a valid dataframe constructor
# pd.DataFrame({"w": s1, "p": Q_(5.0, "ctEur/kWh"), "z": s2}),
# io.InterOp(w=s1, p=50, rest=({"z": s2},)),
# ),
(
pd.DataFrame({"w": s1.astype("pint[GW]"), "p": 50.0, "z": s2}),
KeyError,
None,
),
# . combination of name with incorrect unit -> error
(
{"w": s1.astype("pint[MWh]"), "p": Q_(50.0, "MW")},
DimensionalityError,
None,
),
(
{"p": s1.astype("pint[MWh]"), "w": 120.0},
DimensionalityError,
None,
),
(
{"z": 23.0, "p": s2, "w": s1.astype("pint[MWh]")},
KeyError,
None,
),
],
ids=id_fn,
)
def test_interop(data_in, expected_io, expected_io2):
"""Test if random data creates the expected InterOp object."""
if type(expected_io) is type and issubclass(expected_io, Exception):
with pytest.raises(expected_io):
_ = io.InOp.from_data(data_in)
return
result_io = io.InOp.from_data(data_in)
assert result_io == expected_io
if type(expected_io2) is type and issubclass(expected_io2, Exception):
with pytest.raises(expected_io2):
_ = result_io.to_timeseries()
return
result_io2 = result_io.to_timeseries()
assert result_io2 == expected_io2
result_io3 = result_io2.to_timeseries()
assert result_io3 == result_io2 # repeated application of intersection does nothing
| 26.057099
| 88
| 0.375659
|
46915a4ede9c51565edfbd0a439dd6467f9b8985
| 17,916
|
py
|
Python
|
PZR_bubblegeneration_Fin/SAC_Discrete.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | 2
|
2020-03-22T14:35:00.000Z
|
2020-05-26T05:06:41.000Z
|
PZR_bubblegeneration_Fin/SAC_Discrete.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | null | null | null |
PZR_bubblegeneration_Fin/SAC_Discrete.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | null | null | null |
"""
Builder: Daeil Lee 2021-01-03
Ref-Code:
- https://github.com/ku2482/sac-discrete.pytorch
-
"""
import torch
import torch.optim as opt
import torch.nn.functional as F
import numpy as np
import asyncio
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, wait
from datetime import datetime
from PZR_bubblegeneration_Fin.Memory import ReplayBuffer
from PZR_bubblegeneration_Fin.SAC_Network import ActorNet, CriticNet
from PZR_bubblegeneration_Fin.CNS_PZR import ENVCNS
from torch.utils.tensorboard import SummaryWriter
WRITER = SummaryWriter('./TFBoard')
class SAC:
def __init__(self,
# info
net_type='DNN',
lr=0.0003, alpha=1, gamma=0.99, tau=0.005,
# mem_info
capacity=1e6, seq_len=2,
# Agent Run info
max_episodes=1e6, max_steps=1e6, interval_steps=15, target_update_interval=15,
batch_size=128,
):
# -----------------------------------------------------------------------------------------
self.alpha = alpha
self.gamma = gamma
self.tau = tau
self.interval_steps = interval_steps
self.target_update_interval = target_update_interval
# -----------------------------------------------------------------------------------------
self._log_set()
# -----------------------------------------------------------------------------------------
# Call ENV
self.envs, self.agent_n, self.a_dim, self.s_dim = self._call_env()
# make Thread Pool
self.pool = ThreadPoolExecutor(len(self.envs))
        # Override the action dimension with a fixed discrete size (3 actions)
        self.a_dim = 3
# Define Memory
self.replay_buffer = ReplayBuffer(capacity, net_type, seq_len)
# Define Networks
self.Actor_Policy_Nets = [ActorNet(nub_a=self.a_dim, nub_s=self.s_dim, net_type=net_type) for _ in self.envs]
self.Critic_Q_Net1s = [CriticNet(nub_a=self.a_dim, nub_s=self.s_dim, net_type=net_type) for _ in self.envs]
self.Critic_Q_Net2s = [CriticNet(nub_a=self.a_dim, nub_s=self.s_dim, net_type=net_type) for _ in self.envs]
self.Critic_Q_Target_Net1s = [CriticNet(nub_a=self.a_dim, nub_s=self.s_dim, net_type=net_type) for _ in self.envs]
self.Critic_Q_Target_Net2s = [CriticNet(nub_a=self.a_dim, nub_s=self.s_dim, net_type=net_type) for _ in self.envs]
# Copy parameters from Critic_Q_Nets to Critoc_Q_Target_Nets
for critic_q_target, critic_q, i in zip(self.Critic_Q_Target_Net1s, self.Critic_Q_Net1s, range(len(self.envs))):
critic_q_target.load_state_dict(critic_q.state_dict())
for critic_q_target_para in critic_q_target.parameters():
critic_q_target_para.requires_grad = False
critic_q.save(path=f'./Model/Critic_Q_net1_{i}')
for critic_q_target, critic_q, i in zip(self.Critic_Q_Target_Net2s, self.Critic_Q_Net2s, range(len(self.envs))):
critic_q_target.load_state_dict(critic_q.state_dict())
for critic_q_target_para in critic_q_target.parameters():
critic_q_target_para.requires_grad = False
critic_q.save(path=f'./Model/Critic_Q_net2_{i}')
# Save Models policy
for poliy, i in zip(self.Actor_Policy_Nets, range(len(self.envs))):
poliy.save(path=f'./Model/Actor_policy_net_{i}')
# Define Optimizer
self.Actor_Policy_Net_Opts = [opt.Adam(poliy_net.parameters(), lr=lr) for poliy_net in self.Actor_Policy_Nets]
self.Critic_Q_Net1_Opts = [opt.Adam(critic_q_net1.parameters(), lr=lr) for critic_q_net1 in self.Critic_Q_Net1s]
self.Critic_Q_Net2_Opts = [opt.Adam(critic_q_net2.parameters(), lr=lr) for critic_q_net2 in self.Critic_Q_Net2s]
# Agent info ------------------------------------------------------------------------------
print(f'{self.Actor_Policy_Nets}\n{self.Critic_Q_Net1s}\n{self.Critic_Q_Net2s}\n'
f'{self.Critic_Q_Target_Net1s}\n{self.Critic_Q_Target_Net2s}')
for i in range(self.agent_n):
print(f'Agent {i}|'
f'ReplayBuffer {self.replay_buffer}|MonitoringMem {0}|'
f'ENV CNSIP{self.envs[i].CNS_ip}-CNSPort{self.envs[i].CNS_port}-'
f'ComIP{self.envs[i].Remote_ip}-ComPort{self.envs[i].Remote_port}')
# Agent Run -------------------------------------------------------------------------------
self._run(self.envs, self.replay_buffer, max_episodes, max_steps, interval_steps,
target_update_interval, batch_size)
def _log_set(self):
with open('Debug_logger.txt', 'w') as f: f.write(f'[{datetime.now()}]\n')
def _log(self, txt):
with open('Debug_logger.txt', 'a') as f: f.write(f'[{datetime.now()}]\t{txt}\n')
def _call_env(self):
_CNS_info = {
0: ['192.168.0.211', 7101, False], #CNS1
1: ['192.168.0.211', 7102, False],
2: ['192.168.0.211', 7103, False],
3: ['192.168.0.211', 7104, False],
4: ['192.168.0.211', 7105, False],
#
5: ['192.168.0.212', 7201, False], #CNS2
6: ['192.168.0.212', 7202, False],
7: ['192.168.0.212', 7203, False],
8: ['192.168.0.212', 7204, False],
9: ['192.168.0.212', 7205, False],
#
10: ['192.168.0.213', 7301, False], #CNS3
11: ['192.168.0.213', 7302, False],
12: ['192.168.0.213', 7303, False],
13: ['192.168.0.213', 7304, False],
14: ['192.168.0.213', 7305, False],
}
# Set CNS
envs = [ENVCNS(Name=i, IP=_CNS_info[i][0], PORT=_CNS_info[i][1]) for i in range(len(_CNS_info))]
return envs, len(_CNS_info), envs[0].action_space, envs[0].observation_space
def _update(self, mini_batch, i, target_update):
self._log(txt=f'call_update_{i}'+'='*50)
s, a, r, s_next, d = mini_batch
# print('_update_mini_batch:\n', s, s_next, a, r, d)
s = torch.FloatTensor(s)
s_next = torch.FloatTensor(s_next)
a = torch.FloatTensor(a)
r = torch.FloatTensor(r).unsqueeze(1)
d = torch.FloatTensor(np.float32(d)).unsqueeze(1)
# print('_update:\n', s, s_next, a, r, d)
# -------------------------------------------------------------------------------------
# Update the Q-function or Critic network's parameters
q1, q2 = self._update_cal_q(s, a, i)
target_q = self._update_cal_target_q(r, s_next, d, i)
Critic_Q1_loss = 0.5 * F.mse_loss(q1, target_q.detach())
Critic_Q2_loss = 0.5 * F.mse_loss(q2, target_q.detach())
self._log(txt=f'q1_{q1}_{target_q}')
self._log(txt=f'q1_{q2}_{target_q}')
Critic_Q1_loss_mean = torch.mean(Critic_Q1_loss)
Critic_Q2_loss_mean = torch.mean(Critic_Q2_loss)
# print(f'_Critic_loss_sum:\n{q1}\n{target_q}\n{Critic_Q1_loss}\n{Critic_Q2_loss}\n{Critic_Q1_loss_mean}')
# print(f'_Critic_Q1_loss_mean:\n{Critic_Q1_loss_mean}')
self.Critic_Q_Net1_Opts[i].zero_grad()
Critic_Q1_loss_mean.backward()
self.Critic_Q_Net1_Opts[i].step()
self.Critic_Q_Net2_Opts[i].zero_grad()
Critic_Q2_loss_mean.backward()
self.Critic_Q_Net2_Opts[i].step()
# -------------------------------------------------------------------------------------
# Update the Actor_policy's parameters
entropies, expect_q = self._update_cal_policy_entropy(s, i)
Actor_policy_loss = entropies - expect_q
Actor_policy_loss_mean = torch.mean(Actor_policy_loss)
# print(f'Actor_policy_loss_mean:\n{Actor_policy_loss_mean}')
self.Actor_Policy_Net_Opts[i].zero_grad()
Actor_policy_loss_mean.backward()
self.Actor_Policy_Net_Opts[i].step()
# -------------------------------------------------------------------------------------
# Log net calculation process
ep = self.Wd[i]['ep']
with open(f'./DB_ep_net/{ep}.txt', 'a') as f_net_:
f_net_.write(f'{q1.data.tolist()}|{q2.data.tolist()}|{target_q.data.tolist()}|'
f'{Critic_Q1_loss_mean.data.tolist()}|{Critic_Q2_loss_mean.data.tolist()}|'
f'{entropies.data.tolist()}|{expect_q.data.tolist()}|'
f'{Actor_policy_loss.data.tolist()}|{Actor_policy_loss_mean.data.tolist()}|'
f'\n')
# -------------------------------------------------------------------------------------
# Update the Target Q network: soft-Q update
if target_update:
self._log(txt='target_update')
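            # Soft (Polyak) target update, as implemented by the loop below:
            #   theta_target <- tau * theta + (1 - tau) * theta_target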
Q_nets = [self.Critic_Q_Net1s[i], self.Critic_Q_Net2s[i]]
Q_target_nets = [self.Critic_Q_Target_Net1s[i], self.Critic_Q_Target_Net2s[i]]
for Q_net_, Q_target_net_ in zip(Q_nets, Q_target_nets):
for Q_net_para_, Q_target_net_para_ in zip(Q_net_.parameters(), Q_target_net_.parameters()):
Q_target_net_para_.data.copy_(self.tau * Q_net_para_.data + (1 - self.tau) * Q_target_net_para_.data)
return Critic_Q1_loss_mean.detach().cpu().numpy(), Critic_Q2_loss_mean.detach().cpu().numpy(), Actor_policy_loss_mean.detach().cpu().numpy()
def _update_cal_q(self, s, a, i):
q1 = self.Critic_Q_Net1s[i](s)
q2 = self.Critic_Q_Net2s[i](s)
q1 = q1.gather(1, a.long())
q2 = q2.gather(1, a.long())
return q1, q2
def _update_cal_target_q(self, r, s_next, d, i):
with torch.no_grad():
Actor_s_next_out = self.Actor_Policy_Nets[i].sample(s_next)
# print('_Actor_Policy_Net_next_Out:\n', Actor_s_next_out)
action_next_, action_probs_next, log_probs_next = Actor_s_next_out
q1_target = self.Critic_Q_Target_Net1s[i](s_next)
q2_target = self.Critic_Q_Target_Net2s[i](s_next)
min_q1_q2_target = torch.min(q1_target, q2_target)
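            # Discrete-SAC soft state value, taken as an expectation over the
            # action distribution:
            #   V(s') = sum_a pi(a|s') * (min(Q1, Q2)(s', a) - alpha * log pi(a|s'))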
target_V = min_q1_q2_target - self.alpha * log_probs_next
target_V_prob = action_probs_next * target_V
target_V_sum = target_V_prob.sum(dim=1, keepdim=True)
# print(f'_target_V:\n{min_q1_q2_target}\n{log_probs_next}\n{target_V}\n{action_probs_next}'
# f'\n{target_V_prob}\n{target_V_sum}')
target_Q = r + self.gamma * (1 - d) * target_V_sum
# print(f'_target_Q:\n{target_Q}')
return target_Q
def _update_cal_policy_entropy(self, s, i):
Actor_s_out = self.Actor_Policy_Nets[i].sample(s)
# print('_Actor_Policy_Net_Out:\n', Actor_s_out)
action_, action_probs, log_probs = Actor_s_out
with torch.no_grad():
q1_ = self.Critic_Q_Net1s[i](s)
q2_ = self.Critic_Q_Net2s[i](s)
min_q1_q2 = torch.min(q1_, q2_)
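        # Policy objective (discrete SAC): minimize
        #   E_s[ sum_a pi(a|s) * (alpha * log pi(a|s) - min(Q1, Q2)(s, a)) ],
        # split below into the entropy term and the expected-Q term.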
entropies = torch.sum(action_probs * self.alpha * log_probs, dim=1, keepdim=True)
# print(f'_Actor_policy_entropies\n{action_probs}\n{log_probs}\n{entropies}')
expect_q = torch.sum(action_probs * min_q1_q2, dim=1, keepdim=True)
# print(f'_Actor_policy_expect_q\n{action_probs}\n{min_q1_q2}\n{expect_q}')
return entropies, expect_q
def _pool_one_step(self, envs, actions):
def __pool_one_step(env, a):
next_s, r, d, _ = env.step(a)
return next_s, r, d, _
futures = [self.pool.submit(__pool_one_step, env_, a) for env_, a in zip(envs, actions)]
wait(futures)
out = [pack_out.result() for pack_out in futures]
next_s = [out[_][0].tolist() for _ in range(self.agent_n)]
r = [out[_][1] for _ in range(self.agent_n)]
d = [out[_][2] for _ in range(self.agent_n)]
a = [out[_][3] for _ in range(self.agent_n)]
return next_s, r, d, a
def _pool_reset(self, envs):
def __pool_reset(env, ep):
env.reset(file_name=f'{ep}')
calculate_ep = []
for i in range(self.agent_n):
self.Wd[i]['ep'] = self.episode
calculate_ep.append(self.episode)
self.episode += 1
futures = [self.pool.submit(__pool_reset, env_, ep_) for env_, ep_ in zip(envs, calculate_ep)]
wait(futures)
print('All Env Reset !!')
def _pool_done_reset(self, envs, dones):
done_envs = []
done_envs_ep = []
for i in range(self.agent_n):
if dones[i]:
self.Wd[i]['ep'] = self.episode
done_envs.append(envs[i])
done_envs_ep.append(self.episode)
self.episode += 1
def __pool_done_reset(env, ep):
env.reset(file_name=f'{ep}')
futures = [self.pool.submit(__pool_done_reset, env_, ep_) for env_, ep_ in zip(done_envs, done_envs_ep)]
wait(futures)
def _run_exploit(self, net, s):
with torch.no_grad():
a = net.get_act(s)
return a.item()
def _run_explore(self, net, s):
with torch.no_grad():
a, _, _ = net.sample(s)
return a.item()
def _run_learn(self, steps, interval_steps):
return steps % interval_steps == 0
def _run_update_target(self):
return
def _run(self,
envs, replay_buffer,
max_episodes, max_steps, interval_steps, target_update_interval, batch_size):
print('Run' + '=' * 50)
steps = 0
self.episode = 0
self.writer_ep = 0
# Worker mem
self.Wd = {i: {'ep_acur': 0, 'ep_q1': 0, 'ep_q2': 0, 'ep_p': 0, 'ep':0} for i in range(self.agent_n)}
self._pool_reset(envs)
next_s, r, d, _ = self._pool_one_step(envs, actions=[[0] for _ in range(self.agent_n)])
s = next_s
while steps < max_steps and self.episode < max_episodes:
print(f'Time:[{datetime.now().minute}:{datetime.now().second}]'
f'Global_info:[{self.episode}/{max_episodes}][{steps}/{max_steps}]'
f'Env_info: {[env_.ENVStep for env_ in envs]}')
            # predict an action a for each state s
# a = [self.Actor_Policy_Nets[i].get_act(s[i]) for i in range(self.agent_n)] # a[0] tensor([[0]])
a = [[self._run_exploit(self.Actor_Policy_Nets[i], s[i])] for i in range(self.agent_n)] # a[0] [0]
# CNS Step <-
next_s, r, d, _ = self._pool_one_step(envs, a)
# Log
for i in range(self.agent_n):
with open(f'./DB_ep_srd/{self.Wd[i]["ep"]}.txt', 'a') as f_ep_srd:
f_ep_srd.write(f"{s[i]},{r[i]},{d[i]}\n")
# Buffer <-
for s_, a_, r_, next_s_, d_, id in zip(s, a, r, next_s, d, range(self.agent_n)):
self.Wd[id]['ep_acur'] += r_
replay_buffer.push(s_, a_, r_, next_s_, d_)
# s <- next_s
s = next_s
# learn
if replay_buffer.get_length() > batch_size and self._run_learn(steps=steps, interval_steps=interval_steps):
target_update = True if steps % target_update_interval == 0 else False
for i in range(self.agent_n):
mini_batch = replay_buffer.sample(batch_size, per=False)
q1_loss, q2_loss, p_loss = self._update(mini_batch, i, target_update)
with open(f'./DB_ep/{self.Wd[i]["ep"]}.txt', 'a') as f:
f.write(f"{q1_loss},{q2_loss},{p_loss}\n")
self.Wd[i]['ep_q1'] += q1_loss
self.Wd[i]['ep_q2'] += q2_loss
self.Wd[i]['ep_p'] += p_loss
# Done ep ??
for d_, id in zip(d, range(self.agent_n)):
if d_:
print(f"{self.Wd[id]['ep_q1']},{self.Wd[id]['ep_q1']/envs[id].ENVStep},"
f"{self.Wd[id]['ep_q2']},{self.Wd[id]['ep_q2']/envs[id].ENVStep},"
f"{self.Wd[id]['ep_p']},{self.Wd[id]['ep_p']/envs[id].ENVStep},"
f"{self.Wd[id]['ep_acur']},{self.Wd[id]['ep_acur']/envs[id].ENVStep}\n")
with open(f'./DB_ep/tot.txt', 'a') as f:
f.write(f"{self.Wd[id]['ep']},"
f"{self.Wd[id]['ep_q1']},{self.Wd[id]['ep_q1']/envs[id].ENVStep},"
f"{self.Wd[id]['ep_q2']},{self.Wd[id]['ep_q2']/envs[id].ENVStep},"
f"{self.Wd[id]['ep_p']},{self.Wd[id]['ep_p']/envs[id].ENVStep},"
f"{self.Wd[id]['ep_acur']},{self.Wd[id]['ep_acur']/envs[id].ENVStep}\n")
self.writer_ep += 1
WRITER.add_scalar('Loss/q1', self.Wd[id]['ep_q1'], self.writer_ep)
WRITER.add_scalar('Loss/q2', self.Wd[id]['ep_q2'], self.writer_ep)
WRITER.add_scalar('Loss/p', self.Wd[id]['ep_p'], self.writer_ep)
WRITER.add_scalar('Loss/r', self.Wd[id]['ep_acur'], self.writer_ep)
WRITER.add_scalar('Loss-av/q1-av', self.Wd[id]['ep_q1']/envs[id].ENVStep, self.writer_ep)
WRITER.add_scalar('Loss-av/q2-av', self.Wd[id]['ep_q2']/envs[id].ENVStep, self.writer_ep)
WRITER.add_scalar('Loss-av/p-av', self.Wd[id]['ep_p']/envs[id].ENVStep, self.writer_ep)
WRITER.add_scalar('Loss-av/r-av', self.Wd[id]['ep_acur']/envs[id].ENVStep, self.writer_ep)
for _ in self.Wd[id].keys():
self.Wd[id][_] = 0
self._pool_done_reset(envs, d)
steps += self.agent_n
# End
print(f'Done Training:'
f'[{self.episode}/{max_episodes}]'
f'[{steps}/{max_steps}]' + '=' * 50)
if __name__ == '__main__':
_ = SAC()
| 44.567164
| 148
| 0.558439
|
dcf8abe258e05bdf4a36697c49e573eb5ef7cea8
| 7,007
|
py
|
Python
|
keystone/tests/unit/backend/role/test_ldap.py
|
maestro-hybrid-cloud/keystone
|
a597a86b854215835a4d54885daeb161d7b0efb8
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/unit/backend/role/test_ldap.py
|
maestro-hybrid-cloud/keystone
|
a597a86b854215835a4d54885daeb161d7b0efb8
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/unit/backend/role/test_ldap.py
|
maestro-hybrid-cloud/keystone
|
a597a86b854215835a4d54885daeb161d7b0efb8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit.backend import core_ldap
from keystone.tests.unit.backend.role import core as core_role
from keystone.tests.unit import default_fixtures
CONF = cfg.CONF
class LdapRoleCommon(core_ldap.BaseBackendLdapCommon, core_role.RoleTests):
"""Tests that should be run in every LDAP configuration.
Include additional tests that are unique to LDAP (or need to be overridden)
which should be run for all the various LDAP configurations we test.
"""
pass
class LdapRole(LdapRoleCommon, core_ldap.BaseBackendLdap, unit.TestCase):
"""Test in an all-LDAP configuration.
Include additional tests that are unique to LDAP (or need to be overridden)
    which only need to be run in a basic LDAP configuration.
"""
def test_configurable_allowed_role_actions(self):
role = {'id': u'fäké1', 'name': u'fäké1'}
self.role_api.create_role(u'fäké1', role)
role_ref = self.role_api.get_role(u'fäké1')
self.assertEqual(u'fäké1', role_ref['id'])
role['name'] = u'fäké2'
self.role_api.update_role(u'fäké1', role)
self.role_api.delete_role(u'fäké1')
self.assertRaises(exception.RoleNotFound,
self.role_api.get_role,
u'fäké1')
def test_configurable_forbidden_role_actions(self):
self.config_fixture.config(
group='ldap', role_allow_create=False, role_allow_update=False,
role_allow_delete=False)
self.load_backends()
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assertRaises(exception.ForbiddenAction,
self.role_api.create_role,
role['id'],
role)
self.role_member['name'] = uuid.uuid4().hex
self.assertRaises(exception.ForbiddenAction,
self.role_api.update_role,
self.role_member['id'],
self.role_member)
self.assertRaises(exception.ForbiddenAction,
self.role_api.delete_role,
self.role_member['id'])
def test_role_filter(self):
role_ref = self.role_api.get_role(self.role_member['id'])
self.assertDictEqual(self.role_member, role_ref)
self.config_fixture.config(group='ldap',
role_filter='(CN=DOES_NOT_MATCH)')
self.load_backends()
# NOTE(morganfainberg): CONF.ldap.role_filter will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.role_api.get_role.invalidate(self.role_api,
self.role_member['id'])
self.assertRaises(exception.RoleNotFound,
self.role_api.get_role,
self.role_member['id'])
def test_role_attribute_mapping(self):
self.config_fixture.config(group='ldap', role_name_attribute='ou')
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.role_api.get_role.invalidate(self.role_api,
self.role_member['id'])
role_ref = self.role_api.get_role(self.role_member['id'])
self.assertEqual(self.role_member['id'], role_ref['id'])
self.assertEqual(self.role_member['name'], role_ref['name'])
self.config_fixture.config(group='ldap', role_name_attribute='sn')
self.load_backends()
# NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.role_api.get_role.invalidate(self.role_api,
self.role_member['id'])
role_ref = self.role_api.get_role(self.role_member['id'])
self.assertEqual(self.role_member['id'], role_ref['id'])
self.assertNotIn('name', role_ref)
def test_role_attribute_ignore(self):
self.config_fixture.config(group='ldap',
role_attribute_ignore=['name'])
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
# NOTE(morganfainberg): CONF.ldap.role_attribute_ignore will not be
# dynamically changed at runtime. This invalidate is a work-around for
# the expectation that it is safe to change config values in tests that
# could affect what the drivers would return up to the manager. This
# solves this assumption when working with aggressive (on-create)
# cache population.
self.role_api.get_role.invalidate(self.role_api,
self.role_member['id'])
role_ref = self.role_api.get_role(self.role_member['id'])
self.assertEqual(self.role_member['id'], role_ref['id'])
self.assertNotIn('name', role_ref)
class LdapIdentitySqlEverythingElseRole(
core_ldap.BaseBackendLdapIdentitySqlEverythingElse, LdapRoleCommon,
unit.TestCase):
"""Test Identity in LDAP, Everything else in SQL."""
pass
class LdapIdentitySqlEverythingElseWithMappingRole(
LdapIdentitySqlEverythingElseRole,
core_ldap.BaseBackendLdapIdentitySqlEverythingElseWithMapping):
"""Test ID mapping of default LDAP backend."""
pass
| 43.253086
| 79
| 0.6572
|
cbab3a357e4ad69576afc85b4060587fbf96f82b
| 3,329
|
py
|
Python
|
code/main.py
|
bnesposito/zika-detection
|
62d5f962e71af54d9dc51eb91b62329d84735e68
|
[
"Apache-2.0"
] | null | null | null |
code/main.py
|
bnesposito/zika-detection
|
62d5f962e71af54d9dc51eb91b62329d84735e68
|
[
"Apache-2.0"
] | null | null | null |
code/main.py
|
bnesposito/zika-detection
|
62d5f962e71af54d9dc51eb91b62329d84735e68
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import time
from sklearn.ensemble import VotingClassifier
import config
import process
import models
def main():
LOGGER_LEVEL = 10
RAW_DATA_PATH = './data/raw/'
RAW_CSV_NAME = 'raw_data.csv'
t0 = time.time()
logger = config.config_logger(__name__, LOGGER_LEVEL)
pd.set_option('display.float_format', lambda x: '{0:.2f}'.format(x))
logger.info('Beginning execution: zika dataset')
logger.info('Logger configured - level {0}'.format(LOGGER_LEVEL))
logger.info('Opening CSV: {0}{1}'.format(RAW_DATA_PATH, RAW_CSV_NAME))
raw_data = pd.read_csv(RAW_DATA_PATH + RAW_CSV_NAME)
logger.info('Raw dataset description:')
process.basic_descriptives(raw_data)
raw_data = process.preprocess(raw_data)
#print(raw_data.describe().transpose().to_string())
#print(raw_data.head().to_string())
#print(raw_data.info().to_string())
y_dengue = raw_data['dengue_pcr']
y_zika = raw_data['zika_pcr']
y_chik = raw_data['chik_pcr']
diseases = [y_dengue, y_zika, y_chik]
# Check process code for further explanation of select_disease function.
# code: 1. Dengue, 2. Zika, 3. Chik, 4. Any
# only_one: if True, input np.nan to patients with another disease.
y = process.select_disease(diseases, code=1, only_one=False)
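    # For example (illustrative, per the codes documented above): to target
    # zika only, excluding patients who also test positive for another disease:
    #   y = process.select_disease(diseases, code=2, only_one=True)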
logger.info('Target var frequency: \n{0}'.format(y.value_counts()))
logger.info('Total obs: {0}'.format(y.value_counts().sum()))
remove_list = ['id', 'centro_pob', 'name', 'dep', 'prov', 'dist',
'serotipo1', 'serotipo2', 'serotipo3', 'serotipo4',
'dengue_pcr', 'zika_pcr', 'chik_pcr']
X = process.remove_vars(raw_data, remove_list)
X = process.keep_non_nan(X, y)
y = y.dropna()
logger.info('Features dataset')
process.basic_descriptives(X)
logger.info('Split train test')
X_train, X_test, y_train, y_test = models.split_data(X, y, proportion=0.4)
logger.info('Estimating models')
logger.info('GBM')
grid_gbm = models.gbm_grid(X_train, y_train, n_cv=5)
logger.info(grid_gbm.best_params_)
logger.info('Train score: {0}'.format(grid_gbm.best_score_))
logger.info('Test score: {0}'.format(grid_gbm.score(X_test, y_test)))
logger.info('Logit')
grid_logit = models.logit_grid(X_train, y_train, n_cv=5)
logger.info(grid_logit.best_params_)
logger.info('Train score: {0}'.format(grid_logit.best_score_))
logger.info('Test score: {0}'.format(grid_logit.score(X_test, y_test)))
logger.info('AdaBoost')
grid_adaboost = models.adaboost_grid(X_train, y_train, n_cv=5)
logger.info(grid_adaboost.best_params_)
logger.info('Train score: {0}'.format(grid_adaboost.best_score_))
logger.info('Test score: {0}'.format(grid_adaboost.score(X_test, y_test)))
logger.info('Soft Voting')
eclf = VotingClassifier(estimators=[('gbm', grid_gbm), ('logit', grid_logit),
('ada', grid_adaboost)], voting='soft')
eclf.fit(X_train, y_train)
y_pred = eclf.predict_proba(X_test)
print(y_pred[:5,:])
logger.info('Train score: {0}'.format(eclf.score(X_train, y_train)))
logger.info('Test score: {0}'.format(eclf.score(X_test, y_test)))
config.time_taken_display(t0)
if __name__ == '__main__':
main()
| 36.582418
| 81
| 0.676479
|
d00423c680ad949ad979bd13fbaf3a375a88ee47
| 1,196
|
py
|
Python
|
tests/testapp/migrations/0001_initial.py
|
garyd203/django-lifecycle
|
f60a1394b3fb44b84c9c997ac87c2edc7b7a7f55
|
[
"MIT"
] | null | null | null |
tests/testapp/migrations/0001_initial.py
|
garyd203/django-lifecycle
|
f60a1394b3fb44b84c9c997ac87c2edc7b7a7f55
|
[
"MIT"
] | null | null | null |
tests/testapp/migrations/0001_initial.py
|
garyd203/django-lifecycle
|
f60a1394b3fb44b84c9c997ac87c2edc7b7a7f55
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-03-23 05:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UserAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('password', models.CharField(max_length=200)),
('email', models.FileField(null=True, upload_to='')),
('password_updated_at', models.DateTimeField(null=True)),
('joined_at', models.DateTimeField(null=True)),
('has_trial', models.BooleanField(default=False)),
('status', models.CharField(choices=[('active', 'Active'), ('banned', 'Banned'), ('inactive', 'Inactive')], default='active', max_length=30)),
],
options={
'abstract': False,
},
),
]
| 36.242424
| 158
| 0.560201
|
4b99949609a4b6ed5e86b2396aae2366d75f8167
| 555
|
py
|
Python
|
1-100/17/17.py
|
Thomaw/Project-Euler
|
bcad5d8a1fd3ebaa06fa52d92d286607e9372a8d
|
[
"MIT"
] | null | null | null |
1-100/17/17.py
|
Thomaw/Project-Euler
|
bcad5d8a1fd3ebaa06fa52d92d286607e9372a8d
|
[
"MIT"
] | null | null | null |
1-100/17/17.py
|
Thomaw/Project-Euler
|
bcad5d8a1fd3ebaa06fa52d92d286607e9372a8d
|
[
"MIT"
] | null | null | null |
s={0:"",1:"one",2:"two",3:"three",4:"four",5:"five",6:"six"/
,7:"seven",8:"eight",9:"nine",10:"ten",11:"eleven"/
,12:"twelve",13:"thirteen",14:"fourteen",15:"fifteen"/
,16:"sixteen",17:"seventeen",18:"eighteen",19:"nineteen"/
,20:"twenty",30:"thirty",40:"forty",50:"fifty"/
,60:"sixty",70:"seventy",80:"eighty",90:"ninety"}
for i in range(1,1000):
if(not i in s.keys()):
if(i<100):
s[i]=s[i/10*10]+s[i%10]
else:
s[i]=s[i/100]+"hundred"
if(i%100):
s[i]+="and"+s[i%100]
s[1000]="onethousand"
total=0;
for i in s.values():
total+=len(i)
| 27.75
| 60
| 0.583784
|
0954f281c8639b3673eb8cae034b02aa05706ce5
| 1,961
|
py
|
Python
|
omtk/__init__.py
|
renaudll/omtk
|
a7740d53a5587529773594bfd7c37e553787028f
|
[
"MIT"
] | 20
|
2015-09-30T16:07:02.000Z
|
2022-03-12T06:57:59.000Z
|
omtk/__init__.py
|
nilouco/omtk
|
a7740d53a5587529773594bfd7c37e553787028f
|
[
"MIT"
] | 23
|
2015-12-22T15:41:02.000Z
|
2018-04-13T02:52:41.000Z
|
omtk/__init__.py
|
nilouco/omtk
|
a7740d53a5587529773594bfd7c37e553787028f
|
[
"MIT"
] | 13
|
2015-07-10T16:06:26.000Z
|
2021-08-21T20:09:41.000Z
|
import sys
from .core import *
import pymel.core as pymel
__dependencies__ = [
('deps',)
]
current_dir = os.path.dirname(os.path.realpath(__file__))
for dependency in __dependencies__:
path = os.path.realpath(os.path.join(current_dir, *dependency))
sys.path.append(path)
# HACK: Load matrixNodes.dll
pymel.loadPlugin('matrixNodes', quiet=True)
def _reload(kill_ui=True):
"""
    Reload all modules in their respective order.
"""
import core
reload(core)
core._reload()
import libs
reload(libs)
libs._reload()
from omtk.core import plugin_manager
reload(plugin_manager)
plugin_manager.plugin_manager.reload_all()
import ui_shared
reload(ui_shared)
from ui import pluginmanager_window
reload(pluginmanager_window)
from ui import preferences_window
reload(preferences_window)
from ui import widget_list_influences
reload(widget_list_influences)
from ui import widget_list_modules
reload(widget_list_modules)
from ui import widget_list_meshes
reload(widget_list_meshes)
from ui import widget_logger
reload(widget_logger)
import widget_list_influences
reload(widget_list_influences)
import widget_list_modules
reload(widget_list_modules)
import widget_list_meshes
reload(widget_list_meshes)
import widget_logger
reload(widget_logger)
from ui import main_window
reload(main_window)
import preferences_window
reload(preferences_window)
import pluginmanager_window
reload(pluginmanager_window)
import main_window
reload(main_window)
if kill_ui:
# Try to kill the window to prevent any close event error
try:
pymel.deleteUI('OpenRiggingToolkit')
except:
pass
reload(main_window)
def show():
"""
Show a simple gui. Note that PySide or PyQt4 is needed.
"""
import main_window
main_window.show()
| 20.642105
| 67
| 0.711882
|
41a07abad738f41570fda5fe865b70918bfc53bd
| 1,917
|
py
|
Python
|
Web/4/example_12.py
|
mabdelaal86/python-courses
|
5e2be0df3c00eb084ec39d49402be38fac635097
|
[
"MIT"
] | 1
|
2020-03-10T15:40:22.000Z
|
2020-03-10T15:40:22.000Z
|
Web/4/example_12.py
|
mabdelaal86/python-courses
|
5e2be0df3c00eb084ec39d49402be38fac635097
|
[
"MIT"
] | null | null | null |
Web/4/example_12.py
|
mabdelaal86/python-courses
|
5e2be0df3c00eb084ec39d49402be38fac635097
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from flask import Flask, request, jsonify, abort
app = Flask(__name__)
students = {
105: {"id": 105, "name": "Ibrahim Fatti", "gender": "Male", "birth_date": "1995-12-13", "address": "Giza", "class": 3, "group": "A"},
109: {"id": 109, "name": "Shady Hamdy", "gender": "Male", "birth_date": "1995-10-22", "address": "Giza", "class": 3, "group": "B"},
115: {"id": 115, "name": "Amani Fahmy", "gender": "Female", "birth_date": "1996-05-12", "address": "Cairo", "class": 2, "group": "A"},
122: {"id": 122, "name": "Kareem Ahmad", "gender": "Male", "birth_date": "1997-09-14", "address": "Cairo", "class": 1, "group": "C"}
}
@app.route("/students/", methods=['GET'])
def read_all():
return jsonify(list(students.values()))
@app.route("/students/<int:student_id>/", methods=['GET'])
def read(student_id):
student = students.get(student_id)
if student is None:
abort(404, "Student not found")
return jsonify(student)
@app.route("/students/", methods=['POST'])
def create():
data = request.get_json()
student_id = data['id']
if student_id in students:
abort(400, "Duplicated ID")
students[student_id] = data
return jsonify(data), 201
@app.route("/students/<int:student_id>/", methods=['PUT'])
def update(student_id):
if student_id not in students:
abort(404, "Student not found")
data = request.get_json()
students[student_id] = data
return "", 204
@app.route("/students/<int:student_id>/", methods=['DELETE'])
def delete(student_id):
if student_id not in students:
abort(404, "Student not found")
del students[student_id]
return "", 204
@app.errorhandler(404)
@app.errorhandler(400)
def on_error(error):
return jsonify({"status": error.code, "title": error.description}), error.code
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
| 27.385714
| 138
| 0.624413
|
45ac478bcc60a60ba1fd6d251de2e254b76ada60
| 6,164
|
py
|
Python
|
xblog/tests/test_mt.py
|
rubeon/django-xblog
|
1709a3c2f6c1901231f817f9adeb189b0be6251e
|
[
"BSD-2-Clause"
] | null | null | null |
xblog/tests/test_mt.py
|
rubeon/django-xblog
|
1709a3c2f6c1901231f817f9adeb189b0be6251e
|
[
"BSD-2-Clause"
] | null | null | null |
xblog/tests/test_mt.py
|
rubeon/django-xblog
|
1709a3c2f6c1901231f817f9adeb189b0be6251e
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Test cases for the mt.* XMLRPC methods.
"""
from django.test import TestCase
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test.utils import override_settings
from django.test.client import Client
from django.conf import settings
from xblog.models import Post
from xblog.models import Blog
from xblog.models import Author
from xblog.models import Category
from xblog.models import Link
from xblog.models import Tag
from xblog.models import LinkCategory
from xblog.models import FILTERS
from datetime import datetime
try:
from xmlrpc.client import Binary
from xmlrpc.client import Fault
from xmlrpc.client import ServerProxy
except ImportError: # Python 2
from xmlrpclib import Binary
from xmlrpclib import Fault
from xmlrpclib import ServerProxy
from .utils import TestTransport
post_content = {
'title':'This is a test title',
'description': "<p>This is the post content. Hey-ooooo!</p>",
'post_type': 'post',
'dateCreated': datetime.now(),
'date_created_gmt': datetime.now(),
'categories': [],
'mt_keywords': ['tag1','tag2','tag3'],
'mt_excerpt': "<p>This is the...</p>",
'mt_text_more': "Hey-oooooO!",
'mt_allow_comments':True,
'mt_allow_pings': True,
'wp_slug': 'this-is-the-test-title',
'wp_password': 'mypassword',
# 'wp_author_id': ''
# 'wp_author_display_name':
'post_status':'publish',
'wp_post_format': 'Post',
'sticky': False,
'custom_fields':[],
'enclosure':{},
}
@override_settings(
ROOT_URLCONF='xblog.tests.conf.urls'
)
class MtTestCase(TestCase):
"""
    Test cases for the mt.* XMLRPC API calls.
"""
def setUp(self):
"""
Bring up the test environment
"""
# create our test user
self.test_user1 = User.objects.create(
username="test_user1",
first_name="Test",
last_name="User2",
email="testuser@example.com",
password="MyTestPass1",
is_staff=False,
is_superuser=False
)
#
self.test_user2 = User.objects.create(
username="test_user2",
first_name="Test",
last_name="User2",
email="testuser2@example.com",
password="MyTestPass1",
is_staff=False,
is_superuser=False
)
self.rogue_user = User.objects.create(
username="rogue_user",
first_name="Rogue",
last_name="User",
email="testuser2@example.com",
password="MyTestPass1",
is_staff=False,
is_superuser=False
)
self.test_admin = User.objects.create(
username="admin",
first_name="Admin",
last_name="User",
email="admin@example.com",
password="MyAdminPass1",
is_staff=True,
is_superuser=True
)
self.test_blog = Blog.objects.create(
title="Test User 1's Space",
description="A blog for Test User 1. Slippery when wet!",
owner = User.objects.get(username="test_user1"),
site = Site.objects.get_current()
)
self.test_category1 = Category.objects.create(
title="Test Category 1",
description="Category mean namely for testing",
blog = self.test_blog
)
self.post = Post.objects.create(
title = "Test User 1 Post",
body = "This is some stuff.\n\nSome stuff, you know.",
blog = self.test_blog,
author = self.test_user1.author,
status = 'publish'
)
self.post.save()
self.draft = Post.objects.create(
title = "Test User 1 Post",
body = "This is some stuff.\n\nSome stuff, you know.",
blog = self.test_blog,
author = self.test_user1.author,
status = 'draft'
)
# enable remote access for test_user1
self.test_user1.author.remote_access_enabled = True
self.test_user1.author.save()
# disable remote access for test_user2
self.test_user2.author.remote_access_enabled = False
self.test_user2.author.save()
self.rogue_user.author.remote_access_enabled = True
self.rogue_user.author.save()
self.test_admin.author.remote_access_enabled = True
self.test_admin.author.save()
self.s = ServerProxy('http://localhost:8000/xmlrpc/', transport=TestTransport(), verbose=0)
def test_mt_set_post_categories(self):
"""
make sure that categories can be set
"""
postid = self.post.id
username = self.test_user1.username
password = self.test_user1.author.remote_access_key
cat = self.test_category1
categories = [{
'categoryId': cat.id,
'isPrimary': True
},]
res = self.s.mt.setPostCategories(postid, username, password, categories)
# smoke check
self.assertTrue(res)
p = self.post
for category in categories:
c = Category.objects.get(pk=category['categoryId'])
self.assertIn(c, p.categories.all())
def test_mt_get_post_categories(self):
postid = self.post.id
username = self.test_user1.username
password = self.test_user1.author.remote_access_key
categories = self.s.mt.getPostCategories(postid, username, password)
for category in categories:
            c = Category.objects.get(pk=category['categoryId'])
            self.assertIn(c, self.post.categories.all())
def test_mt_publish_post(self):
postid = self.draft.id
username = self.test_user1.username
password = self.test_user1.author.remote_access_key
self.assertTrue(self.draft.status=="draft")
res = self.s.mt.publishPost(postid, username, password)
self.assertTrue(res)
post = Post.objects.get(pk=postid)
self.assertTrue(post.status=='publish')
| 30.364532
| 99
| 0.614049
|
40055188cda07e9016356349c69a69d50848fabb
| 11,502
|
py
|
Python
|
trainer.py
|
zedoggo/ThesisBinus
|
1132330cd221677a4e7abe27ff0637642ee02872
|
[
"MIT"
] | 2
|
2020-11-08T15:39:10.000Z
|
2021-02-25T08:07:55.000Z
|
trainer.py
|
zedoggo/ThesisBinus
|
1132330cd221677a4e7abe27ff0637642ee02872
|
[
"MIT"
] | null | null | null |
trainer.py
|
zedoggo/ThesisBinus
|
1132330cd221677a4e7abe27ff0637642ee02872
|
[
"MIT"
] | 1
|
2020-12-13T13:40:34.000Z
|
2020-12-13T13:40:34.000Z
|
import numpy as np
import torch
from torch import optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from models.CC import CrowdCounter
from config import cfg
from misc.utils import *
import pdb
import csv
class Trainer():
def __init__(self, dataloader, cfg_data, pwd):
self.cfg_data = cfg_data
self.data_mode = cfg.DATASET
self.exp_name = cfg.EXP_NAME
self.exp_path = cfg.EXP_PATH
self.pwd = pwd
self.net_name = cfg.NET
self.net = CrowdCounter(cfg.GPU_ID,self.net_name).cuda()
self.optimizer = optim.Adam(self.net.CCN.parameters(), lr=cfg.LR, weight_decay=1e-4)
# self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95,weight_decay=5e-4)
self.scheduler = StepLR(self.optimizer, step_size=cfg.NUM_EPOCH_LR_DECAY, gamma=cfg.LR_DECAY)
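        # Step-decay schedule: the LR is multiplied by cfg.LR_DECAY every
        # cfg.NUM_EPOCH_LR_DECAY epochs (stepping begins after
        # cfg.LR_DECAY_START, see forward()).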
self.train_record = {'best_mae': 1e20, 'best_mse':1e20, 'best_model_name': ''}
self.timer = {'iter time' : Timer(),'train time' : Timer(),'val time' : Timer()}
self.epoch = 0
self.i_tb = 0
if cfg.PRE_GCC:
self.net.load_state_dict(torch.load(cfg.PRE_GCC_MODEL))
self.train_loader, self.val_loader, self.restore_transform = dataloader()
if cfg.RESUME:
latest_state = torch.load(cfg.RESUME_PATH)
self.net.load_state_dict(latest_state['net'])
self.optimizer.load_state_dict(latest_state['optimizer'])
self.scheduler.load_state_dict(latest_state['scheduler'])
self.epoch = latest_state['epoch'] + 1
self.i_tb = latest_state['i_tb']
self.train_record = latest_state['train_record']
self.exp_path = latest_state['exp_path']
self.exp_name = latest_state['exp_name']
self.writer, self.log_txt = logger(self.exp_path, self.exp_name, self.pwd, 'exp', resume=cfg.RESUME)
def forward(self):
# self.validate_V3()
for epoch in range(self.epoch,cfg.MAX_EPOCH):
self.epoch = epoch
if epoch > cfg.LR_DECAY_START:
self.scheduler.step()
# training
self.timer['train time'].tic()
self.train()
self.timer['train time'].toc(average=False)
print( 'train time: {:.2f}s'.format(self.timer['train time'].diff) )
print( '='*20 )
# validation
if epoch%cfg.VAL_FREQ==0 or epoch>cfg.VAL_DENSE_START:
self.timer['val time'].tic()
if self.data_mode in ['SHHA', 'SHHB', 'QNRF', 'UCF50']:
self.validate_V1()
                elif self.data_mode == 'WE':
                    self.validate_V2()
                elif self.data_mode == 'GCC':
                    self.validate_V3()
self.timer['val time'].toc(average=False)
print( 'val time: {:.2f}s'.format(self.timer['val time'].diff) )
def train(self): # training for all datasets
self.net.train()
for i, data in enumerate(self.train_loader, 0):
self.timer['iter time'].tic()
img, gt_map = data
img = Variable(img).cuda()
gt_map = Variable(gt_map).cuda()
self.optimizer.zero_grad()
pred_map = self.net(img, gt_map)
loss = self.net.loss
loss.backward()
self.optimizer.step()
if (i + 1) % cfg.PRINT_FREQ == 0:
self.i_tb += 1
self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
self.timer['iter time'].toc(average=False)
print( '[ep %d][it %d][loss %.4f][lr %.4f][%.2fs]' % \
(self.epoch + 1, i + 1, loss.item(), self.optimizer.param_groups[0]['lr']*10000, self.timer['iter time'].diff) )
print( ' [cnt: gt: %.1f pred: %.2f]' % (gt_map[0].sum().data/self.cfg_data.LOG_PARA, pred_map[0].sum().data/self.cfg_data.LOG_PARA) )
                # Write training metrics to a CSV file inline (two files in
                # total: one for training here, one for validation below);
                # each row logs the epoch, iteration, loss, and iteration time.
training_iter_time = self.timer['iter time'].diff
csvRow = ['number_of_epoch', 'iteration', 'training_loss', 'iteration_time']
csvFile = "training_result.csv"
with open(csvFile, 'a') as fp:
wr = csv.writer(fp, dialect='excel')
wr.writerow(csvRow)
wr.writerow([self.epoch + 1, i + 1, loss.item(), training_iter_time])
def validate_V1(self):# validate_V1 for SHHA, SHHB, UCF-QNRF, UCF50
self.net.eval()
losses = AverageMeter()
maes = AverageMeter()
mses = AverageMeter()
for vi, data in enumerate(self.val_loader, 0):
img, gt_map = data
with torch.no_grad():
img = Variable(img).cuda()
gt_map = Variable(gt_map).cuda()
pred_map = self.net.forward(img,gt_map)
pred_map = pred_map.data.cpu().numpy()
gt_map = gt_map.data.cpu().numpy()
for i_img in range(pred_map.shape[0]):
pred_cnt = np.sum(pred_map[i_img])/self.cfg_data.LOG_PARA
gt_count = np.sum(gt_map[i_img])/self.cfg_data.LOG_PARA
losses.update(self.net.loss.item())
maes.update(abs(gt_count-pred_cnt))
mses.update((gt_count-pred_cnt)*(gt_count-pred_cnt))
if vi==0:
vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)
mae = maes.avg
mse = np.sqrt(mses.avg)
loss = losses.avg
self.writer.add_scalar('val_loss', loss, self.epoch + 1)
self.writer.add_scalar('mae', mae, self.epoch + 1)
self.writer.add_scalar('mse', mse, self.epoch + 1)
self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
[mae, mse, loss],self.train_record,self.log_txt)
print_summary(self.exp_name,[mae, mse, loss],self.train_record)
# loss, mae, mse
csvRow = ['mae_value', 'mse_value', 'validation_loss']
csvFile = "validation_result.csv"
with open(csvFile, 'a') as fp:
wr = csv.writer(fp, dialect='excel')
wr.writerow(csvRow)
wr.writerow([mae, mse, loss])
def validate_V2(self):# validate_V2 for WE
self.net.eval()
losses = AverageCategoryMeter(5)
maes = AverageCategoryMeter(5)
roi_mask = []
from datasets.WE.setting import cfg_data
from scipy import io as sio
for val_folder in cfg_data.VAL_FOLDER:
roi_mask.append(sio.loadmat(os.path.join(cfg_data.DATA_PATH,'test',val_folder + '_roi.mat'))['BW'])
for i_sub,i_loader in enumerate(self.val_loader,0):
mask = roi_mask[i_sub]
for vi, data in enumerate(i_loader, 0):
img, gt_map = data
with torch.no_grad():
img = Variable(img).cuda()
gt_map = Variable(gt_map).cuda()
pred_map = self.net.forward(img,gt_map)
pred_map = pred_map.data.cpu().numpy()
gt_map = gt_map.data.cpu().numpy()
for i_img in range(pred_map.shape[0]):
pred_cnt = np.sum(pred_map[i_img])/self.cfg_data.LOG_PARA
gt_count = np.sum(gt_map[i_img])/self.cfg_data.LOG_PARA
losses.update(self.net.loss.item(),i_sub)
maes.update(abs(gt_count-pred_cnt),i_sub)
if vi==0:
vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)
mae = np.average(maes.avg)
loss = np.average(losses.avg)
self.writer.add_scalar('val_loss', loss, self.epoch + 1)
self.writer.add_scalar('mae', mae, self.epoch + 1)
self.writer.add_scalar('mae_s1', maes.avg[0], self.epoch + 1)
self.writer.add_scalar('mae_s2', maes.avg[1], self.epoch + 1)
self.writer.add_scalar('mae_s3', maes.avg[2], self.epoch + 1)
self.writer.add_scalar('mae_s4', maes.avg[3], self.epoch + 1)
self.writer.add_scalar('mae_s5', maes.avg[4], self.epoch + 1)
self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
[mae, 0, loss],self.train_record,self.log_txt)
print_WE_summary(self.log_txt,self.epoch,[mae, 0, loss],self.train_record,maes)
def validate_V3(self):# validate_V3 for GCC
self.net.eval()
losses = AverageMeter()
maes = AverageMeter()
mses = AverageMeter()
c_maes = {'level':AverageCategoryMeter(9), 'time':AverageCategoryMeter(8),'weather':AverageCategoryMeter(7)}
c_mses = {'level':AverageCategoryMeter(9), 'time':AverageCategoryMeter(8),'weather':AverageCategoryMeter(7)}
for vi, data in enumerate(self.val_loader, 0):
img, gt_map, attributes_pt = data
with torch.no_grad():
img = Variable(img).cuda()
gt_map = Variable(gt_map).cuda()
pred_map = self.net.forward(img,gt_map)
pred_map = pred_map.data.cpu().numpy()
gt_map = gt_map.data.cpu().numpy()
for i_img in range(pred_map.shape[0]):
pred_cnt = np.sum(pred_map[i_img])/self.cfg_data.LOG_PARA
gt_count = np.sum(gt_map[i_img])/self.cfg_data.LOG_PARA
s_mae = abs(gt_count-pred_cnt)
s_mse = (gt_count-pred_cnt)*(gt_count-pred_cnt)
losses.update(self.net.loss.item())
maes.update(s_mae)
mses.update(s_mse)
attributes_pt = attributes_pt.squeeze()
c_maes['level'].update(s_mae,attributes_pt[i_img][0])
c_mses['level'].update(s_mse,attributes_pt[i_img][0])
c_maes['time'].update(s_mae,attributes_pt[i_img][1]/3)
c_mses['time'].update(s_mse,attributes_pt[i_img][1]/3)
c_maes['weather'].update(s_mae,attributes_pt[i_img][2])
c_mses['weather'].update(s_mse,attributes_pt[i_img][2])
if vi==0:
vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)
loss = losses.avg
mae = maes.avg
mse = np.sqrt(mses.avg)
self.writer.add_scalar('val_loss', loss, self.epoch + 1)
self.writer.add_scalar('mae', mae, self.epoch + 1)
self.writer.add_scalar('mse', mse, self.epoch + 1)
self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
[mae, mse, loss],self.train_record,self.log_txt)
print_GCC_summary(self.log_txt,self.epoch,[mae, mse, loss],self.train_record,c_maes,c_mses)
| 39.662069
| 167
| 0.565467
|
9ea8d9ccbfd0def3d7a84712fe1828ccb9d69b0d
| 235
|
py
|
Python
|
tests/packages/tree/tree-package2/setup.py
|
sbg/dante
|
104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227
|
[
"Apache-2.0"
] | 9
|
2017-11-03T15:53:01.000Z
|
2019-10-01T14:09:56.000Z
|
tests/packages/tree/tree-package2/setup.py
|
sbg/dante
|
104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227
|
[
"Apache-2.0"
] | 4
|
2019-10-01T12:53:58.000Z
|
2021-04-26T15:39:16.000Z
|
tests/packages/tree/tree-package2/setup.py
|
sbg/dante
|
104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227
|
[
"Apache-2.0"
] | 5
|
2017-11-03T15:50:40.000Z
|
2021-09-13T08:50:45.000Z
|
from setuptools import setup, find_packages
install_requires = [
'tree-package3',
'tree-package7'
]
setup(
name='tree-package2',
version='1.0.0',
install_requires=install_requires,
packages=find_packages(),
)
| 16.785714
| 43
| 0.689362
|
501b2784e981c8e2b766beeac3ba2218b9884d98
| 2,916
|
py
|
Python
|
tests/test_reduce_max.py
|
yanndupis/tf-encrypted
|
cfaea3ba87520f73979ed4e4f397eba3beb0a535
|
[
"Apache-2.0"
] | null | null | null |
tests/test_reduce_max.py
|
yanndupis/tf-encrypted
|
cfaea3ba87520f73979ed4e4f397eba3beb0a535
|
[
"Apache-2.0"
] | null | null | null |
tests/test_reduce_max.py
|
yanndupis/tf-encrypted
|
cfaea3ba87520f73979ed4e4f397eba3beb0a535
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import numpy as np
import tensorflow as tf
import tf_encrypted as tfe
import pytest
@pytest.mark.slow
class TestReduceMax(unittest.TestCase):
def setUp(self):
tf.reset_default_graph()
def tearDown(self):
tf.reset_default_graph()
def test_reduce_max_1d(self):
t = np.array([1, 2, 3, 4]).astype(float)
with tf.Session() as sess:
out_tf = tf.reduce_max(t)
expected = sess.run(out_tf)
with tfe.protocol.SecureNN() as prot:
b = prot.define_private_variable(tf.constant(t))
out_tfe = prot.reduce_max(b)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(2):
actual = sess.run(out_tfe.reveal(), tag='test_1d')
np.testing.assert_array_equal(actual, expected)
def test_reduce_max_2d_axis0(self):
t = np.array([1, 2, 3, 4, 5, 6, 7, 8]).reshape(2, 4).astype(float)
with tf.Session() as sess:
out_tf = tf.reduce_max(t, axis=0)
expected = sess.run(out_tf)
with tfe.protocol.SecureNN() as prot:
b = prot.define_private_variable(tf.constant(t))
out_tfe = prot.reduce_max(b, axis=0)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(2):
actual = sess.run(out_tfe.reveal(), tag='test_2d_axis0')
np.testing.assert_array_equal(actual, expected)
def test_reduce_max_2d_axis1(self):
t = np.array([1, 2, 3, 4, 5, 6, 7, 8]).reshape(2, 4).astype(float)
with tf.Session() as sess:
out_tf = tf.reduce_max(t, axis=1)
expected = sess.run(out_tf)
with tfe.protocol.SecureNN() as prot:
b = prot.define_private_variable(tf.constant(t))
out_tfe = prot.reduce_max(b, axis=1)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(2):
actual = sess.run(out_tfe.reveal(), tag='test_2d_axis1')
np.testing.assert_array_equal(actual, expected)
def test_reduce_max_3d_axis0(self):
t = np.array([1, 2, 3, 4, 5, 6, 7, 8]).reshape(2, 2, 2)
with tf.Session() as sess:
out = tf.reduce_max(t, axis=0)
expected = sess.run(out)
with tfe.protocol.SecureNN() as prot:
b = prot.define_private_variable(tf.constant(t))
out_tfe = prot.reduce_max(b, axis=0)
with tfe.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(2):
actual = sess.run(out_tfe.reveal(), tag='test_3d_axis0')
np.testing.assert_array_equal(actual, expected)
if __name__ == '__main__':
unittest.main()
| 30.061856
| 76
| 0.580247
|
a70be78f82e9b079a603298a6da542545d6fc4ce
| 2,277
|
py
|
Python
|
doc/scripts/new_kernel.py
|
chemlove/radical.ensemblemd
|
0ec4b127760d2fee88d4eae1768fecec4bdd6b21
|
[
"MIT"
] | null | null | null |
doc/scripts/new_kernel.py
|
chemlove/radical.ensemblemd
|
0ec4b127760d2fee88d4eae1768fecec4bdd6b21
|
[
"MIT"
] | null | null | null |
doc/scripts/new_kernel.py
|
chemlove/radical.ensemblemd
|
0ec4b127760d2fee88d4eae1768fecec4bdd6b21
|
[
"MIT"
] | null | null | null |
from radical.ensemblemd.kernel_plugins.kernel_base import KernelBase
# ------------------------------------------------------------------------------
#
_KERNEL_INFO = {
"name": "sleep", # Mandatory
"description": "sleeping kernel", # Optional
"arguments": { # Mandatory
"--interval=": {
"mandatory": True, # Mandatory argument? True or False
"description": "Number of seconds to do nothing."
},
},
"machine_configs": # Use a dictionary with keys as
{ # resource names and values specific
"local.localhost": # to the resource
{
"environment" : None, # dict or None, can be used to set env variables
"pre_exec" : None, # list or None, can be used to load modules
"executable" : ["/bin/sleep"], # specify the executable to be used
"uses_mpi" : False # mpi-enabled? True or False
},
}
}
# ------------------------------------------------------------------------------
#
class MyUserDefinedKernel(KernelBase):
    def __init__(self):
        """The constructor."""
        super(MyUserDefinedKernel, self).__init__(_KERNEL_INFO)
# --------------------------------------------------------------------------
#
@staticmethod
def get_name():
return _KERNEL_INFO["name"]
def _bind_to_resource(self, resource_key):
"""This function binds the Kernel to a specific resource defined in
"resource_key".
"""
arguments = ['{0}'.format(self.get_arg("--interval="))]
self._executable = _KERNEL_INFO["machine_configs"][resource_key]["executable"]
self._arguments = arguments
self._environment = _KERNEL_INFO["machine_configs"][resource_key]["environment"]
self._uses_mpi = _KERNEL_INFO["machine_configs"][resource_key]["uses_mpi"]
self._pre_exec = _KERNEL_INFO["machine_configs"][resource_key]["pre_exec"]
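# Hypothetical usage sketch (assuming the Ensemble MD framework has
# populated the kernel's "--interval=" argument before binding, as it
# does during pattern execution):
#   k = MyUserDefinedKernel()                # with "--interval=10" set,
#   k._bind_to_resource("local.localhost")   # -> runs /bin/sleep 10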
# ------------------------------------------------------------------------------
| 41.4
| 94
| 0.467721
|
36fd7c4e13cfcdedbc820127300c3083245cb73f
| 1,079
|
py
|
Python
|
checkov/kubernetes/checks/resource/k8s/MemoryRequests.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
checkov/kubernetes/checks/resource/k8s/MemoryRequests.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
checkov/kubernetes/checks/resource/k8s/MemoryRequests.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | null | null | null |
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.checks.resource.base_spec_check import BaseK8Check
class MemoryRequests(BaseK8Check):
def __init__(self):
name = "Memory requests should be set"
id = "CKV_K8S_12"
# Location: container .resources.requests.memory
supported_kind = ['containers', 'initContainers']
categories = [CheckCategories.KUBERNETES]
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_kind)
def get_resource_id(self, conf):
return f'{conf["parent"]} - {conf["name"]}' if conf.get('name') else conf["parent"]
def scan_spec_conf(self, conf):
if conf.get("resources"):
if "requests" in conf["resources"]:
if "memory" not in conf["resources"]["requests"]:
return CheckResult.FAILED
else:
return CheckResult.FAILED
else:
return CheckResult.FAILED
return CheckResult.PASSED
check = MemoryRequests()
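# Illustrative (hypothetical) container specs and the result this check
# returns for them:
#   resources: {requests: {memory: "64Mi"}}  -> PASSED
#   resources: {requests: {cpu: "250m"}}     -> FAILED (no memory request)
#   no resources block at all                -> FAILED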
| 34.806452
| 100
| 0.650602
|
2b85296d381b7291d7f65939352149c99b60e83c
| 2,866
|
py
|
Python
|
demo/demo.py
|
mcstro/natural-neighbor-interpolation
|
76ba7bb50c84aef35e993902c46824e5991df45d
|
[
"MIT"
] | 64
|
2017-09-17T00:37:20.000Z
|
2022-02-03T20:16:54.000Z
|
demo/demo.py
|
mcstro/natural-neighbor-interpolation
|
76ba7bb50c84aef35e993902c46824e5991df45d
|
[
"MIT"
] | 5
|
2018-07-27T16:31:35.000Z
|
2020-06-15T02:53:48.000Z
|
demo/demo.py
|
mcstro/natural-neighbor-interpolation
|
76ba7bb50c84aef35e993902c46824e5991df45d
|
[
"MIT"
] | 13
|
2018-06-06T18:51:50.000Z
|
2021-12-26T02:47:05.000Z
|
'''
Comparison of natural neighbor and linear barycentric interpolation.
'''
import numpy as np
import scipy.interpolate
import matplotlib as mpl
mpl.use('Agg') # so it can run on Travis without a display
import matplotlib.pyplot as plt
import naturalneighbor
def error_str(errors):
numerical_error = errors[~np.isnan(errors)]
mean_err = np.mean(numerical_error)
std_err = np.std(numerical_error)
max_err = np.max(numerical_error)
return "(Mean={:.2f}, Std={:.2f} Max={:.2f})".format(mean_err, std_err, max_err)
def compare_interp_for_func(func, func_as_string, image_name):
coord_max = 60
xmax = coord_max
ymax = coord_max
zmax = coord_max
final_shape = (xmax, ymax, zmax)
num_known_points = 100
known_points = np.round(np.random.rand(num_known_points, 3) * np.min([xmax, ymax, zmax]))
grid_ranges = [
[0, xmax, 1],
[0, ymax, 1],
[0, zmax, 1],
]
grid = np.mgrid[0:xmax:1, 0:ymax:1, 0:zmax:1]
known_values = np.array([func(*point) for point in known_points], dtype=np.float64)
true_values = np.reshape([func(x, y, z) for x, y, z in zip(*grid)], final_shape)
linear_interp = scipy.interpolate.griddata(known_points, known_values, tuple(grid), method='linear')
nn_interp = naturalneighbor.griddata(known_points, known_values, grid_ranges)
nn_interp[np.isnan(linear_interp)] = float('nan')
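# Linear barycentric interpolation is undefined outside the convex hull of
# the known points (NaN there), so the natural-neighbor result is masked to
# the same region to keep the error comparison fair.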
nn_interp_slice = nn_interp[:, :, 20]
linear_interp_slice = linear_interp[:, :, 20]
true_values_slice = true_values[:, :, 20]
nn_interp_err = np.abs(nn_interp_slice - true_values_slice)
linear_interp_err = np.abs(linear_interp_slice - true_values_slice)
fig = plt.figure(figsize=(16, 10))
ax1 = fig.add_subplot(2, 3, 1)
ax1.imshow(true_values_slice)
ax1.set_title("True Values\n{}".format(func_as_string))
ax2 = fig.add_subplot(2, 3, 2)
ax2.imshow(nn_interp_err)
nn_error_str = error_str(nn_interp_err)
ax2.set_title("Natural Neighbor Abs Error\n{}".format(nn_error_str))
ax3 = fig.add_subplot(2, 3, 3)
ax3.imshow(linear_interp_err)
linear_error_str = error_str(linear_interp_err)
ax3.set_title("Linear Barycentric Abs Error\n{}".format(linear_error_str))
ax5 = fig.add_subplot(2, 3, 5)
ax5.imshow(nn_interp_slice)
ax5.set_title("Natural Neighbor Values")
ax6 = fig.add_subplot(2, 3, 6)
ax6.imshow(linear_interp_slice)
ax6.set_title("Linear Barycentric Values")
plt.savefig(image_name, dpi=100)
if __name__ == '__main__':
np.random.seed(100)
compare_interp_for_func(
(lambda x, y, z: np.sin(y / 10) + np.sin(x / 10)),
'sin(y/10) + sin(x/10)',
'sin_sin_comparison.png',
)
compare_interp_for_func(
(lambda x, y, z: x + np.sin(x / 10) / 10),
'x + sin(x/10)/10',
'linear_comparison.png',
)
| 30.168421
| 104
| 0.671668
|
d765dc90842041b034636bca6e35759d3c8fac58
| 4,267
|
py
|
Python
|
kubernetes/client/models/extensions_v1beta1_deployment_strategy.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 2
|
2020-06-21T08:03:18.000Z
|
2020-06-21T09:53:29.000Z
|
kubernetes/client/models/extensions_v1beta1_deployment_strategy.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/extensions_v1beta1_deployment_strategy.py
|
L3T/python
|
b6e4ae81a2afb49f668a142eb7d1c6e2571ef478
|
[
"Apache-2.0"
] | 1
|
2020-12-10T07:28:08.000Z
|
2020-12-10T07:28:08.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1DeploymentStrategy(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'rolling_update': 'ExtensionsV1beta1RollingUpdateDeployment',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None): # noqa: E501
"""ExtensionsV1beta1DeploymentStrategy - a model defined in OpenAPI""" # noqa: E501
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
@property
def rolling_update(self):
"""Gets the rolling_update of this ExtensionsV1beta1DeploymentStrategy. # noqa: E501
:return: The rolling_update of this ExtensionsV1beta1DeploymentStrategy. # noqa: E501
:rtype: ExtensionsV1beta1RollingUpdateDeployment
"""
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
"""Sets the rolling_update of this ExtensionsV1beta1DeploymentStrategy.
:param rolling_update: The rolling_update of this ExtensionsV1beta1DeploymentStrategy. # noqa: E501
:type: ExtensionsV1beta1RollingUpdateDeployment
"""
self._rolling_update = rolling_update
@property
def type(self):
"""Gets the type of this ExtensionsV1beta1DeploymentStrategy. # noqa: E501
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate. # noqa: E501
:return: The type of this ExtensionsV1beta1DeploymentStrategy. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ExtensionsV1beta1DeploymentStrategy.
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate. # noqa: E501
:param type: The type of this ExtensionsV1beta1DeploymentStrategy. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionsV1beta1DeploymentStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.262411
| 124
| 0.609562
|
4c31f4bb76ebaf032fe58c1ed8efbd00da52a9c2
| 2,438
|
py
|
Python
|
day22/sol.py
|
samstronghammer/adventofcode2020
|
a03098ce886bbf011e01f5897461e7caac468202
|
[
"MIT"
] | null | null | null |
day22/sol.py
|
samstronghammer/adventofcode2020
|
a03098ce886bbf011e01f5897461e7caac468202
|
[
"MIT"
] | null | null | null |
day22/sol.py
|
samstronghammer/adventofcode2020
|
a03098ce886bbf011e01f5897461e7caac468202
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import os
sys.path.append(f"{os.path.dirname(__file__)}/..")
import util
import math
# Implementation of the classic "war" card game with a recursive
# twist at the end. Very fun :) For part 1 and 2 I used lists
# where the end of the list is the top of the deck. It seemed
# easier.
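# Illustrative example: the deck [3, 2, 10] has 10 on top, so deck.pop()
# draws the 10 first, and deck_score([3, 2, 10]) = 3*1 + 2*2 + 10*3 = 37
# (the bottom card is worth its value times 1).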
def deck_score(deck):
ans = 0
for i, val in enumerate(deck):
ans += ((i + 1) * val)
return ans
def combat(p1_original_deck, p2_original_deck):
p1_deck = p1_original_deck.copy()
p2_deck = p2_original_deck.copy()
while len(p1_deck) > 0 and len(p2_deck) > 0:
top_p1 = p1_deck.pop()
top_p2 = p2_deck.pop()
if top_p1 > top_p2:
p1_deck = [top_p2, top_p1] + p1_deck
else:
p2_deck = [top_p1, top_p2] + p2_deck
return deck_score(p1_deck if len(p1_deck) > 0 else p2_deck)
# Returns a pair of values. A boolean representing the winner (T => p1 and F => p2)
# and the deck score of the winner.
def recursive_combat(p1_original_deck, p2_original_deck):
p1_deck = p1_original_deck.copy()
p2_deck = p2_original_deck.copy()
prev_states = set()
while True:
# End conditions
if len(p1_deck) == 0:
return False, deck_score(p2_deck)
if len(p2_deck) == 0:
return True, deck_score(p1_deck)
game_state = (tuple(p1_deck), tuple(p2_deck))
if game_state in prev_states:
return True, deck_score(p1_deck)
# If didn't terminate, add to set of game states
prev_states.add(game_state)
top_p1 = p1_deck.pop()
top_p2 = p2_deck.pop()
winner = False
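# A sub-game is played only when both players hold at least as many cards
# as the value they just drew; since the top of a deck is the end of its
# list, the sub-decks are copies of the last top_p1 / top_p2 elements.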
if top_p1 <= len(p1_deck) and top_p2 <= len(p2_deck):
winner = recursive_combat(p1_deck[-top_p1:], p2_deck[-top_p2:])[0]
else:
winner = top_p1 > top_p2
if winner:
p1_deck = [top_p2, top_p1] + p1_deck
else:
p2_deck = [top_p1, top_p2] + p2_deck
fn = f"{os.path.dirname(__file__)}/in.txt"
l = util.filetolist(fn)
p1 = []
p2 = []
on_p1 = True
for line in l:
if line:
try:
val = int(line)
if on_p1:
p1.insert(0, val)
else:
p2.insert(0, val)
except:
pass
else:
on_p1 = not on_p1
print("Part 1 Solution:")
print(combat(p1, p2))
print("Part 2 Solution:")
print(recursive_combat(p1, p2)[1])
| 28.022989
| 83
| 0.597621
|
a717eed88b103ba28b6c3cb1a7d7262f9fa49bc8
| 246
|
py
|
Python
|
deposito/api/viewsets.py
|
TooDoo-BlastOff-Desafios/Python-Squad1
|
e5e9523baf20b770aeb6682abb09522a8402ae27
|
[
"MIT"
] | null | null | null |
deposito/api/viewsets.py
|
TooDoo-BlastOff-Desafios/Python-Squad1
|
e5e9523baf20b770aeb6682abb09522a8402ae27
|
[
"MIT"
] | null | null | null |
deposito/api/viewsets.py
|
TooDoo-BlastOff-Desafios/Python-Squad1
|
e5e9523baf20b770aeb6682abb09522a8402ae27
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets
from deposito.api import serializers
from deposito import models
class DepositoViewset(viewsets.ModelViewSet):
serializer_class = serializers.DepositoSerializer
queryset = models.Deposito.objects.all()
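# ModelViewSet wires up the list/retrieve/create/update/partial_update/
# destroy actions automatically; only the serializer class and queryset
# need to be declared here.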
| 35.142857
| 53
| 0.833333
|
533694e6de300a9c4fe71fca6cd42c1ea2334e52
| 831
|
py
|
Python
|
Growth_calc.py
|
Samosborne94/FinanceModel
|
a8faff18538a080ead42602c2b96c61bd8a13021
|
[
"Apache-2.0"
] | null | null | null |
Growth_calc.py
|
Samosborne94/FinanceModel
|
a8faff18538a080ead42602c2b96c61bd8a13021
|
[
"Apache-2.0"
] | null | null | null |
Growth_calc.py
|
Samosborne94/FinanceModel
|
a8faff18538a080ead42602c2b96c61bd8a13021
|
[
"Apache-2.0"
] | 1
|
2021-09-26T03:57:42.000Z
|
2021-09-26T03:57:42.000Z
|
#Portfolio Growth Calculator (incorporating asset allocation decisions)
#User Inputs:
num1 = input('Waiting Time (Years): ')
num2 = input('Equity Allocation (Dollars): ')
num3 = input('Bond Allocation (Dollars): ')
#Transform string inputs into floats and integers for calculation:
a = int(float(num1))
b = int(float(num2))
c = int(float(num3))
#Calculate the customer's asset allocation:
d = b/(b+c)
e = c/(b+c)
#Alternatively, customer can specify these expected returns. We fix them for simplicity:
stock = 0.08
bond = 0.03
#Compound Interest: grow each dollar allocation at its expected return
#(d and e above give the allocation mix; they are not growth weights)
projected_worth = int(b*(1+stock)**a + c*(1+bond)**a)
print("Projected Net Worth (Dollars):", projected_worth)
#Doubling Time Calculator: rule of 72 applied to the portfolio CAGR
multiple = projected_worth / (b+c)
print("Doubling Time (Years):", int(72/(100*(multiple**(1/a)-1))))
| 29.678571
| 88
| 0.695548
|
ce545c49f8f43be4b066813a2f1e86a586b88553
| 40,315
|
py
|
Python
|
pygments/lexers/asm.py
|
eerimoq/pygments
|
3cd60987c27d2228ac46bfa2648e280aaaf61fc1
|
[
"BSD-2-Clause"
] | 1
|
2021-12-27T22:40:31.000Z
|
2021-12-27T22:40:31.000Z
|
pygments/lexers/asm.py
|
eerimoq/pygments
|
3cd60987c27d2228ac46bfa2648e280aaaf61fc1
|
[
"BSD-2-Clause"
] | null | null | null |
pygments/lexers/asm.py
|
eerimoq/pygments
|
3cd60987c27d2228ac46bfa2648e280aaaf61fc1
|
[
"BSD-2-Clause"
] | null | null | null |
"""
pygments.lexers.asm
~~~~~~~~~~~~~~~~~~~
Lexers for assembly languages.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, words, \
DelegatingLexer, default
from pygments.lexers.c_cpp import CppLexer, CLexer
from pygments.lexers.d import DLexer
from pygments.token import Text, Name, Number, String, Comment, Punctuation, \
Other, Keyword, Operator, Literal
__all__ = ['GasLexer', 'ObjdumpLexer', 'DObjdumpLexer', 'CppObjdumpLexer',
'CObjdumpLexer', 'HsailLexer', 'LlvmLexer', 'LlvmMirBodyLexer',
'LlvmMirLexer', 'NasmLexer', 'NasmObjdumpLexer', 'TasmLexer',
'Ca65Lexer', 'Dasm16Lexer']
class GasLexer(RegexLexer):
"""
For Gas (AT&T) assembly code.
"""
name = 'GAS'
aliases = ['gas', 'asm']
filenames = ['*.s', '*.S']
mimetypes = ['text/x-gas']
#: optional Comment or Whitespace
string = r'"(\\"|[^"])*"'
char = r'[\w$.@-]'
identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
number = r'(?:0[xX][a-fA-F0-9]+|#?-?\d+)'
register = '%' + identifier
tokens = {
'root': [
include('whitespace'),
(identifier + ':', Name.Label),
(r'\.' + identifier, Name.Attribute, 'directive-args'),
(r'lock|rep(n?z)?|data\d+', Name.Attribute),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'directive-args': [
(identifier, Name.Constant),
(string, String),
('@' + identifier, Name.Attribute),
(number, Number.Integer),
(register, Name.Variable),
(r'[\r\n]+', Text, '#pop'),
(r'([;#]|//).*?\n', Comment.Single, '#pop'),
(r'/[*].*?[*]/', Comment.Multiline),
(r'/[*].*?\n[\w\W]*?[*]/', Comment.Multiline, '#pop'),
include('punctuation'),
include('whitespace')
],
'instruction-args': [
# For objdump-disassembled code, shouldn't occur in
# actual assembler input
('([a-z0-9]+)( )(<)('+identifier+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation)),
('([a-z0-9]+)( )(<)('+identifier+')([-+])('+number+')(>)',
bygroups(Number.Hex, Text, Punctuation, Name.Constant,
Punctuation, Number.Integer, Punctuation)),
# Address constants
(identifier, Name.Constant),
(number, Number.Integer),
# Registers
(register, Name.Variable),
# Numeric constants
('$'+number, Number.Integer),
(r"$'(.|\\')'", String.Char),
(r'[\r\n]+', Text, '#pop'),
(r'([;#]|//).*?\n', Comment.Single, '#pop'),
(r'/[*].*?[*]/', Comment.Multiline),
(r'/[*].*?\n[\w\W]*?[*]/', Comment.Multiline, '#pop'),
include('punctuation'),
include('whitespace')
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'([;#]|//).*?\n', Comment.Single),
(r'/[*][\w\W]*?[*]/', Comment.Multiline)
],
'punctuation': [
(r'[-*,.()\[\]!:]+', Punctuation)
]
}
def analyse_text(text):
if re.search(r'^\.(text|data|section)', text, re.M):
return True
elif re.search(r'^\.\w+', text, re.M):
return 0.1
def _objdump_lexer_tokens(asm_lexer):
"""
Common objdump lexer tokens to wrap an ASM lexer.
"""
hex_re = r'[0-9A-Za-z]'
return {
'root': [
# File name & format:
('(.*?)(:)( +file format )(.*?)$',
bygroups(Name.Label, Punctuation, Text, String)),
# Section header
('(Disassembly of section )(.*?)(:)$',
bygroups(Text, Name.Label, Punctuation)),
# Function labels
# (With offset)
('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation, Number.Hex, Punctuation)),
# (Without offset)
('('+hex_re+'+)( )(<)(.*?)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation)),
# Code line with disassembled instructions
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text,
using(asm_lexer))),
# Code line with ascii
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
# Continued code line, only raw opcodes without disassembled
# instruction
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
bygroups(Text, Name.Label, Text, Number.Hex)),
# Skipped a few bytes
(r'\t\.\.\.$', Text),
# Relocation line
# (With offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant, Punctuation, Number.Hex)),
# (Without offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant)),
(r'[^\n]+\n', Other)
]
}
class ObjdumpLexer(RegexLexer):
"""
For the output of ``objdump -dr``.
"""
name = 'objdump'
aliases = ['objdump']
filenames = ['*.objdump']
mimetypes = ['text/x-objdump']
tokens = _objdump_lexer_tokens(GasLexer)
class DObjdumpLexer(DelegatingLexer):
"""
For the output of ``objdump -Sr`` on compiled D files.
"""
name = 'd-objdump'
aliases = ['d-objdump']
filenames = ['*.d-objdump']
mimetypes = ['text/x-d-objdump']
def __init__(self, **options):
super().__init__(DLexer, ObjdumpLexer, **options)
class CppObjdumpLexer(DelegatingLexer):
"""
For the output of ``objdump -Sr`` on compiled C++ files.
"""
name = 'cpp-objdump'
aliases = ['cpp-objdump', 'c++-objdumb', 'cxx-objdump']
filenames = ['*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump']
mimetypes = ['text/x-cpp-objdump']
def __init__(self, **options):
super().__init__(CppLexer, ObjdumpLexer, **options)
class CObjdumpLexer(DelegatingLexer):
"""
For the output of ``objdump -Sr`` on compiled C files.
"""
name = 'c-objdump'
aliases = ['c-objdump']
filenames = ['*.c-objdump']
mimetypes = ['text/x-c-objdump']
def __init__(self, **options):
super().__init__(CLexer, ObjdumpLexer, **options)
class HsailLexer(RegexLexer):
"""
For HSAIL assembly code.
.. versionadded:: 2.2
"""
name = 'HSAIL'
aliases = ['hsail', 'hsa']
filenames = ['*.hsail']
mimetypes = ['text/x-hsail']
string = r'"[^"]*?"'
identifier = r'[a-zA-Z_][\w.]*'
# Registers
register_number = r'[0-9]+'
register = r'(\$(c|s|d|q)' + register_number + ')'
# Qualifiers
alignQual = r'(align\(\d+\))'
widthQual = r'(width\((\d+|all)\))'
allocQual = r'(alloc\(agent\))'
# Instruction Modifiers
roundingMod = (r'((_ftz)?(_up|_down|_zero|_near))')
datatypeMod = (r'_('
# packedTypes
r'u8x4|s8x4|u16x2|s16x2|u8x8|s8x8|u16x4|s16x4|u32x2|s32x2|'
r'u8x16|s8x16|u16x8|s16x8|u32x4|s32x4|u64x2|s64x2|'
r'f16x2|f16x4|f16x8|f32x2|f32x4|f64x2|'
# baseTypes
r'u8|s8|u16|s16|u32|s32|u64|s64|'
r'b128|b8|b16|b32|b64|b1|'
r'f16|f32|f64|'
# opaqueType
r'roimg|woimg|rwimg|samp|sig32|sig64)')
# Numeric Constant
float = r'((\d+\.)|(\d*\.\d+))[eE][+-]?\d+'
hexfloat = r'0[xX](([0-9a-fA-F]+\.[0-9a-fA-F]*)|([0-9a-fA-F]*\.[0-9a-fA-F]+))[pP][+-]?\d+'
ieeefloat = r'0((h|H)[0-9a-fA-F]{4}|(f|F)[0-9a-fA-F]{8}|(d|D)[0-9a-fA-F]{16})'
tokens = {
'root': [
include('whitespace'),
include('comments'),
(string, String),
(r'@' + identifier + ':?', Name.Label),
(register, Name.Variable.Anonymous),
include('keyword'),
(r'&' + identifier, Name.Variable.Global),
(r'%' + identifier, Name.Variable),
(hexfloat, Number.Hex),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(ieeefloat, Number.Float),
(float, Number.Float),
(r'\d+', Number.Integer),
(r'[=<>{}\[\]()*.,:;!]|x\b', Punctuation)
],
'whitespace': [
(r'(\n|\s)+', Text),
],
'comments': [
(r'/\*.*?\*/', Comment.Multiline),
(r'//.*?\n', Comment.Single),
],
'keyword': [
# Types
(r'kernarg' + datatypeMod, Keyword.Type),
# Regular keywords
(r'\$(full|base|small|large|default|zero|near)', Keyword),
(words((
'module', 'extension', 'pragma', 'prog', 'indirect', 'signature',
'decl', 'kernel', 'function', 'enablebreakexceptions',
'enabledetectexceptions', 'maxdynamicgroupsize', 'maxflatgridsize',
'maxflatworkgroupsize', 'requireddim', 'requiredgridsize',
'requiredworkgroupsize', 'requirenopartialworkgroups'),
suffix=r'\b'), Keyword),
# instructions
(roundingMod, Keyword),
(datatypeMod, Keyword),
(r'_(' + alignQual + '|' + widthQual + ')', Keyword),
(r'_kernarg', Keyword),
(r'(nop|imagefence)\b', Keyword),
(words((
'cleardetectexcept', 'clock', 'cuid', 'debugtrap', 'dim',
'getdetectexcept', 'groupbaseptr', 'kernargbaseptr', 'laneid',
'maxcuid', 'maxwaveid', 'packetid', 'setdetectexcept', 'waveid',
'workitemflatabsid', 'workitemflatid', 'nullptr', 'abs', 'bitrev',
'currentworkgroupsize', 'currentworkitemflatid', 'fract', 'ncos',
'neg', 'nexp2', 'nlog2', 'nrcp', 'nrsqrt', 'nsin', 'nsqrt',
'gridgroups', 'gridsize', 'not', 'sqrt', 'workgroupid',
'workgroupsize', 'workitemabsid', 'workitemid', 'ceil', 'floor',
'rint', 'trunc', 'add', 'bitmask', 'borrow', 'carry', 'copysign',
'div', 'rem', 'sub', 'shl', 'shr', 'and', 'or', 'xor', 'unpackhi',
'unpacklo', 'max', 'min', 'fma', 'mad', 'bitextract', 'bitselect',
'shuffle', 'cmov', 'bitalign', 'bytealign', 'lerp', 'nfma', 'mul',
'mulhi', 'mul24hi', 'mul24', 'mad24', 'mad24hi', 'bitinsert',
'combine', 'expand', 'lda', 'mov', 'pack', 'unpack', 'packcvt',
'unpackcvt', 'sad', 'sementp', 'ftos', 'stof', 'cmp', 'ld', 'st',
'_eq', '_ne', '_lt', '_le', '_gt', '_ge', '_equ', '_neu', '_ltu',
'_leu', '_gtu', '_geu', '_num', '_nan', '_seq', '_sne', '_slt',
'_sle', '_sgt', '_sge', '_snum', '_snan', '_sequ', '_sneu', '_sltu',
'_sleu', '_sgtu', '_sgeu', 'atomic', '_ld', '_st', '_cas', '_add',
'_and', '_exch', '_max', '_min', '_or', '_sub', '_wrapdec',
'_wrapinc', '_xor', 'ret', 'cvt', '_readonly', '_kernarg', '_global',
'br', 'cbr', 'sbr', '_scacq', '_screl', '_scar', '_rlx', '_wave',
'_wg', '_agent', '_system', 'ldimage', 'stimage', '_v2', '_v3', '_v4',
'_1d', '_2d', '_3d', '_1da', '_2da', '_1db', '_2ddepth', '_2dadepth',
'_width', '_height', '_depth', '_array', '_channelorder',
'_channeltype', 'querysampler', '_coord', '_filter', '_addressing',
'barrier', 'wavebarrier', 'initfbar', 'joinfbar', 'waitfbar',
'arrivefbar', 'leavefbar', 'releasefbar', 'ldf', 'activelaneid',
'activelanecount', 'activelanemask', 'activelanepermute', 'call',
'scall', 'icall', 'alloca', 'packetcompletionsig',
'addqueuewriteindex', 'casqueuewriteindex', 'ldqueuereadindex',
'stqueuereadindex', 'readonly', 'global', 'private', 'group',
'spill', 'arg', '_upi', '_downi', '_zeroi', '_neari', '_upi_sat',
'_downi_sat', '_zeroi_sat', '_neari_sat', '_supi', '_sdowni',
'_szeroi', '_sneari', '_supi_sat', '_sdowni_sat', '_szeroi_sat',
'_sneari_sat', '_pp', '_ps', '_sp', '_ss', '_s', '_p', '_pp_sat',
'_ps_sat', '_sp_sat', '_ss_sat', '_s_sat', '_p_sat')), Keyword),
# Integer types
(r'i[1-9]\d*', Keyword)
]
}
class LlvmLexer(RegexLexer):
"""
For LLVM assembly code.
"""
name = 'LLVM'
aliases = ['llvm']
filenames = ['*.ll']
mimetypes = ['text/x-llvm']
#: optional Comment or Whitespace
string = r'"[^"]*?"'
identifier = r'([-a-zA-Z$._][\w\-$.]*|' + string + ')'
tokens = {
'root': [
include('whitespace'),
# Before keywords, because keywords are valid label names :(...
(identifier + r'\s*:', Name.Label),
include('keyword'),
(r'%' + identifier, Name.Variable),
(r'@' + identifier, Name.Variable.Global),
(r'%\d+', Name.Variable.Anonymous),
(r'@\d+', Name.Variable.Global),
(r'#\d+', Name.Variable.Global),
(r'!' + identifier, Name.Variable),
(r'!\d+', Name.Variable.Anonymous),
(r'c?' + string, String),
(r'0[xX][a-fA-F0-9]+', Number),
(r'-?\d+(?:[.]\d+)?(?:[eE][-+]?\d+(?:[.]\d+)?)?', Number),
(r'[=<>{}\[\]()*.,!]|x\b', Punctuation)
],
'whitespace': [
(r'(\n|\s)+', Text),
(r';.*?\n', Comment)
],
'keyword': [
# Regular keywords
(words((
'aarch64_sve_vector_pcs', 'aarch64_vector_pcs', 'acq_rel',
'acquire', 'add', 'addrspace', 'addrspacecast', 'afn', 'alias',
'aliasee', 'align', 'alignLog2', 'alignstack', 'alloca',
'allocsize', 'allOnes', 'alwaysinline', 'alwaysInline',
'amdgpu_cs', 'amdgpu_es', 'amdgpu_gfx', 'amdgpu_gs',
'amdgpu_hs', 'amdgpu_kernel', 'amdgpu_ls', 'amdgpu_ps',
'amdgpu_vs', 'and', 'any', 'anyregcc', 'appending', 'arcp',
'argmemonly', 'args', 'arm_aapcs_vfpcc', 'arm_aapcscc',
'arm_apcscc', 'ashr', 'asm', 'atomic', 'atomicrmw',
'attributes', 'available_externally', 'avr_intrcc',
'avr_signalcc', 'bit', 'bitcast', 'bitMask', 'blockaddress',
'blockcount', 'br', 'branchFunnel', 'builtin', 'byArg',
'byref', 'byte', 'byteArray', 'byval', 'c', 'call', 'callbr',
'callee', 'caller', 'calls', 'canAutoHide', 'catch',
'catchpad', 'catchret', 'catchswitch', 'cc', 'ccc',
'cfguard_checkcc', 'cleanup', 'cleanuppad', 'cleanupret',
'cmpxchg', 'cold', 'coldcc', 'comdat', 'common', 'constant',
'contract', 'convergent', 'critical', 'cxx_fast_tlscc',
'datalayout', 'declare', 'default', 'define', 'deplibs',
'dereferenceable', 'dereferenceable_or_null', 'distinct',
'dllexport', 'dllimport', 'dso_local', 'dso_local_equivalent',
'dso_preemptable', 'dsoLocal', 'eq', 'exact', 'exactmatch',
'extern_weak', 'external', 'externally_initialized',
'extractelement', 'extractvalue', 'fadd', 'false', 'fast',
'fastcc', 'fcmp', 'fdiv', 'fence', 'filter', 'flags', 'fmul',
'fneg', 'fpext', 'fptosi', 'fptoui', 'fptrunc', 'freeze',
'frem', 'from', 'fsub', 'funcFlags', 'function', 'gc',
'getelementptr', 'ghccc', 'global', 'guid', 'gv', 'hash',
'hhvm_ccc', 'hhvmcc', 'hidden', 'hot', 'hotness', 'icmp',
'ifunc', 'inaccessiblemem_or_argmemonly',
'inaccessiblememonly', 'inalloca', 'inbounds', 'indir',
'indirectbr', 'info', 'initialexec', 'inline', 'inlineBits',
'inlinehint', 'inrange', 'inreg', 'insertelement',
'insertvalue', 'insts', 'intel_ocl_bicc', 'inteldialect',
'internal', 'inttoptr', 'invoke', 'jumptable', 'kind',
'landingpad', 'largest', 'linkage', 'linkonce', 'linkonce_odr',
'live', 'load', 'local_unnamed_addr', 'localdynamic',
'localexec', 'lshr', 'max', 'metadata', 'min', 'minsize',
'module', 'monotonic', 'msp430_intrcc', 'mul', 'mustprogress',
'musttail', 'naked', 'name', 'nand', 'ne', 'nest', 'ninf',
'nnan', 'noalias', 'nobuiltin', 'nocallback', 'nocapture',
'nocf_check', 'noduplicate', 'noduplicates', 'nofree',
'noimplicitfloat', 'noinline', 'noInline', 'nomerge', 'none',
'nonlazybind', 'nonnull', 'noprofile', 'norecurse',
'noRecurse', 'noredzone', 'noreturn', 'nosync', 'notail',
'notEligibleToImport', 'noundef', 'nounwind', 'nsw',
'nsz', 'null', 'null_pointer_is_valid', 'nuw', 'oeq', 'offset',
'oge', 'ogt', 'ole', 'olt', 'one', 'opaque', 'optforfuzzing',
'optnone', 'optsize', 'or', 'ord', 'param', 'params',
'partition', 'path', 'personality', 'phi', 'poison',
'preallocated', 'prefix', 'preserve_allcc', 'preserve_mostcc',
'private', 'prologue', 'protected', 'ptrtoint', 'ptx_device',
'ptx_kernel', 'readnone', 'readNone', 'readonly', 'readOnly',
'reassoc', 'refs', 'relbf', 'release', 'resByArg', 'resume',
'ret', 'returnDoesNotAlias', 'returned', 'returns_twice',
'safestack', 'samesize', 'sanitize_address',
'sanitize_hwaddress', 'sanitize_memory', 'sanitize_memtag',
'sanitize_thread', 'sdiv', 'section', 'select', 'seq_cst',
'sext', 'sge', 'sgt', 'shadowcallstack', 'shl',
'shufflevector', 'sideeffect', 'signext', 'single',
'singleImpl', 'singleImplName', 'sitofp', 'sizeM1',
'sizeM1BitWidth', 'sle', 'slt', 'source_filename',
'speculatable', 'speculative_load_hardening', 'spir_func',
'spir_kernel', 'srem', 'sret', 'ssp', 'sspreq', 'sspstrong',
'store', 'strictfp', 'sub', 'summaries', 'summary', 'swiftcc',
'swifterror', 'swiftself', 'switch', 'syncscope', 'tail',
'tailcc', 'target', 'thread_local', 'to', 'token', 'triple',
'true', 'trunc', 'type', 'typeCheckedLoadConstVCalls',
'typeCheckedLoadVCalls', 'typeid', 'typeidCompatibleVTable',
'typeIdInfo', 'typeTestAssumeConstVCalls',
'typeTestAssumeVCalls', 'typeTestRes', 'typeTests', 'udiv',
'ueq', 'uge', 'ugt', 'uitofp', 'ule', 'ult', 'umax', 'umin',
'undef', 'une', 'uniformRetVal', 'uniqueRetVal', 'unknown',
'unnamed_addr', 'uno', 'unordered', 'unreachable', 'unsat',
'unwind', 'urem', 'uselistorder', 'uselistorder_bb', 'uwtable',
'va_arg', 'varFlags', 'variable', 'vcall_visibility',
'vFuncId', 'virtFunc', 'virtualConstProp', 'void', 'volatile',
'vscale', 'vTableFuncs', 'weak', 'weak_odr', 'webkit_jscc',
'win64cc', 'within', 'wpdRes', 'wpdResolutions', 'writeonly',
'x', 'x86_64_sysvcc', 'x86_fastcallcc', 'x86_intrcc',
'x86_mmx', 'x86_regcallcc', 'x86_stdcallcc', 'x86_thiscallcc',
'x86_vectorcallcc', 'xchg', 'xor', 'zeroext',
'zeroinitializer', 'zext', 'immarg', 'willreturn'),
suffix=r'\b'), Keyword),
# Types
(words(('void', 'half', 'bfloat', 'float', 'double', 'fp128',
'x86_fp80', 'ppc_fp128', 'label', 'metadata', 'x86_mmx',
'x86_amx', 'token')),
Keyword.Type),
# Integer types
(r'i[1-9]\d*', Keyword.Type)
]
}
class LlvmMirBodyLexer(RegexLexer):
"""
For LLVM MIR examples without the YAML wrapper.
For more information on LLVM MIR see https://llvm.org/docs/MIRLangRef.html.
.. versionadded:: 2.6
"""
name = 'LLVM-MIR Body'
aliases = ['llvm-mir-body']
filenames = []
mimetypes = []
tokens = {
'root': [
# Attributes on basic blocks
(words(('liveins', 'successors'), suffix=':'), Keyword),
# Basic Block Labels
(r'bb\.[0-9]+(\.[a-zA-Z0-9_.-]+)?( \(address-taken\))?:', Name.Label),
(r'bb\.[0-9]+ \(%[a-zA-Z0-9_.-]+\)( \(address-taken\))?:', Name.Label),
(r'%bb\.[0-9]+(\.\w+)?', Name.Label),
# Stack references
(r'%stack\.[0-9]+(\.\w+\.addr)?', Name),
# Subreg indices
(r'%subreg\.\w+', Name),
# Virtual registers
(r'%[a-zA-Z0-9_]+ *', Name.Variable, 'vreg'),
# Reference to LLVM-IR global
include('global'),
# Reference to Intrinsic
(r'intrinsic\(\@[a-zA-Z0-9_.]+\)', Name.Variable.Global),
# Comparison predicates
(words(('eq', 'ne', 'sgt', 'sge', 'slt', 'sle', 'ugt', 'uge', 'ult',
'ule'), prefix=r'intpred\(', suffix=r'\)'), Name.Builtin),
(words(('oeq', 'one', 'ogt', 'oge', 'olt', 'ole', 'ugt', 'uge',
'ult', 'ule'), prefix=r'floatpred\(', suffix=r'\)'),
Name.Builtin),
# Physical registers
(r'\$\w+', String.Single),
# Assignment operator
(r'=', Operator),
# gMIR Opcodes
(r'(G_ANYEXT|G_[SZ]EXT|G_SEXT_INREG|G_TRUNC|G_IMPLICIT_DEF|G_PHI|'
r'G_FRAME_INDEX|G_GLOBAL_VALUE|G_INTTOPTR|G_PTRTOINT|G_BITCAST|'
r'G_CONSTANT|G_FCONSTANT|G_VASTART|G_VAARG|G_CTLZ|G_CTLZ_ZERO_UNDEF|'
r'G_CTTZ|G_CTTZ_ZERO_UNDEF|G_CTPOP|G_BSWAP|G_BITREVERSE|'
r'G_ADDRSPACE_CAST|G_BLOCK_ADDR|G_JUMP_TABLE|G_DYN_STACKALLOC|'
r'G_ADD|G_SUB|G_MUL|G_[SU]DIV|G_[SU]REM|G_AND|G_OR|G_XOR|G_SHL|'
r'G_[LA]SHR|G_[IF]CMP|G_SELECT|G_GEP|G_PTR_MASK|G_SMIN|G_SMAX|'
r'G_UMIN|G_UMAX|G_[US]ADDO|G_[US]ADDE|G_[US]SUBO|G_[US]SUBE|'
r'G_[US]MULO|G_[US]MULH|G_FNEG|G_FPEXT|G_FPTRUNC|G_FPTO[US]I|'
r'G_[US]ITOFP|G_FABS|G_FCOPYSIGN|G_FCANONICALIZE|G_FMINNUM|'
r'G_FMAXNUM|G_FMINNUM_IEEE|G_FMAXNUM_IEEE|G_FMINIMUM|G_FMAXIMUM|'
r'G_FADD|G_FSUB|G_FMUL|G_FMA|G_FMAD|G_FDIV|G_FREM|G_FPOW|G_FEXP|'
r'G_FEXP2|G_FLOG|G_FLOG2|G_FLOG10|G_FCEIL|G_FCOS|G_FSIN|G_FSQRT|'
r'G_FFLOOR|G_FRINT|G_FNEARBYINT|G_INTRINSIC_TRUNC|'
r'G_INTRINSIC_ROUND|G_LOAD|G_[ZS]EXTLOAD|G_INDEXED_LOAD|'
r'G_INDEXED_[ZS]EXTLOAD|G_STORE|G_INDEXED_STORE|'
r'G_ATOMIC_CMPXCHG_WITH_SUCCESS|G_ATOMIC_CMPXCHG|'
r'G_ATOMICRMW_(XCHG|ADD|SUB|AND|NAND|OR|XOR|MAX|MIN|UMAX|UMIN|FADD|'
r'FSUB)'
r'|G_FENCE|G_EXTRACT|G_UNMERGE_VALUES|G_INSERT|G_MERGE_VALUES|'
r'G_BUILD_VECTOR|G_BUILD_VECTOR_TRUNC|G_CONCAT_VECTORS|'
r'G_INTRINSIC|G_INTRINSIC_W_SIDE_EFFECTS|G_BR|G_BRCOND|'
r'G_BRINDIRECT|G_BRJT|G_INSERT_VECTOR_ELT|G_EXTRACT_VECTOR_ELT|'
r'G_SHUFFLE_VECTOR)\b',
Name.Builtin),
# Target independent opcodes
(r'(COPY|PHI|INSERT_SUBREG|EXTRACT_SUBREG|REG_SEQUENCE)\b',
Name.Builtin),
# Flags
(words(('killed', 'implicit')), Keyword),
# ConstantInt values
(r'i[0-9]+ +', Keyword.Type, 'constantint'),
# ConstantFloat values
(r'(half|float|double) +', Keyword.Type, 'constantfloat'),
# Bare immediates
include('integer'),
# MMO's
(r':: *', Operator, 'mmo'),
# MIR Comments
(r';.*', Comment),
# If we get here, assume it's a target instruction
(r'[a-zA-Z0-9_]+', Name),
# Everything else that isn't highlighted
(r'[(), \n]+', Text),
],
# The integer constant from a ConstantInt value
'constantint': [
include('integer'),
(r'(?=.)', Text, '#pop'),
],
# The floating point constant from a ConstantFloat value
'constantfloat': [
include('float'),
(r'(?=.)', Text, '#pop'),
],
'vreg': [
# The bank or class if there is one
(r' *:(?!:)', Keyword, ('#pop', 'vreg_bank_or_class')),
# The LLT if there is one
(r' *\(', Text, 'vreg_type'),
(r'(?=.)', Text, '#pop'),
],
'vreg_bank_or_class': [
# The unassigned bank/class
(r' *_', Name.Variable.Magic),
(r' *[a-zA-Z0-9_]+', Name.Variable),
# The LLT if there is one
(r' *\(', Text, 'vreg_type'),
(r'(?=.)', Text, '#pop'),
],
'vreg_type': [
# Scalar and pointer types
(r' *[sp][0-9]+', Keyword.Type),
(r' *<[0-9]+ *x *[sp][0-9]+>', Keyword.Type),
(r'\)', Text, '#pop'),
(r'(?=.)', Text, '#pop'),
],
'mmo': [
(r'\(', Text),
(r' +', Text),
(words(('load', 'store', 'on', 'into', 'from', 'align', 'monotonic',
'acquire', 'release', 'acq_rel', 'seq_cst')),
Keyword),
# IR references
(r'%ir\.[a-zA-Z0-9_.-]+', Name),
(r'%ir-block\.[a-zA-Z0-9_.-]+', Name),
(r'[-+]', Operator),
include('integer'),
include('global'),
(r',', Punctuation),
(r'\), \(', Text),
(r'\)', Text, '#pop'),
],
'integer': [(r'-?[0-9]+', Number.Integer),],
'float': [(r'-?[0-9]+\.[0-9]+(e[+-][0-9]+)?', Number.Float)],
'global': [(r'\@[a-zA-Z0-9_.]+', Name.Variable.Global)],
}
class LlvmMirLexer(RegexLexer):
"""
Lexer for the overall LLVM MIR document format.
MIR is a human readable serialization format that's used to represent LLVM's
machine specific intermediate representation. It allows LLVM's developers to
see the state of the compilation process at various points, as well as test
individual pieces of the compiler.
For more information on LLVM MIR see https://llvm.org/docs/MIRLangRef.html.
.. versionadded:: 2.6
"""
name = 'LLVM-MIR'
aliases = ['llvm-mir']
filenames = ['*.mir']
tokens = {
'root': [
# Comments are hashes at the YAML level
(r'#.*', Comment),
# Documents starting with | are LLVM-IR
(r'--- \|$', Keyword, 'llvm_ir'),
# Other documents are MIR
(r'---', Keyword, 'llvm_mir'),
# Consume everything else in one token for efficiency
(r'[^-#]+|.', Text),
],
'llvm_ir': [
# Documents end with '...' or '---'
(r'(\.\.\.|(?=---))', Keyword, '#pop'),
# Delegate to the LlvmLexer
(r'((?:.|\n)+?)(?=(\.\.\.|---))', bygroups(using(LlvmLexer))),
],
'llvm_mir': [
# Comments are hashes at the YAML level
(r'#.*', Comment),
# Documents end with '...' or '---'
(r'(\.\.\.|(?=---))', Keyword, '#pop'),
# Handle the simple attributes
(r'name:', Keyword, 'name'),
(words(('alignment', ),
suffix=':'), Keyword, 'number'),
(words(('legalized', 'regBankSelected', 'tracksRegLiveness',
'selected', 'exposesReturnsTwice'),
suffix=':'), Keyword, 'boolean'),
# Handle the attributes don't highlight inside
(words(('registers', 'stack', 'fixedStack', 'liveins', 'frameInfo',
'machineFunctionInfo'),
suffix=':'), Keyword),
# Delegate the body block to the LlvmMirBodyLexer
(r'body: *\|', Keyword, 'llvm_mir_body'),
# Consume everything else
(r'.+', Text),
(r'\n', Text),
],
'name': [
(r'[^\n]+', Name),
default('#pop'),
],
'boolean': [
(r' *(true|false)', Name.Builtin),
default('#pop'),
],
'number': [
(r' *[0-9]+', Number),
default('#pop'),
],
'llvm_mir_body': [
# Documents end with '...' or '---'.
# We have to pop llvm_mir_body and llvm_mir
(r'(\.\.\.|(?=---))', Keyword, '#pop:2'),
# Delegate the body block to the LlvmMirBodyLexer
(r'((?:.|\n)+?)(?=\.\.\.|---)', bygroups(using(LlvmMirBodyLexer))),
# The '...' is optional. If we didn't already find it then it isn't
# there. There might be a '---' instead though.
(r'(?!\.\.\.|---)((?:.|\n)+)', bygroups(using(LlvmMirBodyLexer))),
],
}
class NasmLexer(RegexLexer):
"""
For Nasm (Intel) assembly code.
"""
name = 'NASM'
aliases = ['nasm']
filenames = ['*.asm', '*.ASM']
mimetypes = ['text/x-nasm']
# Tasm uses the same file endings, but TASM is not as common as NASM, so
# we prioritize NASM higher by default
priority = 1.0
identifier = r'[a-z$._?][\w$.?#@~]*'
hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)'
octn = r'[0-7]+q'
binn = r'[01]+b'
decn = r'[0-9]+'
floatn = decn + r'\.e?' + decn
string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`"
declkw = r'(?:res|d)[bwdqt]|times'
register = (r'r[0-9][0-5]?[bwd]?|'
r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|'
r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]')
wordop = r'seg|wrt|strict'
type = r'byte|[dq]?word'
# Directives must be followed by whitespace, otherwise CPU will match
# cpuid for instance.
directives = (r'(?:BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
r'ORG|ALIGN|STRUC|ENDSTRUC|COMMON|CPU|GROUP|UPPERCASE|IMPORT|'
r'EXPORT|LIBRARY|MODULE)\s+')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^\s*%', Comment.Preproc, 'preproc'),
include('whitespace'),
(identifier + ':', Name.Label),
(r'(%s)(\s+)(equ)' % identifier,
bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration),
'instruction-args'),
(directives, Keyword, 'instruction-args'),
(declkw, Keyword.Declaration, 'instruction-args'),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'instruction-args': [
(string, String),
(hexn, Number.Hex),
(octn, Number.Oct),
(binn, Number.Bin),
(floatn, Number.Float),
(decn, Number.Integer),
include('punctuation'),
(register, Name.Builtin),
(identifier, Name.Variable),
(r'[\r\n]+', Text, '#pop'),
include('whitespace')
],
'preproc': [
(r'[^;\n]+', Comment.Preproc),
(r';.*?\n', Comment.Single, '#pop'),
(r'\n', Comment.Preproc, '#pop'),
],
'whitespace': [
(r'\n', Text),
(r'[ \t]+', Text),
(r';.*', Comment.Single)
],
'punctuation': [
(r'[,():\[\]]+', Punctuation),
(r'[&|^<>+*/%~-]+', Operator),
(r'[$]+', Keyword.Constant),
(wordop, Operator.Word),
(type, Keyword.Type)
],
}
def analyse_text(text):
# Probably TASM
if re.match(r'PROC', text, re.IGNORECASE):
return False
class NasmObjdumpLexer(ObjdumpLexer):
"""
For the output of ``objdump -d -M intel``.
.. versionadded:: 2.0
"""
name = 'objdump-nasm'
aliases = ['objdump-nasm']
filenames = ['*.objdump-intel']
mimetypes = ['text/x-nasm-objdump']
tokens = _objdump_lexer_tokens(NasmLexer)
class TasmLexer(RegexLexer):
"""
For Tasm (Turbo Assembler) assembly code.
"""
name = 'TASM'
aliases = ['tasm']
filenames = ['*.asm', '*.ASM', '*.tasm']
mimetypes = ['text/x-tasm']
identifier = r'[@a-z$._?][\w$.?#@~]*'
hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)'
octn = r'[0-7]+q'
binn = r'[01]+b'
decn = r'[0-9]+'
floatn = decn + r'\.e?' + decn
string = r'"(\\"|[^"\n])*"|' + r"'(\\'|[^'\n])*'|" + r"`(\\`|[^`\n])*`"
declkw = r'(?:res|d)[bwdqt]|times'
register = (r'r[0-9][0-5]?[bwd]|'
r'[a-d][lh]|[er]?[a-d]x|[er]?[sb]p|[er]?[sd]i|[c-gs]s|st[0-7]|'
r'mm[0-7]|cr[0-4]|dr[0-367]|tr[3-7]')
wordop = r'seg|wrt|strict'
type = r'byte|[dq]?word'
directives = (r'BITS|USE16|USE32|SECTION|SEGMENT|ABSOLUTE|EXTERN|GLOBAL|'
r'ORG|ALIGN|STRUC|ENDSTRUC|ENDS|COMMON|CPU|GROUP|UPPERCASE|INCLUDE|'
r'EXPORT|LIBRARY|MODULE|PROC|ENDP|USES|ARG|DATASEG|UDATASEG|END|IDEAL|'
r'P386|MODEL|ASSUME|CODESEG|SIZE')
# T[A-Z][a-z] is more of a convention. Lexer should filter out STRUC definitions
# and then 'add' them to datatype somehow.
datatype = (r'db|dd|dw|T[A-Z][a-z]+')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^\s*%', Comment.Preproc, 'preproc'),
include('whitespace'),
(identifier + ':', Name.Label),
(directives, Keyword, 'instruction-args'),
(r'(%s)(\s+)(%s)' % (identifier, datatype),
bygroups(Name.Constant, Keyword.Declaration, Keyword.Declaration),
'instruction-args'),
(declkw, Keyword.Declaration, 'instruction-args'),
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text)
],
'instruction-args': [
(string, String),
(hexn, Number.Hex),
(octn, Number.Oct),
(binn, Number.Bin),
(floatn, Number.Float),
(decn, Number.Integer),
include('punctuation'),
(register, Name.Builtin),
(identifier, Name.Variable),
# Do not match newline when it's preceded by a backslash
(r'(\\\s*)(;.*)([\r\n])', bygroups(Text, Comment.Single, Text)),
(r'[\r\n]+', Text, '#pop'),
include('whitespace')
],
'preproc': [
(r'[^;\n]+', Comment.Preproc),
(r';.*?\n', Comment.Single, '#pop'),
(r'\n', Comment.Preproc, '#pop'),
],
'whitespace': [
(r'[\n\r]', Text),
(r'\\[\n\r]', Text),
(r'[ \t]+', Text),
(r';.*', Comment.Single)
],
'punctuation': [
(r'[,():\[\]]+', Punctuation),
(r'[&|^<>+*=/%~-]+', Operator),
(r'[$]+', Keyword.Constant),
(wordop, Operator.Word),
(type, Keyword.Type)
],
}
def analyse_text(text):
# See above
if re.match(r'PROC', text, re.I):
return True
class Ca65Lexer(RegexLexer):
"""
For ca65 assembler sources.
.. versionadded:: 1.6
"""
name = 'ca65 assembler'
aliases = ['ca65']
filenames = ['*.s']
flags = re.IGNORECASE
tokens = {
'root': [
(r';.*', Comment.Single),
(r'\s+', Text),
(r'[a-z_.@$][\w.@$]*:', Name.Label),
(r'((ld|st)[axy]|(in|de)[cxy]|asl|lsr|ro[lr]|adc|sbc|cmp|cp[xy]'
r'|cl[cvdi]|se[cdi]|jmp|jsr|bne|beq|bpl|bmi|bvc|bvs|bcc|bcs'
r'|p[lh][ap]|rt[is]|brk|nop|ta[xy]|t[xy]a|txs|tsx|and|ora|eor'
r'|bit)\b', Keyword),
(r'\.\w+', Keyword.Pseudo),
(r'[-+~*/^&|!<>=]', Operator),
(r'"[^"\n]*.', String),
(r"'[^'\n]*.", String.Char),
(r'\$[0-9a-f]+|[0-9a-f]+h\b', Number.Hex),
(r'\d+', Number.Integer),
(r'%[01]+', Number.Bin),
(r'[#,.:()=\[\]]', Punctuation),
(r'[a-z_.@$][\w.@$]*', Name),
]
}
def analyse_text(self, text):
# comments in GAS start with "#"
if re.search(r'^\s*;', text, re.MULTILINE):
return 0.9
class Dasm16Lexer(RegexLexer):
"""
For DCPU-16 Assembly.
Check http://0x10c.com/doc/dcpu-16.txt
.. versionadded:: 2.4
"""
name = 'DASM16'
aliases = ['dasm16']
filenames = ['*.dasm16', '*.dasm']
mimetypes = ['text/x-dasm16']
INSTRUCTIONS = [
'SET',
'ADD', 'SUB',
'MUL', 'MLI',
'DIV', 'DVI',
'MOD', 'MDI',
'AND', 'BOR', 'XOR',
'SHR', 'ASR', 'SHL',
'IFB', 'IFC', 'IFE', 'IFN', 'IFG', 'IFA', 'IFL', 'IFU',
'ADX', 'SBX',
'STI', 'STD',
'JSR',
'INT', 'IAG', 'IAS', 'RFI', 'IAQ', 'HWN', 'HWQ', 'HWI',
]
REGISTERS = [
'A', 'B', 'C',
'X', 'Y', 'Z',
'I', 'J',
'SP', 'PC', 'EX',
'POP', 'PEEK', 'PUSH'
]
# Regexes yo
char = r'[a-zA-Z0-9_$@.]'
identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)'
binary_number = r'0b[01_]+'
instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')'
single_char = r"'\\?" + char + "'"
string = r'"(\\"|[^"])*"'
def guess_identifier(lexer, match):
ident = match.group(0)
klass = Name.Variable if ident.upper() in lexer.REGISTERS else Name.Label
yield match.start(), klass, ident
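# e.g. "SP" is emitted as Name.Variable (a register) while "loop_start"
# falls through to Name.Label.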
tokens = {
'root': [
include('whitespace'),
(':' + identifier, Name.Label),
(identifier + ':', Name.Label),
(instruction, Name.Function, 'instruction-args'),
(r'\.' + identifier, Name.Function, 'data-args'),
(r'[\r\n]+', Text)
],
'numeric' : [
(binary_number, Number.Integer),
(number, Number.Integer),
(single_char, String),
],
'arg' : [
(identifier, guess_identifier),
include('numeric')
],
'deref' : [
(r'\+', Punctuation),
(r'\]', Punctuation, '#pop'),
include('arg'),
include('whitespace')
],
'instruction-line' : [
(r'[\r\n]+', Text, '#pop'),
(r';.*?$', Comment, '#pop'),
include('whitespace')
],
'instruction-args': [
(r',', Punctuation),
(r'\[', Punctuation, 'deref'),
include('arg'),
include('instruction-line')
],
'data-args' : [
(r',', Punctuation),
include('numeric'),
(string, String),
include('instruction-line')
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r';.*?\n', Comment)
],
}
| 39.064922
| 94
| 0.479598
|
f2ed06d019ed23f69b7810bb3bb85c4c4d21fe4b
| 3,750
|
py
|
Python
|
labcontrol/gui/handlers/process_handlers/test/test_sequencing_process.py
|
jdereus/LabControl
|
9c1867dc8047075f1f3e505a2f4c3479ee6388cc
|
[
"BSD-3-Clause"
] | 3
|
2018-01-21T05:24:32.000Z
|
2019-07-12T21:49:02.000Z
|
labcontrol/gui/handlers/process_handlers/test/test_sequencing_process.py
|
jdereus/labman
|
9c1867dc8047075f1f3e505a2f4c3479ee6388cc
|
[
"BSD-3-Clause"
] | 465
|
2017-05-25T01:33:29.000Z
|
2019-07-12T21:47:59.000Z
|
labcontrol/gui/handlers/process_handlers/test/test_sequencing_process.py
|
biocore/LabControl
|
9c1867dc8047075f1f3e505a2f4c3479ee6388cc
|
[
"BSD-3-Clause"
] | 16
|
2017-05-12T21:39:18.000Z
|
2019-04-03T16:19:21.000Z
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import zipfile
from io import BytesIO
from unittest import main
from tornado.escape import json_encode, json_decode
from labcontrol.gui.testing import TestHandlerBase
import logging
class TestSequencingProcessHandler(TestHandlerBase):
def test_get_sequencing_process_handler_pool_type(self):
response = self.get('/process/sequencing/somepooltype/')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
def test_post_sequencing_process_handler(self):
data = {'pools': json_encode([1, 2]), 'run_name': 'test_run',
'experiment': 'test_experiment',
'sequencer': 19, 'fwd_cycles': 150, 'rev_cycles': 150,
'principal_investigator': 'admin@foo.bar',
'additional_contacts': json_encode(
['demo@microbio.me', 'shared@foo.bar'])}
response = self.post('/process/sequencing/couldbeanything/', data)
self.assertEqual(response.code, 200)
self.assertCountEqual(json_decode(response.body), ['process'])
def test_get_download_sample_sheet_handler(self):
# amplicon sequencing process
logging.debug("in test_get_download_sample_sheet_handler")
response = self.get('/process/sequencing/1/sample_sheet')
self.assertNotEqual(response.body, '')
self.assertEqual(response.code, 200)
self.assertTrue(response.body.startswith(b'# PI,Dude,test@foo.bar\n'))
logging.debug(response.headers['Content-Disposition'])
s = "attachment; filename=2017-10-25_samplesheet_Test_Run.1.csv"
self.assertEqual(response.headers['Content-Disposition'], s)
# shotgun sequencing process
response = self.get('/process/sequencing/2/sample_sheet')
self.assertNotEqual(response.body, '')
self.assertEqual(response.code, 200)
self.assertTrue(response.body.startswith(b'# PI,Dude,test@foo.bar\n'))
self.assertEqual(response.headers['Content-Disposition'],
"attachment; filename=2017-10-25_samplesheet_"
"TestShotgunRun1_TestExperimentShotgun1.csv")
def test_get_download_preparation_sheet_handler(self):
response = self.get('/process/sequencing/1/preparation_sheets')
self.assertNotEqual(response.body, '')
self.assertEqual(response.code, 200)
self.assertEqual(response.headers['Content-Type'], 'application/zip')
self.assertEqual(response.headers['Expires'], '0')
self.assertEqual(response.headers['Cache-Control'], 'no-cache')
self.assertEqual(response.headers['Content-Disposition'],
'attachment; filename=2017-10-25_preps'
'_Test_Run.1.zip')
expected_files = ['2017-10-25_prep_Test_Run.1.txt']
archive = zipfile.ZipFile(BytesIO(response.body), 'r')
# NB: Apparently order of namelist results is not stable, hence
# the need to call sorted()
self.assertEqual(sorted(archive.namelist()), expected_files)
# NB: All the below does is test that the files in the archive have
# SOME non-empty content--it doesn't check what that content IS.
for curr_file_name in expected_files:
contents = archive.open(curr_file_name).read()
self.assertNotEqual(contents, '')
if __name__ == '__main__':
main()
| 44.642857
| 78
| 0.645867
|
15992f14f25e2e2945c556dffe9d2a2bc89bacf0
| 2,158
|
py
|
Python
|
templates/app.py
|
brix4dayz/TRiCAM2.0
|
716f154403c8c0aa903d7391bf4c14d45c778a22
|
[
"MIT"
] | 1
|
2015-08-11T20:50:36.000Z
|
2015-08-11T20:50:36.000Z
|
templates/app.py
|
brix4dayz/TRiCAM2.0
|
716f154403c8c0aa903d7391bf4c14d45c778a22
|
[
"MIT"
] | null | null | null |
templates/app.py
|
brix4dayz/TRiCAM2.0
|
716f154403c8c0aa903d7391bf4c14d45c778a22
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
from werkzeug import secure_filename
# Initialize the Flask application
app = Flask(__name__)
# This is the path to the upload directory
app.config['UPLOAD_FOLDER'] = 'uploads/'
# These are the extensions that we accept for upload
app.config['ALLOWED_EXTENSIONS'] = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# For a given file, return whether it's an allowed type or not
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
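# Note: the extension check is case-sensitive, so allowed_file("photo.JPG")
# returns False while allowed_file("photo.jpg") returns True.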
# This route will show a form to perform an AJAX request
# jQuery is loaded to execute the request and update the
# value of the operation
@app.route('/')
def index():
return render_template('index.html')
# Route that will process the file upload
@app.route('/upload', methods=['POST'])
def upload():
# Get the name of the uploaded file
file = request.files['file']
# Check if the file is one of the allowed types/extensions
if file and allowed_file(file.filename):
# Make the filename safe, remove unsupported chars
filename = secure_filename(file.filename)
# Move the file from the temporary folder to
# the upload folder we set up
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# Redirect the user to the uploaded_file route, which
# will basically show the uploaded file in the browser
return redirect(url_for('uploaded_file',
filename=filename))
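# Example request against this endpoint (assuming the server is reachable
# on localhost port 80, as configured below):
#   curl -F "file=@picture.png" http://localhost/upload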
# This route expects a parameter containing the name
# of a file. It then locates that file in the upload
# directory and shows it in the browser, so if the user uploads
# an image, that image is shown after the upload
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename)
if __name__ == '__main__':
app.run(
host="0.0.0.0",
port=int("80"),
debug=True
)
| 36.576271
| 89
| 0.673772
|
ef7304c1f55547f8e355782f15d2423ae2e2cdf5
| 4,057
|
py
|
Python
|
docs/conf.py
|
f4nu/vpype
|
2328ce3fb0bef60aeaf3556d2c47d0dc882d5daf
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
f4nu/vpype
|
2328ce3fb0bef60aeaf3556d2c47d0dc882d5daf
|
[
"MIT"
] | 46
|
2021-01-26T01:09:10.000Z
|
2022-03-25T06:22:02.000Z
|
docs/conf.py
|
str4w/vpype
|
c649445b8fec56b4ce9a436a7b8741c5fec1d640
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# noinspection PyPackageRequirements
from recommonmark.parser import CommonMarkParser
project = "vpype"
# noinspection PyShadowingBuiltins
copyright = "2020, Antoine Beyeler"
author = "Antoine Beyeler"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx_click.ext",
"sphinx_autodoc_typehints",
# "recommonmark", # NOTE: see workaround below
# "alabaster",
# 'autoapi.extension',
]
# -- Autoapi configuration ------------------------------------------------
# autoapi_dirs = ['../vpype']
# autoapi_options = ['members', 'undoc-members', 'show-inheritance']
# autoapi_generate_api_docs = False
autosummary_generate = True
add_module_names = False
autosummary_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "venv", ".*"]
# -- Global options ----------------------------------------------------------
# Don't mess with double-dash used in CLI options
smartquotes_action = "qe"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# html_theme = "alabaster"
# html_theme_path = [alabaster.get_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Intersphinx options
intersphinx_mapping = {
"shapely": ("https://shapely.readthedocs.io/en/latest/", None),
"click": ("https://click.palletsprojects.com/en/7.x/", None),
"python": ("https://docs.python.org/3/", None),
"Pillow": ("https://pillow.readthedocs.io/en/stable/", None),
}
# -- Napoleon options
napoleon_include_init_with_doc = True
# noinspection PyUnusedLocal
def autodoc_skip_member(app, what, name, obj, skip, options):
exclusions = (
# vpype/model.py
"VectorData",
# vpype/utils.py
"PAGE_FORMATS",
"convert",
"convert_page_format",
"Length",
# vpype_cli/debug.py
"DebugData",
# private attribute
"__dict__",
"__doc__",
"__module__",
"__weakref__",
)
exclude = name in exclusions
return skip or exclude
# RECOMMONMARK WORKAROUND
# see https://github.com/readthedocs/recommonmark/issues/177
class CustomCommonMarkParser(CommonMarkParser):
def visit_document(self, node):
pass
def setup(app):
app.connect("autodoc-skip-member", autodoc_skip_member)
# recommonmark workaround
app.add_source_suffix(".md", "markdown")
app.add_source_parser(CustomCommonMarkParser)
| 31.449612
| 79
| 0.653439
|
4448a6544487d434df7489d374fd3456107f8879
| 647
|
py
|
Python
|
__primeNumberLessThanGivenNumber.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__primeNumberLessThanGivenNumber.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__primeNumberLessThanGivenNumber.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
'''
Write a Python program to print the number of prime numbers
which are less than or equal to a given integer.
Input:
n (1 <= n <= 999,999)
Input the number(n): 35
Number of prime numbers which are less than or equal to n.: 11
'''
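# primes[i] records whether the odd number 2*i + 1 is prime (an odd-only
# sieve covering values up to 999,999); the trailing "+ 1" in the final
# print accounts for the prime 2, which the odd-only table cannot hold.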
primes = [1] * 500000
primes[0] = 0
for i in range(3, 1000, 2):
if primes[i // 2]:
primes[(i * i) // 2::i] = [0] * len(primes[(i * i) // 2::i])
print("Input the number(n):")
n = int(input())
if n < 4:
print("Number of prime numbers which are less than or equal to n.:", n - 1)
else:
print("Number of prime numbers which are less than or equal to n.:",sum(primes[:(n + 1) // 2]) + 1)
| 29.409091
| 103
| 0.605873
|
c7b6c2a4ea7917ef12d1d36e6f7e6b0c7542cc6c
| 21,994
|
py
|
Python
|
django/conf/global_settings.py
|
Oktosha/django
|
b10a8bd90bb1087d7abcdda971d51269579aeaad
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 5
|
2019-10-17T21:29:53.000Z
|
2021-06-23T16:27:02.000Z
|
django/conf/global_settings.py
|
Oktosha/django
|
b10a8bd90bb1087d7abcdda971d51269579aeaad
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2
|
2020-04-16T15:27:35.000Z
|
2020-06-28T00:42:50.000Z
|
django/conf/global_settings.py
|
Oktosha/django
|
b10a8bd90bb1087d7abcdda971d51269579aeaad
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 11
|
2019-09-14T20:57:30.000Z
|
2022-01-19T17:59:26.000Z
|
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
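# For example (a sketch; replace with your real hosts):
# ALLOWED_HOSTS = ['www.example.com', '.example.com']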
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('hy', gettext_noop('Armenian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
LANGUAGE_COOKIE_SECURE = False
LANGUAGE_COOKIE_HTTPONLY = False
LANGUAGE_COOKIE_SAMESITE = None
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
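# A minimal commented sketch of an SQLite configuration (the file name is a
# placeholder):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'db.sqlite3',
#     }
# }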
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which streamed uploads will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits grouped together when splitting by THOUSAND_SEPARATOR.
# 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
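# A commented sketch, valid only if your proxy really sets (and strips any
# client-supplied copy of) this header:
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')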
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the HttpOnly flag.
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# Class used to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
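# A commented sketch of a memcached backend (the address is a placeholder):
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': '127.0.0.1:11211',
#     }
# }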
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| 34.473354
| 101
| 0.701873
|
8c0e0884748d6b991611ffe94a49130edfc61d72
| 902
|
py
|
Python
|
May_work/python/tkinter/quitBUtton.py
|
EricMorse/ECE434-Project
|
315b81003b49b51d4fc936b4826a4b70cb6b403d
|
[
"MIT"
] | null | null | null |
May_work/python/tkinter/quitBUtton.py
|
EricMorse/ECE434-Project
|
315b81003b49b51d4fc936b4826a4b70cb6b403d
|
[
"MIT"
] | null | null | null |
May_work/python/tkinter/quitBUtton.py
|
EricMorse/ECE434-Project
|
315b81003b49b51d4fc936b4826a4b70cb6b403d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
ZetCode Tkinter tutorial
This program creates a Quit
button. When we press the button,
the application terminates.
Author: Jan Bodnar
Last modified: July 2017
Website: www.zetcode.com
"""
from tkinter import Tk, BOTH
from tkinter.ttk import Frame, Button, Style
class Example(Frame):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.style = Style()
self.style.theme_use("default")
self.master.title("Quit button")
self.pack(fill=BOTH, expand=1)
quitButton = Button(self, text="Quit",
command=self.quit)
quitButton.place(x=50, y=50)
def main():
root = Tk()
root.geometry("250x150+300+300")
app = Example()
root.mainloop()
if __name__ == '__main__':
main()
| 18.04
| 46
| 0.590909
|
be97feb70d74485b6e5b7a64fe33d09d8372016a
| 7,667
|
py
|
Python
|
doc/conf.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | 1
|
2020-10-13T19:58:44.000Z
|
2020-10-13T19:58:44.000Z
|
doc/conf.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | null | null | null |
doc/conf.py
|
bioidiap/bob.pipelines
|
cbefdaf3b384ee11cb26a279281f007adc2d8f19
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import pkg_resources
import sphinx_rtd_theme
# For inter-documentation mapping:
from bob.extension.utils import link_documentation, load_requirements
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.mathjax",
# 'matplotlib.sphinxext.plot_directive',
]
# Be picky about warnings
nitpicky = True
# Ignores stuff we can't easily resolve in other projects' Sphinx manuals
nitpick_ignore = []
# Allows the user to override warnings from a separate file
if os.path.exists("nitpick-exceptions.txt"):
for line in open("nitpick-exceptions.txt"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
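# Each line of nitpick-exceptions.txt pairs a reference type with a target,
# matching the split above; a hypothetical entry:
#   py:class sklearn.base.BaseEstimator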
# Always includes todos
todo_include_todos = True
# Generates auto-summary automatically
autosummary_generate = True
# Create numbers on figures with captions
numfig = False
# If we are on OSX, the 'dvipng' path may be different
dvipng_osx = "/Library/TeX/texbin/dvipng"
if os.path.exists(dvipng_osx):
pngmath_dvipng = dvipng_osx
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "bob.pipelines"
copyright = "%s, Idiap Research Institute" % time.strftime("%Y")
# Grab the setup entry
distribution = pkg_resources.require(project)[0]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = distribution.version
# The full version, including alpha/beta/rc tags.
release = distribution.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["links.rst"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# Some variables which are useful for generated material
project_variable = project.replace(".", "_")
short_description = "bob.pipelines"
owner = ["Idiap Research Institute"]
# -- Options for HTML output ---------------------------------------------------
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = project_variable
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "img/bob-logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "img/bob-favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = project_variable + "_doc"
# -- Post configuration --------------------------------------------------------
# Included after all input documents
rst_epilog = """
.. |project| replace:: Bob
.. |version| replace:: %s
.. |current-year| date:: %%Y
""" % (
version,
)
# Default processing flags for sphinx
autoclass_content = "class"
autodoc_member_order = "bysource"
autodoc_default_options = {
"members": True,
"undoc-members": True,
"show-inheritance": True,
}
sphinx_requirements = "extra-intersphinx.txt"
if os.path.exists(sphinx_requirements):
intersphinx_mapping = link_documentation(
additional_packages=["python", "numpy"]
+ load_requirements(sphinx_requirements)
)
else:
intersphinx_mapping = link_documentation()
def setup(app):
# Add `>>>` button to toggle visibility of prompts in code blocks.
# see https://github.com/readthedocs/sphinx_rtd_theme/issues/167 and
# https://raw.githubusercontent.com/python/python-docs-theme/master/python_docs_theme/static/copybutton.js
app.add_js_file("copybutton.js")
| 31.040486
| 110
| 0.718925
|
52cef4c0e1c1086e90a051cf9971738bcaaeb805
| 321
|
py
|
Python
|
plaso/engine/logger.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
plaso/engine/logger.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
plaso/engine/logger.py
|
pyllyukko/plaso
|
7533db2d1035ca71d264d6281ebd5db2d073c587
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
# -*- coding: utf-8 -*-
"""The engine sub module logger."""
import logging
_logger = logging.getLogger('engine')
# Mimic the logging module interface.
critical = _logger.critical
debug = _logger.debug
error = _logger.error
exception = _logger.exception
info = _logger.info
log = _logger.log
warning = _logger.warning
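# Usage sketch (hypothetical call site elsewhere in plaso.engine):
#
#   from plaso.engine import logger
#   logger.debug('Processing started.')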
| 18.882353
| 37
| 0.738318
|
62dc48021c7971c56b9f0f65a4bfbbc815d0315c
| 28,335
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/_images_operations.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/_images_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/operations/_images_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
resource_group_name: str,
image_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"imageName": _SERIALIZER.url("image_name", image_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
image_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"imageName": _SERIALIZER.url("image_name", image_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
image_name: str,
subscription_id: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"imageName": _SERIALIZER.url("image_name", image_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if expand is not None:
_query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class ImagesOperations(object):
"""ImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2017_03_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> "_models.Image":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Image')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Image', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
image_name: str,
parameters: "_models.Image",
**kwargs: Any
) -> LROPoller["_models.Image"]:
"""Create or update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Create Image operation.
:type parameters: ~azure.mgmt.compute.v2017_03_30.models.Image
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either Image or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2017_03_30.models.Image]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
image_name=image_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
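    # Polling sketch (arguments are placeholders): the returned LROPoller can be
    # driven to completion synchronously, e.g.
    #
    #   poller = images_client.begin_create_or_update("my-rg", "my-image", parameters)
    #   image = poller.result()  # blocks until the long-running operation finishes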
def _delete_initial(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
request = build_delete_request_initial(
resource_group_name=resource_group_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
image_name: str,
**kwargs: Any
) -> LROPoller["_models.OperationStatusResponse"]:
"""Deletes an Image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatusResponse or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
image_name=image_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
image_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Image":
"""Gets an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2017_03_30.models.Image
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Image"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
request = build_get_request(
resource_group_name=resource_group_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Image', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.ImageListResult"]:
"""Gets the list of images under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2017_03_30.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ImageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images"} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.ImageListResult"]:
"""Gets the list of Images in the subscription. Use nextLink property in the response to get the
next page of Images. Do this till nextLink is null to fetch all the Images.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ImageListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2017_03_30.models.ImageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2017-03-30") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ImageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images"} # type: ignore
| 41.124819
| 187
| 0.654773
|
88d3a987205b05336482703559d27efc2269b815
| 6,387
|
py
|
Python
|
sentry_sdk/integrations/tornado.py
|
Siecje/sentry-python
|
d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51
|
[
"BSD-2-Clause"
] | 1
|
2019-04-15T03:36:19.000Z
|
2019-04-15T03:36:19.000Z
|
sentry_sdk/integrations/tornado.py
|
Siecje/sentry-python
|
d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51
|
[
"BSD-2-Clause"
] | null | null | null |
sentry_sdk/integrations/tornado.py
|
Siecje/sentry-python
|
d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51
|
[
"BSD-2-Clause"
] | null | null | null |
import weakref
from inspect import iscoroutinefunction
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.utils import (
HAS_REAL_CONTEXTVARS,
event_from_exception,
capture_internal_exceptions,
transaction_from_function,
)
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations._wsgi_common import (
RequestExtractor,
_filter_headers,
_is_json_content_type,
)
from sentry_sdk.integrations.logging import ignore_logger
from tornado.web import RequestHandler, HTTPError # type: ignore
from tornado.gen import coroutine # type: ignore
if False:  # static type-checking imports only; this block never executes at runtime
from typing import Any
from typing import List
from typing import Optional
from typing import Dict
from typing import Callable
class TornadoIntegration(Integration):
identifier = "tornado"
@staticmethod
def setup_once():
# type: () -> None
import tornado # type: ignore
tornado_version = getattr(tornado, "version_info", None)
if tornado_version is None or tornado_version < (5, 0):
raise RuntimeError("Tornado 5+ required")
if not HAS_REAL_CONTEXTVARS:
# Tornado is async. We better have contextvars or we're going to leak
# state between requests.
raise RuntimeError(
"The tornado integration for Sentry requires Python 3.6+ or the aiocontextvars package"
)
ignore_logger("tornado.application")
ignore_logger("tornado.access")
old_execute = RequestHandler._execute
awaitable = iscoroutinefunction(old_execute)
if awaitable:
            # Starting with Tornado 6, RequestHandler._execute is a standard
            # Python coroutine (async/await), so our wrapper must be one too.
async def sentry_execute_request_handler(self, *args, **kwargs):
# type: (Any, *List, **Any) -> Any
hub = Hub.current
integration = hub.get_integration(TornadoIntegration)
if integration is None:
return await old_execute(self, *args, **kwargs)
weak_handler = weakref.ref(self)
with Hub(hub) as hub:
with hub.configure_scope() as scope:
scope.clear_breadcrumbs()
scope.add_event_processor(_make_event_processor(weak_handler))
return await old_execute(self, *args, **kwargs)
else:
@coroutine # type: ignore
def sentry_execute_request_handler(self, *args, **kwargs):
hub = Hub.current
integration = hub.get_integration(TornadoIntegration)
if integration is None:
return old_execute(self, *args, **kwargs)
weak_handler = weakref.ref(self)
with Hub(hub) as hub:
with hub.configure_scope() as scope:
scope.add_event_processor(_make_event_processor(weak_handler))
result = yield from old_execute(self, *args, **kwargs)
return result
RequestHandler._execute = sentry_execute_request_handler
old_log_exception = RequestHandler.log_exception
def sentry_log_exception(self, ty, value, tb, *args, **kwargs):
# type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]
_capture_exception(ty, value, tb)
return old_log_exception(self, ty, value, tb, *args, **kwargs)
RequestHandler.log_exception = sentry_log_exception
def _capture_exception(ty, value, tb):
# type: (type, BaseException, Any) -> None
hub = Hub.current
if hub.get_integration(TornadoIntegration) is None:
return
if isinstance(value, HTTPError):
return
event, hint = event_from_exception(
(ty, value, tb),
client_options=hub.client.options,
mechanism={"type": "tornado", "handled": False},
)
hub.capture_event(event, hint=hint)
def _make_event_processor(weak_handler):
# type: (Callable[[], RequestHandler]) -> Callable
def tornado_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
handler = weak_handler()
if handler is None:
return event
request = handler.request
with capture_internal_exceptions():
method = getattr(handler, handler.request.method.lower())
event["transaction"] = transaction_from_function(method)
with capture_internal_exceptions():
extractor = TornadoRequestExtractor(request)
extractor.extract_into_event(event)
request_info = event["request"]
request_info["url"] = "%s://%s%s" % (
request.protocol,
request.host,
request.path,
)
request_info["query_string"] = request.query
request_info["method"] = request.method
request_info["env"] = {"REMOTE_ADDR": request.remote_ip}
request_info["headers"] = _filter_headers(dict(request.headers))
with capture_internal_exceptions():
if handler.current_user and _should_send_default_pii():
event.setdefault("user", {})["is_authenticated"] = True
return event
return tornado_processor
class TornadoRequestExtractor(RequestExtractor):
def content_length(self):
# type: () -> int
if self.request.body is None:
return 0
return len(self.request.body)
def cookies(self):
# type: () -> Dict
return {k: v.value for k, v in self.request.cookies.items()}
def raw_data(self):
# type: () -> bytes
return self.request.body
def form(self):
# type: () -> Optional[Any]
return {
k: [v.decode("latin1", "replace") for v in vs]
for k, vs in self.request.body_arguments.items()
}
def is_json(self):
# type: () -> bool
return _is_json_content_type(self.request.headers.get("content-type"))
def files(self):
# type: () -> Dict
return {k: v[0] for k, v in self.request.files.items() if v}
def size_of_file(self, file):
return len(file.body or ())
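# Illustrative setup sketch (not part of the original module): enabling the
# integration in an application; the DSN below is a placeholder value.
#
#   import sentry_sdk
#   sentry_sdk.init(
#       dsn="https://<public_key>@<host>/<project_id>",
#       integrations=[TornadoIntegration()],
#   )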
| 33.265625
| 108
| 0.617817
|
1b19a171614b14406a92a8b5831462d0ee184820
| 688
|
py
|
Python
|
OpenCV/Q2.py
|
fun-math/Autumn-of-Automation
|
08c04510f3500ac335f5c830ce3fbabb9c3fa05c
|
[
"MIT"
] | null | null | null |
OpenCV/Q2.py
|
fun-math/Autumn-of-Automation
|
08c04510f3500ac335f5c830ce3fbabb9c3fa05c
|
[
"MIT"
] | null | null | null |
OpenCV/Q2.py
|
fun-math/Autumn-of-Automation
|
08c04510f3500ac335f5c830ce3fbabb9c3fa05c
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import random
img=cv2.imread("T.jpg",1)
rows,cols,ch=img.shape
M=np.float32([[1,0,0],[0,1,0]])
for i in range(8):
x=random.randrange(40,80)
y=random.randrange(40,80)
sgnx=random.randrange(-1,2,2)
sgny=random.randrange(-1,2,2)
theta=random.randrange(0,360)
M[0,2]=sgnx*x
M[1,2]=sgny*y
M_rot=cv2.getRotationMatrix2D((166,220),theta,1)
img_new=cv2.warpAffine(img,M_rot,(cols,rows))
img_new=cv2.warpAffine(img_new,M,(cols,rows))
cv2.imshow(f"frame{i}",img_new)
img_blur1=cv2.GaussianBlur(img,(5,5),0)
cv2.imshow("frame8",img_blur1)
img_blur2=cv2.bilateralFilter(img,9,75,75)
cv2.imshow("frame9",img_blur2)
cv2.waitKey(0)
cv2.destroyAllWindows()
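# Illustrative extension sketch (not part of the original exercise): the
# generated frames could also be persisted, e.g. inside the loop above:
#   cv2.imwrite(f"frame{i}.jpg", img_new)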
| 22.933333
| 49
| 0.728198
|
44fbfa2265f71f2f461f19fc0a07bc85c90a4609
| 1,643
|
py
|
Python
|
heap/k_largest_elements_immutable_max_heap.py
|
greyshell/ds_algorithm
|
6d61b56b5c91b8159b0705d1eb09718cc66b14f5
|
[
"MIT"
] | 18
|
2020-04-09T02:53:55.000Z
|
2022-02-23T19:12:08.000Z
|
heap/k_largest_elements_immutable_max_heap.py
|
greyshell/ds_algorithm
|
6d61b56b5c91b8159b0705d1eb09718cc66b14f5
|
[
"MIT"
] | 1
|
2020-06-22T00:35:30.000Z
|
2020-06-27T18:09:42.000Z
|
heap/k_largest_elements_immutable_max_heap.py
|
greyshell/ds_algorithm
|
6d61b56b5c91b8159b0705d1eb09718cc66b14f5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# author: greyshell
# description: find the k largest elements of an immutable max heap
from snowowl import Heap, HeapType
class Node:
def __init__(self, value, index):
self.value = value
self.index = index
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
def get_k_largest_elements_immutable_max_heap(immutable_max_heap: Heap, k: int) -> list:
"""
time complexity: O(k * log k)
space complexity: O(k) -> auxiliary max heap
"""
# create an auxiliary max heap
auxiliary_max_heap = Heap([], heap_type=HeapType.MAX)
    # peek the max (root) item of the immutable max heap, wrap its value and
    # index in a Node, and push that node into the auxiliary max heap
node = Node(immutable_max_heap.peek(), 0)
auxiliary_max_heap.insert(node)
result = list()
for i in range(0, k):
node = auxiliary_max_heap.remove()
result.append(node.value)
index = node.index
left_child_index = 2 * index + 1
if left_child_index < len(immutable_max_heap):
            left_child = immutable_max_heap[left_child_index]
left_node = Node(left_child, left_child_index)
auxiliary_max_heap.insert(left_node)
right_child_index = 2 * index + 2
if right_child_index < len(immutable_max_heap):
            right_child = immutable_max_heap[right_child_index]
right_node = Node(right_child, right_child_index)
auxiliary_max_heap.insert(right_node)
return result
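# Illustrative usage sketch (not part of the original file); passing a
# non-empty list to Heap() is an assumption based on the empty-list call above.
if __name__ == '__main__':
    max_heap = Heap([3, 1, 9, 7, 5], heap_type=HeapType.MAX)
    # prints the three largest values in descending order: [9, 7, 5]
    print(get_k_largest_elements_immutable_max_heap(max_heap, 3))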
| 29.339286
| 93
| 0.671942
|
3139657f501d1cb7dfb0c6da88355c321c47f1b8
| 335
|
py
|
Python
|
koku/api/settings/default_settings.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 157
|
2018-04-30T16:27:53.000Z
|
2022-03-31T08:17:21.000Z
|
koku/api/settings/default_settings.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 3,250
|
2018-04-26T14:14:25.000Z
|
2022-03-31T23:49:15.000Z
|
koku/api/settings/default_settings.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 65
|
2018-05-10T14:11:50.000Z
|
2022-03-18T19:22:58.000Z
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Set of default settings for the user_settings table jsonfield"""
from koku.settings import KOKU_DEFAULT_COST_TYPE
from koku.settings import KOKU_DEFAULT_CURRENCY
DEFAULT_USER_SETTINGS = {"currency": KOKU_DEFAULT_CURRENCY, "cost_type": KOKU_DEFAULT_COST_TYPE}
| 33.5
| 96
| 0.81194
|
0718f4ea3eefec9642fd08e5fd4d109f052de471
| 872
|
py
|
Python
|
mockup page/plot.py
|
pohldavid/weather
|
880760a6840bfb2bca909e9ae3f06159107dba15
|
[
"CC0-1.0"
] | null | null | null |
mockup page/plot.py
|
pohldavid/weather
|
880760a6840bfb2bca909e9ae3f06159107dba15
|
[
"CC0-1.0"
] | null | null | null |
mockup page/plot.py
|
pohldavid/weather
|
880760a6840bfb2bca909e9ae3f06159107dba15
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import csv
def make_X_Axis_Label():
    """Build the hourly x-axis labels '0AM'..'7AM' for the eight samples."""
    x_label = []
    for h in range(8):
        x_label.append(str(h) + "AM")
    return x_label
# x = ['Maths', 'Physics', 'Chemistry']
y1=[]
y2=[]
y3=[]
with open('data.html','r') as csvfile:
    plot = csv.reader(csvfile, delimiter = '\t')
    for row in plot:
        # cast to float so matplotlib plots numeric values rather than
        # treating the tab-separated strings as unordered categories
        y1.append(float(row[2]))
        y2.append(float(row[3]))
        y3.append(float(row[4]))
print(y1)
x = make_X_Axis_Label()
# y1 = [95, 88, 45, 65, 87, 90, 46, 75]
plt.plot(x, y1, label="Temperature \u00b0F")
#y2 = [67, 45, 56, 55, 45, 56, 60, 62]
plt.plot(x, y2, label="Humidity %")
#y3 = [28.87, 29.92, 29.90, 29.91, 29.92, 29.92, 29.90, 28.90]
plt.plot(x, y3, label="Pressure inHg")
#plt.xlabel('Time')
#plt.ylabel('Value')
plt.title('BME_280 Humidity, Temperature, Pressure')
plt.legend()
plt.show()
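# Illustrative data sketch (hypothetical format): each tab-separated row of the
# input file is expected to carry temperature, humidity and pressure in columns
# 2-4, one row per hour, e.g.:
#   2021-06-01<TAB>0:00<TAB>71.2<TAB>45.0<TAB>29.91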
| 17.44
| 62
| 0.59289
|
f0054ebfdbdcc3a45b0744f9a61f3289a685b6ef
| 181
|
py
|
Python
|
__Training__/Python - HackerRank/2. Basic Data Types/Tuples.py
|
JUD210/Study-Note
|
2add9db3f11d99370f49878f0c19e9caa60d2d02
|
[
"MIT"
] | null | null | null |
__Training__/Python - HackerRank/2. Basic Data Types/Tuples.py
|
JUD210/Study-Note
|
2add9db3f11d99370f49878f0c19e9caa60d2d02
|
[
"MIT"
] | null | null | null |
__Training__/Python - HackerRank/2. Basic Data Types/Tuples.py
|
JUD210/Study-Note
|
2add9db3f11d99370f49878f0c19e9caa60d2d02
|
[
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/python-tuples/problem
num = int(input())
# 2
int_list = tuple(map(int, input().split()))
# 1 2
print(hash(int_list))
# 3713081631934410656
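# Illustrative note (not part of the original solution): hash() works here only
# because tuples are immutable; the same values in a list would fail:
#   hash([1, 2])  # TypeError: unhashable type: 'list'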
| 18.1
| 61
| 0.712707
|
88c68474d8821a923ee6073ff6b56471033a1a8f
| 862
|
py
|
Python
|
setup.py
|
smithblack-0/pygenetic
|
185e6b6f1a97e748094610cdf6557607024b4c8e
|
[
"MIT"
] | 2
|
2020-05-30T05:13:37.000Z
|
2021-03-15T19:54:28.000Z
|
setup.py
|
smithblack-0/pygenetic
|
185e6b6f1a97e748094610cdf6557607024b4c8e
|
[
"MIT"
] | 1
|
2021-06-19T20:30:25.000Z
|
2021-06-19T20:30:25.000Z
|
setup.py
|
smithblack-0/pygenetic
|
185e6b6f1a97e748094610cdf6557607024b4c8e
|
[
"MIT"
] | 2
|
2020-08-02T20:52:50.000Z
|
2021-02-07T15:52:15.000Z
|
import setuptools
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="pygenetic",
version="1.0.2",
author="Bharatraj S Telkar, Daniel Isaac, Shreyas V Patil",
author_email="telkarraj@gmail.com, danielbcbs2@gmail.com, pshreyasv100@gmail.com",
description="An Efficient Python Genetic Algorithm API",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/danny311296/pygenetic",
packages=['pygenetic'],
include_package_data=True,
license='MIT',
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
install_requires=[
'rstr==2.2.6',
'numpy==1.15.4',
'matplotlib==2.2.2',
],
)
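# Illustrative usage sketch (not part of the original file): installing the
# package from a local checkout of this setup.py.
#   pip install .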
| 30.785714
| 86
| 0.643852
|
06223891018246fe555c65b4ecd0acbf933cfc50
| 130
|
py
|
Python
|
remote-scripts/stop-server.py
|
stevecui/azure-linux-automation-1
|
ce5479f8ad229fb176c84b4ab924e78d30090e56
|
[
"Apache-2.0"
] | 59
|
2015-01-21T17:04:14.000Z
|
2022-03-05T19:51:15.000Z
|
remote-scripts/stop-server.py
|
stevecui/azure-linux-automation-1
|
ce5479f8ad229fb176c84b4ab924e78d30090e56
|
[
"Apache-2.0"
] | 24
|
2015-03-04T07:46:45.000Z
|
2018-10-11T07:32:55.000Z
|
remote-scripts/stop-server.py
|
stevecui/azure-linux-automation-1
|
ce5479f8ad229fb176c84b4ab924e78d30090e56
|
[
"Apache-2.0"
] | 151
|
2015-01-16T06:51:57.000Z
|
2021-06-08T19:00:52.000Z
|
#!/usr/bin/python
import subprocess
import logging
import string
import os
from azuremodules import *
global op
StopServer()
| 9.285714
| 26
| 0.776923
|
9a9223f8b1c7a88fe34c6ed2682b977c467f785b
| 9,303
|
py
|
Python
|
agent/vm_manager/vm_manager.py
|
fortinet/ips-bph-framework
|
145e14cced2181f388ade07d78b4f0e9452143dd
|
[
"Apache-2.0"
] | 21
|
2019-10-24T04:59:52.000Z
|
2021-05-11T12:47:17.000Z
|
agent/vm_manager/vm_manager.py
|
fortinet/ips-bph-framework
|
145e14cced2181f388ade07d78b4f0e9452143dd
|
[
"Apache-2.0"
] | null | null | null |
agent/vm_manager/vm_manager.py
|
fortinet/ips-bph-framework
|
145e14cced2181f388ade07d78b4f0e9452143dd
|
[
"Apache-2.0"
] | 9
|
2019-10-26T16:56:08.000Z
|
2021-03-15T14:10:21.000Z
|
import socket
import sys
import re
import subprocess
import time
BPH_NAME = "[BLACKPHENIX]"
def show_banner():
banner = \
"""
-=[B L A C K P H E N I X]=-
by Chris Navarrete @ FortiGuard Labs
[VirtualMachine Server Manager]
"""
print(banner)
class VBoxManager:
def __init__(self):
self.vm_manager = "C:\\Progra~1\\Oracle\\VirtualBox\\VBoxManage.exe"
def run(self, vm_data, check_output=False):
try:
if check_output:
print("{} | Checking Output".format(BPH_NAME))
return subprocess.check_output(vm_data)
else:
print("{} | Not-Checking Output".format(BPH_NAME))
subprocess.check_call(vm_data)
except subprocess.CalledProcessError:
return False
else:
return True
class VBoxControl(VBoxManager):
def __init__(self):
super().__init__()
self.nic_numbers = []
self.vm_running = False
def __vm_cmd_output(self, cmd):
try:
output_data = self.run(cmd, check_output=True).decode('ascii').split('\n')
if "not find a registered machine" in output_data:
print("{} | Wrong machine name".format(BPH_NAME))
except AttributeError:
print("{} | Error in the user input".format(BPH_NAME))
else:
return output_data
def __nic_counter(self, vm_id):
cmd = [self.vm_manager, "showvminfo", vm_id]
for data in self.__vm_cmd_output(cmd):
if re.match(r'NIC\s\d+:\s+MAC', data):
nic_found = re.search(r'(\d+)', data).group()
print("{} | >> NIC Found: {}".format(BPH_NAME, nic_found))
if nic_found not in self.nic_numbers:
self.nic_numbers.append(nic_found)
print(self.nic_numbers)
def __is_vm_running(self, vm_id):
print("{} | Searching for running VMs".format(BPH_NAME))
status = None
cmd = [self.vm_manager, "showvminfo", vm_id]
for data in self.__vm_cmd_output(cmd):
if "State: " in data:
                status = [s for s in data.split(' ') if s][1]
print("{} | Status Detected: {}".format(BPH_NAME, status))
if status is not None:
if status == "restoring":
print("{} | Restoring state detected. Waiting for a status change to avoid VM start-up problems...".format(BPH_NAME))
time.sleep(5)
self.__is_vm_running(vm_id)
# State: saved (since 2019-07-20T19:40:32.000000000)
# State: restoring snapshot (since 2019-07-20T19:40:32.613000000)
if status == "saved" or status == "running":
print("{} | VM-ID:({}) Found".format(BPH_NAME, vm_id))
self.vm_running = True
return True
print("{} | VM-ID:({}) Not Found".format(BPH_NAME, vm_id))
self.vm_running = False
return False
def set_network(self, vm_data):
print("{} | Setting up Network connection for the VM".format(BPH_NAME))
# Here, the network connection selected by the user will be activated.
if self.__is_vm_running(vm_data['vm_id']):
self.__nic_counter(vm_data['vm_id'])
if len(self.nic_numbers) != 0:
for nic_found in self.nic_numbers:
                    # disable every NIC other than the one the user selected
if vm_data['network_id'] != nic_found:
cmd = [ self.vm_manager, "controlvm", vm_data['vm_id'],
"setlinkstate{}".format(nic_found), "off" ]
print(cmd)
if self.run(cmd):
print("{} | Deactivation of unused network interface was OK".format(BPH_NAME))
else:
print("{} | Error when deactivating unused network interfaces".format(BPH_NAME))
            # At this point all the network interfaces not selected by the user
            # have been turned off. Now enable the right one.
cmd = [ self.vm_manager, "controlvm", vm_data['vm_id'],
"setlinkstate{}".format(vm_data['network_id']), "on" ]
print(cmd)
if self.run(cmd):
print("{} | Network was set correctly".format(BPH_NAME))
else:
print("{} | Network was not set".format(BPH_NAME))
def start(self, vm_data):
print("{} | Starting VM".format(BPH_NAME))
cmd = [self.vm_manager, "startvm", vm_data['vm_id'], "--type", "gui"]
print(cmd)
if self.__is_vm_running(vm_data['vm_id']):
# If VM is running, stop and restore.
self.stop(vm_data)
# Then restore and run.
self.restore(vm_data)
if self.run(cmd):
print("{} | VM started correctly".format(BPH_NAME))
self.set_network(vm_data)
return True
return False
def stop(self, vm_data):
print("{} | Stopping VM".format(BPH_NAME))
cmd = [self.vm_manager, "controlvm", vm_data['vm_id'], "poweroff"]
print(cmd)
if self.__is_vm_running(vm_data['vm_id']):
# If VM is running, stop it.
if self.run(cmd):
print("{} | VM stopped correctly".format(BPH_NAME))
return True
return False
def restore(self, vm_data):
print("{} | Restoring VM".format(BPH_NAME))
cmd = [self.vm_manager, "snapshot", vm_data['vm_id'], "restore", vm_data['snapshot_id']]
print(cmd)
if self.__is_vm_running(vm_data['vm_id']):
# If VM is running, stop and restore.
self.stop(vm_data)
if not self.vm_running:
time.sleep(5)
if self.run(cmd):
print("{} | VM restoration OK".format(BPH_NAME))
return True
return False
def main():
show_banner()
print("{} | Starting VM Control server...".format(BPH_NAME))
s = socket.socket()
host = sys.argv[1]
port = int(sys.argv[2])
s.bind((host, port))
s.listen(1)
vbox = VBoxControl()
while True:
print("{} | Accepting connections".format(BPH_NAME))
try:
client_socket, addr = s.accept()
except KeyboardInterrupt:
sys.exit()
else:
print('Receiving connection from:', addr)
while True:
print("{} | Waiting for data...".format(BPH_NAME))
data = client_socket.recv(512).decode('ascii')
if data:
if re.match(r'restart|restore|start|stop', data):
print("{} | VM Command received: {}".format(BPH_NAME, data))
data = data.strip().split('|')
vm_data = {}
if len(data) == 4:
print("{} | OK".format(BPH_NAME))
vm_data = {}
vm_data['cmd'] = data[0]
vm_data['vm_id'] = data[1]
vm_data['snapshot_id'] = data[2]
vm_data['network_id'] = data[3]
print(vm_data)
if vm_data['cmd'] == "start":
if vbox.start(vm_data):
client_socket.send(b'OK\n')
else:
client_socket.send(b'ERROR\n')
elif vm_data['cmd'] == "stop":
if vbox.stop(vm_data):
client_socket.send(b'OK\n')
else:
client_socket.send(b'ERROR\n')
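                            elif vm_data['cmd'] == "restore":
                                # the regex above accepts 'restore' but the
                                # original dispatch handled only start/stop;
                                # route it to VBoxControl.restore()
                                if vbox.restore(vm_data):
                                    client_socket.send(b'OK\n')
                                else:
                                    client_socket.send(b'ERROR\n')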
                    else:
                        print("{} | Unknown command: {}".format(BPH_NAME, data))
                else:
                    # an empty recv() means the client disconnected; leave the
                    # inner loop and go back to accepting new connections
                    break
if __name__ == '__main__':
main()
| 37.361446
| 138
| 0.448565
|
54e04bb61b341ef9d3dad6089e5d30d4cc2e35ea
| 1,820
|
py
|
Python
|
voorbeelden/hardware/adc/example_arduino.py
|
ddland/TIS-TN-python
|
d1f7d864c09f0af907697e5d81d66a24c08814ad
|
[
"MIT"
] | 3
|
2019-05-19T14:52:43.000Z
|
2020-09-24T07:54:29.000Z
|
voorbeelden/hardware/adc/example_arduino.py
|
ddland/TIS-TN-python
|
d1f7d864c09f0af907697e5d81d66a24c08814ad
|
[
"MIT"
] | 1
|
2017-03-31T07:18:02.000Z
|
2017-05-03T20:21:20.000Z
|
voorbeelden/hardware/adc/example_arduino.py
|
ddland/TIS-TN-python
|
d1f7d864c09f0af907697e5d81d66a24c08814ad
|
[
"MIT"
] | 4
|
2017-01-31T10:12:49.000Z
|
2021-11-18T07:47:16.000Z
|
import serial
from TN_code.hardware import get_data
from TN_code.hardware import write_data
ser = serial.Serial('/dev/ttyACM0', 9600) # arduino
"""
AnalogReadSerial.ino -> 1 datapunt
/*
AnalogReadSerial
Reads an analog input on pin 0, prints the result to the serial monitor.
Attach the center pin of a potentiometer to pin A0, and the outside pins
to +5V and ground.
This example code is in the public domain.
*/
// the setup routine runs once when you press reset:
void setup() {
// initialize serial communication at 9600 bits per second:
Serial.begin(9600);
}
// the loop routine runs over and over again forever:
void loop() {
// read the input on analog pin 0:
int sensorValue = analogRead(A0);
// print out the value you read:
Serial.println(sensorValue);
delay(1); // delay in between reads for stability
}
"""
# data = get_data.readArduino(ser)
"""
AnalogReadSerial.ino -> 2 datapunten
/*
AnalogReadSerial
Reads an analog input on pin 0, prints the result to the serial monitor.
Attach the center pin of a potentiometer to pin A0, and the outside pins
to +5V and ground.
This example code is in the public domain.
*/
String semicolumn, values2;
// the setup routine runs once when you press reset:
void setup() {
// initialize serial communication at 9600 bits per second:
Serial.begin(9600);
semicolumn = ";";
}
// the loop routine runs over and over again forever:
void loop() {
// read the input on analog pin 0:
int sensorValue1 = analogRead(A0);
int sensorValue2 = analogRead(A1);
// print out the value you read:
values2 = sensorValue1 + semicolumn;
values2 = values2 + sensorValue2;
Serial.println(values2);
delay(1); // delay in between reads for stability
}
"""
data = get_data.readArduino(ser, Ndata=2)
print(data)
| 24.931507
| 74
| 0.713736
|
b736d096ff0493d96a7dedcc3d8df9a646bf11f3
| 1,733
|
py
|
Python
|
utils/transforms.py
|
AndreRoelofs/Random-Erasing
|
2dd4c1ac82d27423fc16b450c8ea07a55cff7b9d
|
[
"Apache-2.0"
] | 650
|
2017-09-15T09:01:45.000Z
|
2022-03-22T08:22:54.000Z
|
utils/transforms.py
|
AndreRoelofs/Random-Erasing
|
2dd4c1ac82d27423fc16b450c8ea07a55cff7b9d
|
[
"Apache-2.0"
] | 18
|
2017-09-23T15:25:11.000Z
|
2022-03-09T13:23:00.000Z
|
utils/transforms.py
|
AndreRoelofs/Random-Erasing
|
2dd4c1ac82d27423fc16b450c8ea07a55cff7b9d
|
[
"Apache-2.0"
] | 160
|
2017-10-19T08:22:53.000Z
|
2022-03-25T07:00:32.000Z
|
from __future__ import absolute_import
import math    # used in __call__ for the erased-region geometry
import random  # used in __call__ to sample area, aspect ratio and location
from torchvision.transforms import *
import numpy as np
import torch
class RandomErasing(object):
    """Randomly erase a rectangular region of a tensor image (data augmentation).
    EPSILON is the erasing probability; sl/sh bound the erased area as a fraction
    of the image area; r1 bounds the aspect ratio; mean gives per-channel fill values.
    """
    def __init__(self, EPSILON = 0.5, sl = 0.02, sh = 0.4, r1 = 0.3, mean=[0.4914, 0.4822, 0.4465]):
        self.EPSILON = EPSILON
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) > self.EPSILON:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1/self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
#img[0, x1:x1+h, y1:y1+w] = random.uniform(0, 1)
#img[1, x1:x1+h, y1:y1+w] = random.uniform(0, 1)
#img[2, x1:x1+h, y1:y1+w] = random.uniform(0, 1)
img[0, x1:x1+h, y1:y1+w] = self.mean[0]
img[1, x1:x1+h, y1:y1+w] = self.mean[1]
img[2, x1:x1+h, y1:y1+w] = self.mean[2]
#img[:, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(3, h, w))
else:
img[0, x1:x1+h, y1:y1+w] = self.mean[1]
# img[0, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(1, h, w))
return img
return img
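# Illustrative usage sketch (not part of the original module): composing
# RandomErasing with standard CIFAR-style preprocessing; the default `mean`
# above matches the CIFAR-10 channel means.
if __name__ == '__main__':
    transform_train = Compose([
        RandomCrop(32, padding=4),
        RandomHorizontalFlip(),
        ToTensor(),
        RandomErasing(EPSILON=0.5),
    ])
    print(transform_train)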
| 36.104167
| 100
| 0.491056
|
a2684607f62942c572a3f726e3f07004430e0d84
| 9,097
|
py
|
Python
|
EnterpriseAPT29Eval.py
|
0xF2EDCA5A/EnterpriseAPT29Eval
|
425a62ca34a9baba8283a97a4124cb79d0e662e3
|
[
"MIT"
] | null | null | null |
EnterpriseAPT29Eval.py
|
0xF2EDCA5A/EnterpriseAPT29Eval
|
425a62ca34a9baba8283a97a4124cb79d0e662e3
|
[
"MIT"
] | null | null | null |
EnterpriseAPT29Eval.py
|
0xF2EDCA5A/EnterpriseAPT29Eval
|
425a62ca34a9baba8283a97a4124cb79d0e662e3
|
[
"MIT"
] | null | null | null |
from enum import Enum
import pandas as pd
import json
import glob
import os
class EnterpriseAPT29Eval():
def __init__(self, filename):
        # './data/<vendor>.json' -> '<vendor>'
        self._vendor = filename.split('/', 2)[2]
        self._vendor = self._vendor.split('.', 1)[0]
with open(filename, 'r') as infile:
data=infile.read()
self._obj = json.loads(data)['Techniques']
self._df = pd.json_normalize(self._obj,'Steps', ['TechniqueId','TechniqueName', 'Tactics'])
self._steps = None
self._dfir = None
self._mssp = None
self._scores = None
self._visibility = None
self._correlated = None
self._actionability = None
self._alerts = None
self._alerts_correlated = None
self._uncorrelated_alert_steps = None
# sort and reindex dataframe by substep
def sortSubSteps(self, cleanup=False):
ver = self._df['SubStep'].str.split('.', expand=True)
self._df['Major'] = ver[0].astype(int)
self._df['Alpha'] = ver[1]
self._df['Minor'] = ver[2].astype(int)
self._df.sort_values(by=['Major','Alpha','Minor'], inplace=True)
self._df.reset_index(drop=True, inplace=True)
if cleanup:
self._df.drop(columns=['Major', 'Alpha', 'Minor'], inplace=True)
# flatten Tactics json, throwing away id's since not sequential anyway
def flattenTactics(self, inplace=False):
self._df['Tactics' if inplace else 'Tactic'] = self._df['Tactics'].apply(lambda x: x[0]['TacticName'] if len(x)==1 else x[0]['TacticName'] + ', ' + x[1]['TacticName'])
# row level operations when flattening detections
def _flattenDetections(self, detections, confchange=False):
ret, mods, mssp = 'None', [], False
dt = Enum('DetectionTypes', 'None Telemetry General Tactic Technique')
for detection in detections:
# check if we're allowing conf change and there is one
if not confchange:
ischange = False
for modifier in detection['Modifiers']:
if modifier.startswith('Configuration Change'):
ischange = True
if ischange:
continue
if detection['DetectionType'] == 'N/A':
ret = detection['DetectionType']
mods = detection['Modifiers']
break
if detection['DetectionType'] == 'MSSP':
mssp = True
elif dt[ret].value < dt[detection['DetectionType']].value:
ret = detection['DetectionType']
mods = detection['Modifiers']
return pd.Series([ret, sorted(mods), mssp])
def flattenDetections(self, inplace=False, confchange=False):
detections = self._df['Detections'].apply(lambda x: self._flattenDetections(x, confchange))
self._df['Detections' if inplace else 'Detection'] = detections[0]
self._df['Modifiers'] = detections[1]
self._df['MSSP'] = detections[2]
def get_steps(self):
if self._steps == None:
self.flattenDetections(confchange=True)
removed = pd.value_counts(self._df['Detection'].values)['N/A']
self._steps = len(self._df.index) - removed
return self._steps
steps = property(get_steps)
# This attempts to calculate the max visibility the product enables
    # when configured to see/detect everything, as may be advantageous for
# a digital forensics professional performing an incident response.
def score_dfir(self):
if self._steps == None:
self.get_steps()
if self._dfir == None:
misses = pd.value_counts(self._df['Detection'].values)['None']
self._dfir = self._steps - misses
def get_dfir(self):
if self._dfir == None:
self.score_dfir()
return self._dfir
dfir = property(get_dfir)
# This is a straight count of the number of MSSP detections reported
# by MITRE during the evaluation. This scoring was done under the
# DFIR configuration during the eval and must be compared to that.
def score_mssp(self):
if self._dfir == None:
self.score_dfir()
if self._mssp == None:
if True in self._df['MSSP'].values:
self._mssp = pd.value_counts(self._df['MSSP'].values)[True]
else:
self._mssp = 0
def get_mssp(self):
if self._mssp == None:
self.score_mssp()
return self._mssp
mssp = property(get_mssp)
def score_detections(self):
self.sortSubSteps()
if self._visibility == None:
self.flattenDetections(confchange=False)
misses = pd.value_counts(self._df['Detection'].values)['None']
self._visibility = self._steps - misses
if self._correlated == None:
self._correlated = 0
self._alerts = 0
self._alerts_correlated = 0
self._uncorrelated_alert_steps = 0
self._techniques = 0
arr = []
for index, row in self._df.iterrows():
if 'Correlated' in row['Modifiers']:
self._correlated += 1
if 'Alert' in row['Modifiers']:
self._alerts += 1
if 'Correlated' in row['Modifiers']:
self._alerts_correlated += 1
elif row['Major'] not in arr:
self._uncorrelated_alert_steps += 1
arr.append(row['Major'])
if row['Detection'] == 'Technique':
self._techniques += 1
if self._actionability == None:
self._efficiency = 1 - (self._alerts/self._steps)
if self._alerts > 0:
self._quality = (self._alerts_correlated + self._uncorrelated_alert_steps + self._techniques)/(2 * self._alerts)
else:
self._quality = 0
self._actionability = self._efficiency * self._quality
if self._scores == None:
self._scores = {'vendor' : self._vendor, \
'alerts' : self._alerts, \
'visibility' : self._visibility/self._steps, \
'correlation' : self._correlated/self._visibility, \
'efficiency' : self._efficiency, \
'quality' : self._quality, \
'actionability': self._actionability}
def get_scores(self):
if self._scores == None:
self.score_detections()
return self._scores
scores = property(get_scores)
def get_actionability(self):
if self._actionability == None:
self.score_detections()
return self._actionability
actionability = property(get_actionability)
def get_efficiency(self):
if self._efficiency == None:
self.score_detections()
return self._efficiency
efficiency = property(get_efficiency)
def get_quality(self):
if self._quality == None:
self.score_detections()
return self._quality
quality = property(get_quality)
def get_visibility(self):
if self._visibility == None:
self.score_detections()
return self._visibility
visibility = property(get_visibility)
def get_correlated(self):
if self._correlated == None:
self.score_detections()
return self._correlated
correlated = property(get_correlated)
def get_vendor(self):
return self._vendor
vendor = property(get_vendor)
def get_alerts(self):
if self._alerts == None:
self.score_detections()
return self._alerts
alerts = property(get_alerts)
def get_dataframe(self):
return self._df
df = property(get_dataframe)
def readout(results):
print(f'{results.vendor}\n---------------------------')
if results.mssp > 0:
print(f'The MSSP service was able to detect {results.mssp} of the {results.dfir} events the product was able')
print(f'to detect under a dfir configuration, for an efficacy of {(results.mssp * 100)/results.dfir :.2f}%')
else:
print(f'The vendor doesn\'t appear to have been leveraging an MSSP service. It should')
print(f'still be noted that a dfir configuration identified {results.dfir} events.')
print(f'\nThe product provided visibility out of the box for {results.visibility} of {results.steps} steps, for an')
print(f'efficacy of {(results.visibility * 100)/results.steps :.2f}%')
print(f'\nThe product was able to correlate {results.correlated} of the {results.visibility} events it had visibility into')
print(f'out of the box, for an efficacy of {(results.correlated * 100)/results.visibility :.2f}%\n')
if results.alerts > 0:
print(f'The product generated {results.alerts} distinct alerts for an efficiency of {results.efficiency * 100 :.2f}%, with an')
print(f'alert quality of {results.quality * 100:.2f}%, for an overall alert actionability metric of {results.quality * results.efficiency * 100 :.2f}%\n')
else:
print(f'The product was unable to generate any alerts.\n')
def write_xlsx(dfs, columns=['SubStep', 'Procedure', 'Tactic', 'TechniqueId', 'TechniqueName', 'Detection', 'Modifiers', 'MSSP']):
    writer = pd.ExcelWriter('apt29eval.xlsx', engine='xlsxwriter')
results = pd.DataFrame(columns=['vendor', \
'alerts', \
'visibility', \
'correlation', \
'efficiency', \
'quality', \
'actionability'])
# Write out results tab
for vendor in dfs.keys():
results = results.append([dfs[vendor].scores], ignore_index=True)
results.to_excel(writer, sheet_name='Results', index=False)
# Write out individual vendor tabs
for vendor in dfs.keys():
dfs[vendor].flattenTactics()
dfs[vendor].sortSubSteps(cleanup=True)
dfs[vendor].df.to_excel(writer, sheet_name=vendor, index=False, columns=columns)
writer.save()
if __name__ == '__main__':
results = {}
for infile in sorted(glob.glob(os.path.join('./data/', '*json'))):
obj = EnterpriseAPT29Eval(infile)
readout(obj)
results.update({obj.vendor: obj})
write_xlsx(results)
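# Illustrative invocation sketch (not part of the original file): place the
# MITRE APT29 evaluation JSON files under ./data/ and run the module; it
# prints a per-vendor readout and writes apt29eval.xlsx.
#   python EnterpriseAPT29Eval.py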
| 30.62963
| 169
| 0.68682
|
e3da042c1f03d48514d0799410f5572c12656ce4
| 32,323
|
py
|
Python
|
numba/tests/test_array_reductions.py
|
blair1306/numba
|
3b9647d17d653abac15363da604eeb804dbdd15a
|
[
"BSD-2-Clause"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
numba/tests/test_array_reductions.py
|
blair1306/numba
|
3b9647d17d653abac15363da604eeb804dbdd15a
|
[
"BSD-2-Clause"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
numba/tests/test_array_reductions.py
|
blair1306/numba
|
3b9647d17d653abac15363da604eeb804dbdd15a
|
[
"BSD-2-Clause"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
from itertools import product, combinations_with_replacement
import numpy as np
from numba import jit, typeof
from numba.core.compiler import compile_isolated
from numba.tests.support import TestCase, MemoryLeakMixin, tag
import unittest
def array_all(arr):
return arr.all()
def array_all_global(arr):
return np.all(arr)
def array_any(arr):
return arr.any()
def array_any_global(arr):
return np.any(arr)
def array_cumprod(arr):
return arr.cumprod()
def array_cumprod_global(arr):
return np.cumprod(arr)
def array_nancumprod(arr):
return np.nancumprod(arr)
def array_cumsum(arr):
return arr.cumsum()
def array_cumsum_global(arr):
return np.cumsum(arr)
def array_nancumsum(arr):
return np.nancumsum(arr)
def array_sum(arr):
return arr.sum()
def array_sum_global(arr):
return np.sum(arr)
def array_prod(arr):
return arr.prod()
def array_prod_global(arr):
return np.prod(arr)
def array_mean(arr):
return arr.mean()
def array_mean_global(arr):
return np.mean(arr)
def array_var(arr):
return arr.var()
def array_var_global(arr):
return np.var(arr)
def array_std(arr):
return arr.std()
def array_std_global(arr):
return np.std(arr)
def array_min(arr):
return arr.min()
def array_min_global(arr):
return np.min(arr)
def array_max(arr):
return arr.max()
def array_max_global(arr):
return np.max(arr)
def array_argmin(arr):
return arr.argmin()
def array_argmin_global(arr):
return np.argmin(arr)
def array_argmax(arr):
return arr.argmax()
def array_argmax_global(arr):
return np.argmax(arr)
def array_median_global(arr):
return np.median(arr)
def array_nanmin(arr):
return np.nanmin(arr)
def array_nanmax(arr):
return np.nanmax(arr)
def array_nanmean(arr):
return np.nanmean(arr)
def array_nansum(arr):
return np.nansum(arr)
def array_nanprod(arr):
return np.nanprod(arr)
def array_nanstd(arr):
return np.nanstd(arr)
def array_nanvar(arr):
return np.nanvar(arr)
def array_nanmedian_global(arr):
return np.nanmedian(arr)
def array_percentile_global(arr, q):
return np.percentile(arr, q)
def array_nanpercentile_global(arr, q):
return np.nanpercentile(arr, q)
def array_ptp_global(a):
return np.ptp(a)
def array_quantile_global(arr, q):
return np.quantile(arr, q)
def array_nanquantile_global(arr, q):
return np.nanquantile(arr, q)
def base_test_arrays(dtype):
if dtype == np.bool_:
def factory(n):
assert n % 2 == 0
return np.bool_([0, 1] * (n // 2))
else:
def factory(n):
return np.arange(n, dtype=dtype) + 1
a1 = factory(10)
a2 = factory(10).reshape(2, 5)
# The prod() of this array fits in a 32-bit int
a3 = (factory(12))[::-1].reshape((2, 3, 2), order='A')
assert not (a3.flags.c_contiguous or a3.flags.f_contiguous)
return [a1, a2, a3]
def full_test_arrays(dtype):
array_list = base_test_arrays(dtype)
# Add floats with some mantissa
if dtype == np.float32:
array_list += [a / 10 for a in array_list]
# add imaginary part
if dtype == np.complex64:
acc = []
for a in array_list:
tmp = a / 10 + 1j * a / 11
tmp[::2] = np.conj(tmp[::2])
acc.append(tmp)
array_list.extend(acc)
for a in array_list:
assert a.dtype == np.dtype(dtype)
return array_list
def run_comparative(compare_func, test_array):
arrty = typeof(test_array)
cres = compile_isolated(compare_func, [arrty])
numpy_result = compare_func(test_array)
numba_result = cres.entry_point(test_array)
return numpy_result, numba_result
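# Illustrative usage sketch (not part of the original module):
#   npr, nbr = run_comparative(array_sum, np.arange(5))
#   assert npr == nbr  # NumPy and the numba-compiled function agree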
class TestArrayReductions(MemoryLeakMixin, TestCase):
"""
Test array reduction methods and functions such as .sum(), .max(), etc.
"""
def setUp(self):
super(TestArrayReductions, self).setUp()
np.random.seed(42)
def check_reduction_basic(self, pyfunc, **kwargs):
# Basic reduction checks on 1-d float64 arrays
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
self.assertPreciseEqual(pyfunc(arr), cfunc(arr), **kwargs)
arr = np.float64([1.0, 2.0, 0.0, -0.0, 1.0, -1.5])
check(arr)
arr = np.float64([-0.0, -1.5])
check(arr)
arr = np.float64([-1.5, 2.5, 'inf'])
check(arr)
arr = np.float64([-1.5, 2.5, '-inf'])
check(arr)
arr = np.float64([-1.5, 2.5, 'inf', '-inf'])
check(arr)
arr = np.float64(['nan', -1.5, 2.5, 'nan', 3.0])
check(arr)
arr = np.float64(['nan', -1.5, 2.5, 'nan', 'inf', '-inf', 3.0])
check(arr)
arr = np.float64([5.0, 'nan', -1.5, 'nan'])
check(arr)
# Only NaNs
arr = np.float64(['nan', 'nan'])
check(arr)
def test_all_basic(self, pyfunc=array_all):
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
arr = np.float64([1.0, 0.0, float('inf'), float('nan')])
check(arr)
arr[1] = -0.0
check(arr)
arr[1] = 1.5
check(arr)
arr = arr.reshape((2, 2))
check(arr)
check(arr[::-1])
def test_any_basic(self, pyfunc=array_any):
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
self.assertPreciseEqual(pyfunc(arr), cfunc(arr))
arr = np.float64([0.0, -0.0, 0.0, 0.0])
check(arr)
arr[2] = float('nan')
check(arr)
arr[2] = float('inf')
check(arr)
arr[2] = 1.5
check(arr)
arr = arr.reshape((2, 2))
check(arr)
check(arr[::-1])
def test_sum_basic(self):
self.check_reduction_basic(array_sum)
def test_mean_basic(self):
self.check_reduction_basic(array_mean)
def test_var_basic(self):
self.check_reduction_basic(array_var, prec='double')
def test_std_basic(self):
self.check_reduction_basic(array_std)
def test_min_basic(self):
self.check_reduction_basic(array_min)
def test_max_basic(self):
self.check_reduction_basic(array_max)
def test_argmin_basic(self):
self.check_reduction_basic(array_argmin)
def test_argmax_basic(self):
self.check_reduction_basic(array_argmax)
def test_nanmin_basic(self):
self.check_reduction_basic(array_nanmin)
def test_nanmax_basic(self):
self.check_reduction_basic(array_nanmax)
def test_nanmean_basic(self):
self.check_reduction_basic(array_nanmean)
def test_nansum_basic(self):
self.check_reduction_basic(array_nansum)
def test_nanprod_basic(self):
self.check_reduction_basic(array_nanprod)
def test_nanstd_basic(self):
self.check_reduction_basic(array_nanstd)
def test_nanvar_basic(self):
self.check_reduction_basic(array_nanvar, prec='double')
def check_median_basic(self, pyfunc, array_variations):
cfunc = jit(nopython=True)(pyfunc)
def check(arr):
expected = pyfunc(arr)
got = cfunc(arr)
self.assertPreciseEqual(got, expected)
# Odd sizes
def check_odd(a):
check(a)
a = a.reshape((9, 7))
check(a)
check(a.T)
for a in array_variations(np.arange(63) + 10.5):
check_odd(a)
# Even sizes
def check_even(a):
check(a)
a = a.reshape((4, 16))
check(a)
check(a.T)
for a in array_variations(np.arange(64) + 10.5):
check_even(a)
@staticmethod
def _array_variations(a):
# Sorted, reversed, random, many duplicates, many NaNs, all NaNs
yield a
a = a[::-1].copy()
yield a
np.random.shuffle(a)
yield a
a[a % 4 >= 1] = 3.5
yield a
a[a % 4 >= 2] = np.nan
yield a
a[:] = np.nan
yield a
def test_median_basic(self):
pyfunc = array_median_global
def variations(a):
# Sorted, reversed, random, many duplicates
yield a
a = a[::-1].copy()
yield a
np.random.shuffle(a)
yield a
a[a % 4 >= 1] = 3.5
yield a
self.check_median_basic(pyfunc, variations)
def check_percentile_and_quantile(self, pyfunc, q_upper_bound):
cfunc = jit(nopython=True)(pyfunc)
def check(a, q, abs_tol=1e-12):
expected = pyfunc(a, q)
got = cfunc(a, q)
self.assertPreciseEqual(got, expected, abs_tol=abs_tol)
a = self.random.randn(27).reshape(3, 3, 3)
q = np.linspace(0, q_upper_bound, 14)[::-1]
check(a, q)
check(a, 0)
check(a, q_upper_bound / 2)
check(a, q_upper_bound)
not_finite = [np.nan, -np.inf, np.inf]
a.flat[:10] = self.random.choice(not_finite, 10)
self.random.shuffle(a)
self.random.shuffle(q)
check(a, q)
a = a.flatten().tolist()
q = q.flatten().tolist()
check(a, q)
check(tuple(a), tuple(q))
a = self.random.choice([1, 2, 3, 4], 10)
q = np.linspace(0, q_upper_bound, 5)
check(a, q)
# tests inspired by
# https://github.com/numpy/numpy/blob/345b2f6e/numpy/lib/tests/test_function_base.py
x = np.arange(8) * 0.5
np.testing.assert_equal(cfunc(x, 0), 0.)
np.testing.assert_equal(cfunc(x, q_upper_bound), 3.5)
np.testing.assert_equal(cfunc(x, q_upper_bound / 2), 1.75)
x = np.arange(12).reshape(3, 4)
q = np.array((0.25, 0.5, 1.0)) * q_upper_bound
np.testing.assert_equal(cfunc(x, q), [2.75, 5.5, 11.0])
x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
q = np.array((0.25, 0.50)) * q_upper_bound
np.testing.assert_equal(cfunc(x, q).shape, (2,))
q = np.array((0.25, 0.50, 0.75)) * q_upper_bound
np.testing.assert_equal(cfunc(x, q).shape, (3,))
x = np.arange(12).reshape(3, 4)
np.testing.assert_equal(cfunc(x, q_upper_bound / 2), 5.5)
self.assertTrue(np.isscalar(cfunc(x, q_upper_bound / 2)))
np.testing.assert_equal(cfunc([1, 2, 3], 0), 1)
a = np.array([2, 3, 4, 1])
cfunc(a, [q_upper_bound / 2])
np.testing.assert_equal(a, np.array([2, 3, 4, 1]))
def check_percentile_edge_cases(self, pyfunc, q_upper_bound=100):
cfunc = jit(nopython=True)(pyfunc)
def check(a, q, abs_tol=1e-14):
expected = pyfunc(a, q)
got = cfunc(a, q)
self.assertPreciseEqual(got, expected, abs_tol=abs_tol)
def convert_to_float_and_check(a, q, abs_tol=1e-14):
expected = pyfunc(a, q).astype(np.float64)
got = cfunc(a, q)
self.assertPreciseEqual(got, expected, abs_tol=abs_tol)
def _array_combinations(elements):
for i in range(1, 10):
for comb in combinations_with_replacement(elements, i):
yield np.array(comb)
# high number of combinations, many including non-finite values
q = (0, 0.1 * q_upper_bound, 0.2 * q_upper_bound, q_upper_bound)
element_pool = (1, -1, np.nan, np.inf, -np.inf)
for a in _array_combinations(element_pool):
check(a, q)
# edge cases - numpy exhibits behavioural differences across
# platforms, see: https://github.com/numpy/numpy/issues/13272
if q_upper_bound == 1:
_check = convert_to_float_and_check
else:
_check = check
a = np.array(5)
q = np.array(1)
_check(a, q)
a = True
q = False
_check(a, q)
a = np.array([False, True, True])
q = a
_check(a, q)
a = 5
q = q_upper_bound / 2
_check(a, q)
def check_percentile_exceptions(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check_err(a, q):
with self.assertRaises(ValueError) as raises:
cfunc(a, q)
self.assertEqual(
"Percentiles must be in the range [0, 100]",
str(raises.exception)
)
# Exceptions leak references
self.disable_leak_check()
a = np.arange(5)
check_err(a, -5) # q less than 0
check_err(a, (1, 10, 105)) # q contains value greater than 100
check_err(a, (1, 10, np.nan)) # q contains nan
with self.assertTypingError() as e:
a = np.arange(5) * 1j
q = 0.1
cfunc(a, q)
self.assertIn('Not supported for complex dtype', str(e.exception))
def check_quantile_exceptions(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check_err(a, q):
with self.assertRaises(ValueError) as raises:
cfunc(a, q)
self.assertEqual(
"Quantiles must be in the range [0, 1]",
str(raises.exception)
)
# Exceptions leak references
self.disable_leak_check()
a = np.arange(5)
check_err(a, -0.5) # q less than 0
check_err(a, (0.1, 0.10, 1.05)) # q contains value greater than 1
check_err(a, (0.1, 0.10, np.nan)) # q contains nan
with self.assertTypingError() as e:
a = np.arange(5) * 1j
q = 0.1
cfunc(a, q)
self.assertIn('Not supported for complex dtype', str(e.exception))
def test_percentile_basic(self):
pyfunc = array_percentile_global
self.check_percentile_and_quantile(pyfunc, q_upper_bound=100)
self.check_percentile_edge_cases(pyfunc, q_upper_bound=100)
self.check_percentile_exceptions(pyfunc)
def test_nanpercentile_basic(self):
pyfunc = array_nanpercentile_global
self.check_percentile_and_quantile(pyfunc, q_upper_bound=100)
self.check_percentile_edge_cases(pyfunc, q_upper_bound=100)
self.check_percentile_exceptions(pyfunc)
def test_quantile_basic(self):
pyfunc = array_quantile_global
self.check_percentile_and_quantile(pyfunc, q_upper_bound=1)
self.check_percentile_edge_cases(pyfunc, q_upper_bound=1)
self.check_quantile_exceptions(pyfunc)
def test_nanquantile_basic(self):
pyfunc = array_nanquantile_global
self.check_percentile_and_quantile(pyfunc, q_upper_bound=1)
self.check_percentile_edge_cases(pyfunc, q_upper_bound=1)
self.check_quantile_exceptions(pyfunc)
def test_nanmedian_basic(self):
pyfunc = array_nanmedian_global
self.check_median_basic(pyfunc, self._array_variations)
def test_array_sum_global(self):
arr = np.arange(10, dtype=np.int32)
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_sum_global, [arrty])
cfunc = cres.entry_point
self.assertEqual(np.sum(arr), cfunc(arr))
def test_array_prod_int_1d(self):
arr = np.arange(10, dtype=np.int32) + 1
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_prod, [arrty])
cfunc = cres.entry_point
self.assertEqual(arr.prod(), cfunc(arr))
def test_array_prod_float_1d(self):
arr = np.arange(10, dtype=np.float32) + 1 / 10
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_prod, [arrty])
cfunc = cres.entry_point
np.testing.assert_allclose(arr.prod(), cfunc(arr))
def test_array_prod_global(self):
arr = np.arange(10, dtype=np.int32)
arrty = typeof(arr)
self.assertEqual(arrty.ndim, 1)
self.assertEqual(arrty.layout, 'C')
cres = compile_isolated(array_prod_global, [arrty])
cfunc = cres.entry_point
np.testing.assert_allclose(np.prod(arr), cfunc(arr))
def check_cumulative(self, pyfunc):
arr = np.arange(2, 10, dtype=np.int16)
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
arr = np.linspace(2, 8, 6)
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
arr = arr.reshape((3, 2))
expected, got = run_comparative(pyfunc, arr)
self.assertPreciseEqual(got, expected)
def test_array_cumsum(self):
self.check_cumulative(array_cumsum)
def test_array_cumsum_global(self):
self.check_cumulative(array_cumsum_global)
def test_array_cumprod(self):
self.check_cumulative(array_cumprod)
def test_array_cumprod_global(self):
self.check_cumulative(array_cumprod_global)
def check_aggregation_magnitude(self, pyfunc, is_prod=False):
"""
Check that integer overflows are avoided (issue #931).
"""
# Overflows are avoided here (ints are cast either to intp
# or float64).
n_items = 2 if is_prod else 10 # avoid overflow on prod()
arr = (np.arange(n_items) + 40000).astype('int16')
npr, nbr = run_comparative(pyfunc, arr)
self.assertPreciseEqual(npr, nbr)
# Overflows are avoided for functions returning floats here.
# Other functions may wrap around.
arr = (np.arange(10) + 2**60).astype('int64')
npr, nbr = run_comparative(pyfunc, arr)
self.assertPreciseEqual(npr, nbr)
arr = arr.astype('uint64')
npr, nbr = run_comparative(pyfunc, arr)
self.assertPreciseEqual(npr, nbr)
def test_sum_magnitude(self):
self.check_aggregation_magnitude(array_sum)
self.check_aggregation_magnitude(array_sum_global)
def test_cumsum_magnitude(self):
self.check_aggregation_magnitude(array_cumsum)
self.check_aggregation_magnitude(array_cumsum_global)
def test_nancumsum_magnitude(self):
self.check_aggregation_magnitude(array_nancumsum, is_prod=True)
def test_prod_magnitude(self):
self.check_aggregation_magnitude(array_prod, is_prod=True)
self.check_aggregation_magnitude(array_prod_global, is_prod=True)
def test_cumprod_magnitude(self):
self.check_aggregation_magnitude(array_cumprod, is_prod=True)
self.check_aggregation_magnitude(array_cumprod_global, is_prod=True)
def test_nancumprod_magnitude(self):
self.check_aggregation_magnitude(array_nancumprod, is_prod=True)
def test_mean_magnitude(self):
self.check_aggregation_magnitude(array_mean)
self.check_aggregation_magnitude(array_mean_global)
def test_var_magnitude(self):
self.check_aggregation_magnitude(array_var)
self.check_aggregation_magnitude(array_var_global)
def test_std_magnitude(self):
self.check_aggregation_magnitude(array_std)
self.check_aggregation_magnitude(array_std_global)
def _do_check_nptimedelta(self, pyfunc, arr):
arrty = typeof(arr)
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
# Even vs. odd size, for np.median
self.assertPreciseEqual(cfunc(arr[:-1]), pyfunc(arr[:-1]))
# Test with different orders, for np.median
arr = arr[::-1].copy() # Keep 'C' layout
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
np.random.shuffle(arr)
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
# Test with a NaT
arr[arr.size // 2] = 'NaT'
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
if 'median' not in pyfunc.__name__:
# Test with (val, NaT)^N (and with the random NaT from above)
# use a loop, there's some weird thing/bug with arr[1::2] = 'NaT'
# Further Numba has bug(s) relating to NaN/NaT handling in anything
# using a partition such as np.median
for x in range(1, len(arr), 2):
arr[x] = 'NaT'
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
# Test with all NaTs
arr.fill(arrty.dtype('NaT'))
self.assertPreciseEqual(cfunc(arr), pyfunc(arr))
def check_npdatetime(self, pyfunc):
arr = np.arange(10).astype(dtype='M8[Y]')
self._do_check_nptimedelta(pyfunc, arr)
def check_nptimedelta(self, pyfunc):
arr = np.arange(10).astype(dtype='m8[s]')
self._do_check_nptimedelta(pyfunc, arr)
def test_min_npdatetime(self):
self.check_npdatetime(array_min)
self.check_nptimedelta(array_min)
def test_max_npdatetime(self):
self.check_npdatetime(array_max)
self.check_nptimedelta(array_max)
def test_argmin_npdatetime(self):
self.check_npdatetime(array_argmin)
self.check_nptimedelta(array_argmin)
def test_argmax_npdatetime(self):
self.check_npdatetime(array_argmax)
self.check_nptimedelta(array_argmax)
def test_median_npdatetime(self):
self.check_nptimedelta(array_median_global)
def test_sum_npdatetime(self):
self.check_nptimedelta(array_sum)
def test_cumsum_npdatetime(self):
self.check_nptimedelta(array_cumsum)
def test_mean_npdatetime(self):
self.check_nptimedelta(array_mean)
def check_nan_cumulative(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def _set_some_values_to_nan(a):
p = a.size // 2 # set approx half elements to NaN
np.put(a, np.random.choice(range(a.size), p, replace=False), np.nan)
return a
def a_variations():
yield np.linspace(-1, 3, 60).reshape(3, 4, 5)
yield np.array([np.inf, 3, 4])
yield np.array([True, True, True, False])
yield np.arange(1, 10)
yield np.asfortranarray(np.arange(1, 64) - 33.3)
yield np.arange(1, 10, dtype=np.float32)[::-1]
for a in a_variations():
check(a) # no nans
check(_set_some_values_to_nan(a.astype(np.float64))) # about 50% nans
# edge cases
check(np.array([]))
check(np.full(10, np.nan))
parts = np.array([np.nan, 2, np.nan, 4, 5, 6, 7, 8, 9])
a = parts + 1j * parts[::-1]
a = a.reshape(3, 3)
check(a)
def test_nancumprod_basic(self):
self.check_cumulative(array_nancumprod)
self.check_nan_cumulative(array_nancumprod)
def test_nancumsum_basic(self):
self.check_cumulative(array_nancumsum)
self.check_nan_cumulative(array_nancumsum)
def test_ptp_basic(self):
pyfunc = array_ptp_global
cfunc = jit(nopython=True)(pyfunc)
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def a_variations():
yield np.arange(10)
yield np.array([-1.1, np.nan, 2.2])
yield np.array([-np.inf, 5])
yield (4, 2, 5)
yield (1,)
yield np.full(5, 5)
yield [2.2, -2.3, 0.1]
a = np.linspace(-10, 10, 16).reshape(4, 2, 2)
yield a
yield np.asfortranarray(a)
yield a[::-1]
np.random.RandomState(0).shuffle(a)
yield a
yield 6
yield 6.5
yield -np.inf
yield 1 + 4j
yield [2.2, np.nan]
yield [2.2, np.inf]
yield ((4.1, 2.0, -7.6), (4.3, 2.7, 5.2))
yield np.full(5, np.nan)
yield 1 + np.nan * 1j
yield np.nan + np.nan * 1j
yield np.nan
for a in a_variations():
check(a)
def test_ptp_complex(self):
pyfunc = array_ptp_global
cfunc = jit(nopython=True)(pyfunc)
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def make_array(real_nan=False, imag_nan=False):
real = np.linspace(-4, 4, 25)
if real_nan:
real[4:9] = np.nan
imag = np.linspace(-5, 5, 25)
if imag_nan:
imag[7:12] = np.nan
return (real + 1j * imag).reshape(5, 5)
for real_nan, imag_nan in product([True, False], repeat=2):
comp = make_array(real_nan, imag_nan)
check(comp)
real = np.ones(8)
imag = np.arange(-4, 4)
comp = real + 1j * imag
check(comp)
comp = real - 1j * imag
check(comp)
comp = np.full((4, 4), fill_value=(1 - 1j))
check(comp)
def test_ptp_exceptions(self):
pyfunc = array_ptp_global
cfunc = jit(nopython=True)(pyfunc)
# Exceptions leak references
self.disable_leak_check()
with self.assertTypingError() as e:
cfunc(np.array((True, True, False)))
msg = "Boolean dtype is unsupported (as per NumPy)"
self.assertIn(msg, str(e.exception))
with self.assertRaises(ValueError) as e:
cfunc(np.array([]))
msg = "zero-size array reduction not possible"
self.assertIn(msg, str(e.exception))
def test_min_max_complex_basic(self):
pyfuncs = array_min_global, array_max_global
for pyfunc in pyfuncs:
cfunc = jit(nopython=True)(pyfunc)
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
real = np.linspace(-10, 10, 40)
real[:4] = real[-1]
imag = real * 2
a = real - imag * 1j
check(a)
for _ in range(10):
self.random.shuffle(real)
self.random.shuffle(imag)
dtype = self.random.choice([np.complex64, np.complex128])
a = real - imag * 1j
a[:4] = a[-1]
check(a.astype(dtype))
def test_nanmin_nanmax_complex_basic(self):
pyfuncs = array_nanmin, array_nanmax
for pyfunc in pyfuncs:
cfunc = jit(nopython=True)(pyfunc)
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
real = np.linspace(-10, 10, 40)
real[:4] = real[-1]
real[5:9] = np.nan
imag = real * 2
imag[7:12] = np.nan
a = real - imag * 1j
check(a)
for _ in range(10):
self.random.shuffle(real)
self.random.shuffle(imag)
a = real - imag * 1j
a[:4] = a[-1]
check(a)
def test_nanmin_nanmax_non_array_inputs(self):
pyfuncs = array_nanmin, array_nanmax
def check(a):
expected = pyfunc(a)
got = cfunc(a)
self.assertPreciseEqual(expected, got)
def a_variations():
yield [1, 6, 4, 2]
yield ((-10, 4, -12), (5, 200, -30))
yield np.array(3)
yield (2,)
yield 3.142
yield False
yield (np.nan, 3.142, -5.2, 3.0)
yield [np.inf, np.nan, -np.inf]
yield [(np.nan, 1.1), (-4.4, 8.7)]
for pyfunc in pyfuncs:
cfunc = jit(nopython=True)(pyfunc)
for a in a_variations():
check(a)
@classmethod
def install_generated_tests(cls):
# These form a testing product where each of the combinations are tested
# these function are tested in real and complex space
reduction_funcs = [array_sum, array_sum_global,
array_prod, array_prod_global,
array_mean, array_mean_global,
array_var, array_var_global,
array_std, array_std_global,
array_all, array_all_global,
array_any, array_any_global,
array_min, array_min_global,
array_max, array_max_global,
array_nanmax, array_nanmin,
array_nansum,
]
# these functions only work in real space as no complex comparison
# operator is implemented
reduction_funcs_rspace = [array_argmin, array_argmin_global,
array_argmax, array_argmax_global]
reduction_funcs += [array_nanmean, array_nanstd, array_nanvar]
reduction_funcs += [array_nanprod]
dtypes_to_test = [np.int32, np.float32, np.bool_, np.complex64]
def install_tests(dtypes, funcs):
# Install tests on class
for dt in dtypes:
test_arrays = full_test_arrays(dt)
for red_func, test_array in product(funcs, test_arrays):
# Create the name for the test function
test_name = "test_{0}_{1}_{2}d"
test_name = test_name.format(red_func.__name__,
test_array.dtype.name,
test_array.ndim)
def new_test_function(self, redFunc=red_func,
testArray=test_array,
testName=test_name):
ulps = 1
if 'prod' in red_func.__name__ and \
np.iscomplexobj(testArray):
# prod family accumulate slightly more error on
# some architectures (power, 32bit) for complex input
ulps = 3
npr, nbr = run_comparative(redFunc, testArray)
self.assertPreciseEqual(npr, nbr, msg=testName,
prec="single", ulps=ulps)
# Install it into the class
setattr(cls, test_name, new_test_function)
# install tests for reduction functions that only work in real space
install_tests(dtypes_to_test[:-1], reduction_funcs_rspace)
# install tests for reduction functions
install_tests(dtypes_to_test, reduction_funcs)
TestArrayReductions.install_generated_tests()
class TestArrayReductionsExceptions(MemoryLeakMixin, TestCase):
# int64, size 0
zero_size = np.arange(0)
def check_exception(self, pyfunc, msg):
cfunc = jit(nopython=True)(pyfunc)
# make sure NumPy raises consistently/no behaviour change
with self.assertRaises(BaseException):
pyfunc(self.zero_size)
# check numba impl raises expected
with self.assertRaises(ValueError) as e:
cfunc(self.zero_size)
self.assertIn(msg, str(e.exception))
@classmethod
def install(cls):
fn_to_msg = dict()
empty_seq = "attempt to get {0} of an empty sequence"
op_no_ident = ("zero-size array to reduction operation "
"{0}")
for x in [array_argmax, array_argmax_global, array_argmin,
array_argmin_global]:
fn_to_msg[x] = empty_seq
        for x in [array_max, array_max_global, array_min, array_min_global]:
fn_to_msg[x] = op_no_ident
name_template = "test_zero_size_array_{0}"
for fn, msg in fn_to_msg.items():
test_name = name_template.format(fn.__name__)
lmsg = msg.format(fn.__name__)
lmsg = lmsg.replace('array_','').replace('_global','')
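            # e.g. array_argmax maps to "attempt to get argmax of an empty sequence"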
def test_fn(self, func=fn, message=lmsg):
self.check_exception(func, message)
setattr(cls, test_name, test_fn)
TestArrayReductionsExceptions.install()
if __name__ == '__main__':
unittest.main()
| 31.199807
| 92
| 0.588497
|
1b2984c83bccefc7229589b51dabfb4e47b3e6d5
| 13,720
|
py
|
Python
|
pyro/util.py
|
ssameerr/pyro
|
c04fc931631ec9e8694def207b5ca0e432d5e501
|
[
"MIT"
] | null | null | null |
pyro/util.py
|
ssameerr/pyro
|
c04fc931631ec9e8694def207b5ca0e432d5e501
|
[
"MIT"
] | null | null | null |
pyro/util.py
|
ssameerr/pyro
|
c04fc931631ec9e8694def207b5ca0e432d5e501
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import functools
import re
import warnings
import graphviz
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import Parameter
from pyro.poutine.poutine import _PYRO_STACK
from pyro.poutine.util import site_is_subsample
def parse_torch_version():
"""
Parses `torch.__version__` into a semver-ish version tuple.
This is needed to handle subpatch `_n` parts outside of the semver spec.
:returns: a tuple `(major, minor, patch, extra_stuff)`
"""
match = re.match(r"(\d\.\d\.\d)(.*)", torch.__version__)
major, minor, patch = map(int, match.group(1).split("."))
extra_stuff = match.group(2)
return major, minor, patch, extra_stuff
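# Illustrative sketch (added for exposition; not part of the original file):
# the regex splits the semver-ish prefix from any trailing tag, so a
# hypothetical torch.__version__ of "0.3.1.post2" would parse as
#
#     parse_torch_version()  ->  (0, 3, 1, '.post2')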
def detach_iterable(iterable):
if isinstance(iterable, Variable):
return iterable.detach()
else:
return [var.detach() for var in iterable]
def _dict_to_tuple(d):
"""
    Recursively converts a dictionary to a tuple of key-value tuples.
    Only intended for use as a helper function inside memoize!!
    May break when keys can't be sorted, but that is not an expected use-case
"""
if isinstance(d, dict):
return tuple([(k, _dict_to_tuple(d[k])) for k in sorted(d.keys())])
else:
return d
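# Worked example (illustrative only): nested dicts become sorted nested tuples,
# which makes them usable as hashable cache keys in memoize below, e.g.
#
#     _dict_to_tuple({'b': 2, 'a': {'c': 3}})
#     ->  (('a', (('c', 3),)), ('b', 2))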
def get_tensor_data(t):
if isinstance(t, Variable):
return t.data
return t
def memoize(fn):
"""
https://stackoverflow.com/questions/1988804/what-is-memoization-and-how-can-i-use-it-in-python
unbounded memoize
alternate in py3: https://docs.python.org/3/library/functools.html
lru_cache
"""
mem = {}
def _fn(*args, **kwargs):
kwargs_tuple = _dict_to_tuple(kwargs)
if (args, kwargs_tuple) not in mem:
mem[(args, kwargs_tuple)] = fn(*args, **kwargs)
return mem[(args, kwargs_tuple)]
return _fn
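# Minimal usage sketch (illustrative; not part of the original module):
#
#     @memoize
#     def slow_double(x, scale=1):
#         return 2 * x * scale
#
#     slow_double(21)           # computed and cached under key ((21,), ())
#     slow_double(21)           # served from the cache
#     slow_double(21, scale=2)  # different kwargs -> a new cache entry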
def set_rng_seed(rng_seed):
"""
Sets seeds of torch, numpy, and torch.cuda (if available).
:param int rng_seed: The seed value.
"""
torch.manual_seed(rng_seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(rng_seed)
np.random.seed(rng_seed)
def ones(*args, **kwargs):
"""
:param torch.Tensor type_as: optional argument for tensor type
A convenience function for Parameter(torch.ones(...))
"""
retype = kwargs.pop('type_as', None)
p_tensor = torch.ones(*args, **kwargs)
return Parameter(p_tensor if retype is None else p_tensor.type_as(retype))
def zeros(*args, **kwargs):
"""
:param torch.Tensor type_as: optional argument for tensor type
A convenience function for Parameter(torch.zeros(...))
"""
retype = kwargs.pop('type_as', None)
p_tensor = torch.zeros(*args, **kwargs)
return Parameter(p_tensor if retype is None else p_tensor.type_as(retype))
def ng_ones(*args, **kwargs):
"""
:param torch.Tensor type_as: optional argument for tensor type
A convenience function for Variable(torch.ones(...), requires_grad=False)
"""
retype = kwargs.pop('type_as', None)
p_tensor = torch.ones(*args, **kwargs)
return Variable(p_tensor if retype is None else p_tensor.type_as(retype), requires_grad=False)
def ng_zeros(*args, **kwargs):
"""
:param torch.Tensor type_as: optional argument for tensor type
    A convenience function for Variable(torch.zeros(...), requires_grad=False)
"""
retype = kwargs.pop('type_as', None)
p_tensor = torch.zeros(*args, **kwargs)
return Variable(p_tensor if retype is None else p_tensor.type_as(retype), requires_grad=False)
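# Note on the ones/zeros helpers above (added for exposition): the optional
# type_as keyword retypes the new tensor to match an existing one, e.g.
#
#     t = torch.zeros(3).double()
#     v = ng_ones(3, type_as=t)  # Variable of dtype float64, requires_grad=False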
def log_sum_exp(vecs):
n = len(vecs.size())
if n == 1:
vecs = vecs.view(1, -1)
_, idx = torch.max(vecs, 1)
max_score = torch.index_select(vecs, 1, idx.view(-1))
ret = max_score + torch.log(torch.sum(torch.exp(vecs - max_score.expand_as(vecs))))
if n == 1:
return ret.view(-1)
return ret
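# Note (added): subtracting the per-row max before exponentiating is the
# standard log-sum-exp stabilisation; naively, log(exp(1000) + exp(1001))
# overflows, but rewriting it as 1000 + log(exp(0) + exp(1)) is exact and safe.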
def zero_grads(tensors):
"""
Sets gradients of list of Variables to zero in place
"""
for p in tensors:
if p.grad is not None:
if p.grad.volatile:
p.grad.data.zero_()
else:
data = p.grad.data
p.grad = Variable(data.new().resize_as_(data).zero_())
def apply_stack(initial_msg):
"""
:param dict initial_msg: the starting version of the trace site
:returns: an updated message that is the final version of the trace site
Execute the poutine stack at a single site according to the following scheme:
1. Walk down the stack from top to bottom, collecting into the message
all information necessary to execute the stack at that site
2. For each poutine in the stack from bottom to top:
Execute the poutine with the message;
If the message field "stop" is True, stop;
Otherwise, continue
3. Return the updated message
"""
stack = _PYRO_STACK
# TODO check at runtime if stack is valid
# msg is used to pass information up and down the stack
msg = initial_msg
# first, gather all information necessary to apply the stack to this site
for frame in reversed(stack):
msg = frame._prepare_site(msg)
    # apply the stack from bottom to top, stopping early if a frame sets msg["stop"]
for frame in stack:
assert msg["type"] in ("sample", "param"), \
"{} is an invalid site type, how did that get there?".format(msg["type"])
msg["value"] = getattr(frame, "_pyro_{}".format(msg["type"]))(msg)
if msg["stop"]:
break
return msg
class NonlocalExit(Exception):
"""
Exception for exiting nonlocally from poutine execution.
Used by poutine.EscapePoutine to return site information.
"""
def __init__(self, site, *args, **kwargs):
"""
:param site: message at a pyro site
constructor. Just stores the input site.
"""
super(NonlocalExit, self).__init__(*args, **kwargs)
self.site = site
def enum_extend(trace, msg, num_samples=None):
"""
:param trace: a partial trace
:param msg: the message at a pyro primitive site
:param num_samples: maximum number of extended traces to return.
:returns: a list of traces, copies of input trace with one extra site
Utility function to copy and extend a trace with sites based on the input site
whose values are enumerated from the support of the input site's distribution.
Used for exact inference and integrating out discrete variables.
"""
if num_samples is None:
num_samples = -1
# Batched .enumerate_support() assumes batched values are independent.
batch_shape = msg["fn"].batch_shape(msg["value"], *msg["args"], **msg["kwargs"])
is_batched = any(size > 1 for size in batch_shape)
inside_iarange = any(frame.vectorized for frame in msg["cond_indep_stack"])
if is_batched and not inside_iarange:
raise ValueError(
"Tried to enumerate a batched pyro.sample site '{}' outside of a pyro.iarange. "
"To fix, either enclose in a pyro.iarange, or avoid batching.".format(msg["name"]))
extended_traces = []
for i, s in enumerate(msg["fn"].enumerate_support(*msg["args"], **msg["kwargs"])):
if i > num_samples and num_samples >= 0:
break
msg_copy = msg.copy()
msg_copy.update(value=s)
tr_cp = trace.copy()
tr_cp.add_node(msg["name"], **msg_copy)
extended_traces.append(tr_cp)
return extended_traces
def mc_extend(trace, msg, num_samples=None):
"""
:param trace: a partial trace
:param msg: the message at a pyro primitive site
:param num_samples: maximum number of extended traces to return.
:returns: a list of traces, copies of input trace with one extra site
Utility function to copy and extend a trace with sites based on the input site
whose values are sampled from the input site's function.
Used for Monte Carlo marginalization of individual sample sites.
"""
if num_samples is None:
num_samples = 1
extended_traces = []
for i in range(num_samples):
msg_copy = msg.copy()
msg_copy["value"] = msg_copy["fn"](*msg_copy["args"], **msg_copy["kwargs"])
tr_cp = trace.copy()
tr_cp.add_node(msg_copy["name"], **msg_copy)
extended_traces.append(tr_cp)
return extended_traces
def discrete_escape(trace, msg):
"""
:param trace: a partial trace
:param msg: the message at a pyro primitive site
:returns: boolean decision value
Utility function that checks if a sample site is discrete and not already in a trace.
Used by EscapePoutine to decide whether to do a nonlocal exit at a site.
Subroutine for integrating out discrete variables for variance reduction.
"""
return (msg["type"] == "sample") and \
(not msg["is_observed"]) and \
(msg["name"] not in trace) and \
(getattr(msg["fn"], "enumerable", False))
def all_escape(trace, msg):
"""
:param trace: a partial trace
:param msg: the message at a pyro primitive site
:returns: boolean decision value
Utility function that checks if a site is not already in a trace.
Used by EscapePoutine to decide whether to do a nonlocal exit at a site.
Subroutine for approximately integrating out variables for variance reduction.
"""
return (msg["type"] == "sample") and \
(not msg["is_observed"]) and \
(msg["name"] not in trace)
def save_visualization(trace, graph_output):
"""
:param pyro.poutine.Trace trace: a trace to be visualized
:param graph_output: the graph will be saved to graph_output.pdf
:type graph_output: str
Take a trace generated by poutine.trace with `graph_type='dense'` and render
the graph with the output saved to file.
- non-reparameterized stochastic nodes are salmon
- reparameterized stochastic nodes are half salmon, half grey
- observation nodes are green
Example:
trace = pyro.poutine.trace(model, graph_type="dense").get_trace()
save_visualization(trace, 'output')
"""
g = graphviz.Digraph()
for label, node in trace.nodes.items():
if site_is_subsample(node):
continue
shape = 'ellipse'
if label in trace.stochastic_nodes and label not in trace.reparameterized_nodes:
fillcolor = 'salmon'
elif label in trace.reparameterized_nodes:
fillcolor = 'lightgrey;.5:salmon'
elif label in trace.observation_nodes:
fillcolor = 'darkolivegreen3'
else:
# only visualize RVs
continue
g.node(label, label=label, shape=shape, style='filled', fillcolor=fillcolor)
for label1, label2 in trace.edges:
if site_is_subsample(trace.nodes[label1]):
continue
if site_is_subsample(trace.nodes[label2]):
continue
g.edge(label1, label2)
g.render(graph_output, view=False, cleanup=True)
def check_model_guide_match(model_trace, guide_trace):
"""
:param pyro.poutine.Trace model_trace: Trace object of the model
:param pyro.poutine.Trace guide_trace: Trace object of the guide
:raises: RuntimeWarning, ValueError
Checks that (1) there is a bijection between the samples in the guide
and the samples in the model, (2) each `iarange` statement in the guide
also appears in the model, (3) at each sample site that appears in both
the model and guide, the model and guide agree on sample shape.
"""
# Check ordinary sample sites.
model_vars = set(name for name, site in model_trace.nodes.items()
if site["type"] == "sample" and not site["is_observed"]
if type(site["fn"]).__name__ != "_Subsample")
guide_vars = set(name for name, site in guide_trace.nodes.items()
if site["type"] == "sample"
if type(site["fn"]).__name__ != "_Subsample")
if not (guide_vars <= model_vars):
warnings.warn("Found vars in guide but not model: {}".format(guide_vars - model_vars))
if not (model_vars <= guide_vars):
warnings.warn("Found vars in model but not guide: {}".format(model_vars - guide_vars))
# Check shapes agree.
for name in model_vars & guide_vars:
model_site = model_trace.nodes[name]
guide_site = guide_trace.nodes[name]
if hasattr(model_site["fn"], "shape") and hasattr(guide_site["fn"], "shape"):
model_shape = model_site["fn"].shape(None, *model_site["args"], **model_site["kwargs"])
guide_shape = guide_site["fn"].shape(None, *guide_site["args"], **guide_site["kwargs"])
if model_shape != guide_shape:
raise ValueError("Model and guide dims disagree at site '{}': {} vs {}".format(
name, model_shape, guide_shape))
# Check subsample sites introduced by iarange.
model_vars = set(name for name, site in model_trace.nodes.items()
if site["type"] == "sample" and not site["is_observed"]
if type(site["fn"]).__name__ == "_Subsample")
guide_vars = set(name for name, site in guide_trace.nodes.items()
if site["type"] == "sample"
if type(site["fn"]).__name__ == "_Subsample")
if not (guide_vars <= model_vars):
warnings.warn("Found iarange statements in guide but not model: {}".format(guide_vars - model_vars))
def deep_getattr(obj, name):
"""
Python getattr() for arbitrarily deep attributes
Throws an AttributeError if bad attribute
"""
return functools.reduce(getattr, name.split("."), obj)
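# Usage sketch (illustrative; not part of the original file):
#
#     import os
#     deep_getattr(os, "path.join") is os.path.join   # True
#     deep_getattr(os, "path.nope")                    # raises AttributeError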
| 34.734177
| 108
| 0.654373
|
5eddd41fc903f23c5ff489f5db219a6d12f5ee1f
| 64
|
py
|
Python
|
google_screener_data_extract/__init__.py
|
spidezad/google_screener_data_extract
|
8efe14e73918808182d8745ef38c38f1ac686f6e
|
[
"BSD-3-Clause"
] | 28
|
2015-09-27T21:11:23.000Z
|
2021-05-17T06:33:20.000Z
|
google_screener_data_extract/__init__.py
|
spidezad/google_screener_data_extract
|
8efe14e73918808182d8745ef38c38f1ac686f6e
|
[
"BSD-3-Clause"
] | 1
|
2015-10-18T23:11:03.000Z
|
2018-03-27T05:58:10.000Z
|
google_screener_data_extract/__init__.py
|
spidezad/google_screener_data_extract
|
8efe14e73918808182d8745ef38c38f1ac686f6e
|
[
"BSD-3-Clause"
] | 24
|
2016-01-14T09:53:48.000Z
|
2018-05-17T02:00:56.000Z
|
from .google_screener_data_extract import GoogleStockDataExtract
| 64
| 64
| 0.9375
|
fe7c4626a7c0b1bb731c5b6a94b98e65b194c35e
| 4,755
|
py
|
Python
|
ps2/PeachPy/tor_tmsk_tmrc.py
|
SeiichiroMine/Tales-of-Rebirth
|
5cb00825dd19affed4062f1f849906b74bb7fcc0
|
[
"MIT"
] | 2
|
2021-06-17T14:56:59.000Z
|
2021-11-04T02:50:34.000Z
|
ps2/PeachPy/tor_tmsk_tmrc.py
|
SeiichiroMine/Tales-of-Rebirth
|
5cb00825dd19affed4062f1f849906b74bb7fcc0
|
[
"MIT"
] | null | null | null |
ps2/PeachPy/tor_tmsk_tmrc.py
|
SeiichiroMine/Tales-of-Rebirth
|
5cb00825dd19affed4062f1f849906b74bb7fcc0
|
[
"MIT"
] | 3
|
2021-06-17T14:57:16.000Z
|
2021-11-29T19:32:40.000Z
|
import sys
import os
import json
import struct
import re
import subprocess
import shutil
import string
tmsk_pointer_begin = 0x410
#tmsk_isize = 0xAC00
tmrc_pointer_begin = 0x450
extension = 'tm2'
#tmsk_num = data[0x404:0x406]
#tmrc_num = data[0x406:0x408]
#palette = data[:0x400]
##Header construction info
TIM2_header_magic = b'TIM2\x04\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00'
TIM2_header_tmskdata = b'\x40\xB0\x00\x00\x00\x04\x00\x00\x00\xAC\x00\x00\x30\x00\x00\x01\x00\x01\x03\x05\x00\x01\xAC\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
TIM2_palette_length = b'\x00\x04\x00\x00'
TIM2_header_length = b'\x30\x00\x00\x01\x00\x01\x03\x05'
blah_blah_blah = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
##pointer_end of the tmsk/tmrc pointer table is pointer_begin + (tmsk_num (or tmrc_num) * 4)
##each pointer is 4 bytes long
##pointers are relative to the end of the palette data, so a pointer that reads 0x100 actually points to 0x500 in the file (0x400 + 0x100 = 0x500)
##all tmsk images share the same data length, width and height, so a single fixed header works for all of them
##tmrc images have variable data length, width and height; the image length is not stored in the header, but it equals width * height
##the final tim2 should be tim header magic + header data + image data + palette
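## Worked example of the offset arithmetic above (added for exposition):
## a stored pointer of 0x100 is relative to the end of the 0x400-byte palette,
## so the absolute file offset is 0x400 + 0x100 = 0x500, which is exactly the
## `pointers[i] + 0x400` computation used in extract_tmsk/extract_tmrc below.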
def mkdir(name):
    try:
        os.mkdir(name)
    except OSError:
        # the directory already exists (or cannot be created); ignore either way
        pass
def extract_tmsk():
mkdir('FILE/tmsk/tim2')
for file in os.listdir('FILE/tmsk/'):
if not file.endswith('tmsk'):
continue
f = open('FILE/tmsk/' + file, 'rb')
data = f.read()
palette = data[:0x400]
        tmsk_num = data[0x404:0x406]
tmsk_num_int = int.from_bytes(tmsk_num, 'little')
tmsk_pointer_end = tmsk_pointer_begin + (tmsk_num_int * 4)
sexy_file = file.replace('.tmsk', '')
f.seek(tmsk_pointer_begin, 0)
pointers = []
while f.tell() < tmsk_pointer_end:
p = struct.unpack('<L', f.read(4))[0]
pointers.append(p)
for i in range(len(pointers)):
start = pointers[i] + 0x400
size = 0xAC00
f.seek(start, 0)
tmsk_idata = f.read(size)
            o = open('FILE/tmsk/tim2/' + sexy_file + '_' + '%02d.%s' % (i, extension), 'wb')
o.write(TIM2_header_magic + TIM2_header_tmskdata + tmsk_idata + palette)
o.close()
f.close()
def extract_tmrc():
mkdir('FILE/tmsk/tim2')
for file in os.listdir('FILE/tmsk/'):
if not file.endswith('tmsk'):
continue
f = open('FILE/tmsk/' + file, 'rb')
data = f.read()
palette = data[:0x400]
tmrc_num = data[0x406:0x408]
tmrc_num_int = int.from_bytes(tmrc_num, 'little')
tmrc_pointer_end = tmrc_pointer_begin + (tmrc_num_int * 4)
sexy_file = file.replace('.tmsk', '')
f.seek(tmrc_pointer_begin, 0)
pointers = []
while f.tell() < tmrc_pointer_end:
p = struct.unpack('<L', f.read(4))[0]
pointers.append(p)
for i in range(len(pointers)):
w_start = pointers[i] + 0x400 + 8
h_start = pointers[i] + 0x400 + 10
h_end = pointers[i] + 0x400 + 12
tmrc_w = data[w_start:h_start]
tmrc_h = data[h_start:h_end]
tmrc_w_int = int.from_bytes(tmrc_w, 'little')
tmrc_h_int = int.from_bytes(tmrc_h, 'little')
i_start = pointers[i] + 0x400 + 128
isize = tmrc_w_int * tmrc_h_int
f.seek(i_start)
tmrc_idata = f.read(isize)
TIM2_size = isize + 0x40 + 0x400
TIM2_size_bytes = TIM2_size.to_bytes(4, 'little')
img_size_bytes = isize.to_bytes(4, 'little')
            o = open('FILE/tmsk/tim2/' + sexy_file + '_' + 'tmrc' + '_' + '%02d.%s' % (i, extension), 'wb')
o.write(TIM2_header_magic + TIM2_size_bytes + TIM2_palette_length + img_size_bytes + TIM2_header_length + tmrc_w + tmrc_h + blah_blah_blah + tmrc_idata + palette)
o.close()
f.close()
if __name__ == '__main__':
if sys.argv[1] == 'extract' and sys.argv[2] == 'tmsk':
extract_tmsk()
elif sys.argv[1] == 'extract' and sys.argv[2] == 'tmrc':
extract_tmrc()
elif sys.argv[1] == 'help':
print('Tales of Rebirth Skit Image to TIM2 Converter\n')
print('By SymphoniaLauren\n')
print('USAGE:\n')
print('python tor_tmsk_tmrc.py extract [tmsk]/[tmrc]\n')
print('TMSK is what I call the skit faces, TMRC are the little animated tiles\nfor the small parts like the eyes and mouth')
else:
sys.exit(1)
| 39.625
| 218
| 0.617035
|
16b7e8181d9a4136104c0a75fc0de84740ca772b
| 946
|
py
|
Python
|
banners/migrations/0001_initial.py
|
AlexGolovaschenko/OwenAgriculture
|
4d393da3736d0a71b1d25b720ed16af38013b682
|
[
"Apache-2.0"
] | null | null | null |
banners/migrations/0001_initial.py
|
AlexGolovaschenko/OwenAgriculture
|
4d393da3736d0a71b1d25b720ed16af38013b682
|
[
"Apache-2.0"
] | 7
|
2021-03-19T03:36:56.000Z
|
2022-01-13T02:44:37.000Z
|
banners/migrations/0001_initial.py
|
AlexGolovaschenko/OwenAgriculture
|
4d393da3736d0a71b1d25b720ed16af38013b682
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-06-18 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Banner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Название')),
('banner_image', models.ImageField(upload_to='banners', verbose_name='Картинка баннера')),
('order', models.PositiveSmallIntegerField(default=0, verbose_name='Порядок отображения')),
('display', models.BooleanField(default=True, verbose_name='Показывать')),
],
options={
'verbose_name': 'Баннер',
'verbose_name_plural': 'Баннеры',
},
),
]
| 32.62069
| 114
| 0.584567
|
7552955cfda3953c5be741bc73e47097a19e94bf
| 12,400
|
py
|
Python
|
test/test_related_events.py
|
LaudateCorpus1/hyper-h2
|
7dfab8f8e0e8605c4a2a90706b217d0a0a0c45b7
|
[
"MIT"
] | 2
|
2020-07-01T20:46:51.000Z
|
2021-04-28T21:28:48.000Z
|
test/test_related_events.py
|
LaudateCorpus1/hyper-h2
|
7dfab8f8e0e8605c4a2a90706b217d0a0a0c45b7
|
[
"MIT"
] | null | null | null |
test/test_related_events.py
|
LaudateCorpus1/hyper-h2
|
7dfab8f8e0e8605c4a2a90706b217d0a0a0c45b7
|
[
"MIT"
] | 3
|
2021-06-03T10:10:16.000Z
|
2022-03-17T19:57:00.000Z
|
# -*- coding: utf-8 -*-
"""
test_related_events.py
~~~~~~~~~~~~~~~~~~~~~~
Specific tests to validate the "related events" logic used by certain events
inside hyper-h2.
"""
import h2.connection
import h2.events
class TestRelatedEvents(object):
"""
Related events correlate all those events that happen on a single frame.
"""
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
example_response_headers = [
(':status', '200'),
('server', 'fake-serv/0.1.0')
]
informational_response_headers = [
(':status', '100'),
('server', 'fake-serv/0.1.0')
]
example_trailers = [
('another', 'field'),
]
def test_request_received_related_all(self, frame_factory):
"""
        RequestReceived has two possible related events: PriorityUpdated and
        StreamEnded, both fired when a single HEADERS frame is received.
"""
c = h2.connection.H2Connection(client_side=False)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
input_frame = frame_factory.build_headers_frame(
headers=self.example_request_headers,
flags=['END_STREAM', 'PRIORITY'],
stream_weight=15,
depends_on=0,
exclusive=False,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 3
base_event = events[0]
other_events = events[1:]
assert base_event.stream_ended in other_events
assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
assert base_event.priority_updated in other_events
assert isinstance(
base_event.priority_updated, h2.events.PriorityUpdated
)
def test_request_received_related_priority(self, frame_factory):
"""
RequestReceived can be related to PriorityUpdated.
"""
c = h2.connection.H2Connection(client_side=False)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
input_frame = frame_factory.build_headers_frame(
headers=self.example_request_headers,
flags=['PRIORITY'],
stream_weight=15,
depends_on=0,
exclusive=False,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 2
base_event = events[0]
priority_updated_event = events[1]
assert base_event.priority_updated is priority_updated_event
assert base_event.stream_ended is None
assert isinstance(
base_event.priority_updated, h2.events.PriorityUpdated
)
def test_request_received_related_stream_ended(self, frame_factory):
"""
RequestReceived can be related to StreamEnded.
"""
c = h2.connection.H2Connection(client_side=False)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
input_frame = frame_factory.build_headers_frame(
headers=self.example_request_headers,
flags=['END_STREAM'],
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 2
base_event = events[0]
stream_ended_event = events[1]
assert base_event.stream_ended is stream_ended_event
assert base_event.priority_updated is None
assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
def test_response_received_related_nothing(self, frame_factory):
"""
ResponseReceived is ordinarily related to no events.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
input_frame = frame_factory.build_headers_frame(
headers=self.example_response_headers,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 1
base_event = events[0]
assert base_event.stream_ended is None
assert base_event.priority_updated is None
def test_response_received_related_all(self, frame_factory):
"""
        ResponseReceived has two possible related events: PriorityUpdated and
        StreamEnded, both fired when a single HEADERS frame is received.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
input_frame = frame_factory.build_headers_frame(
headers=self.example_response_headers,
flags=['END_STREAM', 'PRIORITY'],
stream_weight=15,
depends_on=0,
exclusive=False,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 3
base_event = events[0]
other_events = events[1:]
assert base_event.stream_ended in other_events
assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
assert base_event.priority_updated in other_events
assert isinstance(
base_event.priority_updated, h2.events.PriorityUpdated
)
def test_response_received_related_priority(self, frame_factory):
"""
ResponseReceived can be related to PriorityUpdated.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
input_frame = frame_factory.build_headers_frame(
headers=self.example_response_headers,
flags=['PRIORITY'],
stream_weight=15,
depends_on=0,
exclusive=False,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 2
base_event = events[0]
priority_updated_event = events[1]
assert base_event.priority_updated is priority_updated_event
assert base_event.stream_ended is None
assert isinstance(
base_event.priority_updated, h2.events.PriorityUpdated
)
def test_response_received_related_stream_ended(self, frame_factory):
"""
ResponseReceived can be related to StreamEnded.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
input_frame = frame_factory.build_headers_frame(
headers=self.example_response_headers,
flags=['END_STREAM'],
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 2
base_event = events[0]
stream_ended_event = events[1]
assert base_event.stream_ended is stream_ended_event
assert base_event.priority_updated is None
assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
def test_trailers_received_related_all(self, frame_factory):
"""
        TrailersReceived has two possible related events: PriorityUpdated and
        StreamEnded, both fired when a single HEADERS frame is received.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
f = frame_factory.build_headers_frame(
headers=self.example_response_headers,
)
c.receive_data(f.serialize())
input_frame = frame_factory.build_headers_frame(
headers=self.example_trailers,
flags=['END_STREAM', 'PRIORITY'],
stream_weight=15,
depends_on=0,
exclusive=False,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 3
base_event = events[0]
other_events = events[1:]
assert base_event.stream_ended in other_events
assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
assert base_event.priority_updated in other_events
assert isinstance(
base_event.priority_updated, h2.events.PriorityUpdated
)
def test_trailers_received_related_stream_ended(self, frame_factory):
"""
TrailersReceived can be related to StreamEnded by itself.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
f = frame_factory.build_headers_frame(
headers=self.example_response_headers,
)
c.receive_data(f.serialize())
input_frame = frame_factory.build_headers_frame(
headers=self.example_trailers,
flags=['END_STREAM'],
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 2
base_event = events[0]
stream_ended_event = events[1]
assert base_event.stream_ended is stream_ended_event
assert base_event.priority_updated is None
assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
def test_informational_response_related_nothing(self, frame_factory):
"""
InformationalResponseReceived in the standard case is related to
nothing.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
input_frame = frame_factory.build_headers_frame(
headers=self.informational_response_headers,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 1
base_event = events[0]
assert base_event.priority_updated is None
def test_informational_response_received_related_all(self, frame_factory):
"""
InformationalResponseReceived has one possible related event:
PriorityUpdated, fired when a single HEADERS frame is received.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
input_frame = frame_factory.build_headers_frame(
headers=self.informational_response_headers,
flags=['PRIORITY'],
stream_weight=15,
depends_on=0,
exclusive=False,
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 2
base_event = events[0]
priority_updated_event = events[1]
assert base_event.priority_updated is priority_updated_event
assert isinstance(
base_event.priority_updated, h2.events.PriorityUpdated
)
def test_data_received_normally_relates_to_nothing(self, frame_factory):
"""
        A plain DATA frame leads to DataReceived with no related events.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
f = frame_factory.build_headers_frame(
headers=self.example_response_headers,
)
c.receive_data(f.serialize())
input_frame = frame_factory.build_data_frame(
data=b'some data',
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 1
base_event = events[0]
assert base_event.stream_ended is None
def test_data_received_related_stream_ended(self, frame_factory):
"""
DataReceived can be related to StreamEnded by itself.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(stream_id=1, headers=self.example_request_headers)
f = frame_factory.build_headers_frame(
headers=self.example_response_headers,
)
c.receive_data(f.serialize())
input_frame = frame_factory.build_data_frame(
data=b'some data',
flags=['END_STREAM'],
)
events = c.receive_data(input_frame.serialize())
assert len(events) == 2
base_event = events[0]
stream_ended_event = events[1]
assert base_event.stream_ended is stream_ended_event
assert isinstance(base_event.stream_ended, h2.events.StreamEnded)
| 33.695652
| 78
| 0.647823
|
ed69fb60ee18639c056e88d7ff043799e6bee82e
| 26,335
|
py
|
Python
|
AppServer/lib/django-1.4/tests/modeltests/invalid_models/invalid_models/models.py
|
loftwah/appscale
|
586fc1347ebc743d7a632de698f4dbfb09ae38d6
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-1.4/tests/modeltests/invalid_models/invalid_models/models.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-1.4/tests/modeltests/invalid_models/invalid_models/models.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
#encoding=utf-8
"""
26. Invalid models
This example exists purely to point out errors in models.
"""
from django.db import connection, models
class FieldErrors(models.Model):
charfield = models.CharField()
charfield2 = models.CharField(max_length=-1)
charfield3 = models.CharField(max_length="bad")
decimalfield = models.DecimalField()
decimalfield2 = models.DecimalField(max_digits=-1, decimal_places=-1)
decimalfield3 = models.DecimalField(max_digits="bad", decimal_places="bad")
decimalfield4 = models.DecimalField(max_digits=9, decimal_places=10)
decimalfield5 = models.DecimalField(max_digits=10, decimal_places=10)
filefield = models.FileField()
choices = models.CharField(max_length=10, choices='bad')
choices2 = models.CharField(max_length=10, choices=[(1,2,3),(1,2,3)])
index = models.CharField(max_length=10, db_index='bad')
field_ = models.CharField(max_length=10)
nullbool = models.BooleanField(null=True)
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash1 = models.CharField(max_length=10)
clash2 = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Clash1(models.Model):
src_safe = models.CharField(max_length=10)
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
class Clash2(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
class Target2(models.Model):
clash3 = models.CharField(max_length=10)
foreign_tgt = models.ForeignKey(Target)
clashforeign_set = models.ForeignKey(Target)
m2m_tgt = models.ManyToManyField(Target)
clashm2m_set = models.ManyToManyField(Target)
class Clash3(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target2, related_name='foreign_tgt')
foreign_2 = models.ForeignKey(Target2, related_name='m2m_tgt')
m2m_1 = models.ManyToManyField(Target2, related_name='foreign_tgt')
m2m_2 = models.ManyToManyField(Target2, related_name='m2m_tgt')
class ClashForeign(models.Model):
foreign = models.ForeignKey(Target2)
class ClashM2M(models.Model):
m2m = models.ManyToManyField(Target2)
class SelfClashForeign(models.Model):
src_safe = models.CharField(max_length=10)
selfclashforeign = models.CharField(max_length=10)
selfclashforeign_set = models.ForeignKey("SelfClashForeign")
foreign_1 = models.ForeignKey("SelfClashForeign", related_name='id')
foreign_2 = models.ForeignKey("SelfClashForeign", related_name='src_safe')
class ValidM2M(models.Model):
src_safe = models.CharField(max_length=10)
validm2m = models.CharField(max_length=10)
# M2M fields are symmetrical by default. Symmetrical M2M fields
# on self don't require a related accessor, so many potential
# clashes are avoided.
validm2m_set = models.ManyToManyField("self")
m2m_1 = models.ManyToManyField("self", related_name='id')
m2m_2 = models.ManyToManyField("self", related_name='src_safe')
m2m_3 = models.ManyToManyField('self')
m2m_4 = models.ManyToManyField('self')
class SelfClashM2M(models.Model):
src_safe = models.CharField(max_length=10)
selfclashm2m = models.CharField(max_length=10)
# Non-symmetrical M2M fields _do_ have related accessors, so
# there is potential for clashes.
selfclashm2m_set = models.ManyToManyField("self", symmetrical=False)
m2m_1 = models.ManyToManyField("self", related_name='id', symmetrical=False)
m2m_2 = models.ManyToManyField("self", related_name='src_safe', symmetrical=False)
m2m_3 = models.ManyToManyField('self', symmetrical=False)
m2m_4 = models.ManyToManyField('self', symmetrical=False)
class Model(models.Model):
"But it's valid to call a model Model."
year = models.PositiveIntegerField() #1960
make = models.CharField(max_length=10) #Aston Martin
name = models.CharField(max_length=10) #DB 4 GT
class Car(models.Model):
colour = models.CharField(max_length=5)
model = models.ForeignKey(Model)
class MissingRelations(models.Model):
rel1 = models.ForeignKey("Rel1")
rel2 = models.ManyToManyField("Rel2")
class MissingManualM2MModel(models.Model):
name = models.CharField(max_length=5)
missing_m2m = models.ManyToManyField(Model, through="MissingM2MModel")
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
tertiary = models.ManyToManyField(Person, through="RelationshipDoubleFK", related_name="tertiary")
class GroupTwo(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership")
secondary = models.ManyToManyField(Group, through="MembershipMissingFK")
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
not_default_or_null = models.CharField(max_length=5)
class MembershipMissingFK(models.Model):
person = models.ForeignKey(Person)
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Relationship")
too_many_friends = models.ManyToManyField('self', through="RelationshipTripleFK")
class PersonSelfRefM2MExplicit(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="ExplicitRelationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_added = models.DateTimeField()
class ExplicitRelationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_to_set")
date_added = models.DateTimeField()
class RelationshipTripleFK(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set_2")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set_2")
third = models.ForeignKey(PersonSelfRefM2M, related_name="too_many_by_far")
date_added = models.DateTimeField()
class RelationshipDoubleFK(models.Model):
first = models.ForeignKey(Person, related_name="first_related_name")
second = models.ForeignKey(Person, related_name="second_related_name")
third = models.ForeignKey(Group, related_name="rel_to_set")
date_added = models.DateTimeField()
class AbstractModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
abstract = True
class AbstractRelationModel(models.Model):
fk1 = models.ForeignKey('AbstractModel')
fk2 = models.ManyToManyField('AbstractModel')
class UniqueM2M(models.Model):
""" Model to test for unique ManyToManyFields, which are invalid. """
unique_people = models.ManyToManyField(Person, unique=True)
class NonUniqueFKTarget1(models.Model):
""" Model to test for non-unique FK target in yet-to-be-defined model: expect an error """
tgt = models.ForeignKey('FKTarget', to_field='bad')
class UniqueFKTarget1(models.Model):
""" Model to test for unique FK target in yet-to-be-defined model: expect no error """
tgt = models.ForeignKey('FKTarget', to_field='good')
class FKTarget(models.Model):
bad = models.IntegerField()
good = models.IntegerField(unique=True)
class NonUniqueFKTarget2(models.Model):
""" Model to test for non-unique FK target in previously seen model: expect an error """
tgt = models.ForeignKey(FKTarget, to_field='bad')
class UniqueFKTarget2(models.Model):
""" Model to test for unique FK target in previously seen model: expect no error """
tgt = models.ForeignKey(FKTarget, to_field='good')
class NonExistingOrderingWithSingleUnderscore(models.Model):
class Meta:
ordering = ("does_not_exist",)
class InvalidSetNull(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_NULL)
class InvalidSetDefault(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_DEFAULT)
class UnicodeForeignKeys(models.Model):
"""Foreign keys which can translate to ascii should be OK, but fail if
they're not."""
good = models.ForeignKey(u'FKTarget')
also_good = models.ManyToManyField(u'FKTarget', related_name='unicode2')
# In Python 3 this should become legal, but currently causes unicode errors
# when adding the errors in core/management/validation.py
#bad = models.ForeignKey(u'★')
class PrimaryKeyNull(models.Model):
my_pk_field = models.IntegerField(primary_key=True, null=True)
class OrderByPKModel(models.Model):
"""
Model to test that ordering by pk passes validation.
Refs #8291
"""
name = models.CharField(max_length=100, blank=True)
class Meta:
ordering = ('pk',)
model_errors = """invalid_models.fielderrors: "charfield": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield2": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield3": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield4": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.
invalid_models.fielderrors: "filefield": FileFields require an "upload_to" attribute.
invalid_models.fielderrors: "choices": "choices" should be iterable (e.g., a tuple or list).
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "index": "db_index" should be either None, True or False.
invalid_models.fielderrors: "field_": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.
invalid_models.fielderrors: "nullbool": BooleanFields do not accept null values. Use a NullBooleanField instead.
invalid_models.clash1: Accessor for field 'foreign' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for field 'foreign' clashes with related m2m field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Reverse query name for field 'foreign' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with related field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Reverse query name for m2m field 'm2m' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clashforeign: Accessor for field 'foreign' clashes with field 'Target2.clashforeign_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clashm2m: Accessor for m2m field 'm2m' clashes with m2m field 'Target2.clashm2m_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.selfclashforeign: Accessor for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign_set'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Reverse query name for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Accessor for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Accessor for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'selfclashm2m_set' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_3' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_4' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.missingrelations: 'rel1' has a relation with model Rel1, which has either not been installed or is abstract.
invalid_models.missingrelations: 'rel2' has an m2m relation with model Rel2, which has either not been installed or is abstract.
invalid_models.grouptwo: 'primary' is a manually-defined m2m relation through model Membership, which does not have foreign keys to Person and GroupTwo
invalid_models.grouptwo: 'secondary' is a manually-defined m2m relation through model MembershipMissingFK, which does not have foreign keys to Group and GroupTwo
invalid_models.missingmanualm2mmodel: 'missing_m2m' specifies an m2m relation through model MissingM2MModel, which has not been installed
invalid_models.group: The model Group has two manually-defined m2m relations through the model Membership, which is not permitted. Please consider using an extra field on your intermediary model instead.
invalid_models.group: Intermediary model RelationshipDoubleFK has more than one foreign key to Person, which is ambiguous and is not permitted.
invalid_models.personselfrefm2m: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.personselfrefm2m: Intermediary model RelationshipTripleFK has more than two foreign keys to PersonSelfRefM2M, which is ambiguous and is not permitted.
invalid_models.personselfrefm2mexplicit: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.abstractrelationmodel: 'fk1' has a relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.abstractrelationmodel: 'fk2' has an m2m relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.uniquem2m: ManyToManyFields cannot be unique. Remove the unique argument on 'unique_people'.
invalid_models.nonuniquefktarget1: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonuniquefktarget2: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonexistingorderingwithsingleunderscore: "ordering" refers to "does_not_exist", a field that doesn't exist.
invalid_models.invalidsetnull: 'fk' specifies on_delete=SET_NULL, but cannot be null.
invalid_models.invalidsetdefault: 'fk' specifies on_delete=SET_DEFAULT, but has no default value.
"""
if not connection.features.interprets_empty_strings_as_nulls:
model_errors += """invalid_models.primarykeynull: "my_pk_field": Primary key fields cannot have null=True.
"""
| 73.356546
| 214
| 0.793924
|
ab09bf83d4fa5149f01f0a01c26fdbc04f8e91ff
| 6,963
|
py
|
Python
|
tests/unit_tests/test_report.py
|
ljhopkins2/sqlfmt
|
439811ada91e6a274b2b757c452f5140a05ecc06
|
[
"Apache-2.0"
] | 36
|
2021-11-02T04:08:22.000Z
|
2022-03-30T14:47:49.000Z
|
tests/unit_tests/test_report.py
|
ljhopkins2/sqlfmt
|
439811ada91e6a274b2b757c452f5140a05ecc06
|
[
"Apache-2.0"
] | 85
|
2021-11-01T19:22:59.000Z
|
2022-03-31T03:33:41.000Z
|
tests/unit_tests/test_report.py
|
ljhopkins2/sqlfmt
|
439811ada91e6a274b2b757c452f5140a05ecc06
|
[
"Apache-2.0"
] | 1
|
2022-01-30T23:20:52.000Z
|
2022-01-30T23:20:52.000Z
|
from pathlib import Path
from typing import List
import pytest
from sqlfmt.api import SqlFormatResult
from sqlfmt.mode import Mode
from sqlfmt.report import Report
@pytest.fixture
def no_change_results() -> List[SqlFormatResult]:
results = [
SqlFormatResult(
source_path=Path("~/path/to/file.sql"),
source_string="select * from my_table\n",
formatted_string="select * from my_table\n",
),
SqlFormatResult(
source_path=Path("~/path/to/another_file.sql"),
source_string="select * from my_table where true\n",
formatted_string="select * from my_table where true\n",
),
]
return results
@pytest.fixture
def changed_results() -> List[SqlFormatResult]:
results = [
SqlFormatResult(
source_path=Path("~/path/to/file.sql"),
source_string="select * from my_table\n",
formatted_string="select * from my_table\n",
),
SqlFormatResult(
source_path=Path("~/path/to/another_file.sql"),
source_string="SELECT * from my_table where true",
formatted_string="select * from my_table where true\n",
),
SqlFormatResult(
source_path=Path("~/path/to/yet_another_file.sql"),
source_string="select a,\n b\n * from my_table where \n a = b\n",
formatted_string="select a, b from my_table where a = b\n",
),
]
return results
def test_no_change_report(
no_change_results: List[SqlFormatResult], default_mode: Mode
) -> None:
report = Report(no_change_results, default_mode)
assert report
assert str(report) == "2 files left unchanged."
def test_no_change_verbose_report(
no_change_results: List[SqlFormatResult], verbose_mode: Mode
) -> None:
report = Report(no_change_results, verbose_mode)
assert report
expected_report = (
"2 files left unchanged.\n"
f"{Path('~/path/to/another_file.sql')} left unchanged.\n"
f"{Path('~/path/to/file.sql')} left unchanged."
)
assert str(report) == expected_report
def test_changed_report_default_mode(
changed_results: List[SqlFormatResult], default_mode: Mode
) -> None:
report = Report(changed_results, default_mode)
assert report
expected_report = (
"\x1b[1m2 files formatted.\x1b[0m\n"
"1 file left unchanged.\n"
f"{Path('~/path/to/another_file.sql')} formatted.\n"
f"{Path('~/path/to/yet_another_file.sql')} formatted."
)
assert str(report) == expected_report
def test_changed_report_verbose_mode(
changed_results: List[SqlFormatResult], verbose_mode: Mode
) -> None:
report = Report(changed_results, verbose_mode)
assert report
expected_report = (
"\x1b[1m2 files formatted.\x1b[0m\n"
"1 file left unchanged.\n"
f"{Path('~/path/to/another_file.sql')} formatted.\n"
f"{Path('~/path/to/yet_another_file.sql')} formatted.\n"
f"{Path('~/path/to/file.sql')} left unchanged."
)
assert str(report) == expected_report
def test_changed_report_check_mode(
changed_results: List[SqlFormatResult], check_mode: Mode
) -> None:
report = Report(changed_results, check_mode)
assert report
expected_report = (
"\x1b[1m2 files failed formatting check.\x1b[0m\n"
"1 file passed formatting check.\n"
f"{Path('~/path/to/another_file.sql')} failed formatting check.\n"
f"{Path('~/path/to/yet_another_file.sql')} failed formatting check."
)
assert str(report) == expected_report
def test_changed_report_verbose_check_mode(
changed_results: List[SqlFormatResult], verbose_check_mode: Mode
) -> None:
report = Report(changed_results, verbose_check_mode)
assert report
expected_report = (
"\x1b[1m2 files failed formatting check.\x1b[0m\n"
"1 file passed formatting check.\n"
f"{Path('~/path/to/another_file.sql')} failed formatting check.\n"
f"{Path('~/path/to/yet_another_file.sql')} failed formatting check.\n"
f"{Path('~/path/to/file.sql')} passed formatting check."
)
assert str(report) == expected_report
def test_no_change_report_check_mode(
no_change_results: List[SqlFormatResult], check_mode: Mode
) -> None:
report = Report(no_change_results, check_mode)
assert report
assert str(report) == "2 files passed formatting check."
def test_no_change_report_diff_mode(
no_change_results: List[SqlFormatResult], diff_mode: Mode
) -> None:
report = Report(no_change_results, diff_mode)
assert report
assert str(report) == "2 files passed formatting check."
def test_changed_report_diff_mode(
changed_results: List[SqlFormatResult], diff_mode: Mode
) -> None:
report = Report(changed_results, diff_mode)
expected_report = (
"\x1b[1m2 files failed formatting check.\x1b[0m\n"
"1 file passed formatting check.\n"
f"{Path('~/path/to/another_file.sql')} failed formatting check.\n"
"\x1b[31m\x1b[22m--- source_query\n"
"\x1b[0m\x1b[32m\x1b[22m+++ formatted_query\n"
"\x1b[0m\x1b[36m\x1b[22m@@ -1 +1 @@\n"
"\x1b[0m\x1b[31m\x1b[22m-SELECT * from my_table where true\n"
"\x1b[0m\\ No newline at end of file\n"
"\x1b[32m\x1b[22m+select * from my_table where true\n"
"\x1b[0m\n"
f"{Path('~/path/to/yet_another_file.sql')} failed formatting check.\n"
"\x1b[31m\x1b[22m--- source_query\n"
"\x1b[0m\x1b[32m\x1b[22m+++ formatted_query\n"
"\x1b[0m\x1b[36m\x1b[22m@@ -1,4 +1 @@\n"
"\x1b[0m\x1b[31m\x1b[22m-select a,\n"
"\x1b[0m\x1b[31m\x1b[22m- b\n"
"\x1b[0m\x1b[31m\x1b[22m- * from my_table where \n"
"\x1b[0m\x1b[31m\x1b[22m- a = b\n"
"\x1b[0m\x1b[32m\x1b[22m+select a, b from my_table where a = b\n"
"\x1b[0m"
)
assert report
assert str(report) == expected_report
def test_changed_report_no_color_diff_mode(
changed_results: List[SqlFormatResult], no_color_diff_mode: Mode
) -> None:
report = Report(changed_results, no_color_diff_mode)
expected_report = (
"2 files failed formatting check.\n"
"1 file passed formatting check.\n"
f"{Path('~/path/to/another_file.sql')} failed formatting check.\n"
"--- source_query\n"
"+++ formatted_query\n"
"@@ -1 +1 @@\n"
"-SELECT * from my_table where true\n"
"\\ No newline at end of file\n"
"+select * from my_table where true\n"
"\n"
f"{Path('~/path/to/yet_another_file.sql')} failed formatting check.\n"
"--- source_query\n"
"+++ formatted_query\n"
"@@ -1,4 +1 @@\n"
"-select a,\n"
"- b\n"
"- * from my_table where \n"
"- a = b\n"
"+select a, b from my_table where a = b\n"
""
)
assert report
assert str(report) == expected_report
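# --- Added example (hedged; not part of the original test module) ---
# A minimal sketch of driving Report directly, mirroring the tests above.
# `default_mode` is assumed to be a Mode instance (it is a pytest fixture
# in this suite), and the exact report wording is illustrative:
#
#     results = [
#         SqlFormatResult(
#             source_path=Path("query.sql"),
#             source_string="SELECT 1",
#             formatted_string="select 1\n",
#         )
#     ]
#     report = Report(results, default_mode)
#     print(str(report))  # e.g. "1 file formatted." plus per-file lines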
| 34.132353
| 78
| 0.637082
|
b87020f297335880d8429d3cb7a34e720f16a5e6
| 3,546
|
py
|
Python
|
neural_spline_flows/nde/transforms/splines/linear.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
neural_spline_flows/nde/transforms/splines/linear.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
neural_spline_flows/nde/transforms/splines/linear.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
import math
import torch
from torch.nn import functional as F
import numpy as np
from neural_spline_flows import utils
from neural_spline_flows.nde import transforms
def unconstrained_linear_spline(inputs, unnormalized_pdf,
inverse=False,
tail_bound=1.,
tails='linear'):
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
outside_interval_mask = ~inside_interval_mask
outputs = torch.zeros_like(inputs)
logabsdet = torch.zeros_like(inputs)
if tails == 'linear':
outputs[outside_interval_mask] = inputs[outside_interval_mask]
logabsdet[outside_interval_mask] = 0
else:
raise RuntimeError('{} tails are not implemented.'.format(tails))
outputs[inside_interval_mask], logabsdet[inside_interval_mask] = linear_spline(
inputs=inputs[inside_interval_mask],
unnormalized_pdf=unnormalized_pdf[inside_interval_mask, :],
inverse=inverse,
left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound
)
return outputs, logabsdet
def linear_spline(inputs, unnormalized_pdf,
inverse=False,
left=0., right=1., bottom=0., top=1.):
"""
Reference:
> Müller et al., Neural Importance Sampling, arXiv:1808.03856, 2018.
"""
if not inverse and (torch.min(inputs) < left or torch.max(inputs) > right):
raise transforms.InputOutsideDomain()
elif inverse and (torch.min(inputs) < bottom or torch.max(inputs) > top):
raise transforms.InputOutsideDomain()
if inverse:
inputs = (inputs - bottom) / (top - bottom)
else:
inputs = (inputs - left) / (right - left)
num_bins = unnormalized_pdf.size(-1)
pdf = F.softmax(unnormalized_pdf, dim=-1)
cdf = torch.cumsum(pdf, dim=-1)
cdf[..., -1] = 1.
cdf = F.pad(cdf, pad=(1, 0), mode='constant', value=0.0)
if inverse:
inv_bin_idx = utils.searchsorted(cdf, inputs)
bin_boundaries = (torch.linspace(0, 1, num_bins+1)
.view([1] * inputs.dim() + [-1])
.expand(*inputs.shape, -1))
slopes = ((cdf[..., 1:] - cdf[..., :-1])
/ (bin_boundaries[..., 1:] - bin_boundaries[..., :-1]))
offsets = cdf[..., 1:] - slopes * bin_boundaries[..., 1:]
inv_bin_idx = inv_bin_idx.unsqueeze(-1)
input_slopes = slopes.gather(-1, inv_bin_idx)[..., 0]
input_offsets = offsets.gather(-1, inv_bin_idx)[..., 0]
outputs = (inputs - input_offsets) / input_slopes
outputs = torch.clamp(outputs, 0, 1)
logabsdet = -torch.log(input_slopes)
else:
bin_pos = inputs * num_bins
bin_idx = torch.floor(bin_pos).long()
bin_idx[bin_idx >= num_bins] = num_bins - 1
alpha = bin_pos - bin_idx.float()
input_pdfs = pdf.gather(-1, bin_idx[..., None])[..., 0]
outputs = cdf.gather(-1, bin_idx[..., None])[..., 0]
outputs += alpha * input_pdfs
outputs = torch.clamp(outputs, 0, 1)
bin_width = 1.0 / num_bins
logabsdet = torch.log(input_pdfs) - np.log(bin_width)
if inverse:
outputs = outputs * (right - left) + left
logabsdet = logabsdet - math.log(top - bottom) + math.log(right - left)
else:
outputs = outputs * (top - bottom) + bottom
logabsdet = logabsdet + math.log(top - bottom) - math.log(right - left)
return outputs, logabsdet
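# --- Added usage sketch (hedged; not part of the original module) ---
# A minimal round-trip check for linear_spline on the default unit square.
# Shapes follow the signatures above (inputs: [batch], unnormalized_pdf:
# [batch, num_bins]); the tolerances are illustrative, not guaranteed.
if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.rand(8)                      # inputs inside [left, right) = [0, 1)
    unnormalized_pdf = torch.randn(8, 10)  # 10 bins per input
    y, logabsdet_fwd = linear_spline(x, unnormalized_pdf)
    x_back, logabsdet_inv = linear_spline(y, unnormalized_pdf, inverse=True)
    print(torch.max(torch.abs(x - x_back)))                     # should be ~0
    print(torch.max(torch.abs(logabsdet_fwd + logabsdet_inv)))  # should be ~0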
| 33.771429
| 83
| 0.601241
|
f48317cdbaefddc652c16b156c48efb6943f9586
| 28,694
|
py
|
Python
|
src/olympia/lib/crypto/tests/test_packaged.py
|
akanksha1612/addons-server
|
b125ad213a513bcbd97805105d862b400fbf9720
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/lib/crypto/tests/test_packaged.py
|
akanksha1612/addons-server
|
b125ad213a513bcbd97805105d862b400fbf9720
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/lib/crypto/tests/test_packaged.py
|
akanksha1612/addons-server
|
b125ad213a513bcbd97805105d862b400fbf9720
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import base64
import hashlib
import os
import shutil
import tempfile
import zipfile
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.test.utils import override_settings
import mock
import pytest
import responses
from signing_clients.apps import SignatureInfo
from waffle.models import Flag
from olympia import amo
from olympia.addons.models import AddonUser
from olympia.amo.tests import TestCase, create_flag
from olympia.files.utils import extract_xpi
from olympia.lib.crypto import packaged, tasks
from olympia.versions.compare import version_int
@override_settings(
SIGNING_SERVER='http://signing.server',
ENABLE_ADDON_SIGNING=True)
class TestPackagedTrunion(TestCase):
def setUp(self):
super(TestPackagedTrunion, self).setUp()
# Change addon file name
self.addon = amo.tests.addon_factory()
self.addon.update(guid='xxxxx')
self.version = self.addon.current_version
self.file_ = self.version.all_files[0]
self.file_.update(filename='addon-a.xpi')
# Add actual file to addons
if not os.path.exists(os.path.dirname(self.file_.file_path)):
os.makedirs(os.path.dirname(self.file_.file_path))
fp = zipfile.ZipFile(self.file_.file_path, 'w')
fp.writestr('install.rdf', (
'<?xml version="1.0"?><RDF '
' xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#" '
' xmlns:em="http://www.mozilla.org/2004/em-rdf#">'
'<Description about="urn:mozilla:install-manifest">'
' <em:id>foo@jetpack</em:id>'
' <em:type>2</em:type>'
' <em:bootstrap>true</em:bootstrap>'
' <em:unpack>false</em:unpack>'
' <em:version>0.1</em:version>'
' <em:name>foo</em:name>'
' <em:description>foo bar</em:description>'
' <em:optionsType>2</em:optionsType>'
' <em:targetApplication></em:targetApplication>'
'</Description>'
'</RDF>'))
fp.close()
self._register_urls()
def tearDown(self):
if os.path.exists(self.file_.file_path):
os.unlink(self.file_.file_path)
super(TestPackagedTrunion, self).tearDown()
def _register_urls(self):
signature_path = os.path.join(
settings.ROOT, 'src/olympia/lib/crypto/tests/',
'webextension_signed.rsa')
with open(signature_path, 'rb') as fobj:
signature = fobj.read()
responses.add(
responses.POST,
'http://signing.server/1.0/sign_addon',
json={'mozilla.rsa': base64.b64encode(signature)},
status=200)
def _sign_file(self, file_):
packaged.sign_file(file_)
def assert_not_signed(self):
assert not self.file_.is_signed
assert not self.file_.cert_serial_num
assert not self.file_.hash
assert not packaged.is_signed(self.file_.file_path)
assert not responses.calls
def assert_signed(self):
assert self.file_.is_signed
assert self.file_.cert_serial_num
assert self.file_.hash
assert packaged.is_signed(self.file_.file_path)
assert len(responses.calls) == 1
@responses.activate
def test_supports_firefox_old_not_default_to_compatible(self):
max_appversion = self.version.apps.first().max
# Old, and not default to compatible.
max_appversion.update(version='4', version_int=version_int('4'))
self.file_.update(binary_components=True, strict_compatibility=True)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
@responses.activate
def test_supports_firefox_android_old_not_default_to_compatible(self):
max_appversion = self.version.apps.first().max
# Old, and not default to compatible.
max_appversion.update(application=amo.ANDROID.id,
version='4', version_int=version_int('4'))
self.file_.update(binary_components=True, strict_compatibility=True)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
@responses.activate
def test_supports_firefox_old_default_to_compatible(self):
max_appversion = self.version.apps.first().max
# Old, and default to compatible.
max_appversion.update(version='4', version_int=version_int('4'))
self.file_.update(binary_components=False, strict_compatibility=False)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
@responses.activate
def test_supports_firefox_android_old_default_to_compatible(self):
max_appversion = self.version.apps.first().max
# Old, and default to compatible.
max_appversion.update(application=amo.ANDROID.id,
version='4', version_int=version_int('4'))
self.file_.update(binary_components=False, strict_compatibility=False)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
@responses.activate
def test_supports_firefox_recent_default_to_compatible(self):
max_appversion = self.version.apps.first().max
# Recent, default to compatible.
max_appversion.update(version='37', version_int=version_int('37'))
self.file_.update(binary_components=False, strict_compatibility=False)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
@responses.activate
def test_supports_firefox_android_recent_not_default_to_compatible(self):
max_appversion = self.version.apps.first().max
# Recent, not default to compatible.
max_appversion.update(application=amo.ANDROID.id,
version='37', version_int=version_int('37'))
self.file_.update(binary_components=True, strict_compatibility=True)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
def test_get_trunion_endpoint(self):
assert self.addon.status == amo.STATUS_PUBLIC
expected = 'http://signing.server/1.0/sign_addon'
assert (
packaged.get_trunion_endpoint(settings.SIGNING_SERVER) == expected)
def test_no_server_full(self):
with self.settings(SIGNING_SERVER=''):
self._sign_file(self.file_)
self.assert_not_signed()
@responses.activate
def test_sign_file(self):
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
        # Make sure there are two newlines at the end of the mozilla.sf file
        # (see bug 1158938).
with zipfile.ZipFile(self.file_.file_path, mode='r') as zf:
with zf.open('META-INF/mozilla.sf', 'r') as mozillasf:
assert mozillasf.read().endswith('\n\n')
@responses.activate
def test_sign_file_non_ascii_filename(self):
src = self.file_.file_path
self.file_.update(filename=u'jétpack.xpi')
shutil.move(src, self.file_.file_path)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_signed()
def test_no_sign_missing_file(self):
os.unlink(self.file_.file_path)
assert not self.file_.is_signed
assert not self.file_.cert_serial_num
assert not self.file_.hash
self._sign_file(self.file_)
assert not self.file_.is_signed
assert not self.file_.cert_serial_num
assert not self.file_.hash
assert not packaged.is_signed(self.file_.file_path)
def test_no_sign_hotfix_addons(self):
"""Don't sign hotfix addons."""
for hotfix_guid in settings.HOTFIX_ADDON_GUIDS:
self.addon.update(guid=hotfix_guid)
self._sign_file(self.file_)
self.assert_not_signed()
def test_no_sign_again_mozilla_signed_extensions(self):
"""Don't try to resign mozilla signed extensions."""
self.file_.update(is_mozilla_signed_extension=True)
self._sign_file(self.file_)
self.assert_not_signed()
@responses.activate
def test_is_signed(self):
assert not packaged.is_signed(self.file_.file_path)
self._sign_file(self.file_)
assert packaged.is_signed(self.file_.file_path)
@responses.activate
def test_size_updated(self):
unsigned_size = storage.size(self.file_.file_path)
self._sign_file(self.file_)
signed_size = storage.size(self.file_.file_path)
assert self.file_.size == signed_size
assert unsigned_size < signed_size
@responses.activate
def test_sign_file_multi_package(self):
fpath = 'src/olympia/files/fixtures/files/multi-package.xpi'
with amo.tests.copy_file(fpath, self.file_.file_path, overwrite=True):
self.file_.update(is_multi_package=True)
self.assert_not_signed()
self._sign_file(self.file_)
self.assert_not_signed()
# The multi-package itself isn't signed.
assert not packaged.is_signed(self.file_.file_path)
# The internal extensions aren't either.
folder = tempfile.mkdtemp(dir=settings.TMP_PATH)
try:
extract_xpi(self.file_.file_path, folder)
# The extension isn't.
assert not packaged.is_signed(
os.path.join(folder, 'random_extension.xpi'))
# And the theme isn't either.
assert not packaged.is_signed(
os.path.join(folder, 'random_theme.xpi'))
finally:
amo.utils.rm_local_tmp_dir(folder)
@responses.activate
def test_call_signing(self):
packaged.call_signing(self.file_)
call = responses.calls[0].request
assert call.url == 'http://signing.server/1.0/sign_addon'
assert 'name="addon_id"\r\n\r\nxxxxx' in call.body
assert (
'name="file"; filename="mozilla.sf"\r\n\r\n'
'Signature-Version: 1.0\n'
'MD5-Digest-Manifest: UrEJ9n5q8I9UW2KlFUJDkA==\n'
'SHA1-Digest-Manifest: lTdbRmVMF7o/C+BT9GnMQne2Ap4=') in call.body
@responses.activate
def test_call_signing_too_long_guid_bug_1203365(self):
long_guid = 'x' * 65
hashed = hashlib.sha256(long_guid).hexdigest()
self.addon.update(guid=long_guid)
packaged.call_signing(self.file_)
call = responses.calls[0].request
assert call.url == 'http://signing.server/1.0/sign_addon'
assert 'name="addon_id"\r\n\r\n{0}'.format(hashed) in call.body
assert (
'name="file"; filename="mozilla.sf"\r\n\r\n'
'Signature-Version: 1.0\n'
'MD5-Digest-Manifest: UrEJ9n5q8I9UW2KlFUJDkA==\n'
'SHA1-Digest-Manifest: lTdbRmVMF7o/C+BT9GnMQne2Ap4=') in call.body
def test_get_id_short_guid(self):
assert len(self.addon.guid) <= 64
assert len(packaged.get_id(self.addon)) <= 64
assert packaged.get_id(self.addon) == self.addon.guid
def test_get_id_longest_allowed_guid_bug_1203365(self):
long_guid = 'x' * 64
self.addon.update(guid=long_guid)
assert packaged.get_id(self.addon) == self.addon.guid
def test_get_id_long_guid_bug_1203365(self):
long_guid = 'x' * 65
hashed = hashlib.sha256(long_guid).hexdigest()
self.addon.update(guid=long_guid)
assert len(self.addon.guid) > 64
assert len(packaged.get_id(self.addon)) <= 64
assert packaged.get_id(self.addon) == hashed
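# --- Added note (hedged; not part of the original tests) ---
# The three get_id tests above pin down the behaviour. A hypothetical sketch
# consistent with them (not the real implementation in packaged):
#
#     def get_id(addon):
#         guid = addon.guid
#         if len(guid) <= 64:
#             return guid
#         return hashlib.sha256(guid).hexdigest()  # always 64 hex chars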
@override_settings(ENABLE_ADDON_SIGNING=True)
class TestPackagedAutograph(TestPackagedTrunion):
def setUp(self):
create_flag('activate-autograph-signing')
super(TestPackagedAutograph, self).setUp()
def tearDown(self):
Flag.objects.filter(name='activate-autograph-signing').delete()
super(TestPackagedAutograph, self).tearDown()
def _register_urls(self):
responses.add_passthru(settings.AUTOGRAPH_CONFIG['server_url'])
def _get_signature_info(self):
with zipfile.ZipFile(self.file_.file_path, mode='r') as zobj:
with zobj.open('META-INF/mozilla.rsa', 'r') as fobj:
pkcs7 = fobj.read()
return SignatureInfo(pkcs7)
def _sign_file(self, file_):
packaged.sign_file(file_, use_autograph=True)
def assert_not_signed(self):
# Overwritten to not rely on `responses` but check the real deal
assert not self.file_.is_signed
assert not self.file_.cert_serial_num
assert not self.file_.hash
assert not packaged.is_signed(self.file_.file_path)
def assert_signed(self):
# Overwritten to not rely on `responses` but check the real deal
assert self.file_.is_signed
assert self.file_.cert_serial_num
assert self.file_.hash
assert packaged.is_signed(self.file_.file_path)
def test_no_server_full(self):
# Test not needed for autograph
return
def test_call_signing(self):
self._sign_file(self.file_)
signature_info = self._get_signature_info()
subject_info = signature_info.signer_certificate['subject']
assert subject_info['common_name'] == 'xxxxx'
def test_call_signing_too_long_guid_bug_1203365(self):
long_guid = 'x' * 65
hashed = hashlib.sha256(long_guid).hexdigest()
self.addon.update(guid=long_guid)
self._sign_file(self.file_)
signature_info = self._get_signature_info()
subject_info = signature_info.signer_certificate['subject']
assert subject_info['common_name'] == hashed
class TestTasks(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestTasks, self).setUp()
self.addon = amo.tests.addon_factory(version_kw={'version': '1.3'})
self.version = self.addon.current_version
# Make sure our file/version is at least compatible with FF
# '37'.
self.max_appversion = self.version.apps.first().max
self.set_max_appversion('37')
self.file_ = self.version.all_files[0]
self.file_.update(filename='jetpack.xpi')
def tearDown(self):
if os.path.exists(self.get_backup_file_path()):
os.unlink(self.get_backup_file_path())
super(TestTasks, self).tearDown()
def get_backup_file_path(self):
return u'{0}.backup_signature'.format(self.file_.file_path)
def set_max_appversion(self, version):
"""Set self.max_appversion to the given version."""
self.max_appversion.update(version=version,
version_int=version_int(version))
def assert_backup(self):
"""Make sure there's a backup file."""
assert os.path.exists(self.get_backup_file_path())
def assert_no_backup(self):
"""Make sure there's no backup file."""
assert not os.path.exists(self.get_backup_file_path())
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_no_bump_unreviewed(self, mock_sign_file):
"""Don't bump nor sign unreviewed files."""
for status in (amo.UNREVIEWED_FILE_STATUSES + (amo.STATUS_BETA,)):
self.file_.update(status=status)
fpath = 'src/olympia/files/fixtures/files/jetpack.xpi'
with amo.tests.copy_file(fpath, self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert not mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
assert file_hash == self.file_.generate_hash()
self.assert_no_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_bump_version_in_model(self, mock_sign_file):
# We want to make sure each file has been signed.
self.file2 = amo.tests.file_factory(version=self.version)
self.file2.update(filename='jetpack-b.xpi')
backup_file2_path = u'{0}.backup_signature'.format(
self.file2.file_path)
try:
fpath = 'src/olympia/files/fixtures/files/jetpack.xpi'
with amo.tests.copy_file(fpath, self.file_.file_path):
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file2.file_path):
file_hash = self.file_.generate_hash()
file2_hash = self.file2.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert mock_sign_file.call_count == 2
self.version.reload()
assert self.version.version == '1.3.1-signed'
assert self.version.version_int == version_int(
'1.3.1-signed')
assert file_hash != self.file_.generate_hash()
assert file2_hash != self.file2.generate_hash()
self.assert_backup()
assert os.path.exists(backup_file2_path)
finally:
if os.path.exists(backup_file2_path):
os.unlink(backup_file2_path)
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_sign_full(self, mock_sign_file):
"""Use the signing server if files are approved."""
self.file_.update(status=amo.STATUS_PUBLIC)
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file_.file_path):
tasks.sign_addons([self.addon.pk])
mock_sign_file.assert_called_with(self.file_, use_autograph=False)
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_sign_supported_applications(self, mock_sign_file):
"""Make sure we sign for all supported applications."""
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file_.file_path):
for app in (amo.ANDROID.id, amo.FIREFOX.id):
self.max_appversion.update(application=app)
tasks.sign_addons([self.addon.pk])
mock_sign_file.assert_called_with(
self.file_, use_autograph=False)
mock_sign_file.reset_mock()
def assert_not_signed(self, mock_sign_file, file_hash):
assert not mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
assert file_hash == self.file_.generate_hash()
self.assert_no_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_dont_sign_dont_bump_other_applications(self, mock_sign_file):
"""Don't sign files which are for applications we don't sign for."""
path = 'src/olympia/files/fixtures/files/jetpack.xpi'
with amo.tests.copy_file(path, self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
apps_without_signing = [app for app in amo.APPS_ALL.keys()
if app not in packaged.SIGN_FOR_APPS]
for app in apps_without_signing:
self.max_appversion.update(application=app)
tasks.sign_addons([self.addon.pk])
self.assert_not_signed(mock_sign_file, file_hash)
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_sign_bump_non_ascii_filename(self, mock_sign_file):
"""Sign files which have non-ascii filenames."""
self.file_.update(filename=u'jétpack.xpi')
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3.1-signed'
assert self.version.version_int == version_int('1.3.1-signed')
assert file_hash != self.file_.generate_hash()
self.assert_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_sign_bump_non_ascii_version(self, mock_sign_file):
"""Sign versions which have non-ascii version numbers."""
self.version.update(version=u'é1.3')
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == u'é1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert mock_sign_file.called
self.version.reload()
assert self.version.version == u'é1.3.1-signed'
assert self.version.version_int == version_int(u'é1.3.1-signed')
assert file_hash != self.file_.generate_hash()
self.assert_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_sign_bump_old_versions_default_compat(self, mock_sign_file):
"""Sign files which are old, but default to compatible."""
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
self.set_max_appversion('4')
tasks.sign_addons([self.addon.pk])
assert mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3.1-signed'
assert self.version.version_int == version_int('1.3.1-signed')
assert file_hash != self.file_.generate_hash()
self.assert_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_sign_bump_new_versions_not_default_compat(self, mock_sign_file):
"""Sign files which are recent, event if not default to compatible."""
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
self.file_.update(binary_components=True,
strict_compatibility=True)
tasks.sign_addons([self.addon.pk])
assert mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3.1-signed'
assert self.version.version_int == version_int('1.3.1-signed')
assert file_hash != self.file_.generate_hash()
self.assert_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_dont_resign_dont_bump_version_in_model(self, mock_sign_file):
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/new-addon-signature.xpi',
self.file_.file_path):
self.file_.update(is_signed=True)
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert not mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
assert file_hash == self.file_.generate_hash()
self.assert_no_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_dont_sign_dont_bump_version_bad_zipfile(self, mock_sign_file):
with amo.tests.copy_file(__file__, self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert not mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
assert file_hash == self.file_.generate_hash()
self.assert_no_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_dont_sign_dont_bump_sign_error(self, mock_sign_file):
mock_sign_file.side_effect = IOError()
fpath = 'src/olympia/files/fixtures/files/jetpack.xpi'
with amo.tests.copy_file(fpath, self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
assert file_hash == self.file_.generate_hash()
self.assert_no_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_dont_bump_not_signed(self, mock_sign_file):
mock_sign_file.return_value = None # Pretend we didn't sign.
fpath = 'src/olympia/files/fixtures/files/jetpack.xpi'
with amo.tests.copy_file(fpath, self.file_.file_path):
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk])
assert mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
assert file_hash == self.file_.generate_hash()
self.assert_no_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_resign_bump_version_in_model_if_force(self, mock_sign_file):
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/new-addon-signature.xpi',
self.file_.file_path):
self.file_.update(is_signed=True)
file_hash = self.file_.generate_hash()
assert self.version.version == '1.3'
assert self.version.version_int == version_int('1.3')
tasks.sign_addons([self.addon.pk], force=True)
assert mock_sign_file.called
self.version.reload()
assert self.version.version == '1.3.1-signed'
assert self.version.version_int == version_int('1.3.1-signed')
assert file_hash != self.file_.generate_hash()
self.assert_backup()
@mock.patch('olympia.lib.crypto.tasks.sign_file')
def test_sign_mail(self, mock_sign_file):
"""Check that an email reason can be provided."""
self.file_.update(status=amo.STATUS_PUBLIC)
AddonUser.objects.create(addon=self.addon, user_id=999)
with amo.tests.copy_file(
'src/olympia/files/fixtures/files/jetpack.xpi',
self.file_.file_path):
tasks.sign_addons([self.addon.pk], reason='expiry')
mock_sign_file.assert_called_with(self.file_, use_autograph=False)
assert 'expiration' in mail.outbox[0].message().as_string()
@pytest.mark.parametrize(('old', 'new'), [
('1.1', '1.1.1-signed'),
('1.1.1-signed.1', '1.1.1-signed.1.1-signed'),
('1.1.1-signed', '1.1.1-signed-2'),
('1.1.1-signed-3', '1.1.1-signed-4'),
('1.1.1-signed.1-signed-16', '1.1.1-signed.1-signed-17')
])
def test_get_new_version_number(old, new):
assert tasks.get_new_version_number(old) == new
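# --- Added note (hedged; not part of the original tests) ---
# The parametrized cases above pin down the version-bump rule: increment a
# trailing '-N' counter after '-signed', start at '-2' when the version ends
# in '-signed', otherwise append '.1-signed'. A hypothetical sketch (not the
# real olympia implementation):
#
#     import re
#     def get_new_version_number(version):
#         match = re.match(r'^(.*-signed)(?:-(\d+))?$', version)
#         if not match:
#             return version + '.1-signed'
#         base, counter = match.groups()
#         return '%s-%d' % (base, int(counter) + 1 if counter else 2)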
| 41.889051
| 79
| 0.637903
|
f36dec897f5598b673a91a63b8334367fd242101
| 1,633
|
py
|
Python
|
sample-demo/venv/Lib/site-packages/PyQt6/lupdate/translations.py
|
rupc/bsp-protos
|
58833e7ab9ff53f3633708fb5f95edfdd152c5ea
|
[
"Apache-2.0"
] | null | null | null |
sample-demo/venv/Lib/site-packages/PyQt6/lupdate/translations.py
|
rupc/bsp-protos
|
58833e7ab9ff53f3633708fb5f95edfdd152c5ea
|
[
"Apache-2.0"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
sample-demo/venv/Lib/site-packages/PyQt6/lupdate/translations.py
|
rupc/bsp-protos
|
58833e7ab9ff53f3633708fb5f95edfdd152c5ea
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of PyQt6.
#
# This file may be used under the terms of the GNU General Public License
# version 3.0 as published by the Free Software Foundation and appearing in
# the file LICENSE included in the packaging of this file. Please review the
# following information to ensure the GNU General Public License version 3.0
# requirements will be met: http://www.gnu.org/copyleft/gpl.html.
#
# If you do not wish to use this file under the terms of the GPL version 3.0
# then you may purchase a commercial license. For more information contact
# info@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
class Context:
""" Encapsulate a message context. """
def __init__(self, name):
""" Initialise the context. """
self.name = name
self.messages = []
class EmbeddedComments:
""" Encapsulate information for a translator embedded in comments. """
def __init__(self):
""" Initialise the object. """
self.message_id = ''
self.extra_comments = []
self.extras = []
class Message:
""" Encapsulate a message. """
def __init__(self, filename, line_nr, source, comment, numerus):
""" Initialise the message. """
self.filename = filename
self.line_nr = line_nr
self.source = source
self.comment = comment
self.numerus = numerus
self.embedded_comments = EmbeddedComments()
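# --- Added usage sketch (hedged; not part of the original module) ---
# Constructing the containers above, e.g. while walking parsed source;
# all argument values here are illustrative:
#
#     context = Context('MainWindow')
#     message = Message('main.py', 42, 'Open File', 'menu entry', False)
#     message.embedded_comments.extra_comments.append('TRANSLATOR: keep short')
#     context.messages.append(message)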
| 31.403846
| 78
| 0.685242
|
67840a505e9240bb969e4c9e53e0341ec6912859
| 138,027
|
py
|
Python
|
tensorflow/python/keras/engine/training_v1.py
|
ProctorU/tensorflow
|
fd05051846fd9ceb090206600afd1a71ba852e20
|
[
"Apache-2.0"
] | 1
|
2020-02-15T14:00:01.000Z
|
2020-02-15T14:00:01.000Z
|
tensorflow/python/keras/engine/training_v1.py
|
alubanana/tensorflow
|
454f89ab3baacbac567d6bcceef4c743f23ce58b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/engine/training_v1.py
|
alubanana/tensorflow
|
454f89ab3baacbac567d6bcceef4c743f23ce58b
|
[
"Apache-2.0"
] | 1
|
2020-02-14T10:12:19.000Z
|
2020-02-14T10:12:19.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""V1 Training-related part of the Keras engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import composite_tensor_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.engine import network
from tensorflow.python.keras.engine import training as training_lib
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_distributed
from tensorflow.python.keras.engine import training_eager
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine import training_v2
from tensorflow.python.keras.engine import training_v2_utils
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
try:
from scipy.sparse import issparse # pylint: disable=g-import-not-at-top
except ImportError:
issparse = None
_keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras/model_v1',
'keras model v1 usage', 'method')
class Model(training_lib.Model):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
_keras_api_gauge.get_cell('model_v1').set(True)
# initializing _distribution_strategy here since it is possible to call
# predict on a model without compiling it.
self._distribution_strategy = None
self._compile_time_distribution_strategy = None
if (ops.executing_eagerly_outside_functions() and
distribution_strategy_context.has_strategy()):
self._set_strategy(
distribution_strategy_context.get_strategy())
# This flag is used to track if the user is using the deprecated path of
# passing distribution strategy to compile rather than creating the model
# under distribution strategy scope.
self._compile_distribution = False
self._run_eagerly = None
self._experimental_run_tf_function = (
ops.executing_eagerly_outside_functions())
@trackable.no_automatic_dependency_tracking
def _set_strategy(self, strategy):
self._compile_time_distribution_strategy = strategy
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
strategy = (self._distribution_strategy or
self._compile_time_distribution_strategy)
if strategy:
with strategy.scope():
return network.Network.get_weights(self)
return network.Network.get_weights(self)
def load_weights(self, filepath, by_name=False, skip_mismatch=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
    If `by_name` is False, weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
skip_mismatch: Boolean, whether to skip loading of layers where there is
a mismatch in the number of weights, or a mismatch in the shape of
the weight (only valid when `by_name=True`).
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
ValueError: If `skip_mismatch` is set to `True` when `by_name` is
`False`.
"""
if distributed_training_utils.is_tpu_strategy(self._distribution_strategy):
if (self._distribution_strategy.extended.steps_per_run > 1 and
(not network._is_hdf5_filepath(filepath))): # pylint: disable=protected-access
raise ValueError('Load weights is not yet supported with TPUStrategy '
'with steps_per_run greater than 1.')
return super(Model, self).load_weights(filepath, by_name, skip_mismatch)
@trackable.no_automatic_dependency_tracking
def compile(self,
optimizer='rmsprop',
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
distribute=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See `tf.keras.optimizers`.
loss: String (name of objective function), objective function or
`tf.keras.losses.Loss` instance. See `tf.keras.losses`. An objective
function is any callable with the signature
`scalar_loss = fn(y_true, y_pred)`. If the model has multiple
outputs, you can use a different loss on each output by passing a
dictionary or a list of losses. The loss value that will be
minimized by the model will then be the sum of all individual
losses.
metrics: List of metrics to be evaluated by the model during training
and testing. Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary, such as
`metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.
You can also pass a list (len = len(outputs)) of lists of metrics
such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or
`metrics=['accuracy', ['accuracy', 'mse']]`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
        If a list, it is expected to have a 1:1 mapping
        to the model's outputs. If a dict, it is expected to map
        output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
distribute: NOT SUPPORTED IN TF 2.0, please create and compile the
model under distribution strategy scope instead of passing it to
compile.
**kwargs: Any additional arguments.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
self._run_eagerly = kwargs.pop('run_eagerly', None)
self._experimental_run_tf_function = kwargs.pop(
'experimental_run_tf_function', True)
# Prepare Session arguments (legacy).
kwargs.pop('cloning', None) # Legacy DistStrat argument, never used.
allowed_kwargs = {'feed_dict', 'fetches', 'options', 'run_metadata'}
unknown_kwargs = set(kwargs.keys()) - allowed_kwargs
if unknown_kwargs:
raise TypeError(
'Invalid keyword argument(s) in `compile`: %s' % (unknown_kwargs,))
self._function_kwargs = kwargs
if self._function_kwargs:
self._experimental_run_tf_function = False
if self.run_eagerly:
raise ValueError(
'Session keyword arguments are not supported '
'when `run_eagerly=True`. You passed the following '
'Session arguments: %s' % (self._function_kwargs,))
self._set_optimizer(optimizer)
is_any_keras_optimizer_v1 = any(
(isinstance(opt, optimizers.Optimizer)
and not isinstance(opt, optimizers.TFOptimizer)
) for opt in nest.flatten(self.optimizer))
if is_any_keras_optimizer_v1 and ops.executing_eagerly_outside_functions():
      raise ValueError('`tf.compat.v1.keras` Optimizer (%s) is not supported '
                       'when eager execution is enabled. Use a `tf.keras` '
                       'Optimizer instead, or disable eager execution.'
                       % (optimizer,))
if ((target_tensors is not None)
or not ops.executing_eagerly_outside_functions()):
# Fallback out of things that aren't supported with v2 loops
self._experimental_run_tf_function = False
if distribute is not None:
if tf2.enabled() or self._experimental_run_tf_function:
raise ValueError(
          'Distribute argument in compile is not available in TF 2.0; please '
          'create the model under the distribution strategy scope.')
      logging.warning('Distribute argument in compile is deprecated; please '
                      'create the model under the distribution strategy scope.')
self._distribution_strategy = distribute
self._compile_distribution = True
else:
if distribution_strategy_context.has_strategy():
        # When the user builds the model in the DS scope and cross-replica
        # context, we want the distribution strategy to be set; but when
        # building the replica copies of the models internally, we should not
        # compile with distribution strategy and should use the default
        # compilation path instead.
if distribution_strategy_context.in_cross_replica_context():
self._distribution_strategy = (
distribution_strategy_context.get_strategy())
if not self._experimental_run_tf_function:
self._validate_compile_param_for_distribution_strategy(self.run_eagerly,
sample_weight_mode,
target_tensors,
weighted_metrics)
# We've disabled automatic dependency tracking for this method, but do want
# to add a checkpoint dependency on the optimizer if it's trackable.
if isinstance(self.optimizer, trackable.Trackable):
self._track_trackable(
self.optimizer, name='optimizer', overwrite=True)
self.loss = loss or {}
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
self._compile_metrics = metrics or []
self._compile_weighted_metrics = weighted_metrics
if self.run_eagerly and target_tensors is not None:
raise ValueError(
'target_tensors argument is not supported when '
'running a model eagerly.')
    # _training_endpoints contains a list of _TrainingEndpoint objects, each of
    # which has all the model output/target/loss and related metadata.
self._training_endpoints = []
# Used to freeze the behavior of the Model once `compile` has been called.
self._compiled_trainable_state = self._get_trainable_state()
# Set tf.distribute.Strategy specific parameters.
self._distributed_model_cache = {}
self._distributed_function_cache = {}
# Clear any `_eager_losses` that was added.
self._clear_losses()
if (not context.executing_eagerly() and
self._distribution_strategy is not None):
# Ensures a Session is created and configured correctly for Distribution
# Strategy.
K.configure_and_create_distributed_session(self._distribution_strategy)
# Initialize model metric attributes.
self._init_metric_attributes()
if not self.built or not self.inputs or not self.outputs:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
_keras_api_gauge.get_cell('compile_v1').set(True)
    # Prepare the list of loss functions, same size as the model outputs.
self.loss_functions = training_utils.prepare_loss_functions(
self.loss, self.output_names)
target_tensors = self._process_target_tensor_for_compile(target_tensors)
for o, n, l, t in zip(self.outputs, self.output_names,
self.loss_functions, target_tensors):
endpoint = _TrainingEndpoint(o, n, l)
endpoint.create_training_target(t, run_eagerly=self.run_eagerly)
self._training_endpoints.append(endpoint)
    # Prepare the list of loss weights, same size as the model outputs.
training_utils.prepare_loss_weights(self._training_endpoints, loss_weights)
# Initialization for Eager mode execution.
if self.run_eagerly:
self._compile_eagerly(metrics, weighted_metrics, sample_weight_mode)
return
with K.get_graph().as_default():
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
# Set metric attributes on model.
self._set_metric_attributes()
# Invoke metric functions (unweighted) for all the outputs.
self._handle_metrics(
self.outputs,
targets=self._targets,
skip_target_masks=self._prepare_skip_target_masks(),
masks=self._prepare_output_masks())
# Prepare sample weight modes. List with the same length as model outputs.
training_utils.prepare_sample_weight_modes(
self._training_endpoints, sample_weight_mode)
# Creates the model loss and weighted metrics sub-graphs.
self._compile_weights_loss_and_weighted_metrics()
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
self._collected_trainable_weights = self.trainable_weights
# Validate all variables were correctly created in distribution scope.
if self._distribution_strategy and not self._compile_distribution:
for v in self.variables:
strategy = self._distribution_strategy
if not strategy.extended.variable_created_in_scope(v):
raise ValueError(
'Variable (%s) was not created in the distribution strategy '
'scope of (%s). It is most likely due to not all layers or '
'the model or optimizer being created outside the distribution '
'strategy scope. Try to make sure your code looks similar '
'to the following.\n'
'with strategy.scope():\n'
' model=_create_model()\n'
' model.compile(...)'% (v, strategy))
@trackable.no_automatic_dependency_tracking
def _init_distributed_function_cache_if_not_compiled(self):
if not hasattr(self, '_distributed_function_cache'):
self._distributed_function_cache = {}
@property
def metrics(self):
"""Returns the model's metrics added using `compile`, `add_metric` APIs."""
metrics = []
if self._is_compiled:
metrics += self._compile_metric_functions
metrics.extend(self._metrics)
metrics.extend(_get_metrics_from_layers(self._layers))
return metrics
@property
def metrics_names(self):
"""Returns the model's display labels for all outputs."""
# This property includes all output names including `loss` and per-output
# losses for backward compatibility.
metrics_names = ['loss']
if self._is_compiled:
# Add output loss metric names to the metric names list.
if len(self._training_endpoints) > 1:
metrics_names.extend([
e.loss_name()
for e in self._training_endpoints
if not e.should_skip_target()
])
# Add all metric names.
metrics_names += [m.name for m in self.metrics]
return metrics_names
@property
def run_eagerly(self):
"""Settable attribute indicating whether the model should run eagerly.
Running eagerly means that your model will be run step by step,
like Python code. Your model might run slower, but it should become easier
for you to debug it by stepping into individual layer calls.
By default, we will attempt to compile your model to a static graph to
deliver the best execution performance.
Returns:
Boolean, whether the model should run eagerly.
"""
if self._run_eagerly is True and not context.executing_eagerly():
raise ValueError('You can only set `run_eagerly=True` if eager execution '
'is enabled.')
if not self.dynamic:
if self._run_eagerly is None:
# Respect `tf.config.experimental_run_functions_eagerly` unless
# `run_eagerly` was explicitly passed to `compile`.
return def_function.RUN_FUNCTIONS_EAGERLY
else:
return self._run_eagerly
else:
if not context.executing_eagerly():
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You must enable eager execution with '
'`tf.enable_eager_execution()`.')
if self._run_eagerly is False:
# TODO(fchollet): consider using py_func to enable this.
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You cannot set `run_eagerly=False`.')
return context.executing_eagerly()
@run_eagerly.setter
def run_eagerly(self, value):
self._run_eagerly = value
def _select_training_loop(self, inputs):
"""Select training loop for fit/eval/predict based on the inputs."""
# TODO(kaftan) or TODO(scottzhu): This check should eventually be nicely
# integrated into the data adapters in the v2 loop. We can't do this yet
# because we currently have to fall back for unhandled data types.
if isinstance(inputs, (iterator_ops.Iterator,
iterator_ops.OwnedIterator)):
raise ValueError('For performance reasons Keras `fit`, `evaluate` and'
'`predict` accept tf.data `Datasets` as input but not '
'iterators that have been manually generated from '
'Datasets by users. Please directly pass in the '
'original `Dataset` object instead of passing in '
'`iter(dataset)`.')
# Experiment training loop with default DS path.
if context.executing_eagerly() and self._experimental_run_tf_function:
if self._in_multi_worker_mode():
return training_distributed.DistributionMultiWorkerTrainingLoop(
training_v2.Loop())
else:
return training_v2.Loop()
# Case 1: distribution strategy.
if self._distribution_strategy:
if self._in_multi_worker_mode():
return training_distributed.DistributionMultiWorkerTrainingLoop(
training_distributed.DistributionSingleWorkerTrainingLoop())
else:
return training_distributed.DistributionSingleWorkerTrainingLoop()
# Case 2: generator-like. Input is Python generator, or Sequence object,
# or a non-distributed Dataset or iterator in eager execution.
if data_utils.is_generator_or_sequence(inputs):
return training_generator.GeneratorOrSequenceTrainingLoop()
if training_utils.is_eager_dataset_or_iterator(inputs):
return training_generator.EagerDatasetOrIteratorTrainingLoop()
# Case 3: Symbolic tensors or Numpy array-like.
# This includes Datasets and iterators in graph mode (since they
# generate symbolic tensors).
if self.run_eagerly:
return training_generator.GeneratorLikeTrainingLoop()
else:
return training_arrays.ArrayLikeTrainingLoop()
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras.utils.Sequence` returning `(inputs, targets)`
or `(inputs, targets, sample weights)`.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, generator,
or `keras.utils.Sequence` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, datasets,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
Note that the progress bar is not particularly useful when
logged to a file, so verbose=2 is recommended when not running
        interactively (e.g., in a production environment).
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See `tf.keras.callbacks`.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset, generator or
`keras.utils.Sequence` instance.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset
For the first two cases, `batch_size` must be provided.
For the last case, `validation_steps` could be provided.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, generator, or
`keras.utils.Sequence` instance, instead provide the sample_weights
as the third element of `x`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined. If x is a
`tf.data` dataset, and 'steps_per_epoch'
is None, the epoch will run until the input dataset is exhausted.
This argument is not supported with array inputs.
validation_steps: Only relevant if `validation_data` is provided and
is a `tf.data` dataset. Total number of steps (batches of
samples) to draw before stopping when performing validation
at the end of every epoch. If 'validation_steps' is None, validation
will run until the `validation_data` dataset is exhausted. In the
        case of an infinite dataset, it will run into an infinite loop.
If 'validation_steps' is specified and only part of the dataset
will be consumed, the evaluation will start from the beginning of
the dataset at each epoch. This ensures that the same validation
samples are used every time.
validation_freq: Only relevant if validation data is provided. Integer
or `collections_abc.Container` instance (e.g. list, tuple, etc.).
If an integer, specifies how many training epochs to run before a
new validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up
when using process-based threading. If unspecified, `workers`
will default to 1. If 0, will execute the generator on the main
thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
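
    Example:

    A minimal sketch of a typical call (hypothetical data; assumes
    `model` was built and compiled beforehand):

      import numpy as np
      x = np.random.random((100, 8)).astype('float32')
      y = np.random.randint(0, 2, size=(100, 1))
      history = model.fit(x, y,
                          batch_size=32,
                          epochs=5,
                          validation_split=0.2)
      print(history.history['loss'])  # per-epoch training loss values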
"""
_keras_api_gauge.get_cell('fit_v1').set(True)
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
self._assert_compile_was_called()
self._check_call_args('fit')
func = self._select_training_loop(x)
return func.fit(
self,
x=x,
y=y,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
shuffle=shuffle,
class_weight=class_weight,
sample_weight=sample_weight,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely).
If `x` is a dataset, generator or
`keras.utils.Sequence` instance, `y` should not be specified (since
targets will be obtained from the iterator/dataset).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, instead pass
sample weights as the third element of `x`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
If x is a `tf.data` dataset and `steps` is
None, 'evaluate' will run until the dataset is exhausted.
This argument is not supported with array inputs.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during evaluation.
See [callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
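
    Example:

    A minimal sketch (hypothetical held-out arrays `x_test`/`y_test`;
    assumes a compiled model with at least one metric, so `results` is a
    list):

      results = model.evaluate(x_test, y_test, batch_size=32)
      print(dict(zip(model.metrics_names, results)))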
"""
_keras_api_gauge.get_cell('evaluate_v1').set(True)
self._assert_compile_was_called()
self._check_call_args('evaluate')
func = self._select_training_loop(x)
return func.evaluate(
self,
x=x,
y=y,
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight,
steps=steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`. If x is a `tf.data`
dataset and `steps` is None, `predict` will
run until the input dataset is exhausted.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during prediction.
See [callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
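
    Example:

    A minimal sketch (hypothetical input array `x_new`):

      probs = model.predict(x_new, batch_size=32)
      # `probs` is a Numpy array, or a list of arrays for
      # multi-output models.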
"""
_keras_api_gauge.get_cell('predict_v1').set(True)
self._check_call_args('predict')
func = self._select_training_loop(x)
return func.predict(
self,
x=x,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def reset_metrics(self):
"""Resets the state of metrics."""
metrics = self._get_training_eval_metrics()
for m in metrics:
m.reset_states()
# Reset metrics on all the distributed (cloned) models.
if self._distribution_strategy:
distributed_training_utils._reset_metrics(self) # pylint: disable=protected-access
def train_on_batch(self,
x,
y=None,
sample_weight=None,
class_weight=None,
reset_metrics=True):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely). If
`x` is a dataset, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset.
class_weight: Optional dictionary mapping class indices (integers) to a
weight (float) to apply to the model's loss for the samples from this
class during training. This can be useful to tell the model to "pay
more attention" to samples from an under-represented class.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
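
    Example:

    A minimal sketch of a custom training loop built on this method
    (`batch_iterator` is a hypothetical iterable of Numpy pairs):

      for x_batch, y_batch in batch_iterator:
        loss_and_metrics = model.train_on_batch(x_batch, y_batch)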
"""
self._assert_compile_was_called()
self._check_call_args('train_on_batch')
if self._experimental_run_tf_function:
outputs = training_v2_utils.train_on_batch(
self, x, y=y, sample_weight=sample_weight,
class_weight=class_weight, reset_metrics=reset_metrics,
standalone=True)
outputs = (outputs['total_loss'] + outputs['output_losses'] +
outputs['metrics'])
outputs = [
training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access
if len(outputs) == 1:
outputs = outputs[0]
return outputs
# If at this point we are in the replica context, then it is okay to execute
# the Eager code path. The expected way to get here is to call `fit` that
# calls `train_on_batch` on each replica.
if (self._distribution_strategy and
distribution_strategy_context.in_cross_replica_context()):
raise NotImplementedError('`train_on_batch` is not supported for models '
'distributed with tf.distribute.Strategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight,
extract_tensors_from_dataset=True)
# If `self._distribution_strategy` is True, then we are in a replica context
# at this point because of the check above. `train_on_batch` is being run
# for each replica by `self._distribution_strategy` and the same code path
# as Eager is expected to be taken.
if self.run_eagerly or self._distribution_strategy:
output_dict = training_eager.train_on_batch(
self,
x,
y,
sample_weights=sample_weights,
output_loss_metrics=self._output_loss_metrics)
outputs = (output_dict['total_loss'] + output_dict['output_losses']
+ output_dict['metrics'])
outputs = [
training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access
else:
x = training_utils.ModelInputs(x).as_list()
ins = x + list(y or []) + list(sample_weights or [])
if not isinstance(K.symbolic_learning_phase(), int):
ins += [True] # Add learning phase value.
self._update_sample_weight_modes(sample_weights=sample_weights)
self._make_train_function()
outputs = self.train_function(ins) # pylint: disable=not-callable
if reset_metrics:
self.reset_metrics()
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True):
"""Test the model on a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
        tensor targets, or inversely). If `x` is a dataset, `y` should
not be specified (since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
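
    Example:

    A minimal sketch that statefully accumulates metrics over several
    batches (`val_batches` is a hypothetical iterable of Numpy pairs):

      model.reset_metrics()
      for x_batch, y_batch in val_batches:
        results = model.test_on_batch(x_batch, y_batch,
                                      reset_metrics=False)
      # `results` now reflects all batches seen since the reset.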
"""
self._assert_compile_was_called()
self._check_call_args('test_on_batch')
if self._experimental_run_tf_function:
outputs = training_v2_utils.test_on_batch(
self, x, y=y, sample_weight=sample_weight,
reset_metrics=reset_metrics, standalone=True)
outputs = (outputs['total_loss'] + outputs['output_losses'] +
outputs['metrics'])
outputs = [
training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access
if len(outputs) == 1:
outputs = outputs[0]
return outputs
if (self._distribution_strategy and
distribution_strategy_context.in_cross_replica_context()):
raise NotImplementedError('`test_on_batch` is not supported for models '
'distributed with tf.distribute.Strategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True)
# If `self._distribution_strategy` is True, then we are in a replica context
# at this point.
if self.run_eagerly or self._distribution_strategy:
output_dict = training_eager.test_on_batch(
self,
x,
y,
sample_weights=sample_weights,
output_loss_metrics=self._output_loss_metrics)
outputs = (output_dict['total_loss'] + output_dict['output_losses']
+ output_dict['metrics'])
outputs = [
training_v2_utils._non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access
else:
x = training_utils.ModelInputs(x).as_list()
inputs = x + list(y or []) + list(sample_weights or [])
self._update_sample_weight_modes(sample_weights=sample_weights)
self._make_test_function()
outputs = self.test_function(inputs) # pylint: disable=not-callable
if reset_metrics:
self.reset_metrics()
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
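
    Example:

    A minimal sketch (hypothetical Numpy batch `x_batch`):

      preds = model.predict_on_batch(x_batch)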
"""
self._check_call_args('predict_on_batch')
if self._experimental_run_tf_function:
return training_v2_utils.predict_on_batch(self, x, standalone=True)
if (self._distribution_strategy and
distribution_strategy_context.in_cross_replica_context()):
raise NotImplementedError(
'`predict_on_batch` is not supported for models distributed with'
' tf.distribute.Strategy.')
# Validate and standardize user data.
inputs, _, _ = self._standardize_user_data(
x, extract_tensors_from_dataset=True)
# If `self._distribution_strategy` is True, then we are in a replica context
# at this point.
if self.run_eagerly or self._distribution_strategy:
inputs = training_utils.cast_if_floating_dtype(inputs)
if isinstance(inputs, collections_abc.Sequence):
# Unwrap lists with only one input, as we do when training on batch
if len(inputs) == 1:
inputs = inputs[0]
return self(inputs) # pylint: disable=not-callable
self._make_predict_function()
outputs = self.predict_function(inputs)
if len(outputs) == 1:
return outputs[0]
return outputs
@deprecation.deprecated(
None, 'Please use Model.fit, which supports generators.')
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
validation_freq=1,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
DEPRECATED:
`Model.fit` now supports generators, so there is no longer any need to use
this endpoint.
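
    Example of migrating to `Model.fit` (a sketch; `gen` is a
    hypothetical generator yielding `(inputs, targets)` batches):

      # Before: model.fit_generator(gen, steps_per_epoch=100, epochs=10)
      model.fit(gen, steps_per_epoch=100, epochs=10)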
"""
return self.fit(
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
validation_freq=validation_freq,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
@deprecation.deprecated(
None, 'Please use Model.evaluate, which supports generators.')
def evaluate_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
DEPRECATED:
`Model.evaluate` now supports generators, so there is no longer any need
to use this endpoint.
"""
self._check_call_args('evaluate_generator')
return self.evaluate(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
@deprecation.deprecated(
None, 'Please use Model.predict, which supports generators.')
def predict_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
DEPRECATED:
`Model.predict` now supports generators, so there is no longer any need
to use this endpoint.
"""
return self.predict(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
def _check_call_args(self, method_name):
"""Check that `call` has only one positional arg."""
# Always allow first arg, regardless of arg name.
fullargspec = self._call_full_argspec
if fullargspec.defaults:
positional_args = fullargspec.args[:-len(fullargspec.defaults)]
else:
positional_args = fullargspec.args
if 'training' in positional_args:
positional_args.remove('training')
# self and first arg can be positional.
if len(positional_args) > 2:
extra_args = positional_args[2:]
raise ValueError(
'Models passed to `' + method_name + '` can only have `training` '
'and the first argument in `call` as positional arguments, '
'found: ' + str(extra_args) + '.')
def _set_optimizer(self, optimizer):
"""Sets self.optimizer.
Sets self.optimizer to `optimizer`, potentially wrapping it with a
LossScaleOptimizer.
Args:
optimizer: The optimizer(s) to assign to self.optimizer.
"""
if isinstance(optimizer, (list, tuple)):
self.optimizer = [optimizers.get(opt) for opt in optimizer]
else:
self.optimizer = optimizers.get(optimizer)
if (self._dtype_policy.loss_scale is not None and
not isinstance(self.optimizer,
loss_scale_optimizer.LossScaleOptimizer)):
if isinstance(self.optimizer, list):
        raise ValueError('When a dtype policy with a loss scale is used, you '
                         'can only pass a single optimizer. Using policy %s '
                         'and got optimizers: %s' %
                         (self._dtype_policy, self.optimizer))
if not isinstance(self.optimizer, optimizer_v2.OptimizerV2):
raise ValueError('"optimizer" must be an instance of '
                         'tf.keras.optimizers.Optimizer when a dtype policy '
                         'with a loss scale is used, but got: %s. Using policy: '
'%s' %
(self.optimizer, self._dtype_policy))
self.optimizer = loss_scale_optimizer.LossScaleOptimizer(
self.optimizer, self._dtype_policy.loss_scale)
if (isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer) and
self._dtype_policy.loss_scale and
self.optimizer.loss_scale != self._dtype_policy.loss_scale):
logging.warning('LossScale of LossScaleOptimizer passed to compile (%s) '
'is not the same as the dtype policy\'s loss scale (%s). '
'Because the dtype policy has a loss scale, you should '
'pass an optimizer that is not wrapped with a '
                    'LossScaleOptimizer.'
% (self.optimizer.loss_scale,
self._dtype_policy.loss_scale))
def _prepare_validation_data(self, validation_data, batch_size,
validation_steps):
"""Unpack and check the validation data."""
val_x, val_y, val_sample_weights = training_utils.unpack_validation_data(
validation_data)
return self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weights,
batch_size=batch_size,
steps=validation_steps,
steps_name='validation_steps')
def _validate_compile_param_for_distribution_strategy(
self, run_eagerly, sample_weight_mode, target_tensors, weighted_metrics):
# Validate that arguments passed by the user to `compile` are supported by
# tf.distribute.Strategy.
if self._distribution_strategy:
if sample_weight_mode:
raise NotImplementedError('sample_weight_mode is not supported with '
'tf.distribute.Strategy.')
if weighted_metrics:
raise NotImplementedError('weighted_metrics is not supported with '
'tf.distribute.Strategy.')
if target_tensors:
raise ValueError('target_tensors is not supported with '
'tf.distribute.Strategy.')
if run_eagerly:
raise ValueError(
'We currently do not support enabling `run_eagerly` with '
'distribution strategy.')
if (distributed_training_utils.is_distributing_by_cloning(self) and
(not self.built or not self.inputs or not self.outputs)):
raise ValueError(
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.')
def _process_target_tensor_for_compile(self, target_tensors):
if self.run_eagerly:
      # Target tensors are not supported with run_eagerly. Create a list with
      # None as a placeholder for each output.
return [None for _ in self.output_names]
if target_tensors is not None and not (isinstance(target_tensors, list) and
target_tensors == []): # pylint: disable=g-explicit-bool-comparison
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has %s outputs, but you passed target_tensors=%s' %
(len(self.outputs), target_tensors))
elif isinstance(target_tensors, dict):
unexpected_target_tensor_names = set(target_tensors.keys()).difference(
self.output_names)
if unexpected_target_tensor_names:
raise ValueError(
'Unknown entry in `target_tensors` dictionary: "{name}". '
'Only expected the following keys: {keys}'.format(
name=unexpected_target_tensor_names,
keys=str(self.output_names)))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
elif tensor_util.is_tensor(target_tensors):
target_tensors = [target_tensors]
else:
raise TypeError('Expected `target_tensors` to be a list or tuple or '
'dict or a single tensor, but got:', target_tensors)
else:
# In case target tensor is empty or None, create a list with Nones
# that has same length as self.output_names. With that, the None check of
# target tensor can be skipped downstream.
target_tensors = [None for _ in self.output_names]
return target_tensors
def _compile_eagerly(self, metrics, weighted_metrics, sample_weight_mode):
# Prepare sample weight modes. List with the same length as model outputs.
training_utils.prepare_sample_weight_modes(
self._training_endpoints, sample_weight_mode)
# Prepare sample weights.
self._prepare_sample_weights()
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
self.total_loss = None
# Set metric attributes on model.
self._set_metric_attributes()
self._collected_trainable_weights = self.trainable_weights
def _update_sample_weight_modes(self, sample_weights=None):
"""Updates sample weight modes based on training/eval inputs.
Sample weight placeholders will be created for all or no outputs
based on whether sample_weight is provided for any output.
If model contains `_sample_weight_modes` we check if the input
`sample_weights` corresponds to the sample weight modes.
1. Set sample weight mode to be 'temporal' for output i, if `compile`
sample_weight_mode was set to `temporal` and sample weight inputs
are given for one or more outputs.
2. Set sample weight mode to be 'samplewise' for output i, if `compile`
sample_weight_mode was not set and sample weight inputs are given for
one or more outputs.
3. Reset sample weight mode to None for output i if sample weight mode
was set but there is no sample weight input.
Args:
sample_weights: List of sample weights of the same length as model outputs
or None.
"""
if not self._is_compiled:
return
if sample_weights and any(s is not None for s in sample_weights):
for endpoint in self._training_endpoints:
endpoint.sample_weight_mode = (
endpoint.sample_weight_mode or 'samplewise')
else:
for endpoint in self._training_endpoints:
endpoint.sample_weight_mode = None
def _recompile_weights_loss_and_weighted_metrics(self):
if not self._is_compiled:
return False
recompile = any([e.sample_weights_mismatch()
for e in self._training_endpoints])
if recompile:
self._compile_weights_loss_and_weighted_metrics()
return recompile
@trackable.no_automatic_dependency_tracking
def _compile_weights_loss_and_weighted_metrics(self, sample_weights=None):
"""Compiles the model loss and weighted metric sub-graphs.
This may be used to set graph tensors as sample weights (instead of creating
placeholders). This functionality is necessary for
`tf.keras.estimator.model_to_estimator`, which calls Keras models in a v1
graph, and creates iterator tensors for inputs, targets, and sample weights.
Args:
sample_weights: List of tensors to use as the sample weights. Must be the
same length as the number of outputs. If left as `None`, placeholders
are used instead.
"""
with K.get_graph().as_default():
if sample_weights is not None:
self._update_sample_weight_modes(sample_weights)
self._prepare_sample_weights(sample_weights)
masks = self._prepare_output_masks()
# Compute weighted metrics.
self._handle_metrics(
self.outputs,
targets=self._targets,
skip_target_masks=self._prepare_skip_target_masks(),
sample_weights=self.sample_weights,
masks=masks,
return_weighted_metrics=True)
# Compute total loss.
# Used to keep track of the total loss value (stateless).
# eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
# loss_weight_2 * output_2_loss_fn(...) +
# layer losses.
self.total_loss = self._prepare_total_loss(masks)
def _prepare_skip_target_masks(self):
"""Boolean mask for whether the target in the output list should be skipped.
If the loss function corresponding to a model output is None, then this
output will be skipped during total loss calculation and feed targets
preparation.
Returns:
A boolean list for whether the corresponding target in the output list
should be skipped during loss calculation.
"""
return [l is None for l in self.loss_functions]
def _prepare_output_masks(self):
"""Returns masks corresponding to model outputs."""
return [getattr(x, '_keras_mask', None) for x in self.outputs]
def _prepare_total_loss(self, masks):
"""Computes total loss from loss functions.
Arguments:
masks: List of mask values corresponding to each model output.
Returns:
      Total loss tensor computed from the per-output losses, their loss
      weights, and any regularization losses.
Raises:
TypeError: If model run_eagerly is True.
"""
if self.run_eagerly:
      raise TypeError('Total loss cannot be computed when compiled with '
'run_eagerly = True.')
total_loss = None
with K.name_scope('loss'):
for endpoint, mask in zip(self._training_endpoints, masks):
if endpoint.should_skip_target():
continue
y_true = endpoint.training_target.target
y_pred = endpoint.output
loss_fn = endpoint.loss_fn
loss_weight = endpoint.loss_weight
loss_name = endpoint.loss_name()
sample_weight = endpoint.sample_weight
with K.name_scope(loss_name):
if mask is not None:
mask = math_ops.cast(mask, y_pred.dtype)
# Update weights with mask.
if sample_weight is None:
sample_weight = mask
else:
# Update dimensions of weights to match with mask if possible.
mask, _, sample_weight = (
tf_losses_utils.squeeze_or_expand_dimensions(
mask, sample_weight=sample_weight))
sample_weight *= mask
if hasattr(loss_fn, 'reduction'):
per_sample_losses = loss_fn.call(y_true, y_pred)
weighted_losses = losses_utils.compute_weighted_loss(
per_sample_losses,
sample_weight=sample_weight,
reduction=losses_utils.ReductionV2.NONE)
loss_reduction = loss_fn.reduction
# `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all
# compile use cases.
if loss_reduction == losses_utils.ReductionV2.AUTO:
loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
# Compute the stateless loss value.
output_loss = losses_utils.reduce_weighted_loss(
weighted_losses, reduction=loss_reduction)
else:
# Compute the stateless loss value for a custom loss class.
            # Here we assume that the class takes care of loss reduction,
            # because if the class returned a vector value we could not
            # differentiate between a custom optimizer that expects a vector
            # loss value and an unreduced per-sample loss value.
output_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
if len(self.outputs) > 1:
# Keep track of stateful result tensor for the loss.
endpoint.output_loss_metric(output_loss)
# Scale output loss for distribution. For custom losses we assume
# reduction was mean.
if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
output_loss = losses_utils.scale_loss_for_distribution(output_loss)
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties and other layer-specific losses.
custom_losses = self.get_losses_for(None) + self.get_losses_for(
self.inputs)
if custom_losses:
total_loss += losses_utils.scale_loss_for_distribution(
math_ops.add_n(custom_losses))
return total_loss
def _get_callback_model(self):
"""Returns the Callback Model for this Model."""
if hasattr(self, '_replicated_model') and self._replicated_model:
# When using training_distributed, we set the callback model
# to an instance of the `DistributedModel` that we create in
# the `compile` call. The `DistributedModel` is initialized
# with the first replicated model. We need to set the callback
# model to a DistributedModel to allow us to override saving
# and loading weights when we checkpoint the model during training.
return self._replicated_model
if hasattr(self, 'callback_model') and self.callback_model:
return self.callback_model
return self
@trackable.no_automatic_dependency_tracking
def _make_callback_model(self, grouped_model):
first_replicated_model = self._distribution_strategy.unwrap(
grouped_model)[0]
# We initialize the callback model with the first replicated model.
self._replicated_model = DistributedCallbackModel(first_replicated_model)
self._replicated_model.set_original_model(self)
def _validate_or_infer_batch_size(self, batch_size, steps, x):
"""Validates that the `batch_size` provided is consistent with InputLayer.
It's possible that the user specified a static batch size in their
InputLayer. If so, this method checks the provided `batch_size` and `x`
arguments are consistent with this static batch size. Also, if
`batch_size` is `None`, this method will attempt to infer the batch size
from the static batch size of the InputLayer. Lastly, ValueError will be
raised if `x` is a tf.data.Dataset and `batch_size` is specified as we
expect users to provide batched datasets.
Arguments:
batch_size: The batch_size provided as an argument to
fit/evaluate/predict.
steps: The steps provided as an argument to fit/evaluate/predict.
x: The data passed as `x` to fit/evaluate/predict.
Returns:
The validated batch_size, auto-inferred from the first layer if not
provided.
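
    Example (a sketch of the divisibility check, assuming a hypothetical
    2-replica strategy and an InputLayer with static batch size 16):

      # batch_size=32 -> per-replica batch size 32 // 2 == 16: accepted.
      # batch_size=48 -> per-replica batch size 48 // 2 == 24: ValueError,
      #   incompatible with the InputLayer's static batch size of 16.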
"""
if (isinstance(x, (dataset_ops.DatasetV1,
dataset_ops.DatasetV2,
data_utils.Sequence)) or
tf_inspect.isgenerator(x)):
if batch_size is not None:
raise ValueError(
'The `batch_size` argument must not be specified for the given '
'input type. Received input: {}, batch_size: {}'.format(
x, batch_size))
return
# Avoids the override in Sequential.layers which filters Input layers.
# (Which are often the very layers that we're after.)
layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)
first_layer = next(layers, None)
if first_layer:
# The per-replica static batch size.
static_batch_size = training_utils.get_static_batch_size(first_layer)
if static_batch_size is not None:
# Determine number of times the user-supplied batch size will be split.
if (self._distribution_strategy and
distributed_training_utils.global_batch_size_supported(
self._distribution_strategy)):
num_splits_for_ds = self._distribution_strategy.num_replicas_in_sync
else:
num_splits_for_ds = 1
# Check `batch_size` argument is consistent with InputLayer.
if batch_size is not None:
if batch_size % num_splits_for_ds != 0:
raise ValueError('The `batch_size` argument ({}) must be divisible '
                           'by the number of replicas ({})'.format(
batch_size, num_splits_for_ds))
per_replica_batch_size = batch_size // num_splits_for_ds
if per_replica_batch_size != static_batch_size:
raise ValueError('The `batch_size` argument value {} is '
'incompatible with the specified batch size of '
'your Input Layer: {}'.format(
per_replica_batch_size, static_batch_size))
# Check Dataset/Iterator batch size is consistent with InputLayer.
if isinstance(x, (dataset_ops.DatasetV2, iterator_ops.Iterator,
iterator_ops.OwnedIterator)):
ds_batch_size = tensor_shape.as_dimension(
nest.flatten(dataset_ops.get_legacy_output_shapes(x))[0][0]).value
if ds_batch_size is not None:
if ds_batch_size % num_splits_for_ds != 0:
raise ValueError(
'The batch output shape of your `Dataset` {} '
                  'is not divisible by the number of replicas {}'.format(
ds_batch_size, num_splits_for_ds))
ds_per_replica_batch_size = ds_batch_size // num_splits_for_ds
if ds_per_replica_batch_size != static_batch_size:
raise ValueError('The batch output shape of your `Dataset` is '
'{}, which is incompatible with the specified '
'batch size of your Input Layer: {}'.format(
ds_per_replica_batch_size,
static_batch_size))
# Set inferred batch size from the InputLayer.
if steps is None:
batch_size = static_batch_size * num_splits_for_ds
if batch_size is None and steps is None:
# Backwards compatibility
batch_size = 32
return batch_size
def _prepare_sample_weights(self, sample_weights=None):
"""Sets sample weight attribute on the model."""
# List with the same length as model outputs.
if sample_weights is not None:
if len(sample_weights) != len(self._training_endpoints):
        raise ValueError('Provided sample weights must have the same length '
                         'as the number of outputs. Expected: {}, got: {}.'.format(
len(self._training_endpoints),
len(sample_weights)))
else:
sample_weights = [None] * len(self._training_endpoints)
for endpoint, weight in zip(self._training_endpoints, sample_weights):
endpoint.populate_sample_weight(weight, endpoint.sample_weight_mode)
def _cache_output_metric_attributes(self, metrics, weighted_metrics):
"""Caches metric name and function attributes for every model output."""
output_shapes = []
for output in self.outputs:
if output is None or output.shape.rank is None:
output_shapes.append(None)
else:
output_shapes.append(output.shape.as_list())
self._per_output_metrics = training_utils.collect_per_output_metric_info(
metrics, self.output_names, output_shapes, self.loss_functions)
self._per_output_weighted_metrics = (
training_utils.collect_per_output_metric_info(
weighted_metrics,
self.output_names,
output_shapes,
self.loss_functions,
is_weighted=True))
def _add_unique_metric_name(self, metric_name, output_index):
"""Makes the metric name unique and adds it to the model's metric name list.
If there are multiple outputs for which the metrics are calculated, the
metric names have to be made unique by appending an integer.
Arguments:
metric_name: Metric name that corresponds to the metric specified by the
user. For example: 'acc'.
output_index: The index of the model output for which the metric name is
being added.
Returns:
string, name of the model's unique metric name
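
    Example (a sketch for a hypothetical model with
    `output_names == ['out_a', 'out_b']`):

      # self._add_unique_metric_name('acc', 0) -> 'out_a_acc'
      # if 'out_a_acc' is already taken, the next call -> 'out_a_acc_1'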
"""
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
return metric_name
def _init_metric_attributes(self):
"""Initialized model metric attributes."""
# List of stateful metric functions. Used for resetting metric state during
# training/eval.
self._compile_metric_functions = []
def _set_per_output_metric_attributes(self, metrics_dict, output_index):
"""Sets the metric attributes on the model for the given output.
Arguments:
metrics_dict: A dict with metric names as keys and metric fns as values.
output_index: The index of the model output for which the metric
attributes are added.
Returns:
Metrics dict updated with unique metric names as keys.
"""
updated_metrics_dict = collections.OrderedDict()
for metric_name, metric_fn in metrics_dict.items():
metric_name = self._add_unique_metric_name(metric_name, output_index)
# Update the name on the metric class to be the unique generated name.
metric_fn._name = metric_name # pylint: disable=protected-access
updated_metrics_dict[metric_name] = metric_fn
# Keep track of metric name and function.
self._compile_metric_functions.append(metric_fn)
return updated_metrics_dict
def _set_metric_attributes(self):
"""Sets the metric attributes on the model for all the model outputs."""
updated_per_output_metrics = []
updated_per_output_weighted_metrics = []
for i, endpoint in enumerate(self._training_endpoints):
if endpoint.should_skip_target():
updated_per_output_metrics.append(self._per_output_metrics[i])
updated_per_output_weighted_metrics.append(
self._per_output_weighted_metrics[i])
continue
updated_per_output_metrics.append(
self._set_per_output_metric_attributes(self._per_output_metrics[i],
i))
updated_per_output_weighted_metrics.append(
self._set_per_output_metric_attributes(
self._per_output_weighted_metrics[i], i))
# Create a metric wrapper for each output loss. This computes mean of an
# output loss across mini-batches (irrespective of how we reduce within a
# batch).
if len(self._training_endpoints) > 1:
for endpoint in self._training_endpoints:
if not endpoint.should_skip_target():
endpoint.output_loss_metric = metrics_module.Mean(
name=endpoint.loss_name())
self._per_output_metrics = updated_per_output_metrics
self._per_output_weighted_metrics = updated_per_output_weighted_metrics
def _handle_per_output_metrics(self,
metrics_dict,
y_true,
y_pred,
mask,
weights=None):
"""Calls metric functions for a single output.
Arguments:
metrics_dict: A dict with metric names as keys and metric fns as values.
y_true: Target output.
y_pred: Predicted output.
mask: Computed mask value for the current output.
weights: Weights to be applied on the current output.
Returns:
A list of metric result tensors.
"""
metric_results = []
for metric_name, metric_fn in metrics_dict.items():
with K.name_scope(metric_name):
metric_result = training_utils.call_metric_function(
metric_fn, y_true, y_pred, weights=weights, mask=mask)
metric_results.append(metric_result)
return metric_results
def _handle_metrics(self,
outputs,
targets=None,
skip_target_masks=None,
sample_weights=None,
masks=None,
return_weighted_metrics=False,
return_weighted_and_unweighted_metrics=False):
"""Handles calling metric functions.
Arguments:
outputs: List of outputs (predictions).
targets: List of targets.
      skip_target_masks: Optional. List of booleans indicating whether the
        corresponding target should be ignored.
sample_weights: Optional list of sample weight arrays.
masks: List of computed output mask values.
return_weighted_metrics: Flag that indicates whether weighted metrics
should be computed instead of unweighted metrics. This flag is ignored
when `return_weighted_and_unweighted_metrics` is enabled.
return_weighted_and_unweighted_metrics: Flag that is used to indicate
whether both weighted and unweighted metrics should be computed. When
this is not enabled, we use `return_weighted_metrics` param to indicate
whether weighted or unweighted metrics should be returned.
Returns:
A list of metric result tensors.
"""
# TODO(scottzhu): Update this to use the new training_endpoints. Currently
    # the eager and graph logic is a bit different.
skip_target_masks = skip_target_masks or [False] * len(outputs)
metric_results = []
with K.name_scope('metrics'):
# Invoke all metrics added using `compile`.
for i in range(len(outputs)):
if skip_target_masks[i]:
continue
output = outputs[i] if outputs else None
target = targets[i] if targets else None
output_mask = masks[i] if masks else None
if (return_weighted_and_unweighted_metrics or
not return_weighted_metrics):
metric_results.extend(
self._handle_per_output_metrics(self._per_output_metrics[i],
target, output, output_mask))
if return_weighted_and_unweighted_metrics or return_weighted_metrics:
metric_results.extend(
self._handle_per_output_metrics(
self._per_output_weighted_metrics[i],
target,
output,
output_mask,
weights=sample_weights[i] if sample_weights else None))
return metric_results
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.log_first_n(
          logging.WARN, 'Discrepancy between trainable weights and collected'
          ' trainable weights: did you set `model.trainable`'
          ' without calling `model.compile` afterwards?', 1)
def _make_train_function(self):
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
self._check_trainable_weights_consistency()
if isinstance(self.optimizer, list):
raise ValueError('The `optimizer` in `compile` should be a single '
'optimizer.')
# If we have re-compiled the loss/weighted metric sub-graphs then create
# train function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'train_function', None) is None or has_recompiled:
# Restore the compiled trainable state.
current_trainable_state = self._get_trainable_state()
self._set_trainable_state(self._compiled_trainable_state)
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if not isinstance(K.symbolic_learning_phase(), int):
inputs += [K.symbolic_learning_phase()]
with K.get_graph().as_default():
with K.name_scope('training'):
# Training updates
updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with K.name_scope('training'):
# Gets loss and metrics. Updates weights at each call.
fn = K.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
setattr(self, 'train_function', fn)
# Restore the current trainable state
self._set_trainable_state(current_trainable_state)
def _make_test_function(self):
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
# If we have re-compiled the loss/weighted metric sub-graphs then create
# test function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'test_function', None) is None or has_recompiled:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
with K.get_graph().as_default():
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with K.name_scope('evaluation'):
updates = self.state_updates
# Return loss and metrics, no gradient updates.
# Does update the network states.
fn = K.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='test_function',
**self._function_kwargs)
setattr(self, 'test_function', fn)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
with K.name_scope(ModeKeys.PREDICT):
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _make_execution_function(self, mode):
if mode == ModeKeys.TRAIN:
self._make_train_function()
return self.train_function
if mode == ModeKeys.TEST:
self._make_test_function()
return self.test_function
if mode == ModeKeys.PREDICT:
self._make_predict_function()
return self.predict_function
def _distribution_standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
validation_split=0,
shuffle=False,
epochs=1,
allow_partial_batch=False):
"""Runs validation checks on input and target data passed by the user.
This is called when using tf.distribute.Strategy to train, evaluate or serve
the model.
Args:
x: Input data. A numpy array or `tf.data` dataset.
y: Target data. A numpy array or None if x is a `tf.data` dataset.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
shuffle: Boolean whether to shuffle the training data before each epoch.
epochs: Integer epochs. If > 1, repeat the numpy training data epochs
times when converting to training dataset.
allow_partial_batch: Boolean whether to enforce that all batches have the
same size.
Returns:
Dataset instance.
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if class_weight:
raise NotImplementedError('`class_weight` is currently not supported '
'when using tf.distribute.Strategy.')
if (sample_weight is not None and sample_weight.all() and
distributed_training_utils.is_tpu_strategy(
self._distribution_strategy)):
raise NotImplementedError('`sample_weight` is currently not supported '
'when using TPUStrategy.')
# Validates `steps` and `shuffle` arguments right at the beginning
# since we use it to construct the dataset object.
# TODO(anjalisridhar): Remove this check once we refactor the
# _standardize_user_data code path. This check is already present elsewhere
# in the codebase.
if isinstance(x, dataset_ops.DatasetV2):
if shuffle:
training_utils.verify_dataset_shuffled(x)
strategy = self._distribution_strategy
with strategy.scope():
# We should be sure to call get_session() inside the strategy.scope()
# so the strategy can affect the session options.
if ops.executing_eagerly_outside_functions():
session = None
else:
session = K.get_session()
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
x = training_utils.list_to_tuple(x)
if y is not None:
y = training_utils.list_to_tuple(y)
if sample_weight is not None:
sample_weight = training_utils.list_to_tuple(sample_weight)
in_tuple = (x, y, sample_weight)
else:
in_tuple = (x, y)
else:
in_tuple = x
ds = strategy.extended.experimental_make_numpy_dataset(in_tuple,
session=session)
if shuffle:
# We want a buffer size that is larger than the batch size provided by
# the user and provides sufficient randomness. Note that larger
# numbers introduce more memory usage based on the size of each
# sample.
ds = ds.shuffle(max(1024, batch_size * 8))
if epochs > 1:
ds = ds.repeat(epochs)
# We need to use the drop_remainder argument to get a known static
# input shape which is required for TPUs.
drop_remainder = (not allow_partial_batch and
strategy.extended.experimental_require_static_shapes)
# TODO(b/131720208): We still drop remainder here if number of examples
# is divisible by batch size, as sometimes dynamic padder will time out
# with keras.metrics.CategoricalAccuracy() metric.
if distributed_training_utils.is_tpu_strategy(
strategy) and not drop_remainder:
dataset_size = first_x_value.shape[0]
if dataset_size % batch_size == 0:
drop_remainder = True
x = ds.batch(batch_size, drop_remainder=drop_remainder)
else:
assert isinstance(x, dataset_ops.DatasetV2)
training_utils.validate_dataset_input(x, y, sample_weight,
validation_split)
return x
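# Hedged, illustrative sketch (added; not part of the original file): how
# numpy inputs are expected to flow through the method above under a
# tf.distribute.Strategy. The names 'build_and_compile_model', 'x_train'
# and 'y_train' are hypothetical; the method is internal and is normally
# reached via `model.fit(...)` rather than called directly.
#
#   strategy = tf.distribute.MirroredStrategy()
#   with strategy.scope():
#     model = build_and_compile_model()   # hypothetical helper
#   x_train = np.random.rand(128, 10).astype('float32')
#   y_train = np.random.rand(128, 1).astype('float32')
#   # fit() eventually reaches _distribution_standardize_user_data, which
#   # wraps (x_train, y_train) in a tf.data.Dataset, shuffles with a buffer
#   # of max(1024, batch_size * 8), repeats for multiple epochs, and batches.
#   model.fit(x_train, y_train, batch_size=32, epochs=2)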
def _standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0,
shuffle=False,
extract_tensors_from_dataset=False):
"""Runs validation checks on input and target data passed by the user.
Also standardizes the data to lists of arrays, in order.
Also builds and compiles the model on the fly if it is a subclassed model
that has never been called before (and thus has no inputs/outputs).
This is a purely internal method, subject to refactoring at any time.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, `y` should not be
specified (since targets will be obtained from the iterator).
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`. If both `sample_weight` and `class_weight` are
provided, the weights are multiplied.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: boolean, True if we want to check for validity of `steps` and
False, otherwise. For example, when we are standardizing one batch of
data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`
value is not required and we should not check for its validity in these
cases.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
shuffle: Boolean whether to shuffle the training data before each epoch.
extract_tensors_from_dataset: Boolean. When `x` is a dataset instance,
this indicates whether to extract actual tensors from the dataset or
instead output the dataset instance itself.
Set to True when calling from `train_on_batch`/etc.
Returns:
A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict
or not), target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
# Graph mode dataset. We'll pass the dataset as-is (unless
# `extract_tensors_from_dataset` is True, in which case we extract
# the tensors from the dataset and output them).
training_utils.validate_dataset_input(x, y, sample_weight,
validation_split)
if shuffle:
training_utils.verify_dataset_shuffled(x)
is_dataset = True
if extract_tensors_from_dataset:
# We do this for `train_on_batch`/etc.
x, y, sample_weight = training_utils.extract_tensors_from_dataset(x)
elif isinstance(x, iterator_ops.Iterator):
# Graph mode iterator. We extract the symbolic tensors.
training_utils.validate_dataset_input(x, y, sample_weight,
validation_split)
iterator = x
x, y, sample_weight = training_utils.unpack_iterator_input(iterator)
is_dataset = True
else:
is_dataset = False
# Validates `steps` argument based on x's type.
if check_steps:
training_utils.check_steps_argument(x, steps, steps_name)
# First, we build the model on the fly if necessary.
if not self.inputs:
all_inputs, y_input, dict_inputs = self._build_model_with_inputs(x, y)
is_build_called = True
else:
all_inputs = []
# Whether this is a subclassed model that expects dictionary inputs
# rather than list inputs (e.g. FeatureColumn-based models).
dict_inputs = isinstance(self.inputs, dict)
is_build_called = False
y_input = y
# Second, we compile the model on the fly if necessary, mostly for subclass
# models.
is_compile_called = False
if not self._is_compiled and self.optimizer:
self._compile_from_inputs(all_inputs, y_input, x, y)
is_compile_called = True
# In graph mode, if we had just set inputs and targets as symbolic tensors
# by invoking build and compile on the model respectively, we do not have to
# feed anything to the model. Model already has input and target data as
# part of the graph.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
# self.run_eagerly is not free to compute, so we want to reuse the value.
run_eagerly = self.run_eagerly
if (not run_eagerly and is_build_called and is_compile_called and
not is_dataset and any(_is_symbolic_tensor(v) for v in all_inputs)):
return [], [], None
return self._standardize_tensors(
x, y, sample_weight,
run_eagerly=run_eagerly,
dict_inputs=dict_inputs,
is_dataset=is_dataset,
class_weight=class_weight,
batch_size=batch_size)
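# Hedged, illustrative sketch (added; not part of the original file) of the
# contract documented above. For a compiled graph-network model fed numpy
# data, a call such as
#
#   x, y, sample_weights = model._standardize_user_data(
#       np.zeros((8, 4)), np.zeros((8, 1)), batch_size=4)
#
# returns per-input / per-output lists of standardized arrays (with
# sample_weights entries possibly None when no weighting applies), while a
# model whose inputs and targets are already symbolic tensors returns
# ([], [], None), since the data is baked into the graph.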
def _standardize_tensors(self, x, y, sample_weight, run_eagerly, dict_inputs,
is_dataset, class_weight=None, batch_size=None):
if run_eagerly:
# In eager mode, do not do shape validation
# since the network has no input nodes (placeholders) to be fed.
feed_input_names = self.input_names
feed_input_shapes = None
elif not self._is_graph_network:
# Case: symbolic-mode subclassed network. Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
if not isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
# TODO(fchollet): run static checks with dataset output shape(s).
x = training_utils.standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
# Get typespecs for the input data and sanitize it if necessary.
# TODO(momernick): This should be capable of doing full input validation
# at all times - validate that this is so and refactor the standardization
# code.
if isinstance(x, dataset_ops.DatasetV2):
x_shapes = dataset_ops.get_structure(x)
if isinstance(x_shapes, tuple):
# If the output of a Dataset is a tuple, we assume it's either of the
# form (x_data, y_data) or (x_data, y_data, sample_weights). In either
# case, we only care about x_data here.
x_shapes = x_shapes[0]
else:
flat_inputs = nest.flatten(x, expand_composites=False)
flat_expected_inputs = nest.flatten(self.inputs, expand_composites=False)
converted_x = []
for (a, b) in zip(flat_inputs, flat_expected_inputs):
converted_x.append(_convert_scipy_sparse_tensor(a, b))
x = nest.pack_sequence_as(x, converted_x, expand_composites=False)
def _type_spec_from_value(value):
"""Grab type_spec without converting array-likes to tensors."""
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
# Get a TensorSpec for array-like data without
# converting the data to a Tensor
if hasattr(value, 'shape') and hasattr(value, 'dtype'):
return tensor_spec.TensorSpec(value.shape, value.dtype)
else:
return type_spec.type_spec_from_value(value)
x_shapes = nest.map_structure(_type_spec_from_value, x)
flat_inputs = nest.flatten(x_shapes, expand_composites=False)
flat_expected_inputs = nest.flatten(self.inputs, expand_composites=False)
for (a, b) in zip(flat_inputs, flat_expected_inputs):
nest.assert_same_structure(a, b, expand_composites=True)
if y is not None:
# Prepare self._sample_weight_modes. List with the same length as
# model outputs.
training_utils.prepare_sample_weight_modes(self._training_endpoints,
self.sample_weight_mode)
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._sample_weight_modes
if not self._is_graph_network:
feed_output_shapes = None
else:
feed_output_shapes = self._feed_output_shapes
# Standardize the outputs.
y = training_utils.standardize_input_data(
y,
feed_output_names,
# Don't enforce target shapes to match output shapes.
# Precise checks will be run in `check_loss_and_target_compatibility`.
shapes=None,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = training_utils.standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = training_utils.standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
training_utils.standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
if not self._distribution_strategy:
training_utils.check_array_lengths(x, y, sample_weights)
if self._is_graph_network and not run_eagerly:
# Additional checks to avoid users mistakenly using improper loss fns.
training_utils.check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
sample_weights, _, _ = training_utils.handle_partial_sample_weights(
y, sample_weights, feed_sample_weight_modes, check_all_flat=True)
else:
y = []
sample_weights = None
if self.stateful and batch_size and not is_dataset:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
# If dictionary inputs were provided, we return a dictionary as well.
if dict_inputs and not isinstance(x, (dataset_ops.DatasetV1,
dataset_ops.DatasetV2)):
x = dict(zip(feed_input_names, x))
return x, y, sample_weights
def _build_model_with_inputs(self, inputs, targets):
"""Build the model (set model inputs/outputs), mainly for subclass model."""
processed_inputs = []
is_dict_inputs = False
orig_inputs = inputs
# We need to use `inputs` to set the model inputs.
# If input data is a dataset iterator in graph mode or if it is an eager
# iterator and only one batch of samples is required, we fetch the data
# tensors from the iterator and then standardize them.
if isinstance(inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)):
inputs, targets, _ = training_utils.extract_tensors_from_dataset(inputs)
# We type-check that `inputs` and `targets` are either single arrays
# or lists of arrays, and extract a flat list of inputs from the passed
# structure.
training_utils.validate_input_types(inputs, orig_inputs)
if isinstance(inputs, (list, tuple)):
processed_inputs += list(inputs)
elif isinstance(inputs, dict):
is_dict_inputs = True
keys = sorted(inputs.keys())
processed_inputs = [inputs[k] for k in keys]
else:
processed_inputs.append(inputs)
# Now that we have a flat set of inputs, we make sure that none of them
# are CompositeTensors or CompositeTensorValues of any type (or scipy
# sparse arrays, which we treat as SparseTensor values). We cannot safely
# infer input data from an arbitrary composite tensor, so we don't try -
# users should explicitly add composite tensor inputs to their subclassed
# models.
for input_tensor in processed_inputs:
if composite_tensor_utils.is_composite_or_composite_value(input_tensor):
# TODO(b/132691975): Document subclass-model CT input handling.
raise ValueError(
'All SparseTensor and RaggedTensor inputs must be explicitly '
'declared using a keras.Input() with sparse=True or ragged=True. '
'We found an undeclared input %s. For Sequential models, please '
'add a keras.Input() as your first Layer. For subclassed models, '
'please call self._set_inputs() on your input set, which you can '
'create using keras.Input() for each input to your model.' %
(input_tensor,))
# Build the model using the retrieved inputs (value or symbolic).
# If values are generated from a dataset, then in symbolic-mode
# placeholders will be created to match the value shapes.
if isinstance(orig_inputs, (dataset_ops.DatasetV1, dataset_ops.DatasetV2,
iterator_ops.Iterator)):
if not self.inputs:
# For subclassed models, a robust input spec is not available so we
# must cast to the model dtype.
inputs = training_utils.cast_if_floating_dtype(inputs, self.dtype)
def create_tensor_spec(t):
return tensor_spec.TensorSpec(t.shape, t.dtype)
cast_inputs = nest.map_structure(create_tensor_spec, inputs)
elif training_utils.has_tensors(inputs):
cast_inputs = training_utils.cast_if_floating_dtype(inputs)
else:
cast_inputs = inputs
self._set_inputs(cast_inputs)
return processed_inputs, targets, is_dict_inputs
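# Hedged sketch (added; not part of the original file): a subclassed model
# that has never been called reaches _build_model_with_inputs on its first
# fit()/evaluate(), which flattens the user data and calls _set_inputs to
# create matching symbolic inputs. Hypothetical example:
#
#   class TinyModel(Model):
#     def __init__(self):
#       super(TinyModel, self).__init__()
#       self.dense = layers.Dense(1)
#     def call(self, x):
#       return self.dense(x)
#
#   m = TinyModel()
#   m.compile('sgd', 'mse')
#   m.fit(np.ones((4, 3)), np.ones((4, 1)))  # inputs are built on the fly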
def _compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target):
if target is not None:
# We need to use `y` to set the model targets.
if training_utils.has_tensors(target):
target = training_utils.cast_if_floating_dtype_and_mismatch(
target, self.outputs)
training_utils.validate_input_types(target, orig_target,
allow_dict=False, field_name='target')
if isinstance(target, (list, tuple)):
all_inputs += list(target)
else:
all_inputs.append(target)
# Type check that all inputs are *either* value *or* symbolic.
# TODO(fchollet): this check could be removed in Eager mode?
if any(tensor_util.is_tensor(v) for v in all_inputs):
if not all(tensor_util.is_tensor(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy arrays and '
'TensorFlow tensors. '
'You passed: x=' + str(orig_inputs) +
'; y=' + str(orig_target))
is_dataset = isinstance(orig_inputs, (dataset_ops.DatasetV1,
dataset_ops.DatasetV2,
iterator_ops.Iterator))
if is_dataset or context.executing_eagerly():
target_tensors = None
else:
# Handle target tensors if any passed.
if target is not None:
if not isinstance(target, (list, tuple)):
target = [target]
target_tensors = [v for v in target if _is_symbolic_tensor(v)]
else:
target_tensors = None
self.compile(
optimizer=self.optimizer,
loss=self.loss,
metrics=self._compile_metrics,
weighted_metrics=self._compile_weighted_metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors,
sample_weight_mode=self.sample_weight_mode,
run_eagerly=self.run_eagerly,
experimental_run_tf_function=self._experimental_run_tf_function)
# TODO(omalleyt): Consider changing to a more descriptive function name.
def _set_inputs(self, inputs, outputs=None, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, data tensors, or TensorSpecs.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data or TensorShapes: we create placeholders matching the
TensorShapes or shapes of the Numpy arrays. We expect Numpy data to be
fed for these placeholders when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
outputs: None, a data tensor, or a list of tensors. If None, the
outputs will be determined by invoking `self.call()`, otherwise the
provided value will be used.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If dict inputs are passed to a Sequential Model where the
first layer isn't FeatureLayer.
"""
inputs = self._set_input_attrs(inputs)
if outputs is None:
kwargs = {}
if self._expects_training_arg:
# In V2 mode, feeding `training=None` is not allowed because any value
# explicitly passed by the user is respected, even `None`.
if training is None and not ops.executing_eagerly_outside_functions():
training = K.learning_phase()
if training is not None:
kwargs['training'] = training
try:
outputs = self(inputs, **kwargs)
except NotImplementedError:
# This Model or a submodel is dynamic and hasn't overridden
# `compute_output_shape`.
outputs = None
self._set_output_attrs(outputs)
@trackable.no_automatic_dependency_tracking
def _set_input_attrs(self, inputs):
"""Sets attributes related to the inputs of the Model."""
if self.inputs:
raise ValueError('Model inputs are already set.')
if self.__class__.__name__ == 'Sequential' and not self.built:
if tensor_util.is_tensor(inputs):
input_shape = (None,) + tuple(inputs.shape.as_list()[1:])
elif isinstance(inputs, tensor_shape.TensorShape):
input_shape = (None,) + tuple(inputs.as_list()[1:])
elif isinstance(inputs, dict):
# We assert that the first layer is a FeatureLayer.
if not training_utils.is_feature_layer(self.layers[0]):
raise ValueError('Passing a dictionary input to a Sequential Model '
'which doesn\'t have FeatureLayer as the first layer'
' is an error.')
input_shape = (None,)
else:
input_shape = (None,) + tuple(inputs.shape[1:])
self._build_input_shape = input_shape
# Cast inputs to the compute dtype. This is primarily used
# when saving to determine the correct dtype in the input signature.
inputs = self._maybe_cast_inputs(inputs)
# On-the-fly setting of symbolic model inputs (either by using the tensor
# provided, or by creating a placeholder if Numpy data was provided).
model_inputs = training_utils.ModelInputs(inputs)
inputs = model_inputs.get_symbolic_inputs()
self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.input_names = model_inputs.get_input_names()
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
for k, v in model_inputs.as_dict():
if K.is_placeholder(v):
self._feed_input_names.append(k)
self._feed_inputs.append(v)
self._feed_input_shapes.append(K.int_shape(v))
return inputs
@trackable.no_automatic_dependency_tracking
def _set_output_attrs(self, outputs):
"""Sets attributes related to the outputs of the Model."""
# NOTE(taylorrobie): This convention cannot be changed without updating the
# data adapter since it assumes nest.flatten ordering.
outputs = nest.flatten(outputs)
self.outputs = outputs
self.output_names = training_utils.generic_output_names(outputs)
# TODO(scottzhu): Should we cleanup the self._training_endpoints here?
self.built = True
@property
def _targets(self):
"""The output target tensors for the model."""
return [
e.training_target.target
for e in self._training_endpoints
if e.has_training_target()
]
@property
def _feed_targets(self):
return [
e.training_target.target
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_output_names(self):
return [
e.output_name
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_output_shapes(self):
return [
e.feed_output_shape
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_loss_fns(self):
return [
e.loss_fn
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _loss_weights_list(self):
return [e.loss_weight for e in self._training_endpoints]
@property
def _output_loss_metrics(self):
if hasattr(self, '_training_endpoints'):
return [
e.output_loss_metric
for e in self._training_endpoints
if e.output_loss_metric is not None
]
return None
@property
def sample_weights(self):
return [e.sample_weight for e in self._training_endpoints]
@property
def _sample_weight_modes(self):
return [e.sample_weight_mode for e in self._training_endpoints]
@property
def _feed_sample_weights(self):
return [e.sample_weight for e in self._training_endpoints
if e.sample_weight is not None]
def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):
"""Maybe load initial epoch from ckpt considering possible worker recovery.
Refer to tensorflow/python/keras/distribute/multi_worker_training_state.py
for more information.
Arguments:
initial_epoch: The original initial_epoch the user passes in to `fit()`.
mode: The mode for running `model.fit()`.
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the epoch the training is supposed to continue
at. Otherwise, return the `initial_epoch` the user passes in.
"""
if hasattr(self, '_training_state'):
return self._training_state.maybe_load_initial_epoch_from_ckpt(
initial_epoch, mode)
return initial_epoch
def _get_training_eval_metrics(self):
"""Returns all the metrics that are to be reported.
This includes the output loss metrics, compile metrics/weighted metrics,
add_metric metrics.
"""
metrics = []
metrics.extend(getattr(self, '_output_loss_metrics', None) or [])
metrics.extend(getattr(self, 'metrics', None) or [])
return metrics
def _assert_compile_was_called(self):
# Checks whether `compile` has been called. If it has been called,
# then the optimizer is set. This is different from whether the
# model is compiled
# (i.e. whether the model is built and its inputs/outputs are set).
if not self.optimizer:
raise RuntimeError('You must compile your model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
def _in_multi_worker_mode(self):
"""Method to infer if this `Model` is working in multi-worker settings.
Multi-worker training refers to the setup where the training is
distributed across multiple workers, as opposed to the case where
only a local process performs the training. This function is
used to infer for example whether or not a distribute coordinator
should be run, and thus TensorFlow servers should be started for
communication with other servers in the cluster, or whether or not
saving/restoring checkpoints is relevant for preemption fault tolerance.
Experimental. Signature and implementation are subject to change.
Returns:
Whether this model indicates it's working in multi-worker settings.
"""
strategy = self._get_distribution_strategy()
return strategy and strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _get_distribution_strategy(self):
# If the model was compiled under the scope of a `tf.distribute.Strategy',
# `self._distribution_strategy` would have been set and model should infer
# that as the used strategy (even if it's out of strategy scope already).
strategy = self._distribution_strategy
# Otherwise, use the strategy whose scope this is in.
if not strategy and distribution_strategy_context.has_strategy():
strategy = distribution_strategy_context.get_strategy()
return strategy
@property
def _trackable_saved_model_saver(self):
return model_serialization.ModelSavedModelSaver(self)
class DistributedCallbackModel(Model):
"""Model that is used for callbacks with tf.distribute.Strategy."""
def __init__(self, model):
super(DistributedCallbackModel, self).__init__()
self.optimizer = model.optimizer
def set_original_model(self, orig_model):
self._original_model = orig_model
def save_weights(self, filepath, overwrite=True, save_format=None):
self._replicated_model.save_weights(filepath, overwrite=overwrite,
save_format=save_format)
def save(self, filepath, overwrite=True, include_optimizer=True):
# save weights from the distributed model to the original model
distributed_model_weights = self.get_weights()
self._original_model.set_weights(distributed_model_weights)
# TODO(anjalisridhar): Do we need to save the original model here?
# Saving the first replicated model works as well.
self._original_model.save(filepath, overwrite=True, include_optimizer=False)
def load_weights(self, filepath, by_name=False):
self._original_model.load_weights(filepath, by_name=False)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = self._original_model.get_weights()
distributed_training_utils.set_weights(
self._original_model._distribution_strategy, self, # pylint: disable=protected-access
orig_model_weights)
def __getattr__(self, item):
# Whitelisted attributes of the model that can be accessed by the user
# during a callback.
if item not in ('_setattr_tracking', '_layers'):
logging.warning('You are accessing attribute ' + item + ' of the '
'DistributedCallbackModel that may not have been set '
'correctly.')
return super(DistributedCallbackModel, self).__getattr__(item)
class _TrainingEndpoint(object):
"""A container for the training output/target and related entities.
In the case of a model with multiple outputs, there is a one-to-one mapping
between model output (y_pred), model target (y_true), loss, metrics, etc.
By unifying these entities into one class, the different entities can access
information about each other, rather than each having to consult a different
list of attributes on the model.
"""
def __init__(self,
output,
output_name,
loss_fn,
loss_weight=None,
training_target=None,
output_loss_metric=None,
sample_weight=None,
sample_weight_mode=None):
"""Initialize the _TrainingEndpoint.
Note that the output and output_name should be stable as long as the model
structure doesn't change. The training_target is supposed to be mutable since
the information is provided via `compile()`.
Args:
output: the output tensor of the model.
output_name: the unique name of the output tensor.
loss_fn: the loss function for the output tensor.
loss_weight: float, the weights for the loss.
training_target: the _TrainingTarget for the model.
output_loss_metric: the metric object for the loss function.
sample_weight: the weights for how a sample is weighted during metric and
loss calculation. Could be None.
sample_weight_mode: string, 'temporal', 'samplewise' or None. The mode for
how the sample_weight is populated.
"""
self._output = output
self._output_name = output_name
self._loss_fn = loss_fn
self._loss_weight = loss_weight
self._training_target = training_target
self._output_loss_metric = output_loss_metric
self._sample_weight = sample_weight
self._sample_weight_mode = sample_weight_mode
@property
def output(self):
return self._output
@property
def output_name(self):
return self._output_name
@property
def shape(self):
return K.int_shape(self.output)
@property
def loss_fn(self):
return self._loss_fn
@property
def loss_weight(self):
return self._loss_weight
@loss_weight.setter
def loss_weight(self, value):
self._loss_weight = value
@property
def training_target(self):
return self._training_target
@training_target.setter
def training_target(self, value):
self._training_target = value
def create_training_target(self, target, run_eagerly=False):
"""Create training_target instance and update the self.training_target.
Note that the input target should just be a tensor or None, and
corresponding training target will be created based on the output and
loss_fn.
Args:
target: the target tensor for the current output. Could be None.
run_eagerly: boolean, whether the model is in run_eagerly mode.
Raises:
ValueError if the training_target field for the current instance has
already been populated.
"""
if self.has_training_target():
raise ValueError('The training_target field for the _TrainingEndpoint '
'instance has already been populated')
if run_eagerly:
# When run_eagerly, the target tensor is ignored, and the None placeholder
# is created instead.
self.training_target = _TrainingTarget(
None, feedable=True, skip_target_weights=False)
return
if self.should_skip_target():
self.training_target = _TrainingTarget(None)
else:
if target is not None and not K.is_placeholder(target):
feedable = False
skip_target_weights = True
else:
feedable = True
skip_target_weights = False
if target is None:
target_dtype = losses.LABEL_DTYPES_FOR_LOSSES.get(
self.loss_fn, K.dtype(self.output))
target = K.placeholder(
ndim=len(self.shape),
name=self.output_name + '_target',
sparse=K.is_sparse(self.output),
dtype=target_dtype)
self.training_target = _TrainingTarget(
target,
feedable=feedable,
skip_target_weights=skip_target_weights)
@property
def output_loss_metric(self):
return self._output_loss_metric
@output_loss_metric.setter
def output_loss_metric(self, value):
self._output_loss_metric = value
@property
def sample_weight(self):
return self._sample_weight
@sample_weight.setter
def sample_weight(self, value):
self._sample_weight = value
@property
def sample_weight_mode(self):
return self._sample_weight_mode
@sample_weight_mode.setter
def sample_weight_mode(self, value):
self._sample_weight_mode = value
def should_skip_target(self):
return self._loss_fn is None
def should_skip_target_weights(self):
return (self.should_skip_target() or self.training_target is None or
self.training_target.skip_target_weights)
def has_training_target(self):
return self.training_target is not None
def has_feedable_training_target(self):
return (not self.should_skip_target() and
self.training_target is not None and self.training_target.feedable)
def loss_name(self):
if self._loss_fn is not None:
return self._output_name + '_loss'
return None
@property
def feed_output_shape(self):
"""The output shape for the feedable target."""
if not self.has_feedable_training_target():
return None
if ((isinstance(self.loss_fn, losses.LossFunctionWrapper) and
self.loss_fn.fn == losses.sparse_categorical_crossentropy)) or (
isinstance(self.loss_fn, losses.SparseCategoricalCrossentropy)):
if K.image_data_format() == 'channels_first':
return (self.shape[0], 1) + self.shape[2:]
else:
return self.shape[:-1] + (1,)
elif (not isinstance(self.loss_fn, losses.Loss) or
(isinstance(self.loss_fn, losses.LossFunctionWrapper) and
(getattr(losses, self.loss_fn.fn.__name__, None) is None))):
# If the given loss is not an instance of the `Loss` class (custom
# class) or if the loss function that is wrapped is not in the
# `losses` module, then it is a user-defined loss and we make no
# assumptions about it.
return None
else:
return self.shape
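# Illustrative note (added, hedged): with an output of shape (None, 10) and
# a sparse categorical crossentropy loss, the property above yields a
# feedable target shape of (None, 1) (integer class indices), whereas plain
# categorical crossentropy keeps the one-hot shape (None, 10). Custom or
# unrecognized losses yield None, meaning no shape assumption is made.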
def sample_weights_mismatch(self):
"""Check if the sample weight and the mode match or not."""
# If there is a mismatch between sample weight mode and the placeholders
# created, then recompile the sub-graphs that depend on sample weights.
return (
(self.sample_weight_mode is not None and self.sample_weight is None) or
(self.sample_weight_mode is None and self.sample_weight is not None))
def populate_sample_weight(self, sample_weight, sample_weight_mode):
"""Populate the sample weight and based on the sample weight mode."""
if (sample_weight is None and
(self.should_skip_target_weights() or sample_weight_mode is None or
context.executing_eagerly())):
self._sample_weight = None
return
assert sample_weight_mode in ['temporal', 'samplewise']
if sample_weight_mode == 'temporal':
default_value = [[1.]]
shape = [None, None]
else:
# sample_weight_mode == 'samplewise'
default_value = [1.]
shape = [None]
if sample_weight is not None:
if not sample_weight.shape.is_compatible_with(shape):
raise ValueError('Received sample weight with shape {}. Expected shape '
'{}.'.format(sample_weight.shape, shape))
self._sample_weight = sample_weight
else:
self._sample_weight = array_ops.placeholder_with_default(
constant_op.constant(default_value, dtype=K.floatx()),
shape=shape,
name=self.output_name + '_sample_weights')
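# Hedged example (added; not part of the original file) of the shapes
# enforced by populate_sample_weight above:
#   sample_weight_mode='samplewise' -> placeholder shape [None], one scalar
#                                      weight per sample, default [1.]
#   sample_weight_mode='temporal'   -> placeholder shape [None, None], one
#                                      weight per timestep, default [[1.]]
# At the user level, np.array([0.5, 1.0, 2.0]) passed as sample_weight to
# fit() matches samplewise mode for 3 samples, while np.ones((3, 7)) matches
# temporal mode for 3 sequences of length 7.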
class _TrainingTarget(object):
"""Container for a target tensor (y_true) and its metadata (shape, loss...).
Arguments:
target: A target tensor for the model. It may be `None` if the
output is excluded from loss computation. It is still kept as None
since each output of the model should have a corresponding target. If
the target is None, the rest of the attributes will be None as well.
feedable: Boolean, whether the target is feedable (requires data to be
passed in `fit` or `train_on_batch`), or not (model compiled with
`target_tensors` argument).
skip_target_weights: Boolean, whether the target should be skipped during
weights calculation.
"""
def __init__(self, target, feedable=False, skip_target_weights=True):
self._target = target
self._feedable = feedable
self._skip_target_weights = skip_target_weights
@property
def target(self):
return self._target
@property
def feedable(self):
return self._feedable
@property
def skip_target_weights(self):
return self._skip_target_weights
def _is_symbolic_tensor(x):
return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
def _convert_scipy_sparse_tensor(value, expected_input):
"""Handle scipy sparse tensor conversions.
This method takes a value 'value' and returns the proper conversion. If
value is a scipy sparse tensor and the expected input is a dense tensor,
we densify 'value'. If value is a scipy sparse tensor and the expected input
is a TF SparseTensor, we convert 'value' to a SparseTensor. If 'value' is
not a scipy sparse tensor, or scipy is not imported, we pass it through
unchanged.
Arguments:
value: An object that may be a scipy sparse tensor
expected_input: The expected input placeholder.
Returns:
The possibly-converted 'value'.
"""
if issparse is not None and issparse(value):
if ops.is_dense_tensor_like(expected_input):
if ops.executing_eagerly_outside_functions():
# In TF2 we do not silently densify sparse matrices.
raise ValueError('A SciPy sparse matrix was passed to a model '
'that expects dense inputs. Please densify your '
'inputs first, such as by calling `x.toarray()`.')
return value.toarray()
else:
sparse_coo = value.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)),
1)
return sparse_tensor.SparseTensor(indices, data, shape)
else:
return value
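def _example_scipy_coo_to_sparse_tensor():  # pragma: no cover
  """Hedged, illustrative helper added for clarity; not part of the original
  file and never called by the framework. Mirrors the COO -> tf.SparseTensor
  conversion performed in `_convert_scipy_sparse_tensor` above, assuming
  `scipy` is available (as the `issparse` check above implies).
  """
  import scipy.sparse as sp  # assumption: scipy is installed
  value = sp.coo_matrix(np.array([[0., 2.], [3., 0.]]))
  row, col = value.row, value.col
  indices = np.concatenate((np.expand_dims(row, 1), np.expand_dims(col, 1)), 1)
  # indices == [[0, 1], [1, 0]], values == [2., 3.], dense shape == (2, 2)
  return sparse_tensor.SparseTensor(indices, value.data, value.shape)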
def _get_metrics_from_layers(layers):
"""Returns list of metrics from the given layers.
This will not include the `compile` metrics of a model layer.
Arguments:
layers: List of layers.
Returns:
List of metrics.
"""
metrics = []
layers = trackable_layer_utils.filter_empty_layer_containers(layers)
for layer in layers:
if isinstance(layer, Model):
# We cannot call 'metrics' on the model because we do not want to
# include the metrics that were added in compile API of a nested model.
metrics.extend(layer._metrics) # pylint: disable=protected-access
metrics.extend(_get_metrics_from_layers(layer.layers))
else:
metrics.extend(layer.metrics)
return metrics
| 43.241541
| 111
| 0.667898
|
e7a4d1b45612a008c503a518e0d5976b688b53d0
| 940
|
py
|
Python
|
magic/magic/items.py
|
TxarlyToad/Magic-Cardmarket-Spider
|
c2f5c5eeefbea0a30855dc8396d9fdd85af07637
|
[
"MIT"
] | 2
|
2021-09-11T23:30:57.000Z
|
2021-09-14T17:45:58.000Z
|
magic/magic/items.py
|
TxarlyToad/Magic-Cardmarket-Spider
|
c2f5c5eeefbea0a30855dc8396d9fdd85af07637
|
[
"MIT"
] | null | null | null |
magic/magic/items.py
|
TxarlyToad/Magic-Cardmarket-Spider
|
c2f5c5eeefbea0a30855dc8396d9fdd85af07637
|
[
"MIT"
] | 1
|
2022-03-09T16:56:28.000Z
|
2022-03-09T16:56:28.000Z
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class MagicCardMarketInformation(Item):
url = Field(type=str)
name = Field(type=str)
set_number = Field(type=str)
card_set = Field(type=str)
#price
minimum = Field(type=float)
price_trend = Field(type=float)
average_price_30_days = Field(type=float)
average_price_7_days = Field(type=float)
average_price_1_day = Field(type=float)
class MagicCardMarketOffer(Item):
country = Field(type=str)
seller = Field(type=str)
card_condition = Field(type=str)
card_language = Field(type=str)
professional_type = Field(type=str)
is_foil = Field(type=bool)
is_signed = Field(type=bool)
is_playset = Field(type=bool)
product_comments = Field(type=str)
price = Field(type=float)
item_count = Field(type=int)
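# Hedged usage sketch (added; not part of the original file). Scrapy items
# behave like dicts, so a spider callback might populate an offer as below;
# the field names follow the definitions above.
if __name__ == '__main__':
    offer = MagicCardMarketOffer(
        country='ES', seller='example_seller', card_condition='NM',
        card_language='English', professional_type='Private',
        is_foil=False, is_signed=False, is_playset=False,
        product_comments='', price=0.25, item_count=4)
    print(dict(offer))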
| 28.484848
| 53
| 0.705319
|
aae80ffd40b36ec673a1221c0a5bf18116bc41b0
| 22,611
|
py
|
Python
|
lib/termineter/core.py
|
jayaram24/Termineter-Modified
|
2cab514ff1640809337c6fe17f24433bcdec2260
|
[
"MIT"
] | null | null | null |
lib/termineter/core.py
|
jayaram24/Termineter-Modified
|
2cab514ff1640809337c6fe17f24433bcdec2260
|
[
"MIT"
] | null | null | null |
lib/termineter/core.py
|
jayaram24/Termineter-Modified
|
2cab514ff1640809337c6fe17f24433bcdec2260
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# termineter/core.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import unicode_literals
import binascii
import importlib
import logging
import logging.handlers
import os
import re
import sys
import c1218.connection
import c1218.errors
import termineter.module
import termineter.errors
import termineter.options
import termineter.utilities
import serial
import serial.serialutil
import tabulate
import termcolor
import pdb
class Framework(object):
"""
This is the main instance of the framework. It contains and
manages the serial connection as well as all of the loaded
modules.
"""
def __init__(self, stdout=None):  # constructor: methods defined in a class automatically receive the instance ('self') as their first argument, followed by any other arguments
self.__package__ = '.'.join(self.__module__.split('.')[:-1])
package_path = importlib.import_module(self.__package__).__path__[0] # that's some python black magic trickery for you
if stdout is None:
stdout = sys.stdout
self.stdout = stdout
self.logger = logging.getLogger('termineter.framework')
# Namespace (from termineter/utilities.py) is a simple attribute container; it is used for organizational purposes only.
self.directories = termineter.utilities.Namespace()
# os.path.expanduser: returns its argument with an initial ~ or ~user replaced by that
# user's home directory (HOME / the pwd database on Unix; HOME, USERPROFILE or
# HOMEDRIVE + HOMEPATH on Windows).
self.directories.user_data = os.path.abspath(os.path.join(os.path.expanduser('~'), '.termineter'))
# os.path.join(path, *paths): joins path components intelligently, inserting exactly one
# separator between non-empty parts; if a component is an absolute path, all previous
# components are discarded and joining continues from it.
self.directories.data_path = os.path.abspath(os.path.join(package_path, 'data'))
if not os.path.isdir(self.directories.data_path):
self.logger.critical('path to data not found')
raise termineter.errors.FrameworkConfigurationError('path to data not found')
if not os.path.isdir(self.directories.user_data):
os.mkdir(self.directories.user_data)
self.serial_connection = None
self._serial_connected = False
# setup logging stuff
main_file_handler = logging.handlers.RotatingFileHandler(os.path.join(self.directories.user_data, self.__package__ + '.log'), maxBytes=262144, backupCount=5)
main_file_handler.setLevel(logging.DEBUG)
main_file_handler.setFormatter(logging.Formatter("%(asctime)s %(name)-50s %(levelname)-10s %(message)s"))
logging.getLogger('').addHandler(main_file_handler)
# setup and configure options
# Whether or not these are 'required' is really enforced by the individual
# modules get_missing_options method and by which options they require based
# on their respective types. See framework/templates.py for more info.
# call option function from termineter/options
self.options = termineter.options.Options(self.directories)
self.options.add_boolean('USE_COLOR', 'enable color on the console interface', default=False)
self.options.add_string('SERIAL_CONNECTION', 'serial connection string')
self.options.add_string('USERNAME', 'serial username', default='0000') #Original Code
#self.options.add_string('USERNAME', 'serial username', default='1') #Modified Code
#self.options.add_integer('USERNAME', 'serial username', default=80) #50 in HEX and 80 in decimal. Modified.
#self.options.add_integer('USER_ID', 'serial user id', default=0) #Original Code
self.options.add_integer('USER_ID', 'serial user id', default=5) # Modified
self.options.add_string('PASSWORD', 'serial c12.18 password', default='00000000000000000000')
self.options.add_boolean('PASSWORD_HEX', 'if the password is in hex', default=True)
# Call AdvancedOption function from termineter/options but pass.
# Keywords: AUTO_CONNECT, CACHE_TABLES and etc.
self.advanced_options = termineter.options.AdvancedOptions(self.directories)
self.advanced_options.add_boolean('AUTO_CONNECT', 'automatically handle connections for modules', default=True)
self.advanced_options.add_boolean('CACHE_TABLES', 'cache certain read-only tables', default=True)
self.advanced_options.set_callback('CACHE_TABLES', self._opt_callback_set_cache_tables)
#self.advanced_options.add_integer('C1218_MAX_PACKETS', 'c12.18 maximum packets for reassembly', default=2) # Original Code
self.advanced_options.add_integer('C1218_MAX_PACKETS', 'c12.18 maximum packets for reassembly', default=1) # try default=1, 2, 3 or 4.
#self.advanced_options.add_integer('C1218_PACKET_SIZE', 'c12.18 maximum packet size', default=512) # Original
self.advanced_options.add_integer('C1218_PACKET_SIZE', 'c12.18 maximum packet size', default=64) #64, 128, 256 and 512 are working and pass the errors
# C12.18 | 4.7.1 / page 23, basic data, data rate: the maximum transmitting speed shall be at least 9600.
self.advanced_options.add_integer('SERIAL_BAUD_RATE', 'serial connection baud rate', default=9600)
self.advanced_options.add_integer('SERIAL_BYTE_SIZE', 'serial connection byte size', default=serial.EIGHTBITS)
self.advanced_options.add_integer('SERIAL_STOP_BITS', 'serial connection stop bits', default=serial.STOPBITS_ONE)
self.advanced_options.add_string('TABLE_FORMAT', 'the format to print tables in', default='simple')
self.advanced_options.set_callback('TABLE_FORMAT', self._opt_callback_set_table_format)
if sys.platform.startswith('linux'):
self.options.set_option_value('USE_COLOR', 'True')
# start loading modules
self.current_module = None
self.modules = termineter.module.ManagerManager(self, [
os.path.abspath(os.path.join(__file__, '..', 'modules')),
os.path.abspath(os.path.join(self.directories.user_data, 'modules'))
])
self.logger.info("successfully loaded {0:,} modules into the framework".format(len(self.modules)))
return
def __repr__(self):
return '<' + self.__class__.__name__ + ' Loaded Modules: ' + str(len(self.modules)) + ', Serial Connected: ' + str(self.is_serial_connected()) + ' >'
def _opt_callback_set_cache_tables(self, policy, _):
if self.is_serial_connected():
self.serial_connection.set_table_cache_policy(policy)
return True
def _opt_callback_set_table_format(self, table_format, _):
if table_format not in tabulate.tabulate_formats:
self.print_error('TABLE_FORMAT must be one of: ' + ', '.join(tabulate.tabulate_formats))
return False
return True
def _run_optical(self, module):
print("core.py-155*** Inside Run_Optical *** ")
if not self._serial_connected:
self.print_error('The serial interface has not been connected')
return False
try:
self.serial_get()
except Exception as error:
self.print_exception(error)
return False
ConnectionState = termineter.module.ConnectionState
if not self.advanced_options['AUTO_CONNECT']:
return True
if module.connection_state == ConnectionState.none:
return True
try:
self.serial_connect()
except Exception as error:
self.print_exception(error)
return
self.print_good('*****core.py-174: Successfully connected and the device is responding')
pdb.set_trace()
if module.connection_state == ConnectionState.connected:
return True
if not self.serial_login():
self.logger.warning('meter login failed, some tables may not be accessible')
if module.connection_state == ConnectionState.authenticated:
return True
self.logger.warning('unknown optical connection state: ' + module.connection_state.name)
return True
def reload_module(self, module_path=None):
"""
Reloads a module into the framework. If module_path is not
specified, then the current_module variable is used. Returns True
on success, False on error.
@type module_path: String
@param module_path: The name of the module to reload
"""
if module_path is None:
if self.current_module is not None:
module_path = self.current_module.name
else:
self.logger.warning('must specify a module if no module is currently in use')
return False
if module_path not in self.modules:
self.logger.error('invalid module requested for reload')
raise termineter.errors.FrameworkRuntimeError('invalid module requested for reload')
self.logger.info('reloading module: ' + module_path)
module_instance = self.import_module(module_path, reload_module=True)
if not isinstance(module_instance, termineter.module.TermineterModule):
self.logger.error('module: ' + module_path + ' is not derived from the TermineterModule class')
raise termineter.errors.FrameworkRuntimeError('module: ' + module_path + ' is not derived from the TermineterModule class')
if not hasattr(module_instance, 'run'):
self.logger.error('module: ' + module_path + ' has no run() method')
raise termineter.errors.FrameworkRuntimeError('module: ' + module_path + ' has no run() method')
if not isinstance(module_instance.options, termineter.options.Options) or not isinstance(module_instance.advanced_options, termineter.options.Options):
self.logger.error('module: ' + module_path + ' options and advanced_options must be termineter.options.Options instances')
raise termineter.errors.FrameworkRuntimeError('options and advanced_options must be termineter.options.Options instances')
module_instance.name = module_path.split('/')[-1]
module_instance.path = module_path
self.modules[module_path] = module_instance
if self.current_module is not None:
if self.current_module.path == module_instance.path:
self.current_module = module_instance
return True
def run(self, module=None):
#print("\n\n*******core.py: Beginning of self and module ********\n\n")
if not isinstance(module, termineter.module.TermineterModule) and not isinstance(self.current_module, termineter.module.TermineterModule):
raise termineter.errors.FrameworkRuntimeError('either the module or the current_module must be sent')
if module is None:
module = self.current_module
#print("\n***** core.py.230: Run Func **** \n")
if isinstance(module, termineter.module.TermineterModuleOptical) and not self._run_optical(module):
return
self.logger.info('running module: ' + module.path)
#print("\n***** core.py.234 **** \n")
try:
result = module.run()
#print("***********core.py/ result={}".format(result))
finally:
if isinstance(module, termineter.module.TermineterModuleOptical) and self.serial_connection and self.advanced_options['AUTO_CONNECT']:
self.serial_connection.stop()
#print("\n***** core.py.240: End of Run - 240 **** \n")
return result
@property
def use_colors(self):
return self.options['USE_COLOR']
@use_colors.setter
def use_colors(self, value):
self.options.set_option_value('USE_COLOR', str(value))
def get_module_logger(self, name):
"""
This returns a logger for individual modules to allow them to be
inherited from the framework and thus be named appropriately.
@type name: String
@param name: The name of the module requesting the logger
"""
return logging.getLogger('termineter.module.' + name)
def import_module(self, module_path, reload_module=False):
module = self.__package__ + '.modules.' + module_path.replace('/', '.')
try:
module = importlib.import_module(module)
if reload_module:
importlib.reload(module)
module_instance = module.Module(self)
except Exception:
self.logger.error('failed to load module: ' + module_path, exc_info=True)
raise termineter.errors.FrameworkRuntimeError('failed to load module: ' + module_path)
return module_instance
def print_exception(self, error):
message = 'Caught ' + error.__class__.__name__ + ': ' + str(error)
self.logger.error(message, exc_info=True)
self.print_error(message)
def print_error(self, message):
prefix = '[-] '
if self.options['USE_COLOR']:
prefix = termcolor.colored(prefix, 'red', attrs=('bold',))
self.stdout.write(prefix + (os.linesep + prefix).join(message.split(os.linesep)) + os.linesep)
self.stdout.flush()
def print_good(self, message):
prefix = '[+] '
if self.options['USE_COLOR']:
prefix = termcolor.colored(prefix, 'green', attrs=('bold',))
self.stdout.write(prefix + (os.linesep + prefix).join(message.split(os.linesep)) + os.linesep)
self.stdout.flush()
def print_hexdump(self, data):
data_len = len(data)
i = 0
while i < data_len:
self.stdout.write("{0:04x} ".format(i))
for j in range(16):
if i + j < data_len:
self.stdout.write("{0:02x} ".format(data[i + j]))
else:
self.stdout.write(' ')
if j % 16 == 7:
self.stdout.write(' ')
self.stdout.write(' ')
r = ''
for j in data[i:i + 16]:
if 32 < j < 128:
r += chr(j)
else:
r += '.'
self.stdout.write(r + os.linesep)
i += 16
self.stdout.flush()
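	# Illustrative note (added, hedged): print_hexdump renders data 16 bytes
	# per row -- a 4-digit hex offset, the byte values in hex with an extra
	# gap after the 8th byte, then an ASCII column in which bytes outside the
	# printable range are shown as '.'.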
def print_line(self, message):
self.stdout.write(message + os.linesep)
self.stdout.flush()
def print_status(self, message):
prefix = '[*] '
if self.options['USE_COLOR']:
prefix = termcolor.colored(prefix, 'blue', attrs=('bold',))
self.stdout.write(prefix + (os.linesep + prefix).join(message.split(os.linesep)) + os.linesep)
self.stdout.flush()
def print_table(self, table, headers=(), line_prefix=None, tablefmt=None):
tablefmt = tablefmt or self.advanced_options['TABLE_FORMAT']
text = tabulate.tabulate(table, headers=headers, tablefmt=tablefmt)
if line_prefix:
text = '\n'.join(line_prefix + line for line in text.split('\n'))
self.print_line(text)
def print_warning(self, message):
prefix = '[!] '
if self.options['USE_COLOR']:
prefix = termcolor.colored(prefix, '', attrs=('bold',))
self.stdout.write(prefix + (os.linesep + prefix).join(message.split(os.linesep)) + os.linesep)
self.stdout.flush()
def is_serial_connected(self):
"""
Returns True if the serial interface is connected.
"""
#print("core.py - 338: Serial Connection: {}".format(self._serial_connected))
return self._serial_connected
def serial_disconnect(self):
"""
Closes the serial connection to the meter and disconnects from the
device.
"""
if self._serial_connected:
try:
self.serial_connection.close()
except c1218.errors.C1218IOError as error:
self.logger.error('caught C1218IOError: ' + str(error))
except serial.serialutil.SerialException as error:
self.logger.error('caught SerialException: ' + str(error))
self._serial_connected = False
self.logger.warning('the serial interface has been disconnected')
return True
def serial_get(self):
"""
Create the serial connection from the framework settings and return
it, setting the framework instance in the process.
"""
#print("core.py - 372 - serial_get")
frmwk_c1218_settings = {
'nbrpkts': self.advanced_options['C1218_MAX_PACKETS'],
'pktsize': self.advanced_options['C1218_PACKET_SIZE']
}
frmwk_serial_settings = termineter.utilities.get_default_serial_settings()
frmwk_serial_settings['baudrate'] = self.advanced_options['SERIAL_BAUD_RATE']
frmwk_serial_settings['bytesize'] = self.advanced_options['SERIAL_BYTE_SIZE']
frmwk_serial_settings['stopbits'] = self.advanced_options['SERIAL_STOP_BITS']
self.logger.info('opening serial device: ' + self.options['SERIAL_CONNECTION'])
try:
#pdb.set_trace()
self.serial_connection = c1218.connection.Connection(self.options['SERIAL_CONNECTION'], c1218_settings=frmwk_c1218_settings, serial_settings=frmwk_serial_settings, enable_cache=self.advanced_options['CACHE_TABLES'])
except Exception as error:
self.logger.error('could not open the serial device')
#raise error
pass
return self.serial_connection
def serial_connect(self):
"""
Connect to the serial device.
"""
#print("core.py - 396 - serial_connect")
self.serial_get()
try:
self.serial_connection.start()
#print("Hi - you are in try")
except c1218.errors.C1218IOError as error:
#print("Hi - you got except")
self.logger.error('serial connection has been opened but the meter is unresponsive')
raise error
self._serial_connected = True
#print("Hi - you missed except")
return True
def serial_login(self):
#print("\n\n****SERIAL LOGIN***\n\n\n")
"""
Attempt to log into the meter over the C12.18 protocol. Returns True on success, False on a failure. This can be
called by modules in order to login with a username and password configured within the framework instance.
"""
#print("core.py - 399: Serial Connection: {}".format(self._serial_connected))
if not self._serial_connected:
raise termineter.errors.FrameworkRuntimeError('the serial interface is disconnected')
username = self.options['USERNAME']
user_id = self.options['USER_ID']
password = self.options['PASSWORD']
if self.options['PASSWORD_HEX']:
hex_regex = re.compile('^([0-9a-fA-F]{2})+$')
if hex_regex.match(password) is None:
self.print_error('Invalid characters in password')
raise termineter.errors.FrameworkConfigurationError('invalid characters in password')
password = binascii.a2b_hex(password)
'''
Original Code
'''
#if len(username) > 10:
#self.print_error('Username cannot be longer than 10 characters')
#raise termineter.errors.FrameworkConfigurationError('username cannot be longer than 10 characters')
if not (0 <= user_id <= 0xffff):
self.print_error('User id must be between 0 and 0xffff')
raise termineter.errors.FrameworkConfigurationError('user id must be between 0 and 0xffff')
if len(password) > 20:
self.print_error('Password cannot be longer than 20 characters')
raise termineter.errors.FrameworkConfigurationError('password cannot be longer than 20 characters')
if not self.serial_connection.login(username, user_id, password):
return False
return True
def test_serial_connection(self):
"""
Connect to the serial device and then verifies that the meter is
responding. Once the serial device is open, this function attempts
to retrieve the contents of table #0 (GEN_CONFIG_TBL) to configure
the endianess it will use. Returns True on success.
"""
#print("core.py- 448: Before Serial Connection")
self.serial_connect()
#pdb.set_trace()
#print("\n\n\n****core.py- 449: Test Serial Connection ****\n\n\n")
username = self.options['USERNAME']
user_id = self.options['USER_ID']
#print("core.py-454-Username: {} and User_id {}".format(username,user_id))
'''
Original Code
if len(username) > 10:
self.logger.error('username cannot be longer than 10 characters')
raise termineter.errors.FrameworkConfigurationError('username cannot be longer than 10 characters')
'''
if not (0 <= user_id <= 0xffff):
self.logger.error('user id must be between 0 and 0xffff')
raise termineter.errors.FrameworkConfigurationError('user id must be between 0 and 0xffff')
try:
#print("core.py-445-Username: {} and User_id {}".format(username,user_id))
#print("core.py - 446 - self.serial_connection.login(username, user_id): ".format(self.serial_connection.login(username, user_id)))
if not self.serial_connection.login(username, user_id):
self.logger.error('the meter has rejected the username and user id')
raise termineter.errors.FrameworkConfigurationError('the meter has rejected the username and user id')
except c1218.errors.C1218IOError as error:
self.logger.error('serial connection has been opened but the meter is unresponsive')
raise error
try:
#print("\n\n\n*****BEFORE serial_connection****")
general_config_table = self.serial_connection.get_table_data(0)
except c1218.errors.C1218ReadTableError as error:
self.logger.error('serial connection as been opened but the general configuration table (table #0) could not be read')
raise error
if general_config_table[0] & 1:
self.logger.info('setting the connection to use big-endian for C12.19 data')
self.serial_connection.c1219_endian = '>'
else:
self.logger.info('setting the connection to use little-endian for C12.19 data')
self.serial_connection.c1219_endian = '<'
try:
self.serial_connection.stop()
except c1218.errors.C1218IOError as error:
self.logger.error('serial connection has been opened but the meter is unresponsive')
raise error
self.logger.warning('the serial interface has been connected')
#print("\n\n\n***the serial interface has been connected****\n\n\n")
return True
| 44.863095
| 218
| 0.745699
|
25624f2c81c8cd43f0926551bcc2493a90f111d0
| 6,084
|
py
|
Python
|
kws_streaming/models/utils_test.py
|
ojInc/google-research
|
9929c88b664800a25b8716c22068dd77d80bd5ee
|
[
"Apache-2.0"
] | 1
|
2020-10-25T04:07:57.000Z
|
2020-10-25T04:07:57.000Z
|
kws_streaming/models/utils_test.py
|
ojInc/google-research
|
9929c88b664800a25b8716c22068dd77d80bd5ee
|
[
"Apache-2.0"
] | null | null | null |
kws_streaming/models/utils_test.py
|
ojInc/google-research
|
9929c88b664800a25b8716c22068dd77d80bd5ee
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.models.utils."""
from absl import flags
from absl.testing import parameterized
from kws_streaming.layers import modes
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
from kws_streaming.models import model_params
from kws_streaming.models import models
from kws_streaming.models import utils
from kws_streaming.train import model_flags
tf1.disable_eager_execution()
FLAGS = flags.FLAGS
# two models are tested with all cobinations of speech frontend
# and all models are tested with one frontend
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
def _testTFLite(self,
preprocess='raw',
feature_type='mfcc_op',
model_name='svdf'):
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
params.clip_duration_ms = 100 # make it shorter for testing
# set parameters to test
params.preprocess = preprocess
params.feature_type = feature_type
params = model_flags.update_flags(params)
# create model
model = models.MODELS[params.model_name](params)
# convert TF non streaming model to TFLite non streaming inference
self.assertTrue(
utils.model_to_tflite(self.sess, model, params,
modes.Modes.NON_STREAM_INFERENCE))
def setUp(self):
super(UtilsTest, self).setUp()
tf1.reset_default_graph()
config = tf1.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf1.Session(config=config)
tf1.keras.backend.set_session(self.sess)
@parameterized.named_parameters([
{
'testcase_name': 'raw with mfcc_tf',
'preprocess': 'raw',
'feature_type': 'mfcc_tf'
},
{
'testcase_name': 'raw with mfcc_op',
'preprocess': 'raw',
'feature_type': 'mfcc_op'
},
{
'testcase_name': 'mfcc',
'preprocess': 'mfcc',
'feature_type': 'mfcc_op'
}, # feature_type will be ignored
{
'testcase_name': 'micro',
'preprocess': 'micro',
'feature_type': 'mfcc_op'
}, # feature_type will be ignored
])
def testPreprocessNonStreamInferenceTFandTFLite(self,
preprocess,
feature_type,
model_name='svdf'):
# Validate that model with different preprocessing
# can be converted to non stream inference mode.
self._testTFLite(preprocess, feature_type, model_name)
@parameterized.named_parameters([
{
'testcase_name': 'raw with mfcc_tf',
'preprocess': 'raw',
'feature_type': 'mfcc_tf'
},
{
'testcase_name': 'raw with mfcc_op',
'preprocess': 'raw',
'feature_type': 'mfcc_op'
},
{
'testcase_name': 'mfcc',
'preprocess': 'mfcc',
'feature_type': 'mfcc_op'
}, # feature_type will be ignored
{
'testcase_name': 'micro',
'preprocess': 'micro',
'feature_type': 'mfcc_op'
}, # feature_type will be ignored
])
def testPreprocessStreamInferenceModeTFandTFLite(self,
preprocess,
feature_type,
model_name='gru'):
# Validate that model with different preprocessing
# can be converted to stream inference mode with TF and TFLite.
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
# set parameters to test
params.preprocess = preprocess
params.feature_type = feature_type
params = model_flags.update_flags(params)
# create model
model = models.MODELS[params.model_name](params)
# convert TF non streaming model to TFLite streaming inference
# with external states
self.assertTrue(utils.model_to_tflite(
self.sess, model, params, modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE))
# convert TF non streaming model to TF streaming with external states
self.assertTrue(utils.to_streaming_inference(
model, params, modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE))
# convert TF non streaming model to TF streaming with internal states
self.assertTrue(utils.to_streaming_inference(
model, params, modes.Modes.STREAM_INTERNAL_STATE_INFERENCE))
def test_model_to_saved(self, model_name='dnn'):
"""SavedModel supports both stateless and stateful graphs."""
params = model_params.HOTWORD_MODEL_PARAMS[model_name]
params = model_flags.update_flags(params)
# create model
model = models.MODELS[params.model_name](params)
utils.model_to_saved(model, params, FLAGS.test_tmpdir)
def testNextPowerOfTwo(self):
self.assertEqual(utils.next_power_of_two(11), 16)
@parameterized.parameters('att_mh_rnn', 'att_rnn', 'dnn', 'ds_cnn', 'cnn',
'tc_resnet', 'crnn', 'gru', 'lstm', 'svdf',
'mobilenet', 'mobilenet_v2', 'xception',
'inception', 'inception_resnet', 'ds_tc_resnet')
def testNonStreamInferenceTFandTFLite(self, model_name):
# Validate that all models with selected preprocessing
# can be converted to non stream inference mode.
self._testTFLite(model_name=model_name)
if __name__ == '__main__':
tf.test.main()
| 36.214286
| 79
| 0.649244
|
9f08c6feaf839c3954a367eec3687cffcefb80f1
| 1,526
|
py
|
Python
|
test/functional/disablewallet.py
|
BitcoinBridgeOffical/Bitcoin-Bridge
|
d800625c9b4b6fe1ddc0f0615a854e43463b82ad
|
[
"MIT"
] | 1
|
2018-01-13T18:02:47.000Z
|
2018-01-13T18:02:47.000Z
|
test/functional/disablewallet.py
|
BitcoinBridgeOffical/Bitcoin-Bridge
|
d800625c9b4b6fe1ddc0f0615a854e43463b82ad
|
[
"MIT"
] | null | null | null |
test/functional/disablewallet.py
|
BitcoinBridgeOffical/Bitcoin-Bridge
|
d800625c9b4b6fe1ddc0f0615a854e43463b82ad
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test a node with the -disablewallet option.
- Test that validateaddress RPC works when running with -disablewallet
- Test that it is not possible to mine to an invalid address.
"""
from test_framework.test_framework import BitcoinBridgeTestFramework
from test_framework.util import *
class DisableWalletTest (BitcoinBridgeTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-disablewallet"]]
def run_test (self):
# Make sure wallet is really disabled
assert_raises_jsonrpc(-32601, 'Method not found', self.nodes[0].getwalletinfo)
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet. Generating to a valid address should succeed
# but generating to an invalid address will fail.
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert_raises_jsonrpc(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
if __name__ == '__main__':
DisableWalletTest ().main ()
| 43.6
| 126
| 0.731324
|
bce2c57196cb60c3d9b93df390eeb480d205fe0b
| 779
|
py
|
Python
|
webdriver/clickSpeedTest.py
|
DarkMaguz/CP-Python
|
aa1113d6c70c8f2c32fd29cf49bb39c41e819fae
|
[
"MIT"
] | 1
|
2019-02-23T13:50:46.000Z
|
2019-02-23T13:50:46.000Z
|
webdriver/clickSpeedTest.py
|
DarkMaguz/CP-Python
|
aa1113d6c70c8f2c32fd29cf49bb39c41e819fae
|
[
"MIT"
] | null | null | null |
webdriver/clickSpeedTest.py
|
DarkMaguz/CP-Python
|
aa1113d6c70c8f2c32fd29cf49bb39c41e819fae
|
[
"MIT"
] | 1
|
2019-03-08T14:40:47.000Z
|
2019-03-08T14:40:47.000Z
|
from coockieClickerUtils import *
# import os
# import time
#
# from selenium import webdriver
# from selenium.webdriver.common.by import By
#
# os.environ['PATH'] += os.pathsep + 'bin/'
# driver = webdriver.Chrome()
driver.get("https://clickspeedtest.com/5-seconds.html")
driver.find_element(By.ID, 'ez-accept-all').click()
time.sleep(1)
driver.execute_script('''
document.getElementById('clicker').setAttribute('target', '_blank');
''')
print('Start clicking')
id = driver.find_element(By.ID, 'clicker')
while True: # click for ever
try:
id.click()
except Exception as ex: # until it breaks
print('Time is over')
break
time.sleep(1) # results are slow
result = driver.find_element(By.CSS_SELECTOR, '.times')
print(f'Result: {result.text}')
driver.close()
| 22.911765
| 68
| 0.709884
|
33d20c1775dbde862204a516a8313cd41a24ccf9
| 5,605
|
py
|
Python
|
userbot/modules/chat_info.py
|
HitaloSama/PaperplaneMinimal
|
5cf45ca4ae90ad4a52ee6d6dc679053a69fbed32
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 9
|
2020-06-11T18:47:48.000Z
|
2021-11-08T18:05:37.000Z
|
userbot/modules/chat_info.py
|
HitaloSama/PaperplaneMinimal
|
5cf45ca4ae90ad4a52ee6d6dc679053a69fbed32
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3
|
2020-08-28T18:37:46.000Z
|
2020-09-25T15:32:29.000Z
|
userbot/modules/chat_info.py
|
HitaloSama/PaperplaneMinimal
|
5cf45ca4ae90ad4a52ee6d6dc679053a69fbed32
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 8
|
2020-06-14T02:08:41.000Z
|
2020-12-15T13:25:15.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
from typing import Union
from kantex.md import (Bold, Link, SubSection, SubSubSection,
KeyValueItem, Section, Code)
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import Channel, User, ChatInviteExported
from telethon.tl.types.messages import ChatFull
from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import (parse_arguments, list_admins, inline_mention,
list_bots, get_chat_from_event)
class FormattedBase:
def __add__(self, other: Union[str, 'FormattedBase']) -> str:
return str(self) + str(other)
def __repr__(self) -> str:
return f'{type(self).__name__}({self.text})'
def __str__(self) -> str:
return self.text
class String(FormattedBase):
def __init__(self, text: Union[str, int]) -> None:
self.text = str(text)
class TGDoc:
def __init__(self, *args: Union[String, 'Section']) -> None:
self.sections = args
def __str__(self) -> str:
return '\n\n'.join([str(section) for section in self.sections])
@register(outgoing=True, pattern=r"^\.c(?:hat)?(\s+[\S\s]+|$)")
async def chat_info(e):
params = e.pattern_match.group(1) or ""
args, chat = parse_arguments(
params, ['id', 'general', 'admins', 'bots', 'all'])
args['chat'] = chat
if isinstance(e.chat, User):
from .user_info import fetch_info as fetch_user_info
replied_user = await e.client(GetFullUserRequest(e.chat.id))
response = await fetch_user_info(replied_user, **args)
else:
full_chat: ChatFull = await get_chat_from_event(e, **args)
await e.edit("**Fetching chat info...**")
response = await fetch_info(e, full_chat, **args)
await e.edit(str(response))
async def fetch_info(event, full_chat, **kwargs):
chat = full_chat.chats[0]
show_all = kwargs.get('all', False)
id_only = kwargs.get('id', False)
show_general = kwargs.get('general', True)
show_admins = kwargs.get('admins', False)
show_bots = kwargs.get('bots', False)
is_private = False
if isinstance(chat, Channel) and chat.username:
name = chat.title if chat.title else chat.username
title = Link(name, f"https://t.me/{chat.username}")
elif chat.title:
is_private = True
title = Bold(chat.title)
else:
is_private = True
title = Bold(f"Chat {chat.id}")
if show_all:
show_general = True
show_admins = True
show_bots = True
elif id_only:
return KeyValueItem(title, Code(str(chat.id)))
admin_list = await list_admins(event)
if show_general:
exported_invite = full_chat.full_chat.exported_invite
invite_link = exported_invite.link if isinstance(
exported_invite, ChatInviteExported) else None
admin_count = full_chat.full_chat.admins_count or len(admin_list)
general = SubSection(Bold("general"),
KeyValueItem("id",
Code(str(chat.id))),
KeyValueItem("title",
Code(chat.title)),
KeyValueItem("private",
Code(str(is_private))),
KeyValueItem("invite link",
Link(invite_link.split('/')[-1],
invite_link)) if invite_link else None,
SubSubSection("participants",
KeyValueItem("admins",
Code(str(admin_count))),
KeyValueItem("online",
Code(str(full_chat.full_chat.online_count))),
KeyValueItem("total",
Code(str(full_chat.full_chat.participants_count)))))
else:
general = None
if show_admins:
admins = SubSection(Bold("admins"))
for admin in admin_list:
admins.items.append(String(inline_mention(admin)))
if not admins:
admins.items.append(String("No admins"))
if show_bots:
bots_list = await list_bots(event)
bots = SubSection(Bold("bots"))
for bot in bots_list:
bots.items.append(String(inline_mention(bot)))
if not bots:
bots.items.append(String("No bots"))
return TGDoc(Section(title,
general if show_general else None,
admins if show_admins else None,
bots if show_bots else None))
CMD_HELP.update({"chat info": ['Chat Info',
" - `chat [options]`: Returns stats for the current chat\n\n"
"**Options:**\n\n"
"`.id:` Return only the ID.\n"
"`.general`: Show general information related to the chat.\n"
"`.admins`: Show chat admins (does not mention them).\n"
"`.all`: Show everything.\n\n"
"**All commands can be used with** `.`"]})
| 36.875
| 108
| 0.551115
|
82c4ac6067a013e715ebe38452a97c9a40478b08
| 206
|
py
|
Python
|
dirutility.py
|
rlowrance/re-avm
|
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
|
[
"BSD-3-Clause"
] | 25
|
2016-10-07T05:08:15.000Z
|
2022-03-22T01:36:51.000Z
|
dirutility.py
|
rlowrance/re-avm
|
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
|
[
"BSD-3-Clause"
] | 1
|
2021-01-14T22:27:23.000Z
|
2021-01-14T22:27:23.000Z
|
dirutility.py
|
rlowrance/re-avm
|
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
|
[
"BSD-3-Clause"
] | 8
|
2016-08-12T07:26:29.000Z
|
2021-07-05T01:22:42.000Z
|
'''utilities for managing directories'''
import os
def assure_exists(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path) # make all intermediate directories
return dir_path
| 22.888889
| 66
| 0.728155
|
2222a7cb37d4cf28316c4987311199244a6cd379
| 1,358
|
py
|
Python
|
fix_settings.py
|
prehensilecode/votca_helper
|
ebbe61aff6df1c5ca36a70ddc390bd150b57c639
|
[
"Unlicense"
] | null | null | null |
fix_settings.py
|
prehensilecode/votca_helper
|
ebbe61aff6df1c5ca36a70ddc390bd150b57c639
|
[
"Unlicense"
] | null | null | null |
fix_settings.py
|
prehensilecode/votca_helper
|
ebbe61aff6df1c5ca36a70ddc390bd150b57c639
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import sys
import os
from pathlib import Path
import xml.dom.minidom
### README
### * Save this file as fix_settings.py in the same directory as your job script
### * Make it executable: chmod +x fix_settings.py
def generate_hostfile(pe_hostfile):
'''Convert Univa Grid Engine hostfile to Open MPI hostfile'''
ompi_hostfile = Path('./hostfile.{}'.format(os.getenv('JOB_ID'))).resolve()
with open(pe_hostfile, 'r') as f, open(ompi_hostfile, 'w') as g:
for l in f:
hostname, nslots = l.strip().split()[:2]
g.write('{} slots={} max-slots={}\n'.format(hostname, nslots, nslots))
return ompi_hostfile
def fix_settings_xml(ompi_hostfile):
'''Fix VOTCA CSG settings.xml file'''
settings = xml.dom.minidom.parse('settings.xml')
### read environment variable MPI_RUN for full path to mpirun command
settings.getElementsByTagName('command')[0].childNodes[0].data = '{} -x LD_LIBRARY_PATH -x BASH_ENV --hostfile {} gmx_mpi mdrun'.format(os.getenv('MPI_RUN'), ompi_hostfile)
### XXX caution - this overwrites the settings.xml file
with open('settings.xml', 'w') as f:
f.write(settings.toxml())
if __name__ == '__main__':
pe_hostfile = Path(os.getenv('PE_HOSTFILE'))
ompi_hostfile = generate_hostfile(pe_hostfile)
fix_settings_xml(ompi_hostfile)
| 34.820513
| 176
| 0.688513
|
84df66448edbd29765d091a8fc64fbab77432037
| 2,900
|
py
|
Python
|
stdplugins/_help.py
|
andromechanic/BotHub
|
18853e3a5f2a1ecdc93f9d6173411baf89dd8f00
|
[
"Apache-2.0"
] | 25
|
2019-10-26T08:01:11.000Z
|
2022-02-21T08:18:00.000Z
|
stdplugins/_help.py
|
andromechanic/BotHub
|
18853e3a5f2a1ecdc93f9d6173411baf89dd8f00
|
[
"Apache-2.0"
] | 2
|
2020-05-11T08:42:33.000Z
|
2020-05-21T02:30:09.000Z
|
stdplugins/_help.py
|
andromechanic/BotHub
|
18853e3a5f2a1ecdc93f9d6173411baf89dd8f00
|
[
"Apache-2.0"
] | 291
|
2019-11-06T04:25:13.000Z
|
2021-10-03T15:56:23.000Z
|
"""COMMAND : .helpme, .dc, .exec ls stdplugins, .stdplugins, .syntax"""
import sys
from telethon import events, functions, __version__
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="helpme ?(.*)", allow_sudo=False)) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
splugin_name = event.pattern_match.group(1)
if splugin_name in borg._plugins:
s_helpme_string = borg._plugins[splugin_name].__doc__
else:
s_helpme_string = "****:"
helpme_string = """@Bot_Hub_Official™️ ( **Custom Built By** @Three_Cube_TeKnoways_bot ) \n**Verified Account**: ✅\n**Official \n**NOTICE**: **COMMANDS** are CASE **sensitive**\n**DESCRIPTION**: https://telegra.ph/command-list-for-BotHub-Userbot-11-08\n
""".format(
sys.version,
__version__
)
tgbotusername = Config.TG_BOT_USER_NAME_BF_HER # pylint:disable=E0602
if tgbotusername is not None:
results = await borg.inline_query( # pylint:disable=E0602
tgbotusername,
helpme_string + "\n\n" + s_helpme_string
)
await results[0].click(
event.chat_id,
reply_to=event.reply_to_msg_id,
hide_via=True
)
await event.delete()
else:
await event.reply(helpme_string + "\n\n" + s_helpme_string)
await event.delete()
@borg.on(admin_cmd(pattern="dc")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
result = await borg(functions.help.GetNearestDcRequest()) # pylint:disable=E0602
await event.edit(f"**Country** : `{result.country}`\n"
f"**Nearest DC** : `{result.nearest_dc}`\n"
f"**This DC** : `{result.this_dc}`")
@borg.on(admin_cmd(pattern="config")) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
result = await borg(functions.help.GetConfigRequest()) # pylint:disable=E0602
result = result.stringify()
logger.info(result) # pylint:disable=E0602
await event.edit("""Telethon UserBot powered by @Bot_Hub_Official""")
@borg.on(admin_cmd(pattern="syntax ?(.*)" ))
async def _(event):
if event.fwd_from:
return
plugin_name = event.pattern_match.group(1)
if plugin_name in borg._plugins:
helpme_string = borg._plugins[plugin_name].__doc__
unload_string = f"Use `.unload {plugin_name}` to remove this plugin.\n © @Three_Cube_TeKnoways_Bot"
if helpme_string:
plugin_syntax = f"Syntax for plugin **{plugin_name}**:\n\n{helpme_string}\n{unload_string}"
else:
plugin_syntax = f"No DOCSTRING has been setup for {plugin_name} plugin."
else:
plugin_syntax = "Enter valid **Plugin** name.\nDo `.exec ls stdplugins` or `.helpme` or `.stdplugins` to get list of valid plugin names."
await event.edit(plugin_syntax)
| 39.189189
| 257
| 0.64931
|
d2babe1e89a20554eac46fe4704f4b54f9ec3e14
| 677
|
py
|
Python
|
api/index.py
|
add830830/tg-serverless
|
6955387d8b8aece6c6e08766b11eeac6c5d7f03d
|
[
"MIT"
] | null | null | null |
api/index.py
|
add830830/tg-serverless
|
6955387d8b8aece6c6e08766b11eeac6c5d7f03d
|
[
"MIT"
] | null | null | null |
api/index.py
|
add830830/tg-serverless
|
6955387d8b8aece6c6e08766b11eeac6c5d7f03d
|
[
"MIT"
] | null | null | null |
from jinja2 import Environment, FileSystemLoader
from sanic import Sanic, response
env = Environment(loader=FileSystemLoader("api/templates"))
app = Sanic(__name__)
@app.route("/")
async def index(request):
title = "tg-serverless"
description = "A Telegram bot Python app use Vercel as Serverless Function!"
color = "#2962ff"
repo = "https://github.com/illvart/tg-serverless"
template = env.get_template("app.html")
content = template.render(title=title, description=description, color=color, repo=repo)
return response.html(content, status=200)
if __name__ == "__main__":
app.run(debug=True, auto_reload=True, host="0.0.0.0", port=3000)
| 29.434783
| 91
| 0.720827
|
50b430bb45d4ace632e4d04ead7aa7002077f144
| 17,590
|
py
|
Python
|
qlib/config.py
|
lpd6375/qlib
|
3a911bc09ba5136cd7c61c2c8dcca8a63339e738
|
[
"MIT"
] | null | null | null |
qlib/config.py
|
lpd6375/qlib
|
3a911bc09ba5136cd7c61c2c8dcca8a63339e738
|
[
"MIT"
] | null | null | null |
qlib/config.py
|
lpd6375/qlib
|
3a911bc09ba5136cd7c61c2c8dcca8a63339e738
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
About the configs
=================
The config will be based on _default_config.
Two modes are supported
- client
- server
"""
from __future__ import annotations
import os
import re
import copy
import logging
import platform
import multiprocessing
from pathlib import Path
from typing import Callable, Optional, Union
from typing import TYPE_CHECKING
from qlib.constant import REG_CN, REG_US, REG_TW
if TYPE_CHECKING:
from qlib.utils.time import Freq
class Config:
def __init__(self, default_conf):
self.__dict__["_default_config"] = copy.deepcopy(default_conf) # avoiding conflicts with __getattr__
self.reset()
def __getitem__(self, key):
return self.__dict__["_config"][key]
def __getattr__(self, attr):
if attr in self.__dict__["_config"]:
return self.__dict__["_config"][attr]
raise AttributeError(f"No such `{attr}` in self._config")
def get(self, key, default=None):
return self.__dict__["_config"].get(key, default)
def __setitem__(self, key, value):
self.__dict__["_config"][key] = value
def __setattr__(self, attr, value):
self.__dict__["_config"][attr] = value
def __contains__(self, item):
return item in self.__dict__["_config"]
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return str(self.__dict__["_config"])
def __repr__(self):
return str(self.__dict__["_config"])
def reset(self):
self.__dict__["_config"] = copy.deepcopy(self._default_config)
def update(self, *args, **kwargs):
self.__dict__["_config"].update(*args, **kwargs)
def set_conf_from_C(self, config_c):
self.update(**config_c.__dict__["_config"])
# pickle.dump protocol version: https://docs.python.org/3/library/pickle.html#data-stream-format
PROTOCOL_VERSION = 4
NUM_USABLE_CPU = max(multiprocessing.cpu_count() - 2, 1)
DISK_DATASET_CACHE = "DiskDatasetCache"
SIMPLE_DATASET_CACHE = "SimpleDatasetCache"
DISK_EXPRESSION_CACHE = "DiskExpressionCache"
DEPENDENCY_REDIS_CACHE = (DISK_DATASET_CACHE, DISK_EXPRESSION_CACHE)
_default_config = {
# data provider config
"calendar_provider": "LocalCalendarProvider",
"instrument_provider": "LocalInstrumentProvider",
"feature_provider": "LocalFeatureProvider",
"expression_provider": "LocalExpressionProvider",
"dataset_provider": "LocalDatasetProvider",
"provider": "LocalProvider",
# config it in qlib.init()
# "provider_uri" str or dict:
# # str
# "~/.qlib/stock_data/cn_data"
# # dict
# {"day": "~/.qlib/stock_data/cn_data", "1min": "~/.qlib/stock_data/cn_data_1min"}
# NOTE: provider_uri priority:
# 1. backend_config: backend_obj["kwargs"]["provider_uri"]
# 2. backend_config: backend_obj["kwargs"]["provider_uri_map"]
# 3. qlib.init: provider_uri
"provider_uri": "",
# cache
"expression_cache": None,
"dataset_cache": None,
"calendar_cache": None,
# for simple dataset cache
"local_cache_path": None,
# kernels can be a fixed value or a callable function lie `def (freq: str) -> int`
# If the kernels are arctic_kernels, `min(NUM_USABLE_CPU, 30)` may be a good value
"kernels": NUM_USABLE_CPU,
# pickle.dump protocol version
"dump_protocol_version": PROTOCOL_VERSION,
# How many tasks belong to one process. Recommend 1 for high-frequency data and None for daily data.
"maxtasksperchild": None,
# If joblib_backend is None, use loky
"joblib_backend": "multiprocessing",
"default_disk_cache": 1, # 0:skip/1:use
"mem_cache_size_limit": 500,
"mem_cache_limit_type": "length",
# memory cache expire second, only in used 'DatasetURICache' and 'client D.calendar'
# default 1 hour
"mem_cache_expire": 60 * 60,
# cache dir name
"dataset_cache_dir_name": "dataset_cache",
"features_cache_dir_name": "features_cache",
# redis
# in order to use cache
"redis_host": "127.0.0.1",
"redis_port": 6379,
"redis_task_db": 1,
# This value can be reset via qlib.init
"logging_level": logging.INFO,
# Global configuration of qlib log
# logging_level can control the logging level more finely
"logging_config": {
"version": 1,
"formatters": {
"logger_format": {
"format": "[%(process)s:%(threadName)s](%(asctime)s) %(levelname)s - %(name)s - [%(filename)s:%(lineno)d] - %(message)s"
}
},
"filters": {
"field_not_found": {
"()": "qlib.log.LogFilter",
"param": [".*?WARN: data not found for.*?"],
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": logging.DEBUG,
"formatter": "logger_format",
"filters": ["field_not_found"],
}
},
"loggers": {"qlib": {"level": logging.DEBUG, "handlers": ["console"]}},
},
# Default config for experiment manager
"exp_manager": {
"class": "MLflowExpManager",
"module_path": "qlib.workflow.expm",
"kwargs": {
"uri": "file:" + str(Path(os.getcwd()).resolve() / "mlruns"),
"default_exp_name": "Experiment",
},
},
# Default config for MongoDB
"mongo": {
"task_url": "mongodb://localhost:27017/",
"task_db_name": "default_task_db",
},
# Shift minute for highfreq minite data, used in backtest
# if min_data_shift == 0, use default market time [9:30, 11:29, 1:00, 2:59]
# if min_data_shift != 0, use shifted market time [9:30, 11:29, 1:00, 2:59] - shift*minute
"min_data_shift": 0,
}
MODE_CONF = {
"server": {
# data provider config
"calendar_provider": "LocalCalendarProvider",
"instrument_provider": "LocalInstrumentProvider",
"feature_provider": "LocalFeatureProvider",
"expression_provider": "LocalExpressionProvider",
"dataset_provider": "LocalDatasetProvider",
"provider": "LocalProvider",
# config it in qlib.init()
"provider_uri": "",
# redis
"redis_host": "127.0.0.1",
"redis_port": 6379,
"redis_task_db": 1,
"kernels": NUM_USABLE_CPU,
# cache
"expression_cache": DISK_EXPRESSION_CACHE,
"dataset_cache": DISK_DATASET_CACHE,
"local_cache_path": Path("~/.cache/qlib_simple_cache").expanduser().resolve(),
"mount_path": None,
},
"client": {
# data provider config
"calendar_provider": "LocalCalendarProvider",
"instrument_provider": "LocalInstrumentProvider",
"feature_provider": "LocalFeatureProvider",
"expression_provider": "LocalExpressionProvider",
"dataset_provider": "LocalDatasetProvider",
"provider": "LocalProvider",
# config it in user's own code
"provider_uri": "~/.qlib/qlib_data/cn_data",
# cache
# Using parameter 'remote' to announce the client is using server_cache, and the writing access will be disabled.
# Disable cache by default. Avoid introduce advanced features for beginners
"expression_cache": None,
"dataset_cache": None,
# SimpleDatasetCache directory
"local_cache_path": Path("~/.cache/qlib_simple_cache").expanduser().resolve(),
"calendar_cache": None,
# client config
"kernels": NUM_USABLE_CPU,
"mount_path": None,
"auto_mount": False, # The nfs is already mounted on our server[auto_mount: False].
# The nfs should be auto-mounted by qlib on other
# serversS(such as PAI) [auto_mount:True]
"timeout": 100,
"logging_level": logging.INFO,
"region": REG_CN,
# custom operator
# each element of custom_ops should be Type[ExpressionOps] or dict
# if element of custom_ops is Type[ExpressionOps], it represents the custom operator class
# if element of custom_ops is dict, it represents the config of custom operator and should include `class` and `module_path` keys.
"custom_ops": [],
},
}
HIGH_FREQ_CONFIG = {
"provider_uri": "~/.qlib/qlib_data/cn_data_1min",
"dataset_cache": None,
"expression_cache": "DiskExpressionCache",
"region": REG_CN,
}
_default_region_config = {
REG_CN: {
"trade_unit": 100,
"limit_threshold": 0.095,
"deal_price": "close",
},
REG_US: {
"trade_unit": 1,
"limit_threshold": None,
"deal_price": "close",
},
REG_TW: {
"trade_unit": 1000,
"limit_threshold": 0.1,
"deal_price": "close",
},
}
class QlibConfig(Config):
# URI_TYPE
LOCAL_URI = "local"
NFS_URI = "nfs"
DEFAULT_FREQ = "__DEFAULT_FREQ"
def __init__(self, default_conf):
super().__init__(default_conf)
self._registered = False
class DataPathManager:
"""
Motivation:
- get the right path (e.g. data uri) for accessing data based on given information(e.g. provider_uri, mount_path and frequency)
- some helper functions to process uri.
"""
def __init__(self, provider_uri: Union[str, Path, dict], mount_path: Union[str, Path, dict]):
"""
The relation of `provider_uri` and `mount_path`
- `mount_path` is used only if provider_uri is an NFS path
- otherwise, provider_uri will be used for accessing data
"""
self.provider_uri = provider_uri
self.mount_path = mount_path
@staticmethod
def format_provider_uri(provider_uri: Union[str, dict, Path]) -> dict:
if provider_uri is None:
raise ValueError("provider_uri cannot be None")
if isinstance(provider_uri, (str, dict, Path)):
if not isinstance(provider_uri, dict):
provider_uri = {QlibConfig.DEFAULT_FREQ: provider_uri}
else:
raise TypeError(f"provider_uri does not support {type(provider_uri)}")
for freq, _uri in provider_uri.items():
if QlibConfig.DataPathManager.get_uri_type(_uri) == QlibConfig.LOCAL_URI:
provider_uri[freq] = str(Path(_uri).expanduser().resolve())
return provider_uri
@staticmethod
def get_uri_type(uri: Union[str, Path]):
uri = uri if isinstance(uri, str) else str(uri.expanduser().resolve())
is_win = re.match("^[a-zA-Z]:.*", uri) is not None # such as 'C:\\data', 'D:'
# such as 'host:/data/' (User may define short hostname by themselves or use localhost)
is_nfs_or_win = re.match("^[^/]+:.+", uri) is not None
if is_nfs_or_win and not is_win:
return QlibConfig.NFS_URI
else:
return QlibConfig.LOCAL_URI
def get_data_uri(self, freq: Optional[Union[str, Freq]] = None) -> Path:
"""
please refer DataPathManager's __init__ and class doc
"""
if freq is not None:
freq = str(freq) # converting Freq to string
if freq is None or freq not in self.provider_uri:
freq = QlibConfig.DEFAULT_FREQ
_provider_uri = self.provider_uri[freq]
if self.get_uri_type(_provider_uri) == QlibConfig.LOCAL_URI:
return Path(_provider_uri)
elif self.get_uri_type(_provider_uri) == QlibConfig.NFS_URI:
if "win" in platform.system().lower():
# windows, mount_path is the drive
_path = str(self.mount_path[freq])
return Path(f"{_path}:\\") if ":" not in _path else Path(_path)
return Path(self.mount_path[freq])
else:
raise NotImplementedError(f"This type of uri is not supported")
def set_mode(self, mode):
# raise KeyError
self.update(MODE_CONF[mode])
# TODO: update region based on kwargs
def set_region(self, region):
# raise KeyError
self.update(_default_region_config[region])
@staticmethod
def is_depend_redis(cache_name: str):
return cache_name in DEPENDENCY_REDIS_CACHE
@property
def dpm(self):
return self.DataPathManager(self["provider_uri"], self["mount_path"])
def resolve_path(self):
# resolve path
_mount_path = self["mount_path"]
_provider_uri = self.DataPathManager.format_provider_uri(self["provider_uri"])
if not isinstance(_mount_path, dict):
_mount_path = {_freq: _mount_path for _freq in _provider_uri.keys()}
# check provider_uri and mount_path
_miss_freq = set(_provider_uri.keys()) - set(_mount_path.keys())
assert len(_miss_freq) == 0, f"mount_path is missing freq: {_miss_freq}"
# resolve
for _freq in _provider_uri.keys():
# mount_path
_mount_path[_freq] = (
_mount_path[_freq] if _mount_path[_freq] is None else str(Path(_mount_path[_freq]).expanduser())
)
self["provider_uri"] = _provider_uri
self["mount_path"] = _mount_path
def set(self, default_conf: str = "client", **kwargs):
"""
configure qlib based on the input parameters
The configuration will act like a dictionary.
Normally, it literally is replaced the value according to the keys.
However, sometimes it is hard for users to set the config when the configuration is nested and complicated
So this API provides some special parameters for users to set the keys in a more convenient way.
- region: REG_CN, REG_US
- several region-related config will be changed
Parameters
----------
default_conf : str
the default config template chosen by user: "server", "client"
"""
from .utils import set_log_with_config, get_module_logger, can_use_cache # pylint: disable=C0415
self.reset()
_logging_config = kwargs.get("logging_config", self.logging_config)
# set global config
if _logging_config:
set_log_with_config(_logging_config)
# FIXME: this logger ignored the level in config
logger = get_module_logger("Initialization", level=logging.INFO)
logger.info(f"default_conf: {default_conf}.")
self.set_mode(default_conf)
self.set_region(kwargs.get("region", self["region"] if "region" in self else REG_CN))
for k, v in kwargs.items():
if k not in self:
logger.warning("Unrecognized config %s" % k)
self[k] = v
self.resolve_path()
if not (self["expression_cache"] is None and self["dataset_cache"] is None):
# check redis
if not can_use_cache():
log_str = ""
# check expression cache
if self.is_depend_redis(self["expression_cache"]):
log_str += self["expression_cache"]
self["expression_cache"] = None
# check dataset cache
if self.is_depend_redis(self["dataset_cache"]):
log_str += f" and {self['dataset_cache']}" if log_str else self["dataset_cache"]
self["dataset_cache"] = None
if log_str:
logger.warning(
f"redis connection failed(host={self['redis_host']} port={self['redis_port']}), "
f"{log_str} will not be used!"
)
def register(self):
from .utils import init_instance_by_config # pylint: disable=C0415
from .data.ops import register_all_ops # pylint: disable=C0415
from .data.data import register_all_wrappers # pylint: disable=C0415
from .workflow import R, QlibRecorder # pylint: disable=C0415
from .workflow.utils import experiment_exit_handler # pylint: disable=C0415
register_all_ops(self)
register_all_wrappers(self)
# set up QlibRecorder
exp_manager = init_instance_by_config(self["exp_manager"])
qr = QlibRecorder(exp_manager)
R.register(qr)
# clean up experiment when python program ends
experiment_exit_handler()
# Supporting user reset qlib version (useful when user want to connect to qlib server with old version)
self.reset_qlib_version()
self._registered = True
def reset_qlib_version(self):
import qlib # pylint: disable=C0415
reset_version = self.get("qlib_reset_version", None)
if reset_version is not None:
qlib.__version__ = reset_version
else:
qlib.__version__ = getattr(qlib, "__version__bak")
# Due to a bug? that converting __version__ to _QlibConfig__version__bak
# Using __version__bak instead of __version__
def get_kernels(self, freq: str):
"""get number of processors given frequency"""
if isinstance(self["kernels"], Callable):
return self["kernels"](freq)
return self["kernels"]
@property
def registered(self):
return self._registered
# global config
C = QlibConfig(_default_config)
| 36.418219
| 138
| 0.620523
|
dcbed37e387d963bef6365f7bf8074e902cdf13c
| 20,600
|
py
|
Python
|
chemprop/web/app/views.py
|
anonymous20201002/chemprop
|
3e36f6a3bb36194366feadb31be94dfc7e98fd91
|
[
"MIT"
] | 1
|
2022-02-12T06:39:32.000Z
|
2022-02-12T06:39:32.000Z
|
chemprop/web/app/views.py
|
anonymous20201002/chemprop
|
3e36f6a3bb36194366feadb31be94dfc7e98fd91
|
[
"MIT"
] | null | null | null |
chemprop/web/app/views.py
|
anonymous20201002/chemprop
|
3e36f6a3bb36194366feadb31be94dfc7e98fd91
|
[
"MIT"
] | null | null | null |
"""Defines a number of routes/views for the flask app."""
from functools import wraps
import io
import os
import sys
import shutil
from tempfile import TemporaryDirectory, NamedTemporaryFile
import time
from typing import Callable, List, Tuple
import multiprocessing as mp
import zipfile
from flask import json, jsonify, redirect, render_template, request, send_file, send_from_directory, url_for
import numpy as np
from rdkit import Chem
from werkzeug.utils import secure_filename
from chemprop.web.app import app, db
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from chemprop.args import PredictArgs, TrainArgs
from chemprop.constants import MODEL_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_header, get_smiles, validate_data
from chemprop.train import make_predictions, run_training
from chemprop.utils import create_logger, load_task_names, load_args
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
def check_not_demo(func: Callable) -> Callable:
"""
View wrapper, which will redirect request to site
homepage if app is run in DEMO mode.
:param func: A view which performs sensitive behavior.
:return: A view with behavior adjusted based on DEMO flag.
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if app.config['DEMO']:
return redirect(url_for('home'))
return func(*args, **kwargs)
return decorated_function
def progress_bar(args: TrainArgs, progress: mp.Value):
"""
Updates a progress bar displayed during training.
:param args: Arguments.
:param progress: The current progress.
"""
# no code to handle crashes in model training yet, though
current_epoch = -1
while current_epoch < args.epochs - 1:
if os.path.exists(os.path.join(args.save_dir, 'verbose.log')):
with open(os.path.join(args.save_dir, 'verbose.log'), 'r') as f:
content = f.read()
if 'Epoch ' + str(current_epoch + 1) in content:
current_epoch += 1
progress.value = (current_epoch + 1) * 100 / args.epochs
else:
pass
time.sleep(0)
def find_unused_path(path: str) -> str:
"""
Given an initial path, finds an unused path by appending different numbers to the filename.
:param path: An initial path.
:return: An unused path.
"""
if not os.path.exists(path):
return path
base_name, ext = os.path.splitext(path)
i = 2
while os.path.exists(path):
path = base_name + str(i) + ext
i += 1
return path
def name_already_exists_message(thing_being_named: str, original_name: str, new_name: str) -> str:
"""
Creates a message about a path already existing and therefore being renamed.
:param thing_being_named: The thing being renamed (ex. Data, Checkpoint).
:param original_name: The original name of the object.
:param new_name: The new name of the object.
:return: A string with a message about the changed name.
"""
return f'{thing_being_named} "{original_name} already exists. ' \
f'Saving to "{new_name}".'
def get_upload_warnings_errors(upload_item: str) -> Tuple[List[str], List[str]]:
"""
Gets any upload warnings passed along in the request.
:param upload_item: The thing being uploaded (ex. Data, Checkpoint).
:return: A tuple with a list of warning messages and a list of error messages.
"""
warnings_raw = request.args.get(f'{upload_item}_upload_warnings')
errors_raw = request.args.get(f'{upload_item}_upload_errors')
warnings = json.loads(warnings_raw) if warnings_raw is not None else None
errors = json.loads(errors_raw) if errors_raw is not None else None
return warnings, errors
def format_float(value: float, precision: int = 4) -> str:
"""
Formats a float value to a specific precision.
:param value: The float value to format.
:param precision: The number of decimal places to use.
:return: A string containing the formatted float.
"""
return f'{value:.{precision}f}'
def format_float_list(array: List[float], precision: int = 4) -> List[str]:
"""
Formats a list of float values to a specific precision.
:param array: A list of float values to format.
:param precision: The number of decimal places to use.
:return: A list of strings containing the formatted floats.
"""
return [format_float(f, precision) for f in array]
@app.route('/receiver', methods=['POST'])
@check_not_demo
def receiver():
"""Receiver monitoring the progress of training."""
return jsonify(progress=PROGRESS.value, training=TRAINING)
@app.route('/')
def home():
"""Renders the home page."""
return render_template('home.html', users=db.get_all_users())
@app.route('/create_user', methods=['GET', 'POST'])
@check_not_demo
def create_user():
"""
If a POST request is made, creates a new user.
Renders the create_user page.
"""
if request.method == 'GET':
return render_template('create_user.html', users=db.get_all_users())
new_name = request.form['newUserName']
if new_name != None:
db.insert_user(new_name)
return redirect(url_for('create_user'))
def render_train(**kwargs):
"""Renders the train page with specified kwargs."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('train.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/train', methods=['GET', 'POST'])
@check_not_demo
def train():
"""Renders the train page and performs training if request method is POST."""
global PROGRESS, TRAINING
warnings, errors = [], []
if request.method == 'GET':
return render_train()
# Get arguments
data_name, epochs, ensemble_size, checkpoint_name = \
request.form['dataName'], int(request.form['epochs']), \
int(request.form['ensembleSize']), request.form['checkpointName']
gpu = request.form.get('gpu')
data_path = os.path.join(app.config['DATA_FOLDER'], f'{data_name}.csv')
dataset_type = request.form.get('datasetType', 'regression')
# Create and modify args
args = TrainArgs().parse_args([
'--data_path', data_path,
'--dataset_type', dataset_type,
'--epochs', str(epochs),
'--ensemble_size', str(ensemble_size)
])
# Check if regression/classification selection matches data
data = get_data(path=data_path)
targets = data.targets()
unique_targets = {target for row in targets for target in row if target is not None}
if dataset_type == 'classification' and len(unique_targets - {0, 1}) > 0:
errors.append('Selected classification dataset but not all labels are 0 or 1. Select regression instead.')
return render_train(warnings=warnings, errors=errors)
if dataset_type == 'regression' and unique_targets <= {0, 1}:
errors.append('Selected regression dataset but all labels are 0 or 1. Select classification instead.')
return render_train(warnings=warnings, errors=errors)
if gpu is not None:
if gpu == 'None':
args.cuda = False
else:
args.gpu = int(gpu)
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt_id, ckpt_name = db.insert_ckpt(checkpoint_name,
current_user,
args.dataset_type,
args.epochs,
args.ensemble_size,
len(targets))
with TemporaryDirectory() as temp_dir:
args.save_dir = temp_dir
process = mp.Process(target=progress_bar, args=(args, PROGRESS))
process.start()
TRAINING = 1
# Run training
logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
task_scores = run_training(args, logger)
process.join()
# Reset globals
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
# Check if name overlap
if checkpoint_name != ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', checkpoint_name, ckpt_name))
# Move models
for root, _, files in os.walk(args.save_dir):
for fname in files:
if fname.endswith('.pt'):
model_id = db.insert_model(ckpt_id)
save_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
shutil.move(os.path.join(args.save_dir, root, fname), save_path)
return render_train(trained=True,
metric=args.metric,
num_tasks=len(args.task_names),
task_names=args.task_names,
task_scores=format_float_list(task_scores),
mean_score=format_float(np.mean(task_scores)),
warnings=warnings,
errors=errors)
def render_predict(**kwargs):
"""Renders the predict page with specified kwargs"""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('predict.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/predict', methods=['GET', 'POST'])
def predict():
"""Renders the predict page and makes predictions if the method is POST."""
if request.method == 'GET':
return render_predict()
# Get arguments
ckpt_id = request.form['checkpointName']
if request.form['textSmiles'] != '':
smiles = request.form['textSmiles'].split()
elif request.form['drawSmiles'] != '':
smiles = [request.form['drawSmiles']]
else:
# Upload data file with SMILES
data = request.files['data']
data_name = secure_filename(data.filename)
data_path = os.path.join(app.config['TEMP_FOLDER'], data_name)
data.save(data_path)
# Check if header is smiles
possible_smiles = get_header(data_path)[0]
smiles = [possible_smiles] if Chem.MolFromSmiles(possible_smiles) is not None else []
# Get remaining smiles
smiles.extend(get_smiles(data_path))
models = db.get_models(ckpt_id)
model_paths = [os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt') for model in models]
task_names = load_task_names(model_paths[0])
num_tasks = len(task_names)
gpu = request.form.get('gpu')
train_args = load_args(model_paths[0])
# Build arguments
arguments = [
'--test_path', 'None',
'--preds_path', os.path.join(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME']),
'--checkpoint_paths', *model_paths
]
if gpu is not None:
if gpu == 'None':
arguments.append('--no_cuda')
else:
arguments += ['--gpu', gpu]
# Handle additional features
if train_args.features_path is not None:
# TODO: make it possible to specify the features generator if trained using features_path
arguments += [
'--features_generator', 'rdkit_2d_normalized',
'--no_features_scaling'
]
elif train_args.features_generator is not None:
arguments += ['--features_generator', *train_args.features_generator]
if not train_args.features_scaling:
arguments.append('--no_features_scaling')
# Parse arguments
args = PredictArgs().parse_args(arguments)
# Run predictions
preds = make_predictions(args=args, smiles=smiles)
if all(p is None for p in preds):
return render_predict(errors=['All SMILES are invalid'])
# Replace invalid smiles with message
invalid_smiles_warning = 'Invalid SMILES String'
preds = [pred if pred is not None else [invalid_smiles_warning] * num_tasks for pred in preds]
return render_predict(predicted=True,
smiles=smiles,
num_smiles=min(10, len(smiles)),
show_more=max(0, len(smiles)-10),
task_names=task_names,
num_tasks=len(task_names),
preds=preds,
warnings=["List contains invalid SMILES strings"] if None in preds else None,
errors=["No SMILES strings given"] if len(preds) == 0 else None)
@app.route('/download_predictions')
def download_predictions():
"""Downloads predictions as a .csv file."""
return send_from_directory(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'], as_attachment=True, cache_timeout=-1)
@app.route('/data')
@check_not_demo
def data():
"""Renders the data page."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('data.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users())
@app.route('/data/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_data(return_page: str):
"""
Uploads a data .csv file.
:param return_page: The name of the page to render to after uploading the dataset.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
dataset = request.files['dataset']
with NamedTemporaryFile() as temp_file:
dataset.save(temp_file.name)
dataset_errors = validate_data(temp_file.name)
if len(dataset_errors) > 0:
errors.extend(dataset_errors)
else:
dataset_name = request.form['datasetName']
# dataset_class = load_args(ckpt).dataset_type # TODO: SWITCH TO ACTUALLY FINDING THE CLASS
dataset_id, new_dataset_name = db.insert_dataset(dataset_name, current_user, 'UNKNOWN')
dataset_path = os.path.join(app.config['DATA_FOLDER'], f'{dataset_id}.csv')
if dataset_name != new_dataset_name:
warnings.append(name_already_exists_message('Data', dataset_name, new_dataset_name))
shutil.copy(temp_file.name, dataset_path)
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, data_upload_warnings=warnings, data_upload_errors=errors))
@app.route('/data/download/<int:dataset>')
@check_not_demo
def download_data(dataset: int):
"""
Downloads a dataset as a .csv file.
:param dataset: The id of the dataset to download.
"""
return send_from_directory(app.config['DATA_FOLDER'], f'{dataset}.csv', as_attachment=True, cache_timeout=-1)
@app.route('/data/delete/<int:dataset>')
@check_not_demo
def delete_data(dataset: int):
"""
Deletes a dataset.
:param dataset: The id of the dataset to delete.
"""
db.delete_dataset(dataset)
os.remove(os.path.join(app.config['DATA_FOLDER'], f'{dataset}.csv'))
return redirect(url_for('data'))
@app.route('/checkpoints')
@check_not_demo
def checkpoints():
"""Renders the checkpoints page."""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('checkpoints.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users())
@app.route('/checkpoints/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_checkpoint(return_page: str):
"""
Uploads a checkpoint .pt file.
:param return_page: The name of the page to render after uploading the checkpoint file.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt = request.files['checkpoint']
ckpt_name = request.form['checkpointName']
ckpt_ext = os.path.splitext(ckpt.filename)[1]
# Collect paths to all uploaded checkpoints (and unzip if necessary)
temp_dir = TemporaryDirectory()
ckpt_paths = []
if ckpt_ext.endswith('.pt'):
ckpt_path = os.path.join(temp_dir.name, MODEL_FILE_NAME)
ckpt.save(ckpt_path)
ckpt_paths = [ckpt_path]
elif ckpt_ext.endswith('.zip'):
ckpt_dir = os.path.join(temp_dir.name, 'models')
zip_path = os.path.join(temp_dir.name, 'models.zip')
ckpt.save(zip_path)
with zipfile.ZipFile(zip_path, mode='r') as z:
z.extractall(ckpt_dir)
for root, _, fnames in os.walk(ckpt_dir):
ckpt_paths += [os.path.join(root, fname) for fname in fnames if fname.endswith('.pt')]
else:
errors.append(f'Uploaded checkpoint(s) file must be either .pt or .zip but got {ckpt_ext}')
# Insert checkpoints into database
if len(ckpt_paths) > 0:
ckpt_args = load_args(ckpt_paths[0])
ckpt_id, new_ckpt_name = db.insert_ckpt(ckpt_name,
current_user,
ckpt_args.dataset_type,
ckpt_args.epochs,
len(ckpt_paths),
ckpt_args.train_data_size)
for ckpt_path in ckpt_paths:
model_id = db.insert_model(ckpt_id)
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
if ckpt_name != new_ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', ckpt_name, new_ckpt_name))
shutil.copy(ckpt_path, model_path)
temp_dir.cleanup()
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, checkpoint_upload_warnings=warnings, checkpoint_upload_errors=errors))
@app.route('/checkpoints/download/<int:checkpoint>')
@check_not_demo
def download_checkpoint(checkpoint: int):
"""
Downloads a zip of model .pt files.
:param checkpoint: The name of the checkpoint to download.
"""
ckpt = db.query_db(f'SELECT * FROM ckpt WHERE id = {checkpoint}', one = True)
models = db.get_models(checkpoint)
model_data = io.BytesIO()
with zipfile.ZipFile(model_data, mode='w') as z:
for model in models:
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt')
z.write(model_path, os.path.basename(model_path))
model_data.seek(0)
return send_file(
model_data,
mimetype='application/zip',
as_attachment=True,
attachment_filename=f'{ckpt["ckpt_name"]}.zip',
cache_timeout=-1
)
@app.route('/checkpoints/delete/<int:checkpoint>')
@check_not_demo
def delete_checkpoint(checkpoint: int):
"""
Deletes a checkpoint file.
:param checkpoint: The id of the checkpoint to delete.
"""
db.delete_ckpt(checkpoint)
return redirect(url_for('checkpoints'))
| 34.915254
| 131
| 0.637816
|
72da179ad533a686b2d00e372789b251044cfc82
| 26,773
|
py
|
Python
|
src/utils/inference_utils.py
|
hynekdav/semi-supervised-VOS
|
6b29baef2e4fd018502fb434e978e8e924fb84b1
|
[
"MIT"
] | null | null | null |
src/utils/inference_utils.py
|
hynekdav/semi-supervised-VOS
|
6b29baef2e4fd018502fb434e978e8e924fb84b1
|
[
"MIT"
] | 2
|
2022-01-13T03:45:31.000Z
|
2022-03-12T00:57:40.000Z
|
src/utils/inference_utils.py
|
hynekdav/semi-supervised-VOS
|
6b29baef2e4fd018502fb434e978e8e924fb84b1
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
# ! python3
from __future__ import annotations
from __future__ import generator_stop
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from src.config import Config
from src.model.predict import prepare_first_frame, predict
from src.utils.transforms import hflip
from src.utils.utils import save_predictions, index_to_onehot
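# Element-wise reductions used to merge two prediction tensors from parallel
# streams (e.g. an original and an augmented pass); selected by name via
# REDUCTIONS.get(reduction_str).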
REDUCTIONS = {'maximum': torch.maximum,
              'minimum': torch.minimum,
              'mean': lambda x, y: (x + y) / 2.0}
def inference_single(model, inference_loader, total_len, annotation_dir, last_video, save, sigma_1, sigma_2,
frame_range, ref_num, temperature, probability_propagation, disable):
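    """Label propagation with a single forward pass per frame.

    For frame 0 of each video, features are extracted and the ground-truth
    annotation is loaded via prepare_first_frame; every later frame is
    predicted from the accumulated feature/label history, appended to that
    history, and each video's predictions are written out with
    save_predictions.
    """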
global pred_visualize, palette, feats_history, label_history, weight_dense, weight_sparse, d
frame_idx = 0
for input, (current_video,) in tqdm(inference_loader, total=total_len, disable=disable):
if current_video != last_video:
# save prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
frame_idx = 0
if frame_idx == 0:
input = input.to(Config.DEVICE)
with torch.cuda.amp.autocast():
feats_history = model(input)
first_annotation = annotation_dir / current_video / '00000.png'
label_history, d, palette, weight_dense, weight_sparse = prepare_first_frame(
current_video,
save,
first_annotation,
sigma_1,
sigma_2,
inference_strategy='single',
probability_propagation=probability_propagation)
frame_idx += 1
last_video = current_video
continue
(batch_size, num_channels, H, W) = input.shape
input = input.to(Config.DEVICE)
with torch.cuda.amp.autocast():
features = model(input)
(_, feature_dim, H_d, W_d) = features.shape
prediction = predict(feats_history,
features[0],
label_history,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label = prediction.unsqueeze(1)
else:
new_label = index_to_onehot(torch.argmax(prediction, 0), d).unsqueeze(1)
label_history = torch.cat((label_history, new_label), 1)
feats_history = torch.cat((feats_history, features), 0)
prediction = torch.nn.functional.interpolate(prediction.view(1, d, H_d, W_d), size=(H, W), mode='nearest')
prediction = torch.argmax(prediction, 1).cpu() # (1, H, W)
last_video = current_video
frame_idx += 1
if frame_idx == 2:
pred_visualize = prediction
else:
pred_visualize = torch.cat((pred_visualize, prediction), 0)
# save last video's prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
def inference_hor_flip(model, inference_loader, total_len, annotation_dir, last_video, save, sigma_1, sigma_2,
frame_range, ref_num, temperature, probability_propagation, reduction_str, disable):
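    """Test-time augmentation with a horizontally flipped copy.

    Runs the same propagation as inference_single on both the original and
    the horizontally flipped stream, flips the second prediction back, and
    merges the two (element-wise maximum, or the reduction named by
    reduction_str when probabilities are propagated).
    """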
global pred_visualize, palette, feats_history_l, label_history_l, weight_dense, weight_sparse, feats_history_r, label_history_r, d
frame_idx = 0
for input, (current_video,) in tqdm(inference_loader, total=total_len, disable=disable):
if current_video != last_video:
# save prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
frame_idx = 0
if frame_idx == 0:
input_l = input[0].to(Config.DEVICE)
input_r = input[1].to(Config.DEVICE)
with torch.cuda.amp.autocast():
feats_history_l = model(input_l)
feats_history_r = model(input_r)
first_annotation = annotation_dir / current_video / '00000.png'
label_history_l, label_history_r, d, palette, weight_dense, weight_sparse = prepare_first_frame(
current_video,
save,
first_annotation,
sigma_1,
sigma_2,
inference_strategy='hor-flip',
probability_propagation=probability_propagation)
frame_idx += 1
last_video = current_video
continue
(batch_size, num_channels, H, W) = input[0].shape
input_l = input[0].to(Config.DEVICE)
input_r = input[1].to(Config.DEVICE)
with torch.cuda.amp.autocast():
features_l = model(input_l)
features_r = model(input_r)
(_, feature_dim, H_d, W_d) = features_l.shape
prediction_l = predict(feats_history_l,
features_l[0],
label_history_l,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_l = prediction_l.unsqueeze(1)
else:
new_label_l = index_to_onehot(torch.argmax(prediction_l, 0), d).unsqueeze(1)
label_history_l = torch.cat((label_history_l, new_label_l), 1)
feats_history_l = torch.cat((feats_history_l, features_l), 0)
prediction_l = torch.nn.functional.interpolate(prediction_l.view(1, d, H_d, W_d),
size=(H, W),
mode='nearest')
if not probability_propagation:
prediction_l = torch.argmax(prediction_l, 1).squeeze() # (1, H, W)
prediction_r = predict(feats_history_r,
features_r[0],
label_history_r,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_r = prediction_r.unsqueeze(1)
else:
new_label_r = index_to_onehot(torch.argmax(prediction_r, 0), d).unsqueeze(1)
label_history_r = torch.cat((label_history_r, new_label_r), 1)
feats_history_r = torch.cat((feats_history_r, features_r), 0)
# 1. upsample, 2. argmax
prediction_r = F.interpolate(prediction_r.view(1, d, H_d, W_d), size=(H, W), mode='nearest')
if not probability_propagation:
prediction_r = torch.argmax(prediction_r, 1).squeeze() # (1, H, W)
prediction_r = torch.fliplr(prediction_r).cpu()
prediction_l = prediction_l.cpu()
last_video = current_video
frame_idx += 1
if probability_propagation:
reduction = REDUCTIONS.get(reduction_str)
prediction = reduction(prediction_l, prediction_r).cpu().half()
prediction = torch.argmax(prediction, 1).cpu() # (1, H, W)
else:
prediction = torch.maximum(prediction_l, prediction_r).unsqueeze(0).cpu().half()
if frame_idx == 2:
pred_visualize = prediction
else:
pred_visualize = torch.cat((pred_visualize, prediction), 0)
# save last video's prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
def inference_ver_flip(model, inference_loader, total_len, annotation_dir, last_video, save, sigma_1, sigma_2,
frame_range, ref_num, temperature, probability_propagation, reduction_str, disable):
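    """Test-time augmentation with a vertically flipped copy.

    Keeps separate feature/label histories for the original and the
    vertically flipped stream, flips the second stream's prediction back,
    and merges the two per frame (element-wise maximum, or the reduction
    named by reduction_str when probabilities are propagated).
    """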
global pred_visualize, palette, feats_history_l, label_history_l, weight_dense, weight_sparse, feats_history_r, label_history_r, d
frame_idx = 0
for input, (current_video,) in tqdm(inference_loader, total=total_len, disable=disable):
if current_video != last_video:
# save prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
frame_idx = 0
if frame_idx == 0:
input_l = input[0].to(Config.DEVICE)
input_r = input[1].to(Config.DEVICE)
with torch.cuda.amp.autocast():
feats_history_l = model(input_l)
feats_history_r = model(input_r)
first_annotation = annotation_dir / current_video / '00000.png'
label_history_l, label_history_r, d, palette, weight_dense, weight_sparse = prepare_first_frame(
current_video,
save,
first_annotation,
sigma_1,
sigma_2,
inference_strategy='ver-flip',
probability_propagation=probability_propagation)
frame_idx += 1
last_video = current_video
continue
(batch_size, num_channels, H, W) = input[0].shape
input_l = input[0].to(Config.DEVICE)
input_r = input[1].to(Config.DEVICE)
with torch.cuda.amp.autocast():
features_l = model(input_l)
features_r = model(input_r)
(_, feature_dim, H_d, W_d) = features_l.shape
prediction_l = predict(feats_history_l,
features_l[0],
label_history_l,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_l = prediction_l.unsqueeze(1)
else:
new_label_l = index_to_onehot(torch.argmax(prediction_l, 0), d).unsqueeze(1)
label_history_l = torch.cat((label_history_l, new_label_l), 1)
feats_history_l = torch.cat((feats_history_l, features_l), 0)
prediction_l = torch.nn.functional.interpolate(prediction_l.view(1, d, H_d, W_d),
size=(H, W),
mode='nearest')
if not probability_propagation:
prediction_l = torch.argmax(prediction_l, 1).squeeze() # (1, H, W)
prediction_r = predict(feats_history_r,
features_r[0],
label_history_r,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_r = prediction_r.unsqueeze(1)
else:
new_label_r = index_to_onehot(torch.argmax(prediction_r, 0), d).unsqueeze(1)
label_history_r = torch.cat((label_history_r, new_label_r), 1)
feats_history_r = torch.cat((feats_history_r, features_r), 0)
# 1. upsample, 2. argmax
prediction_r = F.interpolate(prediction_r.view(1, d, H_d, W_d), size=(H, W), mode='nearest')
if not probability_propagation:
prediction_r = torch.argmax(prediction_r, 1).squeeze() # (1, H, W)
prediction_r = torch.fliplr(prediction_r).cpu()
prediction_l = prediction_l.cpu()
last_video = current_video
frame_idx += 1
if probability_propagation:
reduction = REDUCTIONS.get(reduction_str)
prediction = reduction(prediction_l, prediction_r).cpu().half()
prediction = torch.argmax(prediction, 1).cpu() # (1, H, W)
else:
prediction = torch.maximum(prediction_l, prediction_r).unsqueeze(0).cpu().half()
if frame_idx == 2:
pred_visualize = prediction
else:
pred_visualize = torch.cat((pred_visualize, prediction), 0)
# save last video's prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
def inference_2_scale(model, inference_loader, total_len, annotation_dir, last_video, save, sigma_1, sigma_2,
frame_range, ref_num, temperature, probability_propagation, scale, reduction_str, flip_pred,
disable):
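    """Two-scale ensemble: propagate at the original and a rescaled input.

    Maintains separate feature/label histories and spatial weights per
    stream, upsamples both predictions to the input resolution, optionally
    flips the second prediction back (flip_pred), and merges the pair
    (element-wise maximum, or the configured reduction when probabilities
    are propagated).
    """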
global pred_visualize, palette, feats_history_o, label_history_o, weight_dense_o, weight_sparse_o, feats_history_u, label_history_u, weight_dense_u, weight_sparse_u, d
frame_idx = 0
for input, (current_video,) in tqdm(inference_loader, total=total_len, disable=disable):
if current_video != last_video:
# save prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
frame_idx = 0
if frame_idx == 0:
input_o = input[0].to(Config.DEVICE)
input_u = input[1].to(Config.DEVICE)
with torch.cuda.amp.autocast():
feats_history_o = model(input_o)
feats_history_u = model(input_u)
first_annotation = annotation_dir / current_video / '00000.png'
label_history, d, palette, weight_dense, weight_sparse = prepare_first_frame(
current_video,
save,
first_annotation,
sigma_1,
sigma_2,
inference_strategy='2-scale',
probability_propagation=probability_propagation,
scale=scale)
frame_idx += 1
last_video = current_video
label_history_o, label_history_u = label_history
weight_dense_o, weight_dense_u = weight_dense
weight_sparse_o, weight_sparse_u = weight_sparse
continue
(_, _, H, W) = input[0].shape
input_o = input[0].to(Config.DEVICE)
input_u = input[1].to(Config.DEVICE)
with torch.cuda.amp.autocast():
features_o = model(input_o)
features_u = model(input_u)
(_, feature_dim, H_d, W_d) = features_o.shape
prediction_o = predict(feats_history_o,
features_o[0],
label_history_o,
weight_dense_o,
weight_sparse_o,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_o = prediction_o.unsqueeze(1)
else:
new_label_o = index_to_onehot(torch.argmax(prediction_o, 0), d).unsqueeze(1)
label_history_o = torch.cat((label_history_o, new_label_o), 1)
feats_history_o = torch.cat((feats_history_o, features_o), 0)
prediction_o = torch.nn.functional.interpolate(prediction_o.view(1, d, H_d, W_d), size=(H, W), mode='nearest')
if not probability_propagation:
prediction_o = torch.argmax(prediction_o, 1).cpu() # (1, H, W)
(_, feature_dim, H_d, W_d) = features_u.shape
prediction_u = predict(feats_history_u,
features_u[0],
label_history_u,
weight_dense_u,
weight_sparse_u,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_u = prediction_u.unsqueeze(1)
else:
new_label_u = index_to_onehot(torch.argmax(prediction_u, 0), d).unsqueeze(1)
label_history_u = torch.cat((label_history_u, new_label_u), 1)
feats_history_u = torch.cat((feats_history_u, features_u), 0)
prediction_u = torch.nn.functional.interpolate(prediction_u.view(1, d, H_d, W_d), size=(H, W), mode='nearest')
if not probability_propagation:
prediction_u = torch.argmax(prediction_u, 1).cpu() # (1, H, W)
if flip_pred:
prediction_u = hflip(prediction_u)
if probability_propagation:
reduction = REDUCTIONS.get(reduction_str)
prediction = reduction(prediction_o, prediction_u).cpu().half()
prediction = torch.argmax(prediction, 1).cpu() # (1, H, W)
else:
prediction = torch.maximum(prediction_o, prediction_u).cpu().half()
last_video = current_video
frame_idx += 1
if frame_idx == 2:
pred_visualize = prediction
else:
pred_visualize = torch.cat((pred_visualize, prediction), 0)
# save last video's prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
def inference_multimodel(model, additional_model, inference_loader, total_len, annotation_dir, last_video, save,
sigma_1, sigma_2, frame_range, ref_num, temperature, probability_propagation, reduction_str,
disable):
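    """Model ensemble: propagate with two networks over the same frames.

    Keeps an independent feature/label history per model and merges the two
    per-frame predictions (element-wise maximum, or the configured reduction
    when probabilities are propagated).
    """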
global pred_visualize, label_history_a, feats_history_a, weight_sparse, weight_dense, label_history_o, feats_history_o, d, palette
frame_idx = 0
for input, (current_video,) in tqdm(inference_loader, total=total_len, disable=disable):
if current_video != last_video:
# save prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
frame_idx = 0
if frame_idx == 0:
input = input.to(Config.DEVICE)
with torch.cuda.amp.autocast():
feats_history_o = model(input)
feats_history_a = additional_model(input)
first_annotation = annotation_dir / current_video / '00000.png'
label_history, d, palette, weight_dense, weight_sparse = prepare_first_frame(
current_video,
save,
first_annotation,
sigma_1,
sigma_2,
inference_strategy='multimodel',
probability_propagation=probability_propagation)
frame_idx += 1
last_video = current_video
label_history_o = label_history
label_history_a = label_history
continue
(_, _, H, W) = input.shape
input = input.to(Config.DEVICE)
with torch.cuda.amp.autocast():
features_o = model(input)
features_a = additional_model(input)
(_, feature_dim, H_d, W_d) = features_o.shape
prediction_o = predict(feats_history_o,
features_o[0],
label_history_o,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_o = prediction_o.unsqueeze(1)
else:
new_label_o = index_to_onehot(torch.argmax(prediction_o, 0), d).unsqueeze(1)
label_history_o = torch.cat((label_history_o, new_label_o), 1)
feats_history_o = torch.cat((feats_history_o, features_o), 0)
prediction_o = torch.nn.functional.interpolate(prediction_o.view(1, d, H_d, W_d), size=(H, W), mode='nearest')
if not probability_propagation:
prediction_o = torch.argmax(prediction_o, 1).cpu() # (1, H, W)
(_, feature_dim, H_d, W_d) = features_a.shape
prediction_a = predict(feats_history_a,
features_a[0],
label_history_a,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label_a = prediction_a.unsqueeze(1)
else:
new_label_a = index_to_onehot(torch.argmax(prediction_a, 0), d).unsqueeze(1)
label_history_a = torch.cat((label_history_a, new_label_a), 1)
feats_history_a = torch.cat((feats_history_a, features_a), 0)
prediction_a = torch.nn.functional.interpolate(prediction_a.view(1, d, H_d, W_d), size=(H, W), mode='nearest')
if not probability_propagation:
prediction_a = torch.argmax(prediction_a, 1).cpu() # (1, H, W)
if probability_propagation:
reduction = REDUCTIONS.get(reduction_str)
prediction = reduction(prediction_o, prediction_a).cpu().half()
prediction = torch.argmax(prediction, 1).cpu() # (1, H, W)
else:
prediction = torch.maximum(prediction_o, prediction_a).cpu().half()
last_video = current_video
frame_idx += 1
if frame_idx == 2:
pred_visualize = prediction
else:
pred_visualize = torch.cat((pred_visualize, prediction), 0)
# save last video's prediction
pred_visualize = pred_visualize.cpu().numpy()
save_predictions(pred_visualize, palette, save, last_video)
def inference_3_scale(model, inference_loader, total_len, annotation_dir, last_video, save, sigma_1, sigma_2,
frame_range, ref_num, temperature, probability_propagation, scale, disable):
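    """Three-scale ensemble run as three sequential passes over the loader.

    Each pass rescales the input by one of [0.9, 1.0, scale], collects the
    per-video predictions, and the three results per video are merged with
    an element-wise maximum before saving.
    """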
global pred_visualize, palette, feats_history, label_history, weight_dense, weight_sparse, d, current_video
scales = [0.9, 1.0, scale]
predictions = {}
palettes = []
for scale in scales:
frame_idx = 0
for i, (input, (current_video,)) in tqdm(enumerate(inference_loader), total=total_len, disable=disable):
(_, _, H, W) = input.shape
H_d = int(np.ceil(H * scale))
W_d = int(np.ceil(W * scale))
input = torch.nn.functional.interpolate(input, size=(H_d, W_d), mode='nearest').to(Config.DEVICE)
if i != 0 and current_video != last_video:
# save prediction
pred_visualize = pred_visualize.cpu().numpy()
if last_video not in predictions:
predictions[last_video] = []
predictions[last_video].append(pred_visualize)
frame_idx = 0
if frame_idx == 0:
with torch.cuda.amp.autocast():
feats_history = model(input)
first_annotation = annotation_dir / current_video / '00000.png'
label_history, d, palette, weight_dense, weight_sparse = prepare_first_frame(
current_video,
save,
first_annotation,
sigma_1,
sigma_2,
inference_strategy='3-scale',
probability_propagation=probability_propagation,
scale=scale)
frame_idx += 1
last_video = current_video
palettes.append(palette)
continue
with torch.cuda.amp.autocast():
features = model(input)
(_, feature_dim, H_d, W_d) = features.shape
prediction = predict(feats_history,
features[0],
label_history,
weight_dense,
weight_sparse,
frame_idx,
frame_range,
ref_num,
temperature,
probability_propagation)
# Store all frames' features
if probability_propagation:
new_label = prediction.unsqueeze(1)
else:
new_label = index_to_onehot(torch.argmax(prediction, 0), d).unsqueeze(1)
label_history = torch.cat((label_history, new_label), 1)
feats_history = torch.cat((feats_history, features), 0)
prediction = torch.nn.functional.interpolate(prediction.view(1, d, H_d, W_d), size=(480, 910),
mode='nearest')
prediction = torch.argmax(prediction, 1).cpu().type(torch.int8) # (1, H, W)
last_video = current_video
frame_idx += 1
if frame_idx == 2:
pred_visualize = prediction
else:
pred_visualize = torch.cat((pred_visualize, prediction), 0)
pred_visualize = pred_visualize.cpu().numpy()
if current_video not in predictions:
predictions[current_video] = []
predictions[current_video].append(pred_visualize)
pred_visualize = None
for (video_name, frames), palette in tqdm(zip(predictions.items(), palettes), desc='Saving',
total=len(predictions)):
prediction = np.maximum(np.maximum(frames[0], frames[1]), frames[2])
save_predictions(prediction, palette, save, video_name)
| 44.921141
| 171
| 0.563852
|
bf8626c52f23cfd6c0ccee0c6b673808dea7b45c
| 10,926
|
py
|
Python
|
sympy/printing/repr.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 2
|
2021-02-16T14:20:37.000Z
|
2021-02-16T16:37:47.000Z
|
sympy/printing/repr.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/repr.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 1
|
2020-03-06T15:18:46.000Z
|
2020-03-06T15:18:46.000Z
|
"""
A Printer for generating executable code.
The most important function here is srepr that returns a string so that the
relation eval(srepr(expr))=expr holds in an appropriate environment.
"""
from __future__ import print_function, division
from typing import Any, Dict
from sympy.core.function import AppliedUndef
from mpmath.libmp import repr_dps, to_str as mlib_to_str
from .printer import Printer
class ReprPrinter(Printer):
printmethod = "_sympyrepr"
_default_settings = {
"order": None,
"perm_cyclic" : True,
} # type: Dict[str, Any]
def reprify(self, args, sep):
"""
Prints each item in `args` and joins them with `sep`.
"""
return sep.join([self.doprint(item) for item in args])
def emptyPrinter(self, expr):
"""
The fallback printer.
"""
if isinstance(expr, str):
return expr
elif hasattr(expr, "__srepr__"):
return expr.__srepr__()
elif hasattr(expr, "args") and hasattr(expr.args, "__iter__"):
l = []
for o in expr.args:
l.append(self._print(o))
return expr.__class__.__name__ + '(%s)' % ', '.join(l)
elif hasattr(expr, "__module__") and hasattr(expr, "__name__"):
return "<'%s.%s'>" % (expr.__module__, expr.__name__)
else:
return str(expr)
def _print_Add(self, expr, order=None):
args = self._as_ordered_terms(expr, order=order)
nargs = len(args)
args = map(self._print, args)
clsname = type(expr).__name__
if nargs > 255: # Issue #10259, Python < 3.7
return clsname + "(*[%s])" % ", ".join(args)
return clsname + "(%s)" % ", ".join(args)
def _print_Cycle(self, expr):
return expr.__repr__()
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation, Cycle
from sympy.utilities.exceptions import SymPyDeprecationWarning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
SymPyDeprecationWarning(
feature="Permutation.print_cyclic = {}".format(perm_cyclic),
useinstead="init_printing(perm_cyclic={})"
.format(perm_cyclic),
issue=15201,
deprecated_since_version="1.6").warn()
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
if not expr.size:
return 'Permutation()'
# before taking Cycle notation, see if the last element is
# a singleton and move it to the head of the string
s = Cycle(expr)(expr.size - 1).__repr__()[len('Cycle'):]
last = s.rfind('(')
            if last != 0 and ',' not in s[last:]:
s = s[last:] + s[:last]
            return 'Permutation%s' % s
else:
s = expr.support()
if not s:
if expr.size < 5:
return 'Permutation(%s)' % str(expr.array_form)
return 'Permutation([], size=%s)' % expr.size
trim = str(expr.array_form[:s[-1] + 1]) + ', size=%s' % expr.size
use = full = str(expr.array_form)
if len(trim) < len(full):
use = trim
return 'Permutation(%s)' % use
def _print_Function(self, expr):
r = self._print(expr.func)
r += '(%s)' % ', '.join([self._print(a) for a in expr.args])
return r
def _print_FunctionClass(self, expr):
if issubclass(expr, AppliedUndef):
return 'Function(%r)' % (expr.__name__)
else:
return expr.__name__
def _print_Half(self, expr):
return 'Rational(1, 2)'
def _print_RationalConstant(self, expr):
return str(expr)
def _print_AtomicExpr(self, expr):
return str(expr)
def _print_NumberSymbol(self, expr):
return str(expr)
def _print_Integer(self, expr):
return 'Integer(%i)' % expr.p
def _print_Integers(self, expr):
return 'Integers'
def _print_Naturals(self, expr):
return 'Naturals'
def _print_Naturals0(self, expr):
return 'Naturals0'
def _print_Reals(self, expr):
return 'Reals'
def _print_EmptySet(self, expr):
return 'EmptySet'
def _print_EmptySequence(self, expr):
return 'EmptySequence'
def _print_list(self, expr):
return "[%s]" % self.reprify(expr, ", ")
def _print_MatrixBase(self, expr):
        # special case for empty matrices where exactly one of rows/cols is zero
if (expr.rows == 0) ^ (expr.cols == 0):
return '%s(%s, %s, %s)' % (expr.__class__.__name__,
self._print(expr.rows),
self._print(expr.cols),
self._print([]))
l = []
for i in range(expr.rows):
l.append([])
for j in range(expr.cols):
l[-1].append(expr[i, j])
return '%s(%s)' % (expr.__class__.__name__, self._print(l))
def _print_MutableSparseMatrix(self, expr):
return self._print_MatrixBase(expr)
def _print_SparseMatrix(self, expr):
return self._print_MatrixBase(expr)
def _print_ImmutableSparseMatrix(self, expr):
return self._print_MatrixBase(expr)
def _print_Matrix(self, expr):
return self._print_MatrixBase(expr)
def _print_DenseMatrix(self, expr):
return self._print_MatrixBase(expr)
def _print_MutableDenseMatrix(self, expr):
return self._print_MatrixBase(expr)
def _print_ImmutableMatrix(self, expr):
return self._print_MatrixBase(expr)
def _print_ImmutableDenseMatrix(self, expr):
return self._print_MatrixBase(expr)
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_NaN(self, expr):
return "nan"
def _print_Mul(self, expr, order=None):
terms = expr.args
if self.order != 'old':
args = expr._new_rawargs(*terms).as_ordered_factors()
else:
args = terms
nargs = len(args)
args = map(self._print, args)
clsname = type(expr).__name__
if nargs > 255: # Issue #10259, Python < 3.7
return clsname + "(*[%s])" % ", ".join(args)
return clsname + "(%s)" % ", ".join(args)
def _print_Rational(self, expr):
return 'Rational(%s, %s)' % (self._print(expr.p), self._print(expr.q))
def _print_PythonRational(self, expr):
return "%s(%d, %d)" % (expr.__class__.__name__, expr.p, expr.q)
def _print_Fraction(self, expr):
return 'Fraction(%s, %s)' % (self._print(expr.numerator), self._print(expr.denominator))
def _print_Float(self, expr):
r = mlib_to_str(expr._mpf_, repr_dps(expr._prec))
return "%s('%s', precision=%i)" % (expr.__class__.__name__, r, expr._prec)
def _print_Sum2(self, expr):
return "Sum2(%s, (%s, %s, %s))" % (self._print(expr.f), self._print(expr.i),
self._print(expr.a), self._print(expr.b))
def _print_Symbol(self, expr):
d = expr._assumptions.generator
# print the dummy_index like it was an assumption
if expr.is_Dummy:
d['dummy_index'] = expr.dummy_index
if d == {}:
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
else:
attr = ['%s=%s' % (k, v) for k, v in d.items()]
return "%s(%s, %s)" % (expr.__class__.__name__,
self._print(expr.name), ', '.join(attr))
def _print_Predicate(self, expr):
return "%s(%s)" % (expr.__class__.__name__, self._print(expr.name))
def _print_AppliedPredicate(self, expr):
return "%s(%s, %s)" % (expr.__class__.__name__, expr.func, expr.arg)
def _print_str(self, expr):
return repr(expr)
def _print_tuple(self, expr):
if len(expr) == 1:
return "(%s,)" % self._print(expr[0])
else:
return "(%s)" % self.reprify(expr, ", ")
def _print_WildFunction(self, expr):
return "%s('%s')" % (expr.__class__.__name__, expr.name)
def _print_AlgebraicNumber(self, expr):
return "%s(%s, %s)" % (expr.__class__.__name__,
self._print(expr.root), self._print(expr.coeffs()))
def _print_PolyRing(self, ring):
return "%s(%s, %s, %s)" % (ring.__class__.__name__,
self._print(ring.symbols), self._print(ring.domain), self._print(ring.order))
def _print_FracField(self, field):
return "%s(%s, %s, %s)" % (field.__class__.__name__,
self._print(field.symbols), self._print(field.domain), self._print(field.order))
def _print_PolyElement(self, poly):
terms = list(poly.terms())
terms.sort(key=poly.ring.order, reverse=True)
return "%s(%s, %s)" % (poly.__class__.__name__, self._print(poly.ring), self._print(terms))
def _print_FracElement(self, frac):
numer_terms = list(frac.numer.terms())
numer_terms.sort(key=frac.field.order, reverse=True)
denom_terms = list(frac.denom.terms())
denom_terms.sort(key=frac.field.order, reverse=True)
numer = self._print(numer_terms)
denom = self._print(denom_terms)
return "%s(%s, %s, %s)" % (frac.__class__.__name__, self._print(frac.field), numer, denom)
def _print_FractionField(self, domain):
cls = domain.__class__.__name__
field = self._print(domain.field)
return "%s(%s)" % (cls, field)
def _print_PolynomialRingBase(self, ring):
cls = ring.__class__.__name__
dom = self._print(ring.domain)
gens = ', '.join(map(self._print, ring.gens))
order = str(ring.order)
if order != ring.default_order:
orderstr = ", order=" + order
else:
orderstr = ""
return "%s(%s, %s%s)" % (cls, dom, gens, orderstr)
def _print_DMP(self, p):
cls = p.__class__.__name__
rep = self._print(p.rep)
dom = self._print(p.dom)
if p.ring is not None:
ringstr = ", ring=" + self._print(p.ring)
else:
ringstr = ""
return "%s(%s, %s%s)" % (cls, rep, dom, ringstr)
def _print_MonogenicFiniteExtension(self, ext):
# The expanded tree shown by srepr(ext.modulus)
# is not practical.
return "FiniteExtension(%s)" % str(ext.modulus)
def _print_ExtensionElement(self, f):
rep = self._print(f.rep)
ext = self._print(f.ext)
return "ExtElem(%s, %s)" % (rep, ext)
def srepr(expr, **settings):
"""return expr in repr form"""
return ReprPrinter(settings).doprint(expr)
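# Illustrative round-trip, standard sympy behavior:
#     >>> from sympy import symbols
#     >>> x, y = symbols('x y')
#     >>> srepr(x + y)
#     "Add(Symbol('x'), Symbol('y'))"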
| 34.037383
| 99
| 0.576698
|
5ec5fc17bca9444bce675ea5f951bd248c8ef42d
| 542
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/small-poetry-31544
|
960a0945af1b45d421be56a7164ab7f42b69ffdc
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/small-poetry-31544
|
960a0945af1b45d421be56a7164ab7f42b69ffdc
|
[
"FTL",
"AML",
"RSA-MD"
] | 5
|
2021-10-19T08:15:10.000Z
|
2021-10-19T08:15:13.000Z
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/small-poetry-31544
|
960a0945af1b45d421be56a7164ab7f42b69ffdc
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
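# Data migration: point the default django.contrib.sites Site (pk=1) at the
# app's custom domain; update_or_create keeps repeated migrate runs idempotent.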
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "small-poetry-31544.botics.co"
site_params = {
"name": "Small Poetry",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.846154
| 61
| 0.656827
|
365cc8757a4d8e529c2e3d79cc61141faf808d77
| 594
|
py
|
Python
|
schevo/store/tests/test_store_utils.py
|
Schevo/schevo
|
d57a41f8b7b514ed48dc0164dcd3412a89e9873b
|
[
"MIT"
] | 1
|
2020-09-05T00:47:50.000Z
|
2020-09-05T00:47:50.000Z
|
schevo/store/tests/test_store_utils.py
|
Schevo/schevo
|
d57a41f8b7b514ed48dc0164dcd3412a89e9873b
|
[
"MIT"
] | null | null | null |
schevo/store/tests/test_store_utils.py
|
Schevo/schevo
|
d57a41f8b7b514ed48dc0164dcd3412a89e9873b
|
[
"MIT"
] | null | null | null |
"""
$URL: svn+ssh://svn/repos/trunk/durus/test/utest_utils.py $
$Id: utest_utils.py 27079 2005-07-25 20:54:05Z dbinger $
"""
from schevo.store.utils import format_oid, u64, p64, u32, p32
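# p64/u64 and p32/u32 pack/unpack unsigned integers to/from 8- and 4-byte
# strings (big-endian, judging by the format_oid value asserted below:
# 'A' * 8 == 0x4141414141414141 == 4702111234474983745).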
class Test(object):
def test_check_format_oid(self):
assert format_oid('A'*8) == '4702111234474983745'
def test_check_p64_u64(self):
for x in range(3):
assert len(p64(x)) == 8
assert u64(p64(x)) == x
def test_check_p32_u32(self):
for x in range(3):
assert len(p32(x)) == 4
assert x == u32(p32(x))
| 27
| 62
| 0.579125
|
e6ed4d1c80720881b56b0151d2de44ff3a486aa4
| 254,550
|
py
|
Python
|
bigquery/tests/unit/test_client.py
|
codyoss/google-cloud-python
|
505d55357fbdffc5d55005c58712932c758737bd
|
[
"Apache-2.0"
] | null | null | null |
bigquery/tests/unit/test_client.py
|
codyoss/google-cloud-python
|
505d55357fbdffc5d55005c58712932c758737bd
|
[
"Apache-2.0"
] | null | null | null |
bigquery/tests/unit/test_client.py
|
codyoss/google-cloud-python
|
505d55357fbdffc5d55005c58712932c758737bd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import collections
import datetime
import decimal
import email
import gzip
import io
import json
import operator
import unittest
import warnings
import mock
import requests
import six
from six.moves import http_client
import pytest
import pytz
try:
import pandas
except (ImportError, AttributeError): # pragma: NO COVER
pandas = None
try:
import pyarrow
except (ImportError, AttributeError): # pragma: NO COVER
pyarrow = None
import google.api_core.exceptions
from google.api_core.gapic_v1 import client_info
import google.cloud._helpers
from google.cloud import bigquery_v2
from google.cloud.bigquery.dataset import DatasetReference
from tests.unit.helpers import make_connection
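# make_connection (tests.unit.helpers) returns a mock connection whose
# api_request yields the supplied responses in call order; exception
# instances are raised instead of returned, which is how the retry tests
# below stage an error followed by a success.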
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_list_partitons_meta_info(project, dataset_id, table_id, num_rows=0):
return {
"tableReference": {
"projectId": project,
"datasetId": dataset_id,
"tableId": "{}$__PARTITIONS_SUMMARY__".format(table_id),
},
"schema": {
"fields": [
{"name": "project_id", "type": "STRING", "mode": "NULLABLE"},
{"name": "dataset_id", "type": "STRING", "mode": "NULLABLE"},
{"name": "table_id", "type": "STRING", "mode": "NULLABLE"},
{"name": "partition_id", "type": "STRING", "mode": "NULLABLE"},
]
},
"etag": "ETAG",
"numRows": num_rows,
}
class TestClient(unittest.TestCase):
PROJECT = "PROJECT"
DS_ID = "DATASET_ID"
TABLE_ID = "TABLE_ID"
MODEL_ID = "MODEL_ID"
TABLE_REF = DatasetReference(PROJECT, DS_ID).table(TABLE_ID)
KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
LOCATION = "us-central"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.client import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _make_table_resource(self):
return {
"id": "%s:%s:%s" % (self.PROJECT, self.DS_ID, self.TABLE_ID),
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
}
def test_ctor_defaults(self):
from google.cloud.bigquery._http import Connection
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection.credentials, creds)
self.assertIs(client._connection.http, http)
self.assertIsNone(client.location)
self.assertEqual(
client._connection.API_BASE_URL, Connection.DEFAULT_API_ENDPOINT
)
def test_ctor_w_empty_client_options(self):
from google.api_core.client_options import ClientOptions
creds = _make_credentials()
http = object()
client_options = ClientOptions()
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
client_options=client_options,
)
self.assertEqual(
client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT
)
def test_ctor_w_client_options_dict(self):
creds = _make_credentials()
http = object()
client_options = {"api_endpoint": "https://www.foo-googleapis.com"}
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
client_options=client_options,
)
self.assertEqual(
client._connection.API_BASE_URL, "https://www.foo-googleapis.com"
)
def test_ctor_w_client_options_object(self):
from google.api_core.client_options import ClientOptions
creds = _make_credentials()
http = object()
client_options = ClientOptions(api_endpoint="https://www.foo-googleapis.com")
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
client_options=client_options,
)
self.assertEqual(
client._connection.API_BASE_URL, "https://www.foo-googleapis.com"
)
def test_ctor_w_location(self):
from google.cloud.bigquery._http import Connection
creds = _make_credentials()
http = object()
location = "us-central"
client = self._make_one(
project=self.PROJECT, credentials=creds, _http=http, location=location
)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection.credentials, creds)
self.assertIs(client._connection.http, http)
self.assertEqual(client.location, location)
def test_ctor_w_query_job_config(self):
from google.cloud.bigquery._http import Connection
from google.cloud.bigquery import QueryJobConfig
creds = _make_credentials()
http = object()
location = "us-central"
job_config = QueryJobConfig()
job_config.dry_run = True
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
location=location,
default_query_job_config=job_config,
)
self.assertIsInstance(client._connection, Connection)
self.assertIs(client._connection.credentials, creds)
self.assertIs(client._connection.http, http)
self.assertEqual(client.location, location)
self.assertIsInstance(client._default_query_job_config, QueryJobConfig)
self.assertTrue(client._default_query_job_config.dry_run)
def test__get_query_results_miss_w_explicit_project_and_timeout(self):
from google.cloud.exceptions import NotFound
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection()
with self.assertRaises(NotFound):
client._get_query_results(
"nothere",
None,
project="other-project",
location=self.LOCATION,
timeout_ms=500,
)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/other-project/queries/nothere",
query_params={"maxResults": 0, "timeoutMs": 500, "location": self.LOCATION},
)
def test__get_query_results_miss_w_client_location(self):
from google.cloud.exceptions import NotFound
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds, location=self.LOCATION)
conn = client._connection = make_connection()
with self.assertRaises(NotFound):
client._get_query_results("nothere", None)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/PROJECT/queries/nothere",
query_params={"maxResults": 0, "location": self.LOCATION},
)
def test__get_query_results_hit(self):
job_id = "query_job"
data = {
"kind": "bigquery#getQueryResultsResponse",
"etag": "some-tag",
"schema": {
"fields": [
{"name": "title", "type": "STRING", "mode": "NULLABLE"},
{"name": "unique_words", "type": "INTEGER", "mode": "NULLABLE"},
]
},
"jobReference": {"projectId": self.PROJECT, "jobId": job_id},
"totalRows": "10",
"totalBytesProcessed": "2464625",
"jobComplete": True,
"cacheHit": False,
}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
client._connection = make_connection(data)
query_results = client._get_query_results(job_id, None)
self.assertEqual(query_results.total_rows, 10)
self.assertTrue(query_results.complete)
def test_get_service_account_email(self):
path = "/projects/%s/serviceAccount" % (self.PROJECT,)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
email = "bq-123@bigquery-encryption.iam.gserviceaccount.com"
resource = {"kind": "bigquery#getServiceAccountResponse", "email": email}
conn = client._connection = make_connection(resource)
service_account_email = client.get_service_account_email()
conn.api_request.assert_called_once_with(method="GET", path=path)
self.assertEqual(service_account_email, email)
def test_get_service_account_email_w_alternate_project(self):
project = "my-alternate-project"
path = "/projects/%s/serviceAccount" % (project,)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
email = "bq-123@bigquery-encryption.iam.gserviceaccount.com"
resource = {"kind": "bigquery#getServiceAccountResponse", "email": email}
conn = client._connection = make_connection(resource)
service_account_email = client.get_service_account_email(project=project)
conn.api_request.assert_called_once_with(method="GET", path=path)
self.assertEqual(service_account_email, email)
def test_list_projects_defaults(self):
from google.cloud.bigquery.client import Project
PROJECT_1 = "PROJECT_ONE"
PROJECT_2 = "PROJECT_TWO"
TOKEN = "TOKEN"
DATA = {
"nextPageToken": TOKEN,
"projects": [
{
"kind": "bigquery#project",
"id": PROJECT_1,
"numericId": 1,
"projectReference": {"projectId": PROJECT_1},
"friendlyName": "One",
},
{
"kind": "bigquery#project",
"id": PROJECT_2,
"numericId": 2,
"projectReference": {"projectId": PROJECT_2},
"friendlyName": "Two",
},
],
}
creds = _make_credentials()
client = self._make_one(PROJECT_1, creds)
conn = client._connection = make_connection(DATA)
iterator = client.list_projects()
page = six.next(iterator.pages)
projects = list(page)
token = iterator.next_page_token
self.assertEqual(len(projects), len(DATA["projects"]))
for found, expected in zip(projects, DATA["projects"]):
self.assertIsInstance(found, Project)
self.assertEqual(found.project_id, expected["id"])
self.assertEqual(found.numeric_id, expected["numericId"])
self.assertEqual(found.friendly_name, expected["friendlyName"])
self.assertEqual(token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/projects", query_params={}
)
def test_list_projects_explicit_response_missing_projects_key(self):
TOKEN = "TOKEN"
DATA = {}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(DATA)
iterator = client.list_projects(max_results=3, page_token=TOKEN)
page = six.next(iterator.pages)
projects = list(page)
token = iterator.next_page_token
self.assertEqual(len(projects), 0)
self.assertIsNone(token)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects",
query_params={"maxResults": 3, "pageToken": TOKEN},
)
def test_list_datasets_defaults(self):
from google.cloud.bigquery.dataset import DatasetListItem
DATASET_1 = "dataset_one"
DATASET_2 = "dataset_two"
PATH = "projects/%s/datasets" % self.PROJECT
TOKEN = "TOKEN"
DATA = {
"nextPageToken": TOKEN,
"datasets": [
{
"kind": "bigquery#dataset",
"id": "%s:%s" % (self.PROJECT, DATASET_1),
"datasetReference": {
"datasetId": DATASET_1,
"projectId": self.PROJECT,
},
"friendlyName": None,
},
{
"kind": "bigquery#dataset",
"id": "%s:%s" % (self.PROJECT, DATASET_2),
"datasetReference": {
"datasetId": DATASET_2,
"projectId": self.PROJECT,
},
"friendlyName": "Two",
},
],
}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(DATA)
iterator = client.list_datasets()
page = six.next(iterator.pages)
datasets = list(page)
token = iterator.next_page_token
self.assertEqual(len(datasets), len(DATA["datasets"]))
for found, expected in zip(datasets, DATA["datasets"]):
self.assertIsInstance(found, DatasetListItem)
self.assertEqual(found.full_dataset_id, expected["id"])
self.assertEqual(found.friendly_name, expected["friendlyName"])
self.assertEqual(token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/%s" % PATH, query_params={}
)
def test_list_datasets_w_project(self):
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection({})
list(client.list_datasets(project="other-project"))
conn.api_request.assert_called_once_with(
method="GET", path="/projects/other-project/datasets", query_params={}
)
def test_list_datasets_explicit_response_missing_datasets_key(self):
PATH = "projects/%s/datasets" % self.PROJECT
TOKEN = "TOKEN"
FILTER = "FILTER"
DATA = {}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(DATA)
iterator = client.list_datasets(
include_all=True, filter=FILTER, max_results=3, page_token=TOKEN
)
page = six.next(iterator.pages)
datasets = list(page)
token = iterator.next_page_token
self.assertEqual(len(datasets), 0)
self.assertIsNone(token)
conn.api_request.assert_called_once_with(
method="GET",
path="/%s" % PATH,
query_params={
"all": True,
"filter": FILTER,
"maxResults": 3,
"pageToken": TOKEN,
},
)
def test_dataset_with_specified_project(self):
from google.cloud.bigquery.dataset import DatasetReference
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
dataset = client.dataset(self.DS_ID, self.PROJECT)
self.assertIsInstance(dataset, DatasetReference)
self.assertEqual(dataset.dataset_id, self.DS_ID)
self.assertEqual(dataset.project, self.PROJECT)
def test_dataset_with_default_project(self):
from google.cloud.bigquery.dataset import DatasetReference
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
dataset = client.dataset(self.DS_ID)
self.assertIsInstance(dataset, DatasetReference)
self.assertEqual(dataset.dataset_id, self.DS_ID)
self.assertEqual(dataset.project, self.PROJECT)
def test_get_dataset(self):
from google.cloud.exceptions import ServerError
path = "projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
resource = {
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
}
conn = client._connection = make_connection(resource)
dataset_ref = client.dataset(self.DS_ID)
dataset = client.get_dataset(dataset_ref)
conn.api_request.assert_called_once_with(method="GET", path="/%s" % path)
self.assertEqual(dataset.dataset_id, self.DS_ID)
# Test retry.
# Not a cloud API exception (missing 'errors' field).
client._connection = make_connection(Exception(""), resource)
with self.assertRaises(Exception):
client.get_dataset(dataset_ref)
# Zero-length errors field.
client._connection = make_connection(ServerError(""), resource)
with self.assertRaises(ServerError):
client.get_dataset(dataset_ref)
# Non-retryable reason.
client._connection = make_connection(
ServerError("", errors=[{"reason": "serious"}]), resource
)
with self.assertRaises(ServerError):
client.get_dataset(dataset_ref)
# Retryable reason, but retry is disabled.
client._connection = make_connection(
ServerError("", errors=[{"reason": "backendError"}]), resource
)
with self.assertRaises(ServerError):
client.get_dataset(dataset_ref, retry=None)
# Retryable reason, default retry: success.
client._connection = make_connection(
ServerError("", errors=[{"reason": "backendError"}]), resource
)
dataset = client.get_dataset(
# Test with a string for dataset ID.
dataset_ref.dataset_id
)
self.assertEqual(dataset.dataset_id, self.DS_ID)
def test_create_dataset_minimal(self):
from google.cloud.bigquery.dataset import Dataset
PATH = "projects/%s/datasets" % self.PROJECT
RESOURCE = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(RESOURCE)
ds_ref = client.dataset(self.DS_ID)
before = Dataset(ds_ref)
after = client.create_dataset(before)
self.assertEqual(after.dataset_id, self.DS_ID)
self.assertEqual(after.project, self.PROJECT)
self.assertEqual(after.etag, RESOURCE["etag"])
self.assertEqual(after.full_dataset_id, RESOURCE["id"])
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % PATH,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"labels": {},
},
)
def test_create_dataset_w_attrs(self):
from google.cloud.bigquery.dataset import Dataset, AccessEntry
PATH = "projects/%s/datasets" % self.PROJECT
DESCRIPTION = "DESC"
FRIENDLY_NAME = "FN"
LOCATION = "US"
USER_EMAIL = "phred@example.com"
LABELS = {"color": "red"}
VIEW = {
"projectId": "my-proj",
"datasetId": "starry-skies",
"tableId": "northern-hemisphere",
}
RESOURCE = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"description": DESCRIPTION,
"friendlyName": FRIENDLY_NAME,
"location": LOCATION,
"defaultTableExpirationMs": "3600",
"labels": LABELS,
"access": [{"role": "OWNER", "userByEmail": USER_EMAIL}, {"view": VIEW}],
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(RESOURCE)
entries = [
AccessEntry("OWNER", "userByEmail", USER_EMAIL),
AccessEntry(None, "view", VIEW),
]
ds_ref = client.dataset(self.DS_ID)
before = Dataset(ds_ref)
before.access_entries = entries
before.description = DESCRIPTION
before.friendly_name = FRIENDLY_NAME
before.default_table_expiration_ms = 3600
before.location = LOCATION
before.labels = LABELS
after = client.create_dataset(before)
self.assertEqual(after.dataset_id, self.DS_ID)
self.assertEqual(after.project, self.PROJECT)
self.assertEqual(after.etag, RESOURCE["etag"])
self.assertEqual(after.full_dataset_id, RESOURCE["id"])
self.assertEqual(after.description, DESCRIPTION)
self.assertEqual(after.friendly_name, FRIENDLY_NAME)
self.assertEqual(after.location, LOCATION)
self.assertEqual(after.default_table_expiration_ms, 3600)
self.assertEqual(after.labels, LABELS)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % PATH,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"description": DESCRIPTION,
"friendlyName": FRIENDLY_NAME,
"location": LOCATION,
"defaultTableExpirationMs": "3600",
"access": [
{"role": "OWNER", "userByEmail": USER_EMAIL},
{"view": VIEW},
],
"labels": LABELS,
},
)
def test_create_dataset_w_custom_property(self):
# The library should handle sending properties to the API that are not
# yet part of the library
from google.cloud.bigquery.dataset import Dataset
path = "/projects/%s/datasets" % self.PROJECT
resource = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"newAlphaProperty": "unreleased property",
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource)
ds_ref = client.dataset(self.DS_ID)
before = Dataset(ds_ref)
before._properties["newAlphaProperty"] = "unreleased property"
after = client.create_dataset(before)
self.assertEqual(after.dataset_id, self.DS_ID)
self.assertEqual(after.project, self.PROJECT)
self.assertEqual(after._properties["newAlphaProperty"], "unreleased property")
conn.api_request.assert_called_once_with(
method="POST",
path=path,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"newAlphaProperty": "unreleased property",
"labels": {},
},
)
def test_create_dataset_w_client_location_wo_dataset_location(self):
from google.cloud.bigquery.dataset import Dataset
PATH = "projects/%s/datasets" % self.PROJECT
RESOURCE = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"location": self.LOCATION,
}
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(RESOURCE)
ds_ref = client.dataset(self.DS_ID)
before = Dataset(ds_ref)
after = client.create_dataset(before)
self.assertEqual(after.dataset_id, self.DS_ID)
self.assertEqual(after.project, self.PROJECT)
self.assertEqual(after.etag, RESOURCE["etag"])
self.assertEqual(after.full_dataset_id, RESOURCE["id"])
self.assertEqual(after.location, self.LOCATION)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % PATH,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"labels": {},
"location": self.LOCATION,
},
)
def test_create_dataset_w_client_location_w_dataset_location(self):
from google.cloud.bigquery.dataset import Dataset
PATH = "projects/%s/datasets" % self.PROJECT
OTHER_LOCATION = "EU"
RESOURCE = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"location": OTHER_LOCATION,
}
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(RESOURCE)
ds_ref = client.dataset(self.DS_ID)
before = Dataset(ds_ref)
before.location = OTHER_LOCATION
after = client.create_dataset(before)
self.assertEqual(after.dataset_id, self.DS_ID)
self.assertEqual(after.project, self.PROJECT)
self.assertEqual(after.etag, RESOURCE["etag"])
self.assertEqual(after.full_dataset_id, RESOURCE["id"])
self.assertEqual(after.location, OTHER_LOCATION)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % PATH,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"labels": {},
"location": OTHER_LOCATION,
},
)
def test_create_dataset_w_reference(self):
path = "/projects/%s/datasets" % self.PROJECT
resource = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"location": self.LOCATION,
}
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(resource)
dataset = client.create_dataset(client.dataset(self.DS_ID))
self.assertEqual(dataset.dataset_id, self.DS_ID)
self.assertEqual(dataset.project, self.PROJECT)
self.assertEqual(dataset.etag, resource["etag"])
self.assertEqual(dataset.full_dataset_id, resource["id"])
self.assertEqual(dataset.location, self.LOCATION)
conn.api_request.assert_called_once_with(
method="POST",
path=path,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"labels": {},
"location": self.LOCATION,
},
)
def test_create_dataset_w_fully_qualified_string(self):
path = "/projects/%s/datasets" % self.PROJECT
resource = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"location": self.LOCATION,
}
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(resource)
dataset = client.create_dataset("{}.{}".format(self.PROJECT, self.DS_ID))
self.assertEqual(dataset.dataset_id, self.DS_ID)
self.assertEqual(dataset.project, self.PROJECT)
self.assertEqual(dataset.etag, resource["etag"])
self.assertEqual(dataset.full_dataset_id, resource["id"])
self.assertEqual(dataset.location, self.LOCATION)
conn.api_request.assert_called_once_with(
method="POST",
path=path,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"labels": {},
"location": self.LOCATION,
},
)
def test_create_dataset_w_string(self):
path = "/projects/%s/datasets" % self.PROJECT
resource = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "%s:%s" % (self.PROJECT, self.DS_ID),
"location": self.LOCATION,
}
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(resource)
dataset = client.create_dataset(self.DS_ID)
self.assertEqual(dataset.dataset_id, self.DS_ID)
self.assertEqual(dataset.project, self.PROJECT)
self.assertEqual(dataset.etag, resource["etag"])
self.assertEqual(dataset.full_dataset_id, resource["id"])
self.assertEqual(dataset.location, self.LOCATION)
conn.api_request.assert_called_once_with(
method="POST",
path=path,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"labels": {},
"location": self.LOCATION,
},
)
def test_create_dataset_alreadyexists_w_exists_ok_false(self):
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
client._connection = make_connection(
google.api_core.exceptions.AlreadyExists("dataset already exists")
)
with pytest.raises(google.api_core.exceptions.AlreadyExists):
client.create_dataset(self.DS_ID)
def test_create_dataset_alreadyexists_w_exists_ok_true(self):
post_path = "/projects/{}/datasets".format(self.PROJECT)
get_path = "/projects/{}/datasets/{}".format(self.PROJECT, self.DS_ID)
resource = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"id": "{}:{}".format(self.PROJECT, self.DS_ID),
"location": self.LOCATION,
}
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(
google.api_core.exceptions.AlreadyExists("dataset already exists"), resource
)
dataset = client.create_dataset(self.DS_ID, exists_ok=True)
self.assertEqual(dataset.dataset_id, self.DS_ID)
self.assertEqual(dataset.project, self.PROJECT)
self.assertEqual(dataset.etag, resource["etag"])
self.assertEqual(dataset.full_dataset_id, resource["id"])
self.assertEqual(dataset.location, self.LOCATION)
conn.api_request.assert_has_calls(
[
mock.call(
method="POST",
path=post_path,
data={
"datasetReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
},
"labels": {},
"location": self.LOCATION,
},
),
mock.call(method="GET", path=get_path),
]
)
def test_create_routine_w_minimal_resource(self):
from google.cloud.bigquery.routine import Routine
from google.cloud.bigquery.routine import RoutineReference
creds = _make_credentials()
resource = {
"routineReference": {
"projectId": "test-routine-project",
"datasetId": "test_routines",
"routineId": "minimal_routine",
}
}
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource)
full_routine_id = "test-routine-project.test_routines.minimal_routine"
routine = Routine(full_routine_id)
actual_routine = client.create_routine(routine)
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/test-routine-project/datasets/test_routines/routines",
data=resource,
)
self.assertEqual(
actual_routine.reference, RoutineReference.from_string(full_routine_id)
)
def test_create_routine_w_conflict(self):
from google.cloud.bigquery.routine import Routine
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(
google.api_core.exceptions.AlreadyExists("routine already exists")
)
full_routine_id = "test-routine-project.test_routines.minimal_routine"
routine = Routine(full_routine_id)
with pytest.raises(google.api_core.exceptions.AlreadyExists):
client.create_routine(routine)
resource = {
"routineReference": {
"projectId": "test-routine-project",
"datasetId": "test_routines",
"routineId": "minimal_routine",
}
}
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/test-routine-project/datasets/test_routines/routines",
data=resource,
)
def test_create_routine_w_conflict_exists_ok(self):
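        # Same pattern for routines: on a 409 conflict with ``exists_ok=True``
        # the client falls back to a GET of the existing routine.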
from google.cloud.bigquery.routine import Routine
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = {
"routineReference": {
"projectId": "test-routine-project",
"datasetId": "test_routines",
"routineId": "minimal_routine",
}
}
conn = client._connection = make_connection(
google.api_core.exceptions.AlreadyExists("routine already exists"), resource
)
full_routine_id = "test-routine-project.test_routines.minimal_routine"
routine = Routine(full_routine_id)
actual_routine = client.create_routine(routine, exists_ok=True)
self.assertEqual(actual_routine.project, "test-routine-project")
self.assertEqual(actual_routine.dataset_id, "test_routines")
self.assertEqual(actual_routine.routine_id, "minimal_routine")
conn.api_request.assert_has_calls(
[
mock.call(
method="POST",
path="/projects/test-routine-project/datasets/test_routines/routines",
data=resource,
),
mock.call(
method="GET",
path="/projects/test-routine-project/datasets/test_routines/routines/minimal_routine",
),
]
)
def test_create_table_w_day_partition(self):
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import TimePartitioning
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF)
table.time_partitioning = TimePartitioning()
got = client.create_table(table)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"timePartitioning": {"type": "DAY"},
"labels": {},
},
)
self.assertEqual(table.time_partitioning.type_, "DAY")
self.assertEqual(got.table_id, self.TABLE_ID)
def test_create_table_w_custom_property(self):
# The library should handle sending properties to the API that are not
# yet part of the library
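        # Here, writing to ``table._properties`` directly forwards the unknown
        # ``newAlphaProperty`` key verbatim in the request body.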
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
resource["newAlphaProperty"] = "unreleased property"
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF)
table._properties["newAlphaProperty"] = "unreleased property"
got = client.create_table(table)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"newAlphaProperty": "unreleased property",
"labels": {},
},
)
self.assertEqual(got._properties["newAlphaProperty"], "unreleased property")
self.assertEqual(got.table_id, self.TABLE_ID)
def test_create_table_w_encryption_configuration(self):
from google.cloud.bigquery.encryption_configuration import (
EncryptionConfiguration,
)
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF)
table.encryption_configuration = EncryptionConfiguration(
kms_key_name=self.KMS_KEY_NAME
)
got = client.create_table(table)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"labels": {},
"encryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME},
},
)
self.assertEqual(got.table_id, self.TABLE_ID)
def test_create_table_w_day_partition_and_expire(self):
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import TimePartitioning
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF)
table.time_partitioning = TimePartitioning(expiration_ms=100)
got = client.create_table(table)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"timePartitioning": {"type": "DAY", "expirationMs": "100"},
"labels": {},
},
)
self.assertEqual(table.time_partitioning.type_, "DAY")
self.assertEqual(table.time_partitioning.expiration_ms, 100)
self.assertEqual(got.table_id, self.TABLE_ID)
def test_create_table_w_schema_and_query(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
query = "SELECT * from %s:%s" % (self.DS_ID, self.TABLE_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
resource.update(
{
"schema": {
"fields": [
{
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
},
{
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
},
]
},
"view": {"query": query},
}
)
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF, schema=schema)
table.view_query = query
got = client.create_table(table)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"schema": {
"fields": [
{
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
},
{
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
},
]
},
"view": {"query": query, "useLegacySql": False},
"labels": {},
},
)
self.assertEqual(got.table_id, self.TABLE_ID)
self.assertEqual(got.project, self.PROJECT)
self.assertEqual(got.dataset_id, self.DS_ID)
self.assertEqual(got.schema, schema)
self.assertEqual(got.view_query, query)
def test_create_table_w_external(self):
from google.cloud.bigquery.external_config import ExternalConfig
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
resource.update(
{
"externalDataConfiguration": {
"sourceFormat": SourceFormat.CSV,
"autodetect": True,
}
}
)
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF)
ec = ExternalConfig("CSV")
ec.autodetect = True
table.external_data_configuration = ec
got = client.create_table(table)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"externalDataConfiguration": {
"sourceFormat": SourceFormat.CSV,
"autodetect": True,
},
"labels": {},
},
)
self.assertEqual(got.table_id, self.TABLE_ID)
self.assertEqual(got.project, self.PROJECT)
self.assertEqual(got.dataset_id, self.DS_ID)
self.assertEqual(
got.external_data_configuration.source_format, SourceFormat.CSV
)
self.assertEqual(got.external_data_configuration.autodetect, True)
def test_create_table_w_reference(self):
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
conn = client._connection = make_connection(resource)
got = client.create_table(self.TABLE_REF)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"labels": {},
},
)
self.assertEqual(got.table_id, self.TABLE_ID)
def test_create_table_w_fully_qualified_string(self):
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
conn = client._connection = make_connection(resource)
got = client.create_table(
"{}.{}.{}".format(self.PROJECT, self.DS_ID, self.TABLE_ID)
)
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"labels": {},
},
)
self.assertEqual(got.table_id, self.TABLE_ID)
def test_create_table_w_string(self):
path = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
resource = self._make_table_resource()
conn = client._connection = make_connection(resource)
got = client.create_table("{}.{}".format(self.DS_ID, self.TABLE_ID))
conn.api_request.assert_called_once_with(
method="POST",
path="/%s" % path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"labels": {},
},
)
self.assertEqual(got.table_id, self.TABLE_ID)
def test_create_table_alreadyexists_w_exists_ok_false(self):
post_path = "/projects/{}/datasets/{}/tables".format(self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(
google.api_core.exceptions.AlreadyExists("table already exists")
)
with pytest.raises(google.api_core.exceptions.AlreadyExists):
client.create_table("{}.{}".format(self.DS_ID, self.TABLE_ID))
conn.api_request.assert_called_once_with(
method="POST",
path=post_path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"labels": {},
},
)
def test_create_table_alreadyexists_w_exists_ok_true(self):
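        # Mirrors the dataset case: the 409 from the POST is swallowed and a
        # follow-up GET fetches the existing table when ``exists_ok=True``.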
post_path = "/projects/{}/datasets/{}/tables".format(self.PROJECT, self.DS_ID)
get_path = "/projects/{}/datasets/{}/tables/{}".format(
self.PROJECT, self.DS_ID, self.TABLE_ID
)
resource = self._make_table_resource()
creds = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=creds, location=self.LOCATION
)
conn = client._connection = make_connection(
google.api_core.exceptions.AlreadyExists("table already exists"), resource
)
got = client.create_table(
"{}.{}".format(self.DS_ID, self.TABLE_ID), exists_ok=True
)
self.assertEqual(got.project, self.PROJECT)
self.assertEqual(got.dataset_id, self.DS_ID)
self.assertEqual(got.table_id, self.TABLE_ID)
conn.api_request.assert_has_calls(
[
mock.call(
method="POST",
path=post_path,
data={
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
"labels": {},
},
),
mock.call(method="GET", path=get_path),
]
)
def test_get_model(self):
path = "projects/%s/datasets/%s/models/%s" % (
self.PROJECT,
self.DS_ID,
self.MODEL_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
resource = {
"modelReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"modelId": self.MODEL_ID,
}
}
conn = client._connection = make_connection(resource)
model_ref = client.dataset(self.DS_ID).model(self.MODEL_ID)
got = client.get_model(model_ref)
conn.api_request.assert_called_once_with(method="GET", path="/%s" % path)
self.assertEqual(got.model_id, self.MODEL_ID)
def test_get_model_w_string(self):
path = "projects/%s/datasets/%s/models/%s" % (
self.PROJECT,
self.DS_ID,
self.MODEL_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
resource = {
"modelReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"modelId": self.MODEL_ID,
}
}
conn = client._connection = make_connection(resource)
model_id = "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.MODEL_ID)
got = client.get_model(model_id)
conn.api_request.assert_called_once_with(method="GET", path="/%s" % path)
self.assertEqual(got.model_id, self.MODEL_ID)
def test_get_routine(self):
from google.cloud.bigquery.routine import Routine
from google.cloud.bigquery.routine import RoutineReference
full_routine_id = "test-routine-project.test_routines.minimal_routine"
routines = [
full_routine_id,
Routine(full_routine_id),
RoutineReference.from_string(full_routine_id),
]
for routine in routines:
creds = _make_credentials()
resource = {
"etag": "im-an-etag",
"routineReference": {
"projectId": "test-routine-project",
"datasetId": "test_routines",
"routineId": "minimal_routine",
},
"routineType": "SCALAR_FUNCTION",
}
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource)
actual_routine = client.get_routine(routine)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/test-routine-project/datasets/test_routines/routines/minimal_routine",
)
self.assertEqual(
actual_routine.reference,
RoutineReference.from_string(full_routine_id),
msg="routine={}".format(repr(routine)),
)
self.assertEqual(
actual_routine.etag,
"im-an-etag",
msg="routine={}".format(repr(routine)),
)
self.assertEqual(
actual_routine.type_,
"SCALAR_FUNCTION",
msg="routine={}".format(repr(routine)),
)
def test_get_table(self):
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
resource = self._make_table_resource()
conn = client._connection = make_connection(resource)
table = client.get_table(self.TABLE_REF)
conn.api_request.assert_called_once_with(method="GET", path="/%s" % path)
self.assertEqual(table.table_id, self.TABLE_ID)
def test_get_table_sets_user_agent(self):
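        # A custom ``ClientInfo`` user agent must propagate into both the
        # ``User-Agent`` and ``X-Goog-API-Client`` headers of the HTTP call.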
creds = _make_credentials()
http = mock.create_autospec(requests.Session)
mock_response = http.request(
url=mock.ANY, method=mock.ANY, headers=mock.ANY, data=mock.ANY
)
http.reset_mock()
mock_response.status_code = 200
mock_response.json.return_value = self._make_table_resource()
user_agent_override = client_info.ClientInfo(user_agent="my-application/1.2.3")
client = self._make_one(
project=self.PROJECT,
credentials=creds,
client_info=user_agent_override,
_http=http,
)
client.get_table(self.TABLE_REF)
expected_user_agent = user_agent_override.to_user_agent()
http.request.assert_called_once_with(
url=mock.ANY,
method="GET",
headers={
"X-Goog-API-Client": expected_user_agent,
"Accept-Encoding": "gzip",
"User-Agent": expected_user_agent,
},
data=mock.ANY,
)
self.assertIn("my-application/1.2.3", expected_user_agent)
def test_update_dataset_w_invalid_field(self):
from google.cloud.bigquery.dataset import Dataset
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(ValueError):
client.update_dataset(Dataset(client.dataset(self.DS_ID)), ["foo"])
def test_update_dataset(self):
from google.cloud.bigquery.dataset import Dataset, AccessEntry
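        # ``update_dataset`` PATCHes only the fields named in the field list,
        # translating snake_case attributes to their camelCase API keys.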
PATH = "projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
DESCRIPTION = "DESCRIPTION"
FRIENDLY_NAME = "TITLE"
LOCATION = "loc"
LABELS = {"priority": "high"}
ACCESS = [{"role": "OWNER", "userByEmail": "phred@example.com"}]
EXP = 17
RESOURCE = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"etag": "etag",
"description": DESCRIPTION,
"friendlyName": FRIENDLY_NAME,
"location": LOCATION,
"defaultTableExpirationMs": EXP,
"labels": LABELS,
"access": ACCESS,
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(RESOURCE, RESOURCE)
ds = Dataset(client.dataset(self.DS_ID))
ds.description = DESCRIPTION
ds.friendly_name = FRIENDLY_NAME
ds.location = LOCATION
ds.default_table_expiration_ms = EXP
ds.labels = LABELS
ds.access_entries = [AccessEntry("OWNER", "userByEmail", "phred@example.com")]
ds2 = client.update_dataset(
ds, ["description", "friendly_name", "location", "labels", "access_entries"]
)
conn.api_request.assert_called_once_with(
method="PATCH",
data={
"description": DESCRIPTION,
"friendlyName": FRIENDLY_NAME,
"location": LOCATION,
"labels": LABELS,
"access": ACCESS,
},
path="/" + PATH,
headers=None,
)
self.assertEqual(ds2.description, ds.description)
self.assertEqual(ds2.friendly_name, ds.friendly_name)
self.assertEqual(ds2.location, ds.location)
self.assertEqual(ds2.labels, ds.labels)
self.assertEqual(ds2.access_entries, ds.access_entries)
# ETag becomes If-Match header.
ds._properties["etag"] = "etag"
client.update_dataset(ds, [])
req = conn.api_request.call_args
self.assertEqual(req[1]["headers"]["If-Match"], "etag")
def test_update_dataset_w_custom_property(self):
# The library should handle sending properties to the API that are not
# yet part of the library
from google.cloud.bigquery.dataset import Dataset
path = "/projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
resource = {
"datasetReference": {"projectId": self.PROJECT, "datasetId": self.DS_ID},
"newAlphaProperty": "unreleased property",
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource)
dataset = Dataset(client.dataset(self.DS_ID))
dataset._properties["newAlphaProperty"] = "unreleased property"
dataset = client.update_dataset(dataset, ["newAlphaProperty"])
conn.api_request.assert_called_once_with(
method="PATCH",
data={"newAlphaProperty": "unreleased property"},
path=path,
headers=None,
)
self.assertEqual(dataset.dataset_id, self.DS_ID)
self.assertEqual(dataset.project, self.PROJECT)
self.assertEqual(dataset._properties["newAlphaProperty"], "unreleased property")
def test_update_model(self):
from google.cloud.bigquery.model import Model
path = "projects/%s/datasets/%s/models/%s" % (
self.PROJECT,
self.DS_ID,
self.MODEL_ID,
)
description = "description"
title = "title"
expires = datetime.datetime(
2012, 12, 21, 16, 0, 0, tzinfo=google.cloud._helpers.UTC
)
resource = {
"modelReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"modelId": self.MODEL_ID,
},
"description": description,
"etag": "etag",
"expirationTime": str(google.cloud._helpers._millis(expires)),
"friendlyName": title,
"labels": {"x": "y"},
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource, resource)
model_id = "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.MODEL_ID)
model = Model(model_id)
model.description = description
model.friendly_name = title
model.expires = expires
model.labels = {"x": "y"}
updated_model = client.update_model(
model, ["description", "friendly_name", "labels", "expires"]
)
sent = {
"description": description,
"expirationTime": str(google.cloud._helpers._millis(expires)),
"friendlyName": title,
"labels": {"x": "y"},
}
conn.api_request.assert_called_once_with(
method="PATCH", data=sent, path="/" + path, headers=None
)
self.assertEqual(updated_model.model_id, model.model_id)
self.assertEqual(updated_model.description, model.description)
self.assertEqual(updated_model.friendly_name, model.friendly_name)
self.assertEqual(updated_model.labels, model.labels)
self.assertEqual(updated_model.expires, model.expires)
# ETag becomes If-Match header.
model._proto.etag = "etag"
client.update_model(model, [])
req = conn.api_request.call_args
self.assertEqual(req[1]["headers"]["If-Match"], "etag")
def test_update_routine(self):
from google.cloud.bigquery.routine import Routine
from google.cloud.bigquery.routine import RoutineArgument
full_routine_id = "routines-project.test_routines.updated_routine"
resource = {
"routineReference": {
"projectId": "routines-project",
"datasetId": "test_routines",
"routineId": "updated_routine",
},
"routineType": "SCALAR_FUNCTION",
"language": "SQL",
"definitionBody": "x * 3",
"arguments": [{"name": "x", "dataType": {"typeKind": "INT64"}}],
"returnType": None,
"someNewField": "someValue",
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource, resource)
routine = Routine(full_routine_id)
routine.arguments = [
RoutineArgument(
name="x",
data_type=bigquery_v2.types.StandardSqlDataType(
type_kind=bigquery_v2.enums.StandardSqlDataType.TypeKind.INT64
),
)
]
routine.body = "x * 3"
routine.language = "SQL"
routine.type_ = "SCALAR_FUNCTION"
routine._properties["someNewField"] = "someValue"
actual_routine = client.update_routine(
routine,
["arguments", "language", "body", "type_", "return_type", "someNewField"],
)
# TODO: routineReference isn't needed when the Routines API supports
# partial updates.
sent = resource
conn.api_request.assert_called_once_with(
method="PUT",
data=sent,
path="/projects/routines-project/datasets/test_routines/routines/updated_routine",
headers=None,
)
self.assertEqual(actual_routine.arguments, routine.arguments)
self.assertEqual(actual_routine.body, routine.body)
self.assertEqual(actual_routine.language, routine.language)
self.assertEqual(actual_routine.type_, routine.type_)
# ETag becomes If-Match header.
routine._properties["etag"] = "im-an-etag"
client.update_routine(routine, [])
req = conn.api_request.call_args
self.assertEqual(req[1]["headers"]["If-Match"], "im-an-etag")
def test_update_table(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
description = "description"
title = "title"
resource = self._make_table_resource()
resource.update(
{
"schema": {
"fields": [
{
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
},
{
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
},
]
},
"etag": "etag",
"description": description,
"friendlyName": title,
"labels": {"x": "y"},
}
)
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource, resource)
table = Table(self.TABLE_REF, schema=schema)
table.description = description
table.friendly_name = title
table.labels = {"x": "y"}
updated_table = client.update_table(
table, ["schema", "description", "friendly_name", "labels"]
)
sent = {
"schema": {
"fields": [
{
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
},
{
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
},
]
},
"description": description,
"friendlyName": title,
"labels": {"x": "y"},
}
conn.api_request.assert_called_once_with(
method="PATCH", data=sent, path="/" + path, headers=None
)
self.assertEqual(updated_table.description, table.description)
self.assertEqual(updated_table.friendly_name, table.friendly_name)
self.assertEqual(updated_table.schema, table.schema)
self.assertEqual(updated_table.labels, table.labels)
# ETag becomes If-Match header.
table._properties["etag"] = "etag"
client.update_table(table, [])
req = conn.api_request.call_args
self.assertEqual(req[1]["headers"]["If-Match"], "etag")
def test_update_table_w_custom_property(self):
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
resource = self._make_table_resource()
resource["newAlphaProperty"] = "unreleased property"
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF)
table._properties["newAlphaProperty"] = "unreleased property"
updated_table = client.update_table(table, ["newAlphaProperty"])
conn.api_request.assert_called_once_with(
method="PATCH",
path="/%s" % path,
data={"newAlphaProperty": "unreleased property"},
headers=None,
)
self.assertEqual(
updated_table._properties["newAlphaProperty"], "unreleased property"
)
def test_update_table_only_use_legacy_sql(self):
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
resource = self._make_table_resource()
resource["view"] = {"useLegacySql": True}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF)
table.view_use_legacy_sql = True
updated_table = client.update_table(table, ["view_use_legacy_sql"])
conn.api_request.assert_called_once_with(
method="PATCH",
path="/%s" % path,
data={"view": {"useLegacySql": True}},
headers=None,
)
self.assertEqual(updated_table.view_use_legacy_sql, table.view_use_legacy_sql)
def test_update_table_w_query(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _millis
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
query = "select fullname, age from person_ages"
location = "EU"
exp_time = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
schema_resource = {
"fields": [
{
"name": "full_name",
"type": "STRING",
"mode": "REQUIRED",
"description": None,
},
{
"name": "age",
"type": "INTEGER",
"mode": "REQUIRED",
"description": None,
},
]
}
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
resource = self._make_table_resource()
resource.update(
{
"schema": schema_resource,
"view": {"query": query, "useLegacySql": True},
"location": location,
"expirationTime": _millis(exp_time),
}
)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource)
table = Table(self.TABLE_REF, schema=schema)
table.expires = exp_time
table.view_query = query
table.view_use_legacy_sql = True
updated_properties = ["schema", "view_query", "expires", "view_use_legacy_sql"]
updated_table = client.update_table(table, updated_properties)
self.assertEqual(updated_table.schema, table.schema)
self.assertEqual(updated_table.view_query, table.view_query)
self.assertEqual(updated_table.expires, table.expires)
self.assertEqual(updated_table.view_use_legacy_sql, table.view_use_legacy_sql)
self.assertEqual(updated_table.location, location)
conn.api_request.assert_called_once_with(
method="PATCH",
path="/%s" % path,
data={
"view": {"query": query, "useLegacySql": True},
"expirationTime": str(_millis(exp_time)),
"schema": schema_resource,
},
headers=None,
)
def test_update_table_w_schema_None(self):
# Simulate deleting schema: not sure if back-end will actually
# allow this operation, but the spec says it is optional.
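        # Setting ``table.schema = None`` should send an explicit JSON null
        # for ``schema`` rather than omitting the key.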
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
resource1 = self._make_table_resource()
resource1.update(
{
"schema": {
"fields": [
{"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
]
}
}
)
resource2 = self._make_table_resource()
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource1, resource2)
table = client.get_table(
# Test with string for table ID
"{}.{}.{}".format(
self.TABLE_REF.project,
self.TABLE_REF.dataset_id,
self.TABLE_REF.table_id,
)
)
table.schema = None
updated_table = client.update_table(table, ["schema"])
self.assertEqual(len(conn.api_request.call_args_list), 2)
req = conn.api_request.call_args_list[1]
self.assertEqual(req[1]["method"], "PATCH")
sent = {"schema": None}
self.assertEqual(req[1]["data"], sent)
self.assertEqual(req[1]["path"], "/%s" % path)
self.assertEqual(len(updated_table.schema), 0)
def test_update_table_delete_property(self):
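        # Updating a property to ``None`` must send an explicit JSON null in
        # the PATCH body so the backend clears it; omitting the key would
        # leave the property unchanged.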
from google.cloud.bigquery.table import Table
description = "description"
title = "title"
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
resource1 = self._make_table_resource()
resource1.update({"description": description, "friendlyName": title})
resource2 = self._make_table_resource()
resource2["description"] = None
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(resource1, resource2)
table = Table(self.TABLE_REF)
table.description = description
table.friendly_name = title
table2 = client.update_table(table, ["description", "friendly_name"])
self.assertEqual(table2.description, table.description)
table2.description = None
table3 = client.update_table(table2, ["description"])
self.assertEqual(len(conn.api_request.call_args_list), 2)
req = conn.api_request.call_args_list[1]
self.assertEqual(req[1]["method"], "PATCH")
self.assertEqual(req[1]["path"], "/%s" % path)
sent = {"description": None}
self.assertEqual(req[1]["data"], sent)
self.assertIsNone(table3.description)
def test_list_tables_empty(self):
path = "/projects/{}/datasets/{}/tables".format(self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection({})
dataset = client.dataset(self.DS_ID)
iterator = client.list_tables(dataset)
self.assertIs(iterator.dataset, dataset)
page = six.next(iterator.pages)
tables = list(page)
token = iterator.next_page_token
self.assertEqual(tables, [])
self.assertIsNone(token)
conn.api_request.assert_called_once_with(
method="GET", path=path, query_params={}
)
def test_list_models_empty(self):
path = "/projects/{}/datasets/{}/models".format(self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection({})
dataset_id = "{}.{}".format(self.PROJECT, self.DS_ID)
iterator = client.list_models(dataset_id)
page = six.next(iterator.pages)
models = list(page)
token = iterator.next_page_token
self.assertEqual(models, [])
self.assertIsNone(token)
conn.api_request.assert_called_once_with(
method="GET", path=path, query_params={}
)
def test_list_models_defaults(self):
from google.cloud.bigquery.model import Model
MODEL_1 = "model_one"
MODEL_2 = "model_two"
PATH = "projects/%s/datasets/%s/models" % (self.PROJECT, self.DS_ID)
TOKEN = "TOKEN"
DATA = {
"nextPageToken": TOKEN,
"models": [
{
"modelReference": {
"modelId": MODEL_1,
"datasetId": self.DS_ID,
"projectId": self.PROJECT,
}
},
{
"modelReference": {
"modelId": MODEL_2,
"datasetId": self.DS_ID,
"projectId": self.PROJECT,
}
},
],
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(DATA)
dataset = client.dataset(self.DS_ID)
iterator = client.list_models(dataset)
self.assertIs(iterator.dataset, dataset)
page = six.next(iterator.pages)
models = list(page)
token = iterator.next_page_token
self.assertEqual(len(models), len(DATA["models"]))
for found, expected in zip(models, DATA["models"]):
self.assertIsInstance(found, Model)
self.assertEqual(found.model_id, expected["modelReference"]["modelId"])
self.assertEqual(token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/%s" % PATH, query_params={}
)
def test_list_models_wrong_type(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(TypeError):
client.list_models(client.dataset(self.DS_ID).model("foo"))
def test_list_routines_empty(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection({})
iterator = client.list_routines("test-routines.test_routines")
page = six.next(iterator.pages)
routines = list(page)
token = iterator.next_page_token
self.assertEqual(routines, [])
self.assertIsNone(token)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/test-routines/datasets/test_routines/routines",
query_params={},
)
def test_list_routines_defaults(self):
from google.cloud.bigquery.routine import Routine
project_id = "test-routines"
dataset_id = "test_routines"
path = "/projects/test-routines/datasets/test_routines/routines"
routine_1 = "routine_one"
routine_2 = "routine_two"
token = "TOKEN"
resource = {
"nextPageToken": token,
"routines": [
{
"routineReference": {
"routineId": routine_1,
"datasetId": dataset_id,
"projectId": project_id,
}
},
{
"routineReference": {
"routineId": routine_2,
"datasetId": dataset_id,
"projectId": project_id,
}
},
],
}
creds = _make_credentials()
client = self._make_one(project=project_id, credentials=creds)
conn = client._connection = make_connection(resource)
dataset = client.dataset(dataset_id)
iterator = client.list_routines(dataset)
self.assertIs(iterator.dataset, dataset)
page = six.next(iterator.pages)
routines = list(page)
actual_token = iterator.next_page_token
self.assertEqual(len(routines), len(resource["routines"]))
for found, expected in zip(routines, resource["routines"]):
self.assertIsInstance(found, Routine)
self.assertEqual(
found.routine_id, expected["routineReference"]["routineId"]
)
self.assertEqual(actual_token, token)
conn.api_request.assert_called_once_with(
method="GET", path=path, query_params={}
)
def test_list_routines_wrong_type(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(TypeError):
client.list_routines(client.dataset(self.DS_ID).table("foo"))
def test_list_tables_defaults(self):
from google.cloud.bigquery.table import TableListItem
TABLE_1 = "table_one"
TABLE_2 = "table_two"
PATH = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
TOKEN = "TOKEN"
DATA = {
"nextPageToken": TOKEN,
"tables": [
{
"kind": "bigquery#table",
"id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, TABLE_1),
"tableReference": {
"tableId": TABLE_1,
"datasetId": self.DS_ID,
"projectId": self.PROJECT,
},
"type": "TABLE",
},
{
"kind": "bigquery#table",
"id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, TABLE_2),
"tableReference": {
"tableId": TABLE_2,
"datasetId": self.DS_ID,
"projectId": self.PROJECT,
},
"type": "TABLE",
},
],
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(DATA)
dataset = client.dataset(self.DS_ID)
iterator = client.list_tables(dataset)
self.assertIs(iterator.dataset, dataset)
page = six.next(iterator.pages)
tables = list(page)
token = iterator.next_page_token
self.assertEqual(len(tables), len(DATA["tables"]))
for found, expected in zip(tables, DATA["tables"]):
self.assertIsInstance(found, TableListItem)
self.assertEqual(found.full_table_id, expected["id"])
self.assertEqual(found.table_type, expected["type"])
self.assertEqual(token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/%s" % PATH, query_params={}
)
def test_list_tables_explicit(self):
from google.cloud.bigquery.table import TableListItem
TABLE_1 = "table_one"
TABLE_2 = "table_two"
PATH = "projects/%s/datasets/%s/tables" % (self.PROJECT, self.DS_ID)
TOKEN = "TOKEN"
DATA = {
"tables": [
{
"kind": "bigquery#dataset",
"id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, TABLE_1),
"tableReference": {
"tableId": TABLE_1,
"datasetId": self.DS_ID,
"projectId": self.PROJECT,
},
"type": "TABLE",
},
{
"kind": "bigquery#dataset",
"id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, TABLE_2),
"tableReference": {
"tableId": TABLE_2,
"datasetId": self.DS_ID,
"projectId": self.PROJECT,
},
"type": "TABLE",
},
]
}
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(DATA)
dataset = client.dataset(self.DS_ID)
iterator = client.list_tables(
# Test with string for dataset ID.
self.DS_ID,
max_results=3,
page_token=TOKEN,
)
self.assertEqual(iterator.dataset, dataset)
page = six.next(iterator.pages)
tables = list(page)
token = iterator.next_page_token
self.assertEqual(len(tables), len(DATA["tables"]))
for found, expected in zip(tables, DATA["tables"]):
self.assertIsInstance(found, TableListItem)
self.assertEqual(found.full_table_id, expected["id"])
self.assertEqual(found.table_type, expected["type"])
self.assertIsNone(token)
conn.api_request.assert_called_once_with(
method="GET",
path="/%s" % PATH,
query_params={"maxResults": 3, "pageToken": TOKEN},
)
def test_list_tables_wrong_type(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(TypeError):
client.list_tables(client.dataset(self.DS_ID).table("foo"))
def test_delete_dataset(self):
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetReference
ds_ref = DatasetReference(self.PROJECT, self.DS_ID)
datasets = (ds_ref, Dataset(ds_ref), "{}.{}".format(self.PROJECT, self.DS_ID))
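        # ``delete_dataset`` accepts a DatasetReference, a Dataset, or a
        # fully-qualified string ID; each form should issue the same DELETE.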
PATH = "projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection(*([{}] * len(datasets)))
for arg in datasets:
client.delete_dataset(arg)
conn.api_request.assert_called_with(
method="DELETE", path="/%s" % PATH, query_params={}
)
def test_delete_dataset_delete_contents(self):
from google.cloud.bigquery.dataset import Dataset
PATH = "projects/%s/datasets/%s" % (self.PROJECT, self.DS_ID)
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
conn = client._connection = make_connection({}, {})
ds_ref = client.dataset(self.DS_ID)
for arg in (ds_ref, Dataset(ds_ref)):
client.delete_dataset(arg, delete_contents=True)
conn.api_request.assert_called_with(
method="DELETE",
path="/%s" % PATH,
query_params={"deleteContents": "true"},
)
def test_delete_dataset_wrong_type(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(TypeError):
client.delete_dataset(client.dataset(self.DS_ID).table("foo"))
def test_delete_dataset_w_not_found_ok_false(self):
path = "/projects/{}/datasets/{}".format(self.PROJECT, self.DS_ID)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("dataset not found")
)
with self.assertRaises(google.api_core.exceptions.NotFound):
client.delete_dataset(self.DS_ID)
conn.api_request.assert_called_with(method="DELETE", path=path, query_params={})
def test_delete_dataset_w_not_found_ok_true(self):
path = "/projects/{}/datasets/{}".format(self.PROJECT, self.DS_ID)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("dataset not found")
)
client.delete_dataset(self.DS_ID, not_found_ok=True)
conn.api_request.assert_called_with(method="DELETE", path=path, query_params={})
def test_delete_model(self):
from google.cloud.bigquery.model import Model
path = "projects/%s/datasets/%s/models/%s" % (
self.PROJECT,
self.DS_ID,
self.MODEL_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
model_id = "{}.{}.{}".format(self.PROJECT, self.DS_ID, self.MODEL_ID)
models = (
model_id,
client.dataset(self.DS_ID).model(self.MODEL_ID),
Model(model_id),
)
conn = client._connection = make_connection(*([{}] * len(models)))
for arg in models:
client.delete_model(arg)
conn.api_request.assert_called_with(method="DELETE", path="/%s" % path)
def test_delete_model_w_wrong_type(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(TypeError):
client.delete_model(client.dataset(self.DS_ID))
def test_delete_model_w_not_found_ok_false(self):
path = "/projects/{}/datasets/{}/models/{}".format(
self.PROJECT, self.DS_ID, self.MODEL_ID
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("model not found")
)
with self.assertRaises(google.api_core.exceptions.NotFound):
client.delete_model("{}.{}".format(self.DS_ID, self.MODEL_ID))
conn.api_request.assert_called_with(method="DELETE", path=path)
def test_delete_model_w_not_found_ok_true(self):
path = "/projects/{}/datasets/{}/models/{}".format(
self.PROJECT, self.DS_ID, self.MODEL_ID
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("model not found")
)
client.delete_model(
"{}.{}".format(self.DS_ID, self.MODEL_ID), not_found_ok=True
)
conn.api_request.assert_called_with(method="DELETE", path=path)
def test_delete_routine(self):
from google.cloud.bigquery.routine import Routine
from google.cloud.bigquery.routine import RoutineReference
full_routine_id = "test-routine-project.test_routines.minimal_routine"
routines = [
full_routine_id,
Routine(full_routine_id),
RoutineReference.from_string(full_routine_id),
]
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(*([{}] * len(routines)))
for routine in routines:
client.delete_routine(routine)
conn.api_request.assert_called_with(
method="DELETE",
path="/projects/test-routine-project/datasets/test_routines/routines/minimal_routine",
)
def test_delete_routine_w_wrong_type(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(TypeError):
client.delete_routine(client.dataset(self.DS_ID))
def test_delete_routine_w_not_found_ok_false(self):
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("routine not found")
)
with self.assertRaises(google.api_core.exceptions.NotFound):
client.delete_routine("routines-project.test_routines.test_routine")
conn.api_request.assert_called_with(
method="DELETE",
path="/projects/routines-project/datasets/test_routines/routines/test_routine",
)
def test_delete_routine_w_not_found_ok_true(self):
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("routine not found")
)
client.delete_routine(
"routines-project.test_routines.test_routine", not_found_ok=True
)
conn.api_request.assert_called_with(
method="DELETE",
path="/projects/routines-project/datasets/test_routines/routines/test_routine",
)
def test_delete_table(self):
from google.cloud.bigquery.table import Table
tables = (
self.TABLE_REF,
Table(self.TABLE_REF),
"{}.{}.{}".format(
self.TABLE_REF.project,
self.TABLE_REF.dataset_id,
self.TABLE_REF.table_id,
),
)
path = "projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(*([{}] * len(tables)))
for arg in tables:
client.delete_table(arg)
conn.api_request.assert_called_with(method="DELETE", path="/%s" % path)
def test_delete_table_w_wrong_type(self):
creds = _make_credentials()
client = self._make_one(project=self.PROJECT, credentials=creds)
with self.assertRaises(TypeError):
client.delete_table(client.dataset(self.DS_ID))
def test_delete_table_w_not_found_ok_false(self):
path = "/projects/{}/datasets/{}/tables/{}".format(
self.PROJECT, self.DS_ID, self.TABLE_ID
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("table not found")
)
with self.assertRaises(google.api_core.exceptions.NotFound):
client.delete_table("{}.{}".format(self.DS_ID, self.TABLE_ID))
conn.api_request.assert_called_with(method="DELETE", path=path)
def test_delete_table_w_not_found_ok_true(self):
path = "/projects/{}/datasets/{}/tables/{}".format(
self.PROJECT, self.DS_ID, self.TABLE_ID
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(
google.api_core.exceptions.NotFound("table not found")
)
client.delete_table(
"{}.{}".format(self.DS_ID, self.TABLE_ID), not_found_ok=True
)
conn.api_request.assert_called_with(method="DELETE", path=path)
def test_job_from_resource_unknown_type(self):
from google.cloud.bigquery.job import UnknownJob
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
got = client.job_from_resource({}) # Can parse redacted job.
self.assertIsInstance(got, UnknownJob)
self.assertEqual(got.project, self.PROJECT)
    def test_get_job_miss_w_explicit_project(self):
from google.cloud.exceptions import NotFound
OTHER_PROJECT = "OTHER_PROJECT"
JOB_ID = "NONESUCH"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection()
with self.assertRaises(NotFound):
client.get_job(JOB_ID, project=OTHER_PROJECT, location=self.LOCATION)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/OTHER_PROJECT/jobs/NONESUCH",
query_params={"projection": "full", "location": self.LOCATION},
)
def test_get_job_miss_w_client_location(self):
from google.cloud.exceptions import NotFound
OTHER_PROJECT = "OTHER_PROJECT"
JOB_ID = "NONESUCH"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds, location=self.LOCATION)
conn = client._connection = make_connection()
with self.assertRaises(NotFound):
client.get_job(JOB_ID, project=OTHER_PROJECT)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/OTHER_PROJECT/jobs/NONESUCH",
query_params={"projection": "full", "location": self.LOCATION},
)
def test_get_job_hit(self):
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import QueryJob
from google.cloud.bigquery.job import WriteDisposition
JOB_ID = "query_job"
QUERY_DESTINATION_TABLE = "query_destination_table"
QUERY = "SELECT * from test_dataset:test_table"
ASYNC_QUERY_DATA = {
"id": "{}:{}".format(self.PROJECT, JOB_ID),
"jobReference": {"projectId": self.PROJECT, "jobId": "query_job"},
"state": "DONE",
"configuration": {
"query": {
"query": QUERY,
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": QUERY_DESTINATION_TABLE,
},
"createDisposition": CreateDisposition.CREATE_IF_NEEDED,
"writeDisposition": WriteDisposition.WRITE_TRUNCATE,
}
},
}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(ASYNC_QUERY_DATA)
job = client.get_job(JOB_ID)
self.assertIsInstance(job, QueryJob)
self.assertEqual(job.job_id, JOB_ID)
self.assertEqual(job.create_disposition, CreateDisposition.CREATE_IF_NEEDED)
self.assertEqual(job.write_disposition, WriteDisposition.WRITE_TRUNCATE)
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/PROJECT/jobs/query_job",
query_params={"projection": "full"},
)
    def test_cancel_job_miss_w_explicit_project(self):
from google.cloud.exceptions import NotFound
OTHER_PROJECT = "OTHER_PROJECT"
JOB_ID = "NONESUCH"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection()
with self.assertRaises(NotFound):
client.cancel_job(JOB_ID, project=OTHER_PROJECT, location=self.LOCATION)
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/OTHER_PROJECT/jobs/NONESUCH/cancel",
query_params={"projection": "full", "location": self.LOCATION},
)
def test_cancel_job_miss_w_client_location(self):
from google.cloud.exceptions import NotFound
OTHER_PROJECT = "OTHER_PROJECT"
JOB_ID = "NONESUCH"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds, location=self.LOCATION)
conn = client._connection = make_connection()
with self.assertRaises(NotFound):
client.cancel_job(JOB_ID, project=OTHER_PROJECT)
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/OTHER_PROJECT/jobs/NONESUCH/cancel",
query_params={"projection": "full", "location": self.LOCATION},
)
def test_cancel_job_hit(self):
from google.cloud.bigquery.job import QueryJob
JOB_ID = "query_job"
QUERY = "SELECT * from test_dataset:test_table"
QUERY_JOB_RESOURCE = {
"id": "{}:{}".format(self.PROJECT, JOB_ID),
"jobReference": {"projectId": self.PROJECT, "jobId": "query_job"},
"state": "RUNNING",
"configuration": {"query": {"query": QUERY}},
}
RESOURCE = {"job": QUERY_JOB_RESOURCE}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(RESOURCE)
job = client.cancel_job(JOB_ID)
self.assertIsInstance(job, QueryJob)
self.assertEqual(job.job_id, JOB_ID)
self.assertEqual(job.query, QUERY)
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/PROJECT/jobs/query_job/cancel",
query_params={"projection": "full"},
)
def test_list_jobs_defaults(self):
from google.cloud.bigquery.job import CopyJob
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import ExtractJob
from google.cloud.bigquery.job import LoadJob
from google.cloud.bigquery.job import QueryJob
from google.cloud.bigquery.job import WriteDisposition
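        # ``list_jobs`` inspects each resource's ``configuration`` key and
        # instantiates the matching job class (load, copy, extract, or query).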
SOURCE_TABLE = "source_table"
DESTINATION_TABLE = "destination_table"
QUERY_DESTINATION_TABLE = "query_destination_table"
SOURCE_URI = "gs://test_bucket/src_object*"
DESTINATION_URI = "gs://test_bucket/dst_object*"
JOB_TYPES = {
"load_job": LoadJob,
"copy_job": CopyJob,
"extract_job": ExtractJob,
"query_job": QueryJob,
}
PATH = "projects/%s/jobs" % self.PROJECT
TOKEN = "TOKEN"
QUERY = "SELECT * from test_dataset:test_table"
ASYNC_QUERY_DATA = {
"id": "%s:%s" % (self.PROJECT, "query_job"),
"jobReference": {"projectId": self.PROJECT, "jobId": "query_job"},
"state": "DONE",
"configuration": {
"query": {
"query": QUERY,
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": QUERY_DESTINATION_TABLE,
},
"createDisposition": CreateDisposition.CREATE_IF_NEEDED,
"writeDisposition": WriteDisposition.WRITE_TRUNCATE,
}
},
}
EXTRACT_DATA = {
"id": "%s:%s" % (self.PROJECT, "extract_job"),
"jobReference": {"projectId": self.PROJECT, "jobId": "extract_job"},
"state": "DONE",
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE_TABLE,
},
"destinationUris": [DESTINATION_URI],
}
},
}
COPY_DATA = {
"id": "%s:%s" % (self.PROJECT, "copy_job"),
"jobReference": {"projectId": self.PROJECT, "jobId": "copy_job"},
"state": "DONE",
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE_TABLE,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": DESTINATION_TABLE,
},
}
},
}
LOAD_DATA = {
"id": "%s:%s" % (self.PROJECT, "load_job"),
"jobReference": {"projectId": self.PROJECT, "jobId": "load_job"},
"state": "DONE",
"configuration": {
"load": {
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE_TABLE,
},
"sourceUris": [SOURCE_URI],
}
},
}
DATA = {
"nextPageToken": TOKEN,
"jobs": [ASYNC_QUERY_DATA, EXTRACT_DATA, COPY_DATA, LOAD_DATA],
}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(DATA)
iterator = client.list_jobs()
page = six.next(iterator.pages)
jobs = list(page)
token = iterator.next_page_token
self.assertEqual(len(jobs), len(DATA["jobs"]))
for found, expected in zip(jobs, DATA["jobs"]):
name = expected["jobReference"]["jobId"]
self.assertIsInstance(found, JOB_TYPES[name])
self.assertEqual(found.job_id, name)
self.assertEqual(token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/%s" % PATH, query_params={"projection": "full"}
)
def test_list_jobs_load_job_wo_sourceUris(self):
from google.cloud.bigquery.job import LoadJob
SOURCE_TABLE = "source_table"
JOB_TYPES = {"load_job": LoadJob}
PATH = "projects/%s/jobs" % self.PROJECT
TOKEN = "TOKEN"
LOAD_DATA = {
"id": "%s:%s" % (self.PROJECT, "load_job"),
"jobReference": {"projectId": self.PROJECT, "jobId": "load_job"},
"state": "DONE",
"configuration": {
"load": {
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE_TABLE,
}
}
},
}
DATA = {"nextPageToken": TOKEN, "jobs": [LOAD_DATA]}
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(DATA)
iterator = client.list_jobs()
page = six.next(iterator.pages)
jobs = list(page)
token = iterator.next_page_token
self.assertEqual(len(jobs), len(DATA["jobs"]))
for found, expected in zip(jobs, DATA["jobs"]):
name = expected["jobReference"]["jobId"]
self.assertIsInstance(found, JOB_TYPES[name])
self.assertEqual(found.job_id, name)
self.assertEqual(token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/%s" % PATH, query_params={"projection": "full"}
)
def test_list_jobs_explicit_missing(self):
PATH = "projects/%s/jobs" % self.PROJECT
DATA = {}
TOKEN = "TOKEN"
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection(DATA)
iterator = client.list_jobs(
max_results=1000, page_token=TOKEN, all_users=True, state_filter="done"
)
page = six.next(iterator.pages)
jobs = list(page)
token = iterator.next_page_token
self.assertEqual(len(jobs), 0)
self.assertIsNone(token)
conn.api_request.assert_called_once_with(
method="GET",
path="/%s" % PATH,
query_params={
"projection": "full",
"maxResults": 1000,
"pageToken": TOKEN,
"allUsers": True,
"stateFilter": "done",
},
)
def test_list_jobs_w_project(self):
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection({})
list(client.list_jobs(project="other-project"))
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/other-project/jobs",
query_params={"projection": "full"},
)
def test_list_jobs_w_time_filter(self):
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection({})
# One millisecond after the unix epoch.
start_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)
        # One millisecond after the 2038 31-bit signed int rollover.
end_time = datetime.datetime(2038, 1, 19, 3, 14, 7, 1000)
end_time_millis = (((2 ** 31) - 1) * 1000) + 1
list(client.list_jobs(min_creation_time=start_time, max_creation_time=end_time))
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/%s/jobs" % self.PROJECT,
query_params={
"projection": "full",
"minCreationTime": "1",
"maxCreationTime": str(end_time_millis),
},
)
def test_list_jobs_w_parent_job_filter(self):
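        """``parent_job`` may be a job ID string or a job object; either way
        it is sent as the ``parentJobId`` query parameter."""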
from google.cloud.bigquery import job
creds = _make_credentials()
client = self._make_one(self.PROJECT, creds)
conn = client._connection = make_connection({}, {})
parent_job_args = ["parent-job-123", job._AsyncJob("parent-job-123", client)]
for parent_job in parent_job_args:
list(client.list_jobs(parent_job=parent_job))
conn.api_request.assert_called_once_with(
method="GET",
path="/projects/%s/jobs" % self.PROJECT,
query_params={"projection": "full", "parentJobId": "parent-job-123"},
)
conn.api_request.reset_mock()
def test_load_table_from_uri(self):
from google.cloud.bigquery.job import LoadJob, LoadJobConfig
JOB = "job_name"
DESTINATION = "destination_table"
SOURCE_URI = "http://example.com/source.csv"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"load": {
"sourceUris": [SOURCE_URI],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": DESTINATION,
},
}
},
}
creds = _make_credentials()
http = object()
job_config = LoadJobConfig()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
destination = client.dataset(self.DS_ID).table(DESTINATION)
job = client.load_table_from_uri(
SOURCE_URI, destination, job_id=JOB, job_config=job_config
)
# Check that load_table_from_uri actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/%s/jobs" % self.PROJECT, data=RESOURCE
)
self.assertIsInstance(job, LoadJob)
self.assertIsInstance(job._configuration, LoadJobConfig)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(list(job.source_uris), [SOURCE_URI])
self.assertIs(job.destination, destination)
conn = client._connection = make_connection(RESOURCE)
job = client.load_table_from_uri([SOURCE_URI], destination, job_id=JOB)
self.assertIsInstance(job, LoadJob)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(list(job.source_uris), [SOURCE_URI])
self.assertIs(job.destination, destination)
def test_load_table_from_uri_w_explicit_project(self):
job_id = "this-is-a-job-id"
destination_id = "destination_table"
source_uri = "gs://example/source.csv"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {
"load": {
"sourceUris": [source_uri],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": destination_id,
},
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(resource)
destination = client.dataset(self.DS_ID).table(destination_id)
client.load_table_from_uri(
source_uri,
destination,
job_id=job_id,
project="other-project",
location=self.LOCATION,
)
# Check that load_table_from_uri actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_load_table_from_uri_w_client_location(self):
job_id = "this-is-a-job-id"
destination_id = "destination_table"
source_uri = "gs://example/source.csv"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {
"load": {
"sourceUris": [source_uri],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": destination_id,
},
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(
project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
)
conn = client._connection = make_connection(resource)
client.load_table_from_uri(
source_uri,
# Test with string for table ID.
"{}.{}".format(self.DS_ID, destination_id),
job_id=job_id,
project="other-project",
)
# Check that load_table_from_uri actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_load_table_from_uri_w_invalid_job_config(self):
from google.cloud.bigquery import job
JOB = "job_name"
DESTINATION = "destination_table"
SOURCE_URI = "http://example.com/source.csv"
creds = _make_credentials()
http = object()
job_config = job.CopyJobConfig()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
destination = client.dataset(self.DS_ID).table(DESTINATION)
with self.assertRaises(TypeError) as exc:
client.load_table_from_uri(
SOURCE_URI, destination, job_id=JOB, job_config=job_config
)
self.assertIn("Expected an instance of LoadJobConfig", exc.exception.args[0])
@staticmethod
def _mock_requests_response(status_code, headers, content=b""):
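        """Build a minimal stand-in for a ``requests.Response``."""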
return mock.Mock(
content=content,
headers=headers,
status_code=status_code,
spec=["content", "headers", "status_code"],
)
def _mock_transport(self, status_code, headers, content=b""):
fake_transport = mock.Mock(spec=["request"])
fake_response = self._mock_requests_response(
status_code, headers, content=content
)
fake_transport.request.return_value = fake_response
return fake_transport
def _initiate_resumable_upload_helper(self, num_retries=None):
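        """Initiating a resumable upload POSTs the job metadata once and
        returns a ``ResumableUpload`` whose retry strategy depends on
        ``num_retries`` (time-bounded by default, count-bounded otherwise)."""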
from google.resumable_media.requests import ResumableUpload
from google.cloud.bigquery.client import _DEFAULT_CHUNKSIZE
from google.cloud.bigquery.client import _GENERIC_CONTENT_TYPE
from google.cloud.bigquery.client import _get_upload_headers
from google.cloud.bigquery.job import LoadJob
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.job import SourceFormat
# Create mocks to be checked for doing transport.
resumable_url = "http://test.invalid?upload_id=hey-you"
response_headers = {"location": resumable_url}
fake_transport = self._mock_transport(http_client.OK, response_headers)
client = self._make_one(project=self.PROJECT, _http=fake_transport)
conn = client._connection = make_connection()
# Create some mock arguments and call the method under test.
data = b"goodbye gudbi gootbee"
stream = io.BytesIO(data)
config = LoadJobConfig()
config.source_format = SourceFormat.CSV
job = LoadJob(None, None, self.TABLE_REF, client, job_config=config)
metadata = job.to_api_repr()
upload, transport = client._initiate_resumable_upload(
stream, metadata, num_retries
)
# Check the returned values.
self.assertIsInstance(upload, ResumableUpload)
upload_url = (
"https://bigquery.googleapis.com/upload/bigquery/v2/projects/"
+ self.PROJECT
+ "/jobs?uploadType=resumable"
)
self.assertEqual(upload.upload_url, upload_url)
expected_headers = _get_upload_headers(conn.user_agent)
self.assertEqual(upload._headers, expected_headers)
self.assertFalse(upload.finished)
self.assertEqual(upload._chunk_size, _DEFAULT_CHUNKSIZE)
self.assertIs(upload._stream, stream)
self.assertIsNone(upload._total_bytes)
self.assertEqual(upload._content_type, _GENERIC_CONTENT_TYPE)
self.assertEqual(upload.resumable_url, resumable_url)
retry_strategy = upload._retry_strategy
self.assertEqual(retry_strategy.max_sleep, 64.0)
if num_retries is None:
self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
self.assertIsNone(retry_strategy.max_retries)
else:
self.assertIsNone(retry_strategy.max_cumulative_retry)
self.assertEqual(retry_strategy.max_retries, num_retries)
self.assertIs(transport, fake_transport)
# Make sure we never read from the stream.
self.assertEqual(stream.tell(), 0)
# Check the mocks.
request_headers = expected_headers.copy()
request_headers["x-upload-content-type"] = _GENERIC_CONTENT_TYPE
fake_transport.request.assert_called_once_with(
"POST",
upload_url,
data=json.dumps(metadata).encode("utf-8"),
headers=request_headers,
timeout=mock.ANY,
)
def test__initiate_resumable_upload(self):
self._initiate_resumable_upload_helper()
def test__initiate_resumable_upload_with_retry(self):
self._initiate_resumable_upload_helper(num_retries=11)
def _do_multipart_upload_success_helper(self, get_boundary, num_retries=None):
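        """A multipart upload sends job metadata and file contents in a single
        POST request and consumes the whole stream."""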
from google.cloud.bigquery.client import _get_upload_headers
from google.cloud.bigquery.job import LoadJob
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.job import SourceFormat
fake_transport = self._mock_transport(http_client.OK, {})
client = self._make_one(project=self.PROJECT, _http=fake_transport)
conn = client._connection = make_connection()
# Create some mock arguments.
data = b"Bzzzz-zap \x00\x01\xf4"
stream = io.BytesIO(data)
config = LoadJobConfig()
config.source_format = SourceFormat.CSV
job = LoadJob(None, None, self.TABLE_REF, client, job_config=config)
metadata = job.to_api_repr()
size = len(data)
response = client._do_multipart_upload(stream, metadata, size, num_retries)
# Check the mocks and the returned value.
self.assertIs(response, fake_transport.request.return_value)
self.assertEqual(stream.tell(), size)
get_boundary.assert_called_once_with()
upload_url = (
"https://bigquery.googleapis.com/upload/bigquery/v2/projects/"
+ self.PROJECT
+ "/jobs?uploadType=multipart"
)
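        # The multipart body interleaves the JSON job-metadata part and the
        # raw file-bytes part, separated by the mocked "==0==" boundary.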
payload = (
b"--==0==\r\n"
+ b"content-type: application/json; charset=UTF-8\r\n\r\n"
+ json.dumps(metadata).encode("utf-8")
+ b"\r\n"
+ b"--==0==\r\n"
+ b"content-type: */*\r\n\r\n"
+ data
+ b"\r\n"
+ b"--==0==--"
)
headers = _get_upload_headers(conn.user_agent)
headers["content-type"] = b'multipart/related; boundary="==0=="'
fake_transport.request.assert_called_once_with(
"POST", upload_url, data=payload, headers=headers, timeout=mock.ANY
)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload(self, get_boundary):
self._do_multipart_upload_success_helper(get_boundary)
@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_retry(self, get_boundary):
self._do_multipart_upload_success_helper(get_boundary, num_retries=8)
def test_copy_table(self):
from google.cloud.bigquery.job import CopyJob
JOB = "job_name"
SOURCE = "source_table"
DESTINATION = "destination_table"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": DESTINATION,
},
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
dataset = client.dataset(self.DS_ID)
source = dataset.table(SOURCE)
destination = dataset.table(DESTINATION)
job = client.copy_table(source, destination, job_id=JOB)
# Check that copy_table actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/%s/jobs" % self.PROJECT, data=RESOURCE
)
self.assertIsInstance(job, CopyJob)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(list(job.sources), [source])
self.assertIs(job.destination, destination)
conn = client._connection = make_connection(RESOURCE)
source2 = dataset.table(SOURCE + "2")
job = client.copy_table([source, source2], destination, job_id=JOB)
self.assertIsInstance(job, CopyJob)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(list(job.sources), [source, source2])
self.assertIs(job.destination, destination)
def test_copy_table_w_explicit_project(self):
job_id = "this-is-a-job-id"
source_id = "source_table"
destination_id = "destination_table"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": source_id,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": destination_id,
},
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(resource)
dataset = client.dataset(self.DS_ID)
source = dataset.table(source_id)
destination = dataset.table(destination_id)
client.copy_table(
source,
destination,
job_id=job_id,
project="other-project",
location=self.LOCATION,
)
# Check that copy_table actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_copy_table_w_client_location(self):
job_id = "this-is-a-job-id"
source_id = "source_table"
destination_id = "destination_table"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": source_id,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": destination_id,
},
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(
project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
)
conn = client._connection = make_connection(resource)
client.copy_table(
# Test with string for table IDs.
"{}.{}".format(self.DS_ID, source_id),
"{}.{}".format(self.DS_ID, destination_id),
job_id=job_id,
project="other-project",
)
# Check that copy_table actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_copy_table_w_source_strings(self):
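        """Partially- and fully-qualified table ID strings are resolved into
        table references, filling in the client's default project as needed."""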
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
client._connection = make_connection({})
sources = [
"dataset_wo_proj.some_table",
"other_project.other_dataset.other_table",
client.dataset("dataset_from_ref").table("table_from_ref"),
]
destination = "some_project.some_dataset.destination_table"
job = client.copy_table(sources, destination)
expected_sources = [
client.dataset("dataset_wo_proj").table("some_table"),
client.dataset("other_dataset", project="other_project").table(
"other_table"
),
client.dataset("dataset_from_ref").table("table_from_ref"),
]
self.assertEqual(list(job.sources), expected_sources)
expected_destination = client.dataset(
"some_dataset", project="some_project"
).table("destination_table")
self.assertEqual(job.destination, expected_destination)
def test_copy_table_w_invalid_job_config(self):
from google.cloud.bigquery import job
JOB = "job_name"
SOURCE = "source_table"
DESTINATION = "destination_table"
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
job_config = job.ExtractJobConfig()
dataset = client.dataset(self.DS_ID)
source = dataset.table(SOURCE)
destination = dataset.table(DESTINATION)
with self.assertRaises(TypeError) as exc:
client.copy_table(source, destination, job_id=JOB, job_config=job_config)
self.assertIn("Expected an instance of CopyJobConfig", exc.exception.args[0])
def test_copy_table_w_valid_job_config(self):
from google.cloud.bigquery.job import CopyJobConfig
JOB = "job_name"
SOURCE = "source_table"
DESTINATION = "destination_table"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"copy": {
"sourceTables": [
{
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE,
}
],
"destinationTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": DESTINATION,
},
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
job_config = CopyJobConfig()
conn = client._connection = make_connection(RESOURCE)
dataset = client.dataset(self.DS_ID)
source = dataset.table(SOURCE)
destination = dataset.table(DESTINATION)
job = client.copy_table(source, destination, job_id=JOB, job_config=job_config)
# Check that copy_table actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/%s/jobs" % self.PROJECT, data=RESOURCE
)
self.assertIsInstance(job._configuration, CopyJobConfig)
def test_extract_table(self):
from google.cloud.bigquery.job import ExtractJob
JOB = "job_id"
SOURCE = "source_table"
DESTINATION = "gs://bucket_name/object_name"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE,
},
"destinationUris": [DESTINATION],
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
dataset = client.dataset(self.DS_ID)
source = dataset.table(SOURCE)
job = client.extract_table(source, DESTINATION, job_id=JOB)
# Check that extract_table actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/PROJECT/jobs", data=RESOURCE
)
# Check the job resource.
self.assertIsInstance(job, ExtractJob)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(job.source, source)
self.assertEqual(list(job.destination_uris), [DESTINATION])
def test_extract_table_w_invalid_job_config(self):
from google.cloud.bigquery import job
JOB = "job_id"
SOURCE = "source_table"
DESTINATION = "gs://bucket_name/object_name"
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
dataset = client.dataset(self.DS_ID)
source = dataset.table(SOURCE)
job_config = job.LoadJobConfig()
with self.assertRaises(TypeError) as exc:
client.extract_table(source, DESTINATION, job_id=JOB, job_config=job_config)
self.assertIn("Expected an instance of ExtractJobConfig", exc.exception.args[0])
def test_extract_table_w_explicit_project(self):
job_id = "job_id"
source_id = "source_table"
destination = "gs://bucket_name/object_name"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": source_id,
},
"destinationUris": [destination],
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(resource)
dataset = client.dataset(self.DS_ID)
source = dataset.table(source_id)
client.extract_table(
source,
destination,
job_id=job_id,
project="other-project",
location=self.LOCATION,
)
# Check that extract_table actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_extract_table_w_client_location(self):
job_id = "job_id"
source_id = "source_table"
destination = "gs://bucket_name/object_name"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": source_id,
},
"destinationUris": [destination],
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(
project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
)
conn = client._connection = make_connection(resource)
client.extract_table(
# Test with string for table ID.
"{}.{}".format(self.DS_ID, source_id),
destination,
job_id=job_id,
project="other-project",
)
# Check that extract_table actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_extract_table_generated_job_id(self):
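        """When no ``job_id`` is passed, the client generates a random string
        job ID for the request."""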
from google.cloud.bigquery.job import ExtractJob
from google.cloud.bigquery.job import ExtractJobConfig
from google.cloud.bigquery.job import DestinationFormat
JOB = "job_id"
SOURCE = "source_table"
DESTINATION = "gs://bucket_name/object_name"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE,
},
"destinationUris": [DESTINATION],
"destinationFormat": "NEWLINE_DELIMITED_JSON",
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
dataset = client.dataset(self.DS_ID)
source = dataset.table(SOURCE)
job_config = ExtractJobConfig()
job_config.destination_format = DestinationFormat.NEWLINE_DELIMITED_JSON
job = client.extract_table(source, DESTINATION, job_config=job_config)
# Check that extract_table actually starts the job.
conn.api_request.assert_called_once()
_, req = conn.api_request.call_args
self.assertEqual(req["method"], "POST")
self.assertEqual(req["path"], "/projects/PROJECT/jobs")
self.assertIsInstance(req["data"]["jobReference"]["jobId"], six.string_types)
# Check the job resource.
self.assertIsInstance(job, ExtractJob)
self.assertIs(job._client, client)
self.assertEqual(job.source, source)
self.assertEqual(list(job.destination_uris), [DESTINATION])
def test_extract_table_w_destination_uris(self):
from google.cloud.bigquery.job import ExtractJob
JOB = "job_id"
SOURCE = "source_table"
DESTINATION1 = "gs://bucket_name/object_one"
DESTINATION2 = "gs://bucket_name/object_two"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"extract": {
"sourceTable": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": SOURCE,
},
"destinationUris": [DESTINATION1, DESTINATION2],
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
dataset = client.dataset(self.DS_ID)
source = dataset.table(SOURCE)
job = client.extract_table(source, [DESTINATION1, DESTINATION2], job_id=JOB)
# Check that extract_table actually starts the job.
conn.api_request.assert_called_once()
_, req = conn.api_request.call_args
self.assertEqual(req["method"], "POST")
self.assertEqual(req["path"], "/projects/PROJECT/jobs")
# Check the job resource.
self.assertIsInstance(job, ExtractJob)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(job.source, source)
self.assertEqual(list(job.destination_uris), [DESTINATION1, DESTINATION2])
def test_query_defaults(self):
from google.cloud.bigquery.job import QueryJob
QUERY = "select count(*) from persons"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": "some-random-id"},
"configuration": {"query": {"query": QUERY, "useLegacySql": False}},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
job = client.query(QUERY)
self.assertIsInstance(job, QueryJob)
self.assertIsInstance(job.job_id, six.string_types)
self.assertIs(job._client, client)
self.assertEqual(job.query, QUERY)
self.assertEqual(job.udf_resources, [])
self.assertEqual(job.query_parameters, [])
# Check that query actually starts the job.
conn.api_request.assert_called_once()
_, req = conn.api_request.call_args
self.assertEqual(req["method"], "POST")
self.assertEqual(req["path"], "/projects/PROJECT/jobs")
sent = req["data"]
self.assertIsInstance(sent["jobReference"]["jobId"], six.string_types)
sent_config = sent["configuration"]["query"]
self.assertEqual(sent_config["query"], QUERY)
self.assertFalse(sent_config["useLegacySql"])
def test_query_w_explicit_project(self):
job_id = "some-job-id"
query = "select count(*) from persons"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {"query": {"query": query, "useLegacySql": False}},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(resource)
client.query(
query, job_id=job_id, project="other-project", location=self.LOCATION
)
# Check that query actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_query_w_explicit_job_config(self):
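        """An explicit job config is merged with the client's
        ``default_query_job_config``: unset fields (here the default dataset)
        are inherited, while explicitly set fields such as
        ``maximum_bytes_billed`` take precedence."""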
job_id = "some-job-id"
query = "select count(*) from persons"
resource = {
"jobReference": {
"jobId": job_id,
"projectId": self.PROJECT,
"location": self.LOCATION,
},
"configuration": {
"query": {
"query": query,
"defaultDataset": {
"projectId": self.PROJECT,
"datasetId": "some-dataset",
},
"useLegacySql": False,
"useQueryCache": True,
"maximumBytesBilled": "2000",
}
},
}
creds = _make_credentials()
http = object()
from google.cloud.bigquery import QueryJobConfig, DatasetReference
default_job_config = QueryJobConfig()
default_job_config.default_dataset = DatasetReference(
self.PROJECT, "some-dataset"
)
default_job_config.maximum_bytes_billed = 1000
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
default_query_job_config=default_job_config,
)
conn = client._connection = make_connection(resource)
job_config = QueryJobConfig()
job_config.use_query_cache = True
job_config.maximum_bytes_billed = 2000
client.query(
query, job_id=job_id, location=self.LOCATION, job_config=job_config
)
# Check that query actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/PROJECT/jobs", data=resource
)
def test_query_w_invalid_job_config(self):
from google.cloud.bigquery import QueryJobConfig, DatasetReference
from google.cloud.bigquery import job
job_id = "some-job-id"
query = "select count(*) from persons"
creds = _make_credentials()
http = object()
default_job_config = QueryJobConfig()
default_job_config.default_dataset = DatasetReference(
self.PROJECT, "some-dataset"
)
default_job_config.maximum_bytes_billed = 1000
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
default_query_job_config=default_job_config,
)
job_config = job.LoadJobConfig()
with self.assertRaises(TypeError) as exc:
client.query(
query, job_id=job_id, location=self.LOCATION, job_config=job_config
)
self.assertIn("Expected an instance of QueryJobConfig", exc.exception.args[0])
def test_query_w_explicit_job_config_override(self):
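        """Explicitly setting ``default_dataset = None`` on the per-query
        config clears the value inherited from the client's default config."""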
job_id = "some-job-id"
query = "select count(*) from persons"
resource = {
"jobReference": {
"jobId": job_id,
"projectId": self.PROJECT,
"location": self.LOCATION,
},
"configuration": {
"query": {
"query": query,
"defaultDataset": None,
"useLegacySql": False,
"useQueryCache": True,
"maximumBytesBilled": "2000",
}
},
}
creds = _make_credentials()
http = object()
from google.cloud.bigquery import QueryJobConfig, DatasetReference
default_job_config = QueryJobConfig()
default_job_config.default_dataset = DatasetReference(
self.PROJECT, "some-dataset"
)
default_job_config.maximum_bytes_billed = 1000
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
default_query_job_config=default_job_config,
)
conn = client._connection = make_connection(resource)
job_config = QueryJobConfig()
job_config.use_query_cache = True
job_config.maximum_bytes_billed = 2000
job_config.default_dataset = None
client.query(
query, job_id=job_id, location=self.LOCATION, job_config=job_config
)
# Check that query actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/PROJECT/jobs", data=resource
)
def test_query_w_client_default_config_no_incoming(self):
job_id = "some-job-id"
query = "select count(*) from persons"
resource = {
"jobReference": {
"jobId": job_id,
"projectId": self.PROJECT,
"location": self.LOCATION,
},
"configuration": {
"query": {
"query": query,
"useLegacySql": False,
"maximumBytesBilled": "1000",
}
},
}
creds = _make_credentials()
http = object()
from google.cloud.bigquery import QueryJobConfig
default_job_config = QueryJobConfig()
default_job_config.maximum_bytes_billed = 1000
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
default_query_job_config=default_job_config,
)
conn = client._connection = make_connection(resource)
client.query(query, job_id=job_id, location=self.LOCATION)
# Check that query actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/PROJECT/jobs", data=resource
)
def test_query_w_invalid_default_job_config(self):
job_id = "some-job-id"
query = "select count(*) from persons"
creds = _make_credentials()
http = object()
default_job_config = object()
client = self._make_one(
project=self.PROJECT,
credentials=creds,
_http=http,
default_query_job_config=default_job_config,
)
with self.assertRaises(TypeError) as exc:
client.query(query, job_id=job_id, location=self.LOCATION)
self.assertIn("Expected an instance of QueryJobConfig", exc.exception.args[0])
def test_query_w_client_location(self):
job_id = "some-job-id"
query = "select count(*) from persons"
resource = {
"jobReference": {
"projectId": "other-project",
"location": self.LOCATION,
"jobId": job_id,
},
"configuration": {"query": {"query": query, "useLegacySql": False}},
}
creds = _make_credentials()
http = object()
client = self._make_one(
project=self.PROJECT, credentials=creds, _http=http, location=self.LOCATION
)
conn = client._connection = make_connection(resource)
client.query(query, job_id=job_id, project="other-project")
# Check that query actually starts the job.
conn.api_request.assert_called_once_with(
method="POST", path="/projects/other-project/jobs", data=resource
)
def test_query_detect_location(self):
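        """The job picks up its location from the API response when none was
        set on the request."""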
query = "select count(*) from persons"
resource_location = "EU"
resource = {
"jobReference": {
"projectId": self.PROJECT,
# Location not set in request, but present in the response.
"location": resource_location,
"jobId": "some-random-id",
},
"configuration": {"query": {"query": query, "useLegacySql": False}},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(resource)
job = client.query(query)
self.assertEqual(job.location, resource_location)
# Check that request did not contain a location.
conn.api_request.assert_called_once()
_, req = conn.api_request.call_args
sent = req["data"]
self.assertIsNone(sent["jobReference"].get("location"))
def test_query_w_udf_resources(self):
from google.cloud.bigquery.job import QueryJob
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import UDFResource
RESOURCE_URI = "gs://some-bucket/js/lib.js"
JOB = "job_name"
QUERY = "select count(*) from persons"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"query": {
"query": QUERY,
"useLegacySql": True,
"userDefinedFunctionResources": [{"resourceUri": RESOURCE_URI}],
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
udf_resources = [UDFResource("resourceUri", RESOURCE_URI)]
config = QueryJobConfig()
config.udf_resources = udf_resources
config.use_legacy_sql = True
job = client.query(QUERY, job_config=config, job_id=JOB)
self.assertIsInstance(job, QueryJob)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(job.query, QUERY)
self.assertEqual(job.udf_resources, udf_resources)
self.assertEqual(job.query_parameters, [])
# Check that query actually starts the job.
conn.api_request.assert_called_once()
_, req = conn.api_request.call_args
self.assertEqual(req["method"], "POST")
self.assertEqual(req["path"], "/projects/PROJECT/jobs")
sent = req["data"]
self.assertIsInstance(sent["jobReference"]["jobId"], six.string_types)
sent_config = sent["configuration"]["query"]
self.assertEqual(sent_config["query"], QUERY)
self.assertTrue(sent_config["useLegacySql"])
self.assertEqual(
sent_config["userDefinedFunctionResources"][0],
{"resourceUri": RESOURCE_URI},
)
def test_query_w_query_parameters(self):
from google.cloud.bigquery.job import QueryJob
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.query import ScalarQueryParameter
JOB = "job_name"
QUERY = "select count(*) from persons"
RESOURCE = {
"jobReference": {"projectId": self.PROJECT, "jobId": JOB},
"configuration": {
"query": {
"query": QUERY,
"useLegacySql": False,
"queryParameters": [
{
"name": "foo",
"parameterType": {"type": "INT64"},
"parameterValue": {"value": "123"},
}
],
}
},
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESOURCE)
query_parameters = [ScalarQueryParameter("foo", "INT64", 123)]
config = QueryJobConfig()
config.query_parameters = query_parameters
job = client.query(QUERY, job_config=config, job_id=JOB)
self.assertIsInstance(job, QueryJob)
self.assertIs(job._client, client)
self.assertEqual(job.job_id, JOB)
self.assertEqual(job.query, QUERY)
self.assertEqual(job.udf_resources, [])
self.assertEqual(job.query_parameters, query_parameters)
# Check that query actually starts the job.
conn.api_request.assert_called_once()
_, req = conn.api_request.call_args
self.assertEqual(req["method"], "POST")
self.assertEqual(req["path"], "/projects/PROJECT/jobs")
sent = req["data"]
self.assertEqual(sent["jobReference"]["jobId"], JOB)
sent_config = sent["configuration"]["query"]
self.assertEqual(sent_config["query"], QUERY)
self.assertFalse(sent_config["useLegacySql"])
self.assertEqual(
sent_config["queryParameters"][0],
{
"name": "foo",
"parameterType": {"type": "INT64"},
"parameterValue": {"value": "123"},
},
)
def test_insert_rows_wo_schema(self):
from google.cloud.bigquery.table import Table
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
table = Table(self.TABLE_REF)
ROWS = [
("Phred Phlyntstone", 32),
("Bharney Rhubble", 33),
("Wylma Phlyntstone", 29),
("Bhettye Rhubble", 27),
]
with self.assertRaises(ValueError) as exc:
client.insert_rows(table, ROWS)
self.assertIn("Could not determine schema for table", exc.exception.args[0])
def test_insert_rows_w_schema(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _microseconds_from_datetime
from google.cloud.bigquery.schema import SchemaField
WHEN_TS = 1437767599.006
WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace(tzinfo=UTC)
PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({})
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField("joined", "TIMESTAMP", mode="NULLABLE"),
]
ROWS = [
("Phred Phlyntstone", 32, _datetime_to_rfc3339(WHEN)),
("Bharney Rhubble", 33, WHEN + datetime.timedelta(seconds=1)),
("Wylma Phlyntstone", 29, WHEN + datetime.timedelta(seconds=2)),
("Bhettye Rhubble", 27, None),
]
def _row_data(row):
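            # Serialize the way insertAll expects: TIMESTAMP values as float
            # seconds since the epoch, INTEGER values as strings.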
joined = row[2]
if isinstance(row[2], datetime.datetime):
joined = _microseconds_from_datetime(joined) * 1e-6
return {"full_name": row[0], "age": str(row[1]), "joined": joined}
SENT = {
"rows": [
{"json": _row_data(row), "insertId": str(i)}
for i, row in enumerate(ROWS)
]
}
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
# Test with using string IDs for the table.
errors = client.insert_rows(
"{}.{}".format(self.DS_ID, self.TABLE_ID), ROWS, selected_fields=schema
)
self.assertEqual(len(errors), 0)
conn.api_request.assert_called_once()
_, req = conn.api_request.call_args
self.assertEqual(req["method"], "POST")
self.assertEqual(req["path"], "/%s" % PATH)
self.assertEqual(req["data"], SENT)
def test_insert_rows_w_list_of_dictionaries(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _microseconds_from_datetime
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
WHEN_TS = 1437767599.006
WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace(tzinfo=UTC)
PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({})
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField("joined", "TIMESTAMP", mode="NULLABLE"),
]
table = Table(self.TABLE_REF, schema=schema)
ROWS = [
{
"full_name": "Phred Phlyntstone",
"age": 32,
"joined": _datetime_to_rfc3339(WHEN),
},
{
"full_name": "Bharney Rhubble",
"age": 33,
"joined": WHEN + datetime.timedelta(seconds=1),
},
{
"full_name": "Wylma Phlyntstone",
"age": 29,
"joined": WHEN + datetime.timedelta(seconds=2),
},
{"full_name": "Bhettye Rhubble", "age": 27, "joined": None},
]
def _row_data(row):
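            # Mutates ``row`` in place, so building SENT below also rewrites
            # ROWS into the serialized form passed to insert_rows().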
joined = row["joined"]
if isinstance(joined, datetime.datetime):
row["joined"] = _microseconds_from_datetime(joined) * 1e-6
row["age"] = str(row["age"])
return row
SENT = {
"rows": [
{"json": _row_data(row), "insertId": str(i)}
for i, row in enumerate(ROWS)
]
}
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
errors = client.insert_rows(table, ROWS)
self.assertEqual(len(errors), 0)
conn.api_request.assert_called_once_with(
method="POST", path="/%s" % PATH, data=SENT
)
def test_insert_rows_w_list_of_Rows(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import Row
PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({})
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
table = Table(self.TABLE_REF, schema=schema)
f2i = {"full_name": 0, "age": 1}
ROWS = [
Row(("Phred Phlyntstone", 32), f2i),
Row(("Bharney Rhubble", 33), f2i),
Row(("Wylma Phlyntstone", 29), f2i),
Row(("Bhettye Rhubble", 27), f2i),
]
def _row_data(row):
return {"full_name": row[0], "age": str(row[1])}
SENT = {
"rows": [
{"json": _row_data(row), "insertId": str(i)}
for i, row in enumerate(ROWS)
]
}
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
errors = client.insert_rows(table, ROWS)
self.assertEqual(len(errors), 0)
conn.api_request.assert_called_once_with(
method="POST", path="/%s" % PATH, data=SENT
)
def test_insert_rows_w_skip_invalid_and_ignore_unknown(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
RESPONSE = {
"insertErrors": [
{
"index": 1,
"errors": [
{
"reason": "REASON",
"location": "LOCATION",
"debugInfo": "INFO",
"message": "MESSAGE",
}
],
}
]
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(RESPONSE)
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField("voter", "BOOLEAN", mode="NULLABLE"),
]
table = Table(self.TABLE_REF, schema=schema)
ROWS = [
("Phred Phlyntstone", 32, True),
("Bharney Rhubble", 33, False),
("Wylma Phlyntstone", 29, True),
("Bhettye Rhubble", 27, True),
]
def _row_data(row):
return {
"full_name": row[0],
"age": str(row[1]),
"voter": row[2] and "true" or "false",
}
SENT = {
"skipInvalidRows": True,
"ignoreUnknownValues": True,
"templateSuffix": "20160303",
"rows": [
{"insertId": index, "json": _row_data(row)}
for index, row in enumerate(ROWS)
],
}
errors = client.insert_rows(
table,
ROWS,
row_ids=[index for index, _ in enumerate(ROWS)],
skip_invalid_rows=True,
ignore_unknown_values=True,
template_suffix="20160303",
)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["index"], 1)
self.assertEqual(len(errors[0]["errors"]), 1)
self.assertEqual(
errors[0]["errors"][0], RESPONSE["insertErrors"][0]["errors"][0]
)
conn.api_request.assert_called_once_with(
method="POST", path="/%s" % PATH, data=SENT
)
def test_insert_rows_w_repeated_fields(self):
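        """REPEATED and nested RECORD values may be given as sequences,
        tuples, or dicts; timestamps serialize as epoch floats and integers
        as strings."""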
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({})
color = SchemaField("color", "STRING", mode="REPEATED")
items = SchemaField("items", "INTEGER", mode="REPEATED")
score = SchemaField("score", "INTEGER")
times = SchemaField("times", "TIMESTAMP", mode="REPEATED")
distances = SchemaField("distances", "FLOAT", mode="REPEATED")
structs = SchemaField(
"structs", "RECORD", mode="REPEATED", fields=[score, times, distances]
)
table = Table(self.TABLE_REF, schema=[color, items, structs])
ROWS = [
(
["red", "green"],
[1, 2],
[
(
12,
[
datetime.datetime(2018, 12, 1, 12, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2018, 12, 1, 13, 0, 0, tzinfo=pytz.utc),
],
[1.25, 2.5],
),
{
"score": 13,
"times": [
datetime.datetime(2018, 12, 2, 12, 0, 0, tzinfo=pytz.utc),
datetime.datetime(2018, 12, 2, 13, 0, 0, tzinfo=pytz.utc),
],
"distances": [-1.25, -2.5],
},
],
),
{"color": None, "items": [], "structs": [(None, [], [3.5])]},
]
SENT = {
"rows": [
{
"json": {
"color": ["red", "green"],
"items": ["1", "2"],
"structs": [
{
"score": "12",
"times": [
1543665600.0, # 2018-12-01 12:00 UTC
1543669200.0, # 2018-12-01 13:00 UTC
],
"distances": [1.25, 2.5],
},
{
"score": "13",
"times": [
1543752000.0, # 2018-12-02 12:00 UTC
1543755600.0, # 2018-12-02 13:00 UTC
],
"distances": [-1.25, -2.5],
},
],
},
"insertId": "0",
},
{
"json": {
"color": None,
"items": [],
"structs": [{"score": None, "times": [], "distances": [3.5]}],
},
"insertId": "1",
},
]
}
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
errors = client.insert_rows(table, ROWS)
self.assertEqual(len(errors), 0)
conn.api_request.assert_called_once_with(
method="POST", path="/%s" % PATH, data=SENT
)
def test_insert_rows_w_record_schema(self):
from google.cloud.bigquery.schema import SchemaField
PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({})
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
area_code = SchemaField("area_code", "STRING", "REQUIRED")
local_number = SchemaField("local_number", "STRING", "REQUIRED")
rank = SchemaField("rank", "INTEGER", "REQUIRED")
phone = SchemaField(
"phone", "RECORD", mode="NULLABLE", fields=[area_code, local_number, rank]
)
ROWS = [
(
"Phred Phlyntstone",
{"area_code": "800", "local_number": "555-1212", "rank": 1},
),
("Bharney Rhubble", ("877", "768-5309", 2)),
("Wylma Phlyntstone", None),
]
SENT = {
"rows": [
{
"json": {
"full_name": "Phred Phlyntstone",
"phone": {
"area_code": "800",
"local_number": "555-1212",
"rank": "1",
},
},
"insertId": "0",
},
{
"json": {
"full_name": "Bharney Rhubble",
"phone": {
"area_code": "877",
"local_number": "768-5309",
"rank": "2",
},
},
"insertId": "1",
},
{
"json": {"full_name": "Wylma Phlyntstone", "phone": None},
"insertId": "2",
},
]
}
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
errors = client.insert_rows(
self.TABLE_REF, ROWS, selected_fields=[full_name, phone]
)
self.assertEqual(len(errors), 0)
conn.api_request.assert_called_once_with(
method="POST", path="/%s" % PATH, data=SENT
)
def test_insert_rows_w_explicit_none_insert_ids(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
PATH = "projects/{}/datasets/{}/tables/{}/insertAll".format(
self.PROJECT, self.DS_ID, self.TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({})
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
table = Table(self.TABLE_REF, schema=schema)
ROWS = [
{"full_name": "Phred Phlyntstone", "age": 32},
{"full_name": "Bharney Rhubble", "age": 33},
]
def _row_data(row):
row["age"] = str(row["age"])
return row
SENT = {"rows": [{"json": _row_data(row), "insertId": None} for row in ROWS]}
errors = client.insert_rows(table, ROWS, row_ids=[None] * len(ROWS))
self.assertEqual(len(errors), 0)
conn.api_request.assert_called_once_with(
method="POST", path="/{}".format(PATH), data=SENT
)
def test_insert_rows_errors(self):
from google.cloud.bigquery.table import Table
ROWS = [
("Phred Phlyntstone", 32, True),
("Bharney Rhubble", 33, False),
("Wylma Phlyntstone", 29, True),
("Bhettye Rhubble", 27, True),
]
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
# table ref with no selected fields
with self.assertRaises(ValueError):
client.insert_rows(self.TABLE_REF, ROWS)
# table with no schema
with self.assertRaises(ValueError):
client.insert_rows(Table(self.TABLE_REF), ROWS)
# neither Table nor tableReference
with self.assertRaises(TypeError):
client.insert_rows(1, ROWS)
def test_insert_rows_w_numeric(self):
from google.cloud.bigquery import table
from google.cloud.bigquery.schema import SchemaField
project = "PROJECT"
ds_id = "DS_ID"
table_id = "TABLE_ID"
creds = _make_credentials()
http = object()
client = self._make_one(project=project, credentials=creds, _http=http)
conn = client._connection = make_connection({})
table_ref = DatasetReference(project, ds_id).table(table_id)
schema = [SchemaField("account", "STRING"), SchemaField("balance", "NUMERIC")]
insert_table = table.Table(table_ref, schema=schema)
rows = [
("Savings", decimal.Decimal("23.47")),
("Checking", decimal.Decimal("1.98")),
("Mortgage", decimal.Decimal("-12345678909.87654321")),
]
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(rows)))):
errors = client.insert_rows(insert_table, rows)
self.assertEqual(len(errors), 0)
rows_json = [
{"account": "Savings", "balance": "23.47"},
{"account": "Checking", "balance": "1.98"},
{"account": "Mortgage", "balance": "-12345678909.87654321"},
]
sent = {
"rows": [
{"json": row, "insertId": str(i)} for i, row in enumerate(rows_json)
]
}
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/{}/datasets/{}/tables/{}/insertAll".format(
project, ds_id, table_id
),
data=sent,
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_insert_rows_from_dataframe(self):
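        """DataFrame rows are split into ``chunk_size`` batches, with one
        ``insertAll`` request (and one error list) per chunk."""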
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
API_PATH = "/projects/{}/datasets/{}/tables/{}/insertAll".format(
self.PROJECT, self.DS_ID, self.TABLE_REF.table_id
)
dataframe = pandas.DataFrame(
[
{"name": u"Little One", "age": 10, "adult": False},
{"name": u"Young Gun", "age": 20, "adult": True},
{"name": u"Dad", "age": 30, "adult": True},
{"name": u"Stranger", "age": 40, "adult": True},
]
)
# create client
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({}, {})
# create table
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField("adult", "BOOLEAN", mode="REQUIRED"),
]
table = Table(self.TABLE_REF, schema=schema)
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(dataframe)))):
error_info = client.insert_rows_from_dataframe(
table, dataframe, chunk_size=3
)
self.assertEqual(len(error_info), 2)
for chunk_errors in error_info:
assert chunk_errors == []
EXPECTED_SENT_DATA = [
{
"rows": [
{
"insertId": "0",
"json": {"name": "Little One", "age": "10", "adult": "false"},
},
{
"insertId": "1",
"json": {"name": "Young Gun", "age": "20", "adult": "true"},
},
{
"insertId": "2",
"json": {"name": "Dad", "age": "30", "adult": "true"},
},
]
},
{
"rows": [
{
"insertId": "3",
"json": {"name": "Stranger", "age": "40", "adult": "true"},
}
]
},
]
actual_calls = conn.api_request.call_args_list
for call, expected_data in six.moves.zip_longest(
actual_calls, EXPECTED_SENT_DATA
):
expected_call = mock.call(method="POST", path=API_PATH, data=expected_data)
assert call == expected_call
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_insert_rows_from_dataframe_many_columns(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
API_PATH = "/projects/{}/datasets/{}/tables/{}/insertAll".format(
self.PROJECT, self.DS_ID, self.TABLE_REF.table_id
)
        N_COLUMNS = 256  # must be >= 256 to exercise the many-columns case
dataframe = pandas.DataFrame(
[{"foo_{}".format(i): "bar_{}".format(i) for i in range(N_COLUMNS)}]
)
# create client
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({}, {})
# create table
schema = [SchemaField("foo_{}".format(i), "STRING") for i in range(N_COLUMNS)]
table = Table(self.TABLE_REF, schema=schema)
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(dataframe)))):
error_info = client.insert_rows_from_dataframe(
table, dataframe, chunk_size=3
)
assert len(error_info) == 1
assert error_info[0] == []
EXPECTED_SENT_DATA = {
"rows": [
{
"insertId": "0",
"json": {
"foo_{}".format(i): "bar_{}".format(i) for i in range(N_COLUMNS)
},
}
]
}
expected_call = mock.call(method="POST", path=API_PATH, data=EXPECTED_SENT_DATA)
actual_calls = conn.api_request.call_args_list
assert len(actual_calls) == 1
assert actual_calls[0] == expected_call
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_insert_rows_from_dataframe_w_explicit_none_insert_ids(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
API_PATH = "/projects/{}/datasets/{}/tables/{}/insertAll".format(
self.PROJECT, self.DS_ID, self.TABLE_REF.table_id
)
dataframe = pandas.DataFrame(
[
{"name": u"Little One", "adult": False},
{"name": u"Young Gun", "adult": True},
]
)
# create client
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({}, {})
# create table
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("adult", "BOOLEAN", mode="REQUIRED"),
]
table = Table(self.TABLE_REF, schema=schema)
error_info = client.insert_rows_from_dataframe(
table, dataframe, row_ids=[None] * len(dataframe)
)
self.assertEqual(len(error_info), 1)
assert error_info[0] == [] # no chunk errors
EXPECTED_SENT_DATA = {
"rows": [
{"insertId": None, "json": {"name": "Little One", "adult": "false"}},
{"insertId": None, "json": {"name": "Young Gun", "adult": "true"}},
]
}
actual_calls = conn.api_request.call_args_list
assert len(actual_calls) == 1
assert actual_calls[0] == mock.call(
method="POST", path=API_PATH, data=EXPECTED_SENT_DATA
)
def test_insert_rows_json(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
PROJECT = "PROJECT"
DS_ID = "DS_ID"
TABLE_ID = "TABLE_ID"
PATH = "projects/%s/datasets/%s/tables/%s/insertAll" % (
PROJECT,
DS_ID,
TABLE_ID,
)
creds = _make_credentials()
http = object()
client = self._make_one(project=PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection({})
table_ref = DatasetReference(PROJECT, DS_ID).table(TABLE_ID)
schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField("joined", "TIMESTAMP", mode="NULLABLE"),
]
table = Table(table_ref, schema=schema)
ROWS = [
{
"full_name": "Phred Phlyntstone",
"age": "32",
"joined": "2015-07-24T19:53:19.006000Z",
},
{"full_name": "Bharney Rhubble", "age": "33", "joined": 1437767600.006},
{"full_name": "Wylma Phlyntstone", "age": "29", "joined": 1437767601.006},
{"full_name": "Bhettye Rhubble", "age": "27", "joined": None},
]
SENT = {
"rows": [{"json": row, "insertId": str(i)} for i, row in enumerate(ROWS)]
}
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(ROWS)))):
errors = client.insert_rows_json(table, ROWS)
self.assertEqual(len(errors), 0)
conn.api_request.assert_called_once_with(
method="POST", path="/%s" % PATH, data=SENT
)
def test_insert_rows_json_with_string_id(self):
rows = [{"col1": "val1"}]
creds = _make_credentials()
http = object()
client = self._make_one(
project="default-project", credentials=creds, _http=http
)
conn = client._connection = make_connection({})
with mock.patch("uuid.uuid4", side_effect=map(str, range(len(rows)))):
errors = client.insert_rows_json("proj.dset.tbl", rows)
self.assertEqual(len(errors), 0)
expected = {
"rows": [{"json": row, "insertId": str(i)} for i, row in enumerate(rows)]
}
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/proj/datasets/dset/tables/tbl/insertAll",
data=expected,
)
def test_insert_rows_json_w_explicit_none_insert_ids(self):
rows = [{"col1": "val1"}, {"col2": "val2"}]
creds = _make_credentials()
http = object()
client = self._make_one(
project="default-project", credentials=creds, _http=http
)
conn = client._connection = make_connection({})
errors = client.insert_rows_json(
"proj.dset.tbl", rows, row_ids=[None] * len(rows),
)
self.assertEqual(len(errors), 0)
expected = {"rows": [{"json": row, "insertId": None} for row in rows]}
conn.api_request.assert_called_once_with(
method="POST",
path="/projects/proj/datasets/dset/tables/tbl/insertAll",
data=expected,
)
def test_list_partitions(self):
from google.cloud.bigquery.table import Table
rows = 3
meta_info = _make_list_partitons_meta_info(
self.PROJECT, self.DS_ID, self.TABLE_ID, rows
)
data = {
"totalRows": str(rows),
"rows": [
{"f": [{"v": "20180101"}]},
{"f": [{"v": "20180102"}]},
{"f": [{"v": "20180103"}]},
],
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
client._connection = make_connection(meta_info, data)
table = Table(self.TABLE_REF)
partition_list = client.list_partitions(table)
self.assertEqual(len(partition_list), rows)
self.assertIn("20180102", partition_list)
def test_list_partitions_with_string_id(self):
meta_info = _make_list_partitons_meta_info(
self.PROJECT, self.DS_ID, self.TABLE_ID, 0
)
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
client._connection = make_connection(meta_info, {})
partition_list = client.list_partitions(
"{}.{}".format(self.DS_ID, self.TABLE_ID)
)
self.assertEqual(len(partition_list), 0)
def test_list_rows(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import Row
PATH = "projects/%s/datasets/%s/tables/%s/data" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
WHEN_TS = 1437767599.006
WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace(tzinfo=UTC)
WHEN_1 = WHEN + datetime.timedelta(seconds=1)
WHEN_2 = WHEN + datetime.timedelta(seconds=2)
ROWS = 1234
TOKEN = "TOKEN"
def _bigquery_timestamp_float_repr(ts_float):
# Preserve microsecond precision for E+09 timestamps
return "%0.15E" % (ts_float,)
DATA = {
"totalRows": str(ROWS),
"pageToken": TOKEN,
"rows": [
{
"f": [
{"v": "Phred Phlyntstone"},
{"v": "32"},
{"v": _bigquery_timestamp_float_repr(WHEN_TS)},
]
},
{
"f": [
{"v": "Bharney Rhubble"},
{"v": "33"},
{"v": _bigquery_timestamp_float_repr(WHEN_TS + 1)},
]
},
{
"f": [
{"v": "Wylma Phlyntstone"},
{"v": "29"},
{"v": _bigquery_timestamp_float_repr(WHEN_TS + 2)},
]
},
{"f": [{"v": "Bhettye Rhubble"}, {"v": None}, {"v": None}]},
],
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(DATA, DATA)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="NULLABLE")
joined = SchemaField("joined", "TIMESTAMP", mode="NULLABLE")
table = Table(self.TABLE_REF, schema=[full_name, age, joined])
iterator = client.list_rows(table)
page = six.next(iterator.pages)
rows = list(page)
total_rows = iterator.total_rows
page_token = iterator.next_page_token
f2i = {"full_name": 0, "age": 1, "joined": 2}
self.assertEqual(len(rows), 4)
self.assertEqual(rows[0], Row(("Phred Phlyntstone", 32, WHEN), f2i))
self.assertEqual(rows[1], Row(("Bharney Rhubble", 33, WHEN_1), f2i))
self.assertEqual(rows[2], Row(("Wylma Phlyntstone", 29, WHEN_2), f2i))
self.assertEqual(rows[3], Row(("Bhettye Rhubble", None, None), f2i))
self.assertEqual(total_rows, ROWS)
self.assertEqual(page_token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/%s" % PATH, query_params={}
)
def test_list_rows_empty_table(self):
response = {"totalRows": "0", "rows": []}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
client._connection = make_connection(response, response)
# Table that has no schema because it's an empty table.
rows = client.list_rows(
# Test with using a string for the table ID.
"{}.{}.{}".format(
self.TABLE_REF.project,
self.TABLE_REF.dataset_id,
self.TABLE_REF.table_id,
),
selected_fields=[],
)
# When a table reference / string and selected_fields is provided,
# total_rows can't be populated until iteration starts.
self.assertIsNone(rows.total_rows)
self.assertEqual(tuple(rows), ())
self.assertEqual(rows.total_rows, 0)
def test_list_rows_query_params(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
table = Table(
self.TABLE_REF, schema=[SchemaField("age", "INTEGER", mode="NULLABLE")]
)
tests = [
({}, {}),
({"start_index": 1}, {"startIndex": 1}),
({"max_results": 2}, {"maxResults": 2}),
({"start_index": 1, "max_results": 2}, {"startIndex": 1, "maxResults": 2}),
]
conn = client._connection = make_connection(*len(tests) * [{}])
for i, test in enumerate(tests):
iterator = client.list_rows(table, **test[0])
six.next(iterator.pages)
req = conn.api_request.call_args_list[i]
self.assertEqual(req[1]["query_params"], test[1], "for kwargs %s" % test[0])
def test_list_rows_repeated_fields(self):
from google.cloud.bigquery.schema import SchemaField
PATH = "projects/%s/datasets/%s/tables/%s/data" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
ROWS = 1234
TOKEN = "TOKEN"
DATA = {
"totalRows": ROWS,
"pageToken": TOKEN,
"rows": [
{
"f": [
{"v": [{"v": "red"}, {"v": "green"}]},
{
"v": [
{
"v": {
"f": [
{"v": [{"v": "1"}, {"v": "2"}]},
{"v": [{"v": "3.1415"}, {"v": "1.414"}]},
]
}
}
]
},
]
}
],
}
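# Editorial note on the fixture above: in the tabledata JSON encoding, "f"
# holds a row's list of field cells and "v" holds a single cell's value;
# REPEATED fields arrive as lists of {"v": ...} items and RECORD fields as
# nested {"f": [...]} objects, which is why the repeated-struct fixture is
# so deeply nested.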
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(DATA)
color = SchemaField("color", "STRING", mode="REPEATED")
index = SchemaField("index", "INTEGER", "REPEATED")
score = SchemaField("score", "FLOAT", "REPEATED")
struct = SchemaField("struct", "RECORD", mode="REPEATED", fields=[index, score])
iterator = client.list_rows(self.TABLE_REF, selected_fields=[color, struct])
page = six.next(iterator.pages)
rows = list(page)
total_rows = iterator.total_rows
page_token = iterator.next_page_token
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], ["red", "green"])
self.assertEqual(rows[0][1], [{"index": [1, 2], "score": [3.1415, 1.414]}])
self.assertEqual(total_rows, ROWS)
self.assertEqual(page_token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET",
path="/%s" % PATH,
query_params={"selectedFields": "color,struct"},
)
def test_list_rows_w_record_schema(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
PATH = "projects/%s/datasets/%s/tables/%s/data" % (
self.PROJECT,
self.DS_ID,
self.TABLE_ID,
)
ROWS = 1234
TOKEN = "TOKEN"
DATA = {
"totalRows": ROWS,
"pageToken": TOKEN,
"rows": [
{
"f": [
{"v": "Phred Phlyntstone"},
{"v": {"f": [{"v": "800"}, {"v": "555-1212"}, {"v": 1}]}},
]
},
{
"f": [
{"v": "Bharney Rhubble"},
{"v": {"f": [{"v": "877"}, {"v": "768-5309"}, {"v": 2}]}},
]
},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": None}]},
],
}
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(DATA)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
area_code = SchemaField("area_code", "STRING", "REQUIRED")
local_number = SchemaField("local_number", "STRING", "REQUIRED")
rank = SchemaField("rank", "INTEGER", "REQUIRED")
phone = SchemaField(
"phone", "RECORD", mode="NULLABLE", fields=[area_code, local_number, rank]
)
table = Table(self.TABLE_REF, schema=[full_name, phone])
iterator = client.list_rows(table)
page = six.next(iterator.pages)
rows = list(page)
total_rows = iterator.total_rows
page_token = iterator.next_page_token
self.assertEqual(len(rows), 3)
self.assertEqual(rows[0][0], "Phred Phlyntstone")
self.assertEqual(
rows[0][1], {"area_code": "800", "local_number": "555-1212", "rank": 1}
)
self.assertEqual(rows[1][0], "Bharney Rhubble")
self.assertEqual(
rows[1][1], {"area_code": "877", "local_number": "768-5309", "rank": 2}
)
self.assertEqual(rows[2][0], "Wylma Phlyntstone")
self.assertIsNone(rows[2][1])
self.assertEqual(total_rows, ROWS)
self.assertEqual(page_token, TOKEN)
conn.api_request.assert_called_once_with(
method="GET", path="/%s" % PATH, query_params={}
)
def test_list_rows_with_missing_schema(self):
from google.cloud.bigquery.table import Table, TableListItem
table_path = "/projects/{}/datasets/{}/tables/{}".format(
self.PROJECT, self.DS_ID, self.TABLE_ID
)
tabledata_path = "{}/data".format(table_path)
table_list_item_data = {
"id": "%s:%s:%s" % (self.PROJECT, self.DS_ID, self.TABLE_ID),
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_ID,
},
}
table_data = copy.deepcopy(table_list_item_data)
# Intentionally make wrong, since total_rows can update during iteration.
table_data["numRows"] = 2
table_data["schema"] = {
"fields": [
{"name": "name", "type": "STRING"},
{"name": "age", "type": "INTEGER"},
]
}
rows_data = {
"totalRows": 3,
"pageToken": None,
"rows": [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "31"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": None}]},
],
}
creds = _make_credentials()
http = object()
schemaless_tables = (
"{}.{}".format(self.DS_ID, self.TABLE_ID),
self.TABLE_REF,
Table(self.TABLE_REF),
TableListItem(table_list_item_data),
)
for table in schemaless_tables:
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
conn = client._connection = make_connection(table_data, rows_data)
row_iter = client.list_rows(table)
conn.api_request.assert_called_once_with(method="GET", path=table_path)
conn.api_request.reset_mock()
self.assertEqual(row_iter.total_rows, 2, msg=repr(table))
rows = list(row_iter)
conn.api_request.assert_called_once_with(
method="GET", path=tabledata_path, query_params={}
)
self.assertEqual(row_iter.total_rows, 3, msg=repr(table))
self.assertEqual(rows[0].name, "Phred Phlyntstone", msg=repr(table))
self.assertEqual(rows[1].age, 31, msg=repr(table))
self.assertIsNone(rows[2].age, msg=repr(table))
def test_list_rows_error(self):
creds = _make_credentials()
http = object()
client = self._make_one(project=self.PROJECT, credentials=creds, _http=http)
# neither Table nor tableReference
with self.assertRaises(TypeError):
client.list_rows(1)
class Test_make_job_id(unittest.TestCase):
def _call_fut(self, job_id, prefix=None):
from google.cloud.bigquery.client import _make_job_id
return _make_job_id(job_id, prefix=prefix)
def test__make_job_id_wo_suffix(self):
job_id = self._call_fut("job_id")
self.assertEqual(job_id, "job_id")
def test__make_job_id_w_suffix(self):
with mock.patch("uuid.uuid4", side_effect=["212345"]):
job_id = self._call_fut(None, prefix="job_id")
self.assertEqual(job_id, "job_id212345")
def test__make_random_job_id(self):
with mock.patch("uuid.uuid4", side_effect=["212345"]):
job_id = self._call_fut(None)
self.assertEqual(job_id, "212345")
def test__make_job_id_w_job_id_overrides_prefix(self):
job_id = self._call_fut("job_id", prefix="unused_prefix")
self.assertEqual(job_id, "job_id")
class TestClientUpload(object):
# NOTE: This is a "partner" to `TestClient` meant to test some of the
# "load_table_from_file" portions of `Client`. It also uses
# `pytest`-style tests rather than `unittest`-style.
from google.cloud.bigquery.job import SourceFormat
TABLE_REF = DatasetReference("project_id", "test_dataset").table("test_table")
LOCATION = "us-central"
@staticmethod
def _make_client(transport=None, location=None):
from google.cloud.bigquery import _http
from google.cloud.bigquery import client
cl = client.Client(
project="project_id",
credentials=_make_credentials(),
_http=transport,
location=location,
)
cl._connection = mock.create_autospec(_http.Connection, instance=True)
return cl
@staticmethod
def _make_response(status_code, content="", headers={}):
"""Make a mock HTTP response."""
import requests
response = requests.Response()
response.request = requests.Request("POST", "http://example.com").prepare()
response._content = content.encode("utf-8")
response.headers.update(headers)
response.status_code = status_code
return response
@classmethod
def _make_do_upload_patch(cls, client, method, resource={}, side_effect=None):
"""Patches the low-level upload helpers."""
if side_effect is None:
side_effect = [
cls._make_response(
http_client.OK,
json.dumps(resource),
{"Content-Type": "application/json"},
)
]
return mock.patch.object(client, method, side_effect=side_effect, autospec=True)
EXPECTED_CONFIGURATION = {
"jobReference": {"projectId": "project_id", "jobId": "job_id"},
"configuration": {
"load": {
"sourceFormat": SourceFormat.CSV,
"destinationTable": {
"projectId": "project_id",
"datasetId": "test_dataset",
"tableId": "test_table",
},
}
},
}
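# Editorial note: this is the job resource the upload helpers are expected
# to POST -- a load job targeting project_id.test_dataset.test_table with
# CSV as the source format.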
@staticmethod
def _make_file_obj():
return io.BytesIO(b"hello, is it me you're looking for?")
def _make_gzip_file_obj(self, writable):
if writable:
return gzip.GzipFile(mode="w", fileobj=io.BytesIO())
else:
return gzip.GzipFile(mode="r", fileobj=self._make_file_obj())
@staticmethod
def _make_config():
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.job import SourceFormat
config = LoadJobConfig()
config.source_format = SourceFormat.CSV
return config
# High-level tests
def test_load_table_from_file_resumable(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
client = self._make_client()
file_obj = self._make_file_obj()
do_upload_patch = self._make_do_upload_patch(
client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
)
with do_upload_patch as do_upload:
client.load_table_from_file(
file_obj,
self.TABLE_REF,
job_id="job_id",
job_config=self._make_config(),
)
do_upload.assert_called_once_with(
file_obj, self.EXPECTED_CONFIGURATION, _DEFAULT_NUM_RETRIES
)
def test_load_table_from_file_w_explicit_project(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
client = self._make_client()
file_obj = self._make_file_obj()
do_upload_patch = self._make_do_upload_patch(
client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
)
with do_upload_patch as do_upload:
client.load_table_from_file(
file_obj,
self.TABLE_REF,
job_id="job_id",
project="other-project",
location=self.LOCATION,
job_config=self._make_config(),
)
expected_resource = copy.deepcopy(self.EXPECTED_CONFIGURATION)
expected_resource["jobReference"]["location"] = self.LOCATION
expected_resource["jobReference"]["projectId"] = "other-project"
do_upload.assert_called_once_with(
file_obj, expected_resource, _DEFAULT_NUM_RETRIES
)
def test_load_table_from_file_w_client_location(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
client = self._make_client(location=self.LOCATION)
file_obj = self._make_file_obj()
do_upload_patch = self._make_do_upload_patch(
client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
)
with do_upload_patch as do_upload:
client.load_table_from_file(
file_obj,
# Test with string for table ID.
"{}.{}.{}".format(
self.TABLE_REF.project,
self.TABLE_REF.dataset_id,
self.TABLE_REF.table_id,
),
job_id="job_id",
project="other-project",
job_config=self._make_config(),
)
expected_resource = copy.deepcopy(self.EXPECTED_CONFIGURATION)
expected_resource["jobReference"]["location"] = self.LOCATION
expected_resource["jobReference"]["projectId"] = "other-project"
do_upload.assert_called_once_with(
file_obj, expected_resource, _DEFAULT_NUM_RETRIES
)
def test_load_table_from_file_resumable_metadata(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import WriteDisposition
client = self._make_client()
file_obj = self._make_file_obj()
config = self._make_config()
config.allow_jagged_rows = False
config.allow_quoted_newlines = False
config.create_disposition = CreateDisposition.CREATE_IF_NEEDED
config.encoding = "utf8"
config.field_delimiter = ","
config.ignore_unknown_values = False
config.max_bad_records = 0
config.quote_character = '"'
config.skip_leading_rows = 1
config.write_disposition = WriteDisposition.WRITE_APPEND
config.null_marker = r"\N"
expected_config = {
"jobReference": {"projectId": "project_id", "jobId": "job_id"},
"configuration": {
"load": {
"destinationTable": {
"projectId": self.TABLE_REF.project,
"datasetId": self.TABLE_REF.dataset_id,
"tableId": self.TABLE_REF.table_id,
},
"sourceFormat": config.source_format,
"allowJaggedRows": config.allow_jagged_rows,
"allowQuotedNewlines": config.allow_quoted_newlines,
"createDisposition": config.create_disposition,
"encoding": config.encoding,
"fieldDelimiter": config.field_delimiter,
"ignoreUnknownValues": config.ignore_unknown_values,
"maxBadRecords": config.max_bad_records,
"quote": config.quote_character,
"skipLeadingRows": str(config.skip_leading_rows),
"writeDisposition": config.write_disposition,
"nullMarker": config.null_marker,
}
},
}
do_upload_patch = self._make_do_upload_patch(
client, "_do_resumable_upload", expected_config
)
with do_upload_patch as do_upload:
client.load_table_from_file(
file_obj, self.TABLE_REF, job_id="job_id", job_config=config
)
do_upload.assert_called_once_with(
file_obj, expected_config, _DEFAULT_NUM_RETRIES
)
def test_load_table_from_file_multipart(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
client = self._make_client()
file_obj = self._make_file_obj()
file_obj_size = 10
config = self._make_config()
do_upload_patch = self._make_do_upload_patch(
client, "_do_multipart_upload", self.EXPECTED_CONFIGURATION
)
with do_upload_patch as do_upload:
client.load_table_from_file(
file_obj,
self.TABLE_REF,
job_id="job_id",
job_config=config,
size=file_obj_size,
)
do_upload.assert_called_once_with(
file_obj, self.EXPECTED_CONFIGURATION, file_obj_size, _DEFAULT_NUM_RETRIES
)
def test_load_table_from_file_with_retries(self):
client = self._make_client()
file_obj = self._make_file_obj()
num_retries = 20
do_upload_patch = self._make_do_upload_patch(
client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
)
with do_upload_patch as do_upload:
client.load_table_from_file(
file_obj,
self.TABLE_REF,
num_retries=num_retries,
job_id="job_id",
job_config=self._make_config(),
)
do_upload.assert_called_once_with(
file_obj, self.EXPECTED_CONFIGURATION, num_retries
)
def test_load_table_from_file_with_rewind(self):
client = self._make_client()
file_obj = self._make_file_obj()
file_obj.seek(2)
with self._make_do_upload_patch(
client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
):
client.load_table_from_file(file_obj, self.TABLE_REF, rewind=True)
assert file_obj.tell() == 0
def test_load_table_from_file_with_readable_gzip(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
client = self._make_client()
gzip_file = self._make_gzip_file_obj(writable=False)
do_upload_patch = self._make_do_upload_patch(
client, "_do_resumable_upload", self.EXPECTED_CONFIGURATION
)
with do_upload_patch as do_upload:
client.load_table_from_file(
gzip_file,
self.TABLE_REF,
job_id="job_id",
job_config=self._make_config(),
)
do_upload.assert_called_once_with(
gzip_file, self.EXPECTED_CONFIGURATION, _DEFAULT_NUM_RETRIES
)
def test_load_table_from_file_with_writable_gzip(self):
client = self._make_client()
gzip_file = self._make_gzip_file_obj(writable=True)
with pytest.raises(ValueError):
client.load_table_from_file(
gzip_file,
self.TABLE_REF,
job_id="job_id",
job_config=self._make_config(),
)
def test_load_table_from_file_failure(self):
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
client = self._make_client()
file_obj = self._make_file_obj()
response = self._make_response(
content="Someone is already in this spot.", status_code=http_client.CONFLICT
)
do_upload_patch = self._make_do_upload_patch(
client, "_do_resumable_upload", side_effect=InvalidResponse(response)
)
with do_upload_patch, pytest.raises(exceptions.Conflict) as exc_info:
client.load_table_from_file(file_obj, self.TABLE_REF, rewind=True)
assert response.text in exc_info.value.message
assert exc_info.value.errors == []
def test_load_table_from_file_bad_mode(self):
client = self._make_client()
file_obj = mock.Mock(spec=["mode"])
file_obj.mode = "x"
with pytest.raises(ValueError):
client.load_table_from_file(file_obj, self.TABLE_REF)
def test_load_table_from_file_w_invalid_job_config(self):
from google.cloud.bigquery import job
client = self._make_client()
gzip_file = self._make_gzip_file_obj(writable=True)
config = job.QueryJobConfig()
with pytest.raises(TypeError) as exc:
client.load_table_from_file(
gzip_file, self.TABLE_REF, job_id="job_id", job_config=config
)
err_msg = str(exc.value)
assert "Expected an instance of LoadJobConfig" in err_msg
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
dataframe = pandas.DataFrame(records)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
return_value=mock.Mock(
schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
),
)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with load_patch as load_table_from_file, get_table_patch:
client.load_table_from_dataframe(dataframe, self.TABLE_REF)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=None,
project=None,
job_config=mock.ANY,
)
sent_file = load_table_from_file.mock_calls[0][1][1]
assert sent_file.closed
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_client_location(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client(location=self.LOCATION)
records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
dataframe = pandas.DataFrame(records)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
return_value=mock.Mock(
schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
),
)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with load_patch as load_table_from_file, get_table_patch:
client.load_table_from_dataframe(dataframe, self.TABLE_REF)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
sent_file = load_table_from_file.mock_calls[0][1][1]
assert sent_file.closed
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_custom_job_config(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
dataframe = pandas.DataFrame(records)
job_config = job.LoadJobConfig(
write_disposition=job.WriteDisposition.WRITE_TRUNCATE
)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
return_value=mock.Mock(
schema=[SchemaField("id", "INTEGER"), SchemaField("age", "INTEGER")]
),
)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with load_patch as load_table_from_file, get_table_patch as get_table:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
# no need to fetch and inspect table schema for WRITE_TRUNCATE jobs
assert not get_table.called
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
assert sent_config.write_disposition == job.WriteDisposition.WRITE_TRUNCATE
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_automatic_schema(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
df_data = collections.OrderedDict(
[
("int_col", [1, 2, 3]),
("float_col", [1.0, 2.0, 3.0]),
("bool_col", [True, False, True]),
(
"dt_col",
pandas.Series(
[
datetime.datetime(2010, 1, 2, 3, 44, 50),
datetime.datetime(2011, 2, 3, 14, 50, 59),
datetime.datetime(2012, 3, 14, 15, 16),
],
dtype="datetime64[ns]",
),
),
(
"ts_col",
pandas.Series(
[
datetime.datetime(2010, 1, 2, 3, 44, 50),
datetime.datetime(2011, 2, 3, 14, 50, 59),
datetime.datetime(2012, 3, 14, 15, 16),
],
dtype="datetime64[ns]",
).dt.tz_localize(pytz.utc),
),
]
)
dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
side_effect=google.api_core.exceptions.NotFound("Table not found"),
)
with load_patch as load_table_from_file, get_table_patch:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, location=self.LOCATION
)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
assert tuple(sent_config.schema) == (
SchemaField("int_col", "INTEGER"),
SchemaField("float_col", "FLOAT"),
SchemaField("bool_col", "BOOLEAN"),
SchemaField("dt_col", "DATETIME"),
SchemaField("ts_col", "TIMESTAMP"),
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_index_and_auto_schema(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
df_data = collections.OrderedDict(
[("int_col", [10, 20, 30]), ("float_col", [1.0, 2.0, 3.0])]
)
dataframe = pandas.DataFrame(
df_data,
index=pandas.Index(name="unique_name", data=["one", "two", "three"]),
)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
return_value=mock.Mock(
schema=[
SchemaField("int_col", "INTEGER"),
SchemaField("float_col", "FLOAT"),
SchemaField("unique_name", "STRING"),
]
),
)
with load_patch as load_table_from_file, get_table_patch:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, location=self.LOCATION
)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
sent_schema = sorted(sent_config.schema, key=operator.attrgetter("name"))
expected_sent_schema = [
SchemaField("float_col", "FLOAT"),
SchemaField("int_col", "INTEGER"),
SchemaField("unique_name", "STRING"),
]
assert sent_schema == expected_sent_schema
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_unknown_table(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
client = self._make_client()
records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
dataframe = pandas.DataFrame(records)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
side_effect=google.api_core.exceptions.NotFound("Table not found"),
)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with load_patch as load_table_from_file, get_table_patch:
# there should be no error
client.load_table_from_dataframe(dataframe, self.TABLE_REF)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=None,
project=None,
job_config=mock.ANY,
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_load_table_from_dataframe_no_schema_warning_wo_pyarrow(self):
client = self._make_client()
# Pick at least one column type that translates to Pandas dtype
# "object". A string column matches that.
records = [{"name": "Monty", "age": 100}, {"name": "Python", "age": 60}]
dataframe = pandas.DataFrame(records)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
side_effect=google.api_core.exceptions.NotFound("Table not found"),
)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
pyarrow_patch = mock.patch("google.cloud.bigquery.client.pyarrow", None)
pyarrow_patch_helpers = mock.patch(
"google.cloud.bigquery._pandas_helpers.pyarrow", None
)
catch_warnings = warnings.catch_warnings(record=True)
with get_table_patch, load_patch, pyarrow_patch, pyarrow_patch_helpers, catch_warnings as warned:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, location=self.LOCATION
)
matches = [
warning
for warning in warned
if warning.category in (DeprecationWarning, PendingDeprecationWarning)
and "could not be detected" in str(warning)
and "please provide a schema" in str(warning)
]
assert matches, "A missing schema deprecation warning was not raised."
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_struct_fields_error(self):
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
records = [{"float_column": 3.14, "struct_column": [{"foo": 1}, {"bar": -1}]}]
dataframe = pandas.DataFrame(data=records)
schema = [
SchemaField("float_column", "FLOAT"),
SchemaField(
"agg_col",
"RECORD",
fields=[SchemaField("foo", "INTEGER"), SchemaField("bar", "INTEGER")],
),
]
job_config = job.LoadJobConfig(schema=schema)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with pytest.raises(ValueError) as exc_info, load_patch:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
err_msg = str(exc_info.value)
assert "struct" in err_msg
assert "not support" in err_msg
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_partial_schema(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
df_data = collections.OrderedDict(
[
("int_col", [1, 2, 3]),
("int_as_float_col", [1.0, float("nan"), 3.0]),
("float_col", [1.0, 2.0, 3.0]),
("bool_col", [True, False, True]),
(
"dt_col",
pandas.Series(
[
datetime.datetime(2010, 1, 2, 3, 44, 50),
datetime.datetime(2011, 2, 3, 14, 50, 59),
datetime.datetime(2012, 3, 14, 15, 16),
],
dtype="datetime64[ns]",
),
),
(
"ts_col",
pandas.Series(
[
datetime.datetime(2010, 1, 2, 3, 44, 50),
datetime.datetime(2011, 2, 3, 14, 50, 59),
datetime.datetime(2012, 3, 14, 15, 16),
],
dtype="datetime64[ns]",
).dt.tz_localize(pytz.utc),
),
("string_col", [u"abc", None, u"def"]),
("bytes_col", [b"abc", b"def", None]),
]
)
dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
schema = (
SchemaField("int_as_float_col", "INTEGER"),
SchemaField("string_col", "STRING"),
SchemaField("bytes_col", "BYTES"),
)
job_config = job.LoadJobConfig(schema=schema)
with load_patch as load_table_from_file:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
assert tuple(sent_config.schema) == (
SchemaField("int_col", "INTEGER"),
SchemaField("int_as_float_col", "INTEGER"),
SchemaField("float_col", "FLOAT"),
SchemaField("bool_col", "BOOLEAN"),
SchemaField("dt_col", "DATETIME"),
SchemaField("ts_col", "TIMESTAMP"),
SchemaField("string_col", "STRING"),
SchemaField("bytes_col", "BYTES"),
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_partial_schema_extra_types(self):
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
df_data = collections.OrderedDict(
[
("int_col", [1, 2, 3]),
("int_as_float_col", [1.0, float("nan"), 3.0]),
("string_col", [u"abc", None, u"def"]),
]
)
dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
schema = (
SchemaField("int_as_float_col", "INTEGER"),
SchemaField("string_col", "STRING"),
SchemaField("unknown_col", "BYTES"),
)
job_config = job.LoadJobConfig(schema=schema)
with load_patch as load_table_from_file, pytest.raises(
ValueError
) as exc_context:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
load_table_from_file.assert_not_called()
message = str(exc_context.value)
assert "bq_schema contains fields not present in dataframe" in message
assert "unknown_col" in message
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_load_table_from_dataframe_w_partial_schema_missing_types(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
df_data = collections.OrderedDict(
[
("string_col", [u"abc", u"def", u"ghi"]),
("unknown_col", [b"jkl", None, b"mno"]),
]
)
dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
pyarrow_patch = mock.patch(
"google.cloud.bigquery._pandas_helpers.pyarrow", None
)
schema = (SchemaField("string_col", "STRING"),)
job_config = job.LoadJobConfig(schema=schema)
with pyarrow_patch, load_patch as load_table_from_file, warnings.catch_warnings(
record=True
) as warned:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
assert warned # there should be at least one warning
unknown_col_warnings = [
warning for warning in warned if "unknown_col" in str(warning)
]
assert unknown_col_warnings
assert unknown_col_warnings[0].category == UserWarning
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
assert sent_config.schema is None
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_schema_wo_pyarrow(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
records = [{"name": u"Monty", "age": 100}, {"name": u"Python", "age": 60}]
dataframe = pandas.DataFrame(records, columns=["name", "age"])
schema = (SchemaField("name", "STRING"), SchemaField("age", "INTEGER"))
job_config = job.LoadJobConfig(schema=schema)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
pyarrow_patch = mock.patch("google.cloud.bigquery.client.pyarrow", None)
with load_patch as load_table_from_file, pyarrow_patch, warnings.catch_warnings(
record=True
) as warned:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
assert warned # there should be at least one warning
for warning in warned:
assert "pyarrow" in str(warning)
assert warning.category in (DeprecationWarning, PendingDeprecationWarning)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.PARQUET
assert tuple(sent_config.schema) == schema
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_schema_arrow_custom_compression(self):
from google.cloud.bigquery import job
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
records = [{"name": u"Monty", "age": 100}, {"name": u"Python", "age": 60}]
dataframe = pandas.DataFrame(records)
schema = (SchemaField("name", "STRING"), SchemaField("age", "INTEGER"))
job_config = job.LoadJobConfig(schema=schema)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
to_parquet_patch = mock.patch(
"google.cloud.bigquery.client._pandas_helpers.dataframe_to_parquet",
autospec=True,
)
with load_patch, to_parquet_patch as fake_to_parquet:
client.load_table_from_dataframe(
dataframe,
self.TABLE_REF,
job_config=job_config,
location=self.LOCATION,
parquet_compression="LZ4",
)
call_args = fake_to_parquet.call_args
assert call_args is not None
assert call_args.kwargs.get("parquet_compression") == "LZ4"
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_wo_pyarrow_custom_compression(self):
client = self._make_client()
records = [{"id": 1, "age": 100}, {"id": 2, "age": 60}]
dataframe = pandas.DataFrame(records)
get_table_patch = mock.patch(
"google.cloud.bigquery.client.Client.get_table",
autospec=True,
side_effect=google.api_core.exceptions.NotFound("Table not found"),
)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
pyarrow_patch = mock.patch("google.cloud.bigquery.client.pyarrow", None)
to_parquet_patch = mock.patch.object(
dataframe, "to_parquet", wraps=dataframe.to_parquet
)
with load_patch, get_table_patch, pyarrow_patch, to_parquet_patch as to_parquet_spy:
client.load_table_from_dataframe(
dataframe,
self.TABLE_REF,
location=self.LOCATION,
parquet_compression="gzip",
)
call_args = to_parquet_spy.call_args
assert call_args is not None
assert call_args.kwargs.get("compression") == "gzip"
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_load_table_from_dataframe_w_nulls(self):
"""Test that a DataFrame with null columns can be uploaded if a
BigQuery schema is specified.
See: https://github.com/googleapis/google-cloud-python/issues/7370
"""
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
client = self._make_client()
records = [{"name": None, "age": None}, {"name": None, "age": None}]
dataframe = pandas.DataFrame(records, columns=["name", "age"])
schema = [SchemaField("name", "STRING"), SchemaField("age", "INTEGER")]
job_config = job.LoadJobConfig(schema=schema)
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with load_patch as load_table_from_file:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
rewind=True,
job_id=mock.ANY,
job_id_prefix=None,
location=self.LOCATION,
project=None,
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.schema == schema
assert sent_config.source_format == job.SourceFormat.PARQUET
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_load_table_from_dataframe_w_invalid_job_config(self):
from google.cloud.bigquery import job
client = self._make_client()
records = [{"float_column": 3.14, "struct_column": [{"foo": 1}, {"bar": -1}]}]
dataframe = pandas.DataFrame(data=records)
job_config = job.CopyJobConfig()
with pytest.raises(TypeError) as exc:
client.load_table_from_dataframe(
dataframe, self.TABLE_REF, job_config=job_config, location=self.LOCATION
)
err_msg = str(exc.value)
assert "Expected an instance of LoadJobConfig" in err_msg
def test_load_table_from_json_basic_use(self):
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery import job
client = self._make_client()
json_rows = [
{"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
{"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
]
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with load_patch as load_table_from_file:
client.load_table_from_json(json_rows, self.TABLE_REF)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=mock.ANY,
job_id_prefix=None,
location=client.location,
project=client.project,
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
assert sent_config.schema is None
assert sent_config.autodetect
def test_load_table_from_json_non_default_args(self):
from google.cloud.bigquery import job
from google.cloud.bigquery.client import _DEFAULT_NUM_RETRIES
from google.cloud.bigquery.schema import SchemaField
client = self._make_client()
json_rows = [
{"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
{"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
]
schema = [
SchemaField("name", "STRING"),
SchemaField("age", "INTEGER"),
SchemaField("adult", "BOOLEAN"),
]
job_config = job.LoadJobConfig(schema=schema)
job_config._properties["load"]["unknown_field"] = "foobar"
load_patch = mock.patch(
"google.cloud.bigquery.client.Client.load_table_from_file", autospec=True
)
with load_patch as load_table_from_file:
client.load_table_from_json(
json_rows,
self.TABLE_REF,
job_config=job_config,
project="project-x",
location="EU",
)
load_table_from_file.assert_called_once_with(
client,
mock.ANY,
self.TABLE_REF,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=mock.ANY,
job_id_prefix=None,
location="EU",
project="project-x",
job_config=mock.ANY,
)
sent_config = load_table_from_file.mock_calls[0][2]["job_config"]
assert job_config.source_format is None # the original was not modified
assert sent_config.source_format == job.SourceFormat.NEWLINE_DELIMITED_JSON
assert sent_config.schema == schema
assert not sent_config.autodetect
# all properties should have been cloned and sent to the backend
assert sent_config._properties.get("load", {}).get("unknown_field") == "foobar"
def test_load_table_from_json_w_invalid_job_config(self):
from google.cloud.bigquery import job
client = self._make_client()
json_rows = [
{"name": "One", "age": 11, "birthday": "2008-09-10", "adult": False},
{"name": "Two", "age": 22, "birthday": "1997-08-09", "adult": True},
]
job_config = job.CopyJobConfig()
with pytest.raises(TypeError) as exc:
client.load_table_from_json(
json_rows,
self.TABLE_REF,
job_config=job_config,
project="project-x",
location="EU",
)
err_msg = str(exc.value)
assert "Expected an instance of LoadJobConfig" in err_msg
# Low-level tests
@classmethod
def _make_resumable_upload_responses(cls, size):
"""Make a series of responses for a successful resumable upload."""
from google import resumable_media
resumable_url = "http://test.invalid?upload_id=and-then-there-was-1"
initial_response = cls._make_response(
http_client.OK, "", {"location": resumable_url}
)
data_response = cls._make_response(
resumable_media.PERMANENT_REDIRECT,
"",
{"range": "bytes=0-{:d}".format(size - 1)},
)
final_response = cls._make_response(
http_client.OK,
json.dumps({"size": size}),
{"Content-Type": "application/json"},
)
return [initial_response, data_response, final_response]
@staticmethod
def _make_transport(responses=None):
import google.auth.transport.requests
transport = mock.create_autospec(
google.auth.transport.requests.AuthorizedSession, instance=True
)
transport.request.side_effect = responses
return transport
def test__do_resumable_upload(self):
file_obj = self._make_file_obj()
file_obj_len = len(file_obj.getvalue())
transport = self._make_transport(
self._make_resumable_upload_responses(file_obj_len)
)
client = self._make_client(transport)
result = client._do_resumable_upload(
file_obj, self.EXPECTED_CONFIGURATION, None
)
content = result.content.decode("utf-8")
assert json.loads(content) == {"size": file_obj_len}
# Verify that configuration data was passed in with the initial
# request.
transport.request.assert_any_call(
"POST",
mock.ANY,
data=json.dumps(self.EXPECTED_CONFIGURATION).encode("utf-8"),
headers=mock.ANY,
timeout=mock.ANY,
)
def test__do_multipart_upload(self):
transport = self._make_transport([self._make_response(http_client.OK)])
client = self._make_client(transport)
file_obj = self._make_file_obj()
file_obj_len = len(file_obj.getvalue())
client._do_multipart_upload(
file_obj, self.EXPECTED_CONFIGURATION, file_obj_len, None
)
# Verify that configuration data was passed in with the initial
# request.
request_args = transport.request.mock_calls[0][2]
request_data = request_args["data"].decode("utf-8")
request_headers = request_args["headers"]
request_content = email.message_from_string(
"Content-Type: {}\r\n{}".format(
request_headers["content-type"].decode("utf-8"), request_data
)
)
# There should be two payloads: the configuration and the binary data.
configuration_data = request_content.get_payload(0).get_payload()
binary_data = request_content.get_payload(1).get_payload()
assert json.loads(configuration_data) == self.EXPECTED_CONFIGURATION
assert binary_data.encode("utf-8") == file_obj.getvalue()
def test__do_multipart_upload_wrong_size(self):
client = self._make_client()
file_obj = self._make_file_obj()
file_obj_len = len(file_obj.getvalue())
with pytest.raises(ValueError):
client._do_multipart_upload(file_obj, {}, file_obj_len + 1, None)
def test_schema_from_json_with_file_path(self):
from google.cloud.bigquery.schema import SchemaField
file_content = """[
{
"description": "quarter",
"mode": "REQUIRED",
"name": "qtr",
"type": "STRING"
},
{
"description": "sales representative",
"mode": "NULLABLE",
"name": "rep",
"type": "STRING"
},
{
"description": "total sales",
"mode": "NULLABLE",
"name": "sales",
"type": "FLOAT"
}
]"""
expected = [
SchemaField("qtr", "STRING", "REQUIRED", "quarter"),
SchemaField("rep", "STRING", "NULLABLE", "sales representative"),
SchemaField("sales", "FLOAT", "NULLABLE", "total sales"),
]
client = self._make_client()
mock_file_path = "/mocked/file.json"
if six.PY2:
open_patch = mock.patch(
"__builtin__.open", mock.mock_open(read_data=file_content)
)
else:
open_patch = mock.patch(
"builtins.open", new=mock.mock_open(read_data=file_content)
)
with open_patch as _mock_file:
actual = client.schema_from_json(mock_file_path)
_mock_file.assert_called_once_with(mock_file_path)
# This assert makes sure __exit__ is called on the context
# manager that opens the file inside the function
_mock_file().__exit__.assert_called_once()
assert expected == actual
def test_schema_from_json_with_file_object(self):
from google.cloud.bigquery.schema import SchemaField
file_content = """[
{
"description": "quarter",
"mode": "REQUIRED",
"name": "qtr",
"type": "STRING"
},
{
"description": "sales representative",
"mode": "NULLABLE",
"name": "rep",
"type": "STRING"
},
{
"description": "total sales",
"mode": "NULLABLE",
"name": "sales",
"type": "FLOAT"
}
]"""
expected = [
SchemaField("qtr", "STRING", "REQUIRED", "quarter"),
SchemaField("rep", "STRING", "NULLABLE", "sales representative"),
SchemaField("sales", "FLOAT", "NULLABLE", "total sales"),
]
client = self._make_client()
if six.PY2:
fake_file = io.BytesIO(file_content)
else:
fake_file = io.StringIO(file_content)
actual = client.schema_from_json(fake_file)
assert expected == actual
def test_schema_to_json_with_file_path(self):
from google.cloud.bigquery.schema import SchemaField
file_content = [
{
"description": "quarter",
"mode": "REQUIRED",
"name": "qtr",
"type": "STRING",
},
{
"description": "sales representative",
"mode": "NULLABLE",
"name": "rep",
"type": "STRING",
},
{
"description": "total sales",
"mode": "NULLABLE",
"name": "sales",
"type": "FLOAT",
},
]
schema_list = [
SchemaField("qtr", "STRING", "REQUIRED", "quarter"),
SchemaField("rep", "STRING", "NULLABLE", "sales representative"),
SchemaField("sales", "FLOAT", "NULLABLE", "total sales"),
]
client = self._make_client()
mock_file_path = "/mocked/file.json"
if six.PY2:
open_patch = mock.patch("__builtin__.open", mock.mock_open())
else:
open_patch = mock.patch("builtins.open", mock.mock_open())
with open_patch as mock_file, mock.patch("json.dump") as mock_dump:
client.schema_to_json(schema_list, mock_file_path)
mock_file.assert_called_once_with(mock_file_path, mode="w")
# This assert makes sure __exit__ is called on the context
# manager that opens the file inside the function
mock_file().__exit__.assert_called_once()
mock_dump.assert_called_with(
file_content, mock_file.return_value, indent=2, sort_keys=True
)
def test_schema_to_json_with_file_object(self):
from google.cloud.bigquery.schema import SchemaField
file_content = [
{
"description": "quarter",
"mode": "REQUIRED",
"name": "qtr",
"type": "STRING",
},
{
"description": "sales representative",
"mode": "NULLABLE",
"name": "rep",
"type": "STRING",
},
{
"description": "total sales",
"mode": "NULLABLE",
"name": "sales",
"type": "FLOAT",
},
]
schema_list = [
SchemaField("qtr", "STRING", "REQUIRED", "quarter"),
SchemaField("rep", "STRING", "NULLABLE", "sales representative"),
SchemaField("sales", "FLOAT", "NULLABLE", "total sales"),
]
if six.PY2:
fake_file = io.BytesIO()
else:
fake_file = io.StringIO()
client = self._make_client()
client.schema_to_json(schema_list, fake_file)
assert file_content == json.loads(fake_file.getvalue())
avg_line_length: 37.450346 | max_line_length: 106 | alphanum_fraction: 0.567826

hexsha: 63b02d551772ab48b977037382797f342280817e | size: 25310 | ext: py | lang: Python
max_stars_repo_path: selfdrive/car/honda/interface.py | max_stars_repo_name: jzluo/openpilot | max_stars_repo_head_hexsha: 99301a5d71a930e6645a4362896cb3a59d15d2b3 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: selfdrive/car/honda/interface.py | max_issues_repo_name: jzluo/openpilot | max_issues_repo_head_hexsha: 99301a5d71a930e6645a4362896cb3a59d15d2b3 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: selfdrive/car/honda/interface.py | max_forks_repo_name: jzluo/openpilot | max_forks_repo_head_hexsha: 99301a5d71a930e6645a4362896cb3a59d15d2b3 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python3
import numpy as np
from cereal import car
from common.numpy_fast import clip, interp
from common.realtime import DT_CTRL
from selfdrive.swaglog import cloudlog
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.events import ET
from selfdrive.car.honda.values import CruiseButtons, CAR, HONDA_BOSCH
from selfdrive.car import STD_CARGO_KG, CivicParams, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint
from selfdrive.controls.lib.longitudinal_planner import _A_CRUISE_MAX_V_FOLLOWING
from selfdrive.car.interfaces import CarInterfaceBase
A_ACC_MAX = max(_A_CRUISE_MAX_V_FOLLOWING)
ButtonType = car.CarState.ButtonEvent.Type
EventName = car.CarEvent.EventName
def compute_gb_honda(accel, speed):
creep_brake = 0.0
creep_speed = 2.3
creep_brake_value = 0.15
if speed < creep_speed:
creep_brake = (creep_speed - speed) / creep_speed * creep_brake_value
return float(accel) / 4.8 - creep_brake
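# Worked example (illustrative, not from the original source): at standstill,
# compute_gb_honda(0.0, 0.0) == 0.0 / 4.8 - (2.3 - 0.0) / 2.3 * 0.15 == -0.15,
# i.e. the full creep brake is applied when no acceleration is requested.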
def get_compute_gb_acura():
# generate a function that takes in [desired_accel, current_speed] -> [-1.0, 1.0]
# where -1.0 is max brake and 1.0 is max gas
# see debug/dump_accel_from_fiber.py to see how those parameters were generated
w0 = np.array([[ 1.22056961, -0.39625418, 0.67952657],
[ 1.03691769, 0.78210306, -0.41343188]])
b0 = np.array([ 0.01536703, -0.14335321, -0.26932889])
w2 = np.array([[-0.59124422, 0.42899439, 0.38660881],
[ 0.79973811, 0.13178682, 0.08550351],
[-0.15651935, -0.44360259, 0.76910877]])
b2 = np.array([ 0.15624429, 0.02294923, -0.0341086 ])
w4 = np.array([[-0.31521443],
[-0.38626176],
[ 0.52667892]])
b4 = np.array([-0.02922216])
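# The arrays above are the weights and biases of a tiny fully-connected
# network (2 inputs -> 3 hidden -> 3 hidden -> 1 output) evaluated by
# compute_output below.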
def compute_output(dat, w0, b0, w2, b2, w4, b4):
m0 = np.dot(dat, w0) + b0
m0 = leakyrelu(m0, 0.1)
m2 = np.dot(m0, w2) + b2
m2 = leakyrelu(m2, 0.1)
m4 = np.dot(m2, w4) + b4
return m4
def leakyrelu(x, alpha):
return np.maximum(x, alpha * x)
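# e.g. leakyrelu(np.array([-1.0, 2.0]), 0.1) -> array([-0.1, 2.0])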
def _compute_gb_acura(accel, speed):
# linearly extrapolate below v1 using the network outputs at v1 and v2
v1 = 5.
v2 = 10.
dat = np.array([accel, speed])
if speed > v1:
m4 = compute_output(dat, w0, b0, w2, b2, w4, b4)
else:
dat[1] = v1
m4v1 = compute_output(dat, w0, b0, w2, b2, w4, b4)
dat[1] = v2
m4v2 = compute_output(dat, w0, b0, w2, b2, w4, b4)
m4 = (speed - v1) * (m4v2 - m4v1) / (v2 - v1) + m4v1
return float(m4)
return _compute_gb_acura
class CarInterface(CarInterfaceBase):
def __init__(self, CP, CarController, CarState):
super().__init__(CP, CarController, CarState)
self.last_enable_pressed = 0
self.last_enable_sent = 0
if self.CS.CP.carFingerprint == CAR.ACURA_ILX:
self.compute_gb = get_compute_gb_acura()
else:
self.compute_gb = compute_gb_honda
@staticmethod
def compute_gb(accel, speed): # pylint: disable=method-hidden
raise NotImplementedError
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
# normalized max accel. Allowing max accel at low speed causes speed overshoots
max_accel_bp = [10, 20] # m/s
max_accel_v = [0.714, 1.0] # unit of max accel
max_accel = interp(v_ego, max_accel_bp, max_accel_v)
# limit the pcm accel cmd if:
# - v_ego exceeds v_target, or
# - a_ego exceeds a_target and v_ego is close to v_target
eA = a_ego - a_target
valuesA = [1.0, 0.1]
bpA = [0.3, 1.1]
eV = v_ego - v_target
valuesV = [1.0, 0.1]
bpV = [0.0, 0.5]
valuesRangeV = [1., 0.]
bpRangeV = [-1., 0.]
# only limit if v_ego is close to v_target
speedLimiter = interp(eV, bpV, valuesV)
accelLimiter = max(interp(eA, bpA, valuesA), interp(eV, bpRangeV, valuesRangeV))
# accelOverride is more or less the max throttle allowed to the PCM: usually set to a constant
# unless aTargetMax is very high, in which case we scale with it; this helps with a quicker restart
return float(max(max_accel, a_target / A_ACC_MAX)) * min(speedLimiter, accelLimiter)
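# Illustrative numbers (not from the original source): at v_ego = 15 m/s,
# max_accel = interp(15, [10, 20], [0.714, 1.0]) = 0.857; when v_ego is well
# below v_target (eV <= -1), both limiters evaluate to 1.0 and the full
# normalized accel command is allowed.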
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=[]): # pylint: disable=dangerous-default-value
ret = CarInterfaceBase.get_std_params(candidate, fingerprint)
ret.carName = "honda"
if candidate in HONDA_BOSCH:
ret.safetyModel = car.CarParams.SafetyModel.hondaBoschHarness
ret.enableCamera = True
ret.radarOffCan = True
ret.openpilotLongitudinalControl = False
else:
ret.safetyModel = car.CarParams.SafetyModel.hondaNidec
ret.enableCamera = True
ret.enableGasInterceptor = 0x201 in fingerprint[0]
ret.openpilotLongitudinalControl = ret.enableCamera
cloudlog.warning("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warning("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
ret.enableCruise = not ret.enableGasInterceptor
ret.communityFeature = ret.enableGasInterceptor
# Certain Hondas have an extra steering sensor at the bottom of the steering rack,
# which improves controls quality as it removes the steering column torsion from feedback.
# The tire stiffness factor is fictitiously lower if it includes the steering column torsion effect.
# For modeling details, see p.198-200 in "The Science of Vehicle Dynamics (2014), M. Guiggiani"
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0], [0]]
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
ret.lateralTuning.pid.kf = 0.00006 # conservative feed-forward
eps_modified = False
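# Editorial note: a comma embedded in the EPS firmware version string is
# used below as a marker for community-modified EPS firmware, which (per
# the hex tables further down) accepts roughly 2x the stock steering torque
# request.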
for fw in car_fw:
if fw.ecu == "eps" and b"," in fw.fwVersion:
eps_modified = True
if candidate == CAR.CIVIC:
stop_and_go = True
ret.mass = CivicParams.MASS
ret.wheelbase = CivicParams.WHEELBASE
ret.centerToFront = CivicParams.CENTER_TO_FRONT
ret.steerRatio = 15.38 # 10.93 is end-to-end spec
if eps_modified:
# stock request input values: 0x0000, 0x00DE, 0x014D, 0x01EF, 0x0290, 0x0377, 0x0454, 0x0610, 0x06EE
# stock request output values: 0x0000, 0x0917, 0x0DC5, 0x1017, 0x119F, 0x140B, 0x1680, 0x1680, 0x1680
# modified request output values: 0x0000, 0x0917, 0x0DC5, 0x1017, 0x119F, 0x140B, 0x1680, 0x2880, 0x3180
# stock filter output values: 0x009F, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108
# modified filter output values: 0x009F, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0400, 0x0480
# note: max request allowed is 4096, but request is capped at 3840 in firmware, so modifications result in 2x max
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560, 8000], [0, 2560, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.1]]
else:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560], [0, 2560]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[1.1], [0.33]]
tire_stiffness_factor = 1.
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.54, 0.36]
elif candidate in (CAR.CIVIC_BOSCH, CAR.CIVIC_BOSCH_DIESEL):
stop_and_go = True
ret.mass = CivicParams.MASS
ret.wheelbase = CivicParams.WHEELBASE
ret.centerToFront = CivicParams.CENTER_TO_FRONT
ret.steerRatio = 15.38 # 10.93 is end-to-end spec
if eps_modified:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2566, 8000], [0, 2566, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.4], [0.12]]
else:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
tire_stiffness_factor = 1.
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate in (CAR.ACCORD, CAR.ACCORD_15, CAR.ACCORDH):
stop_and_go = True
      if candidate != CAR.ACCORDH:  # Hybrid uses same brake msg as hatch
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 3279. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.83
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 16.33 # 11.82 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.8467
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
if eps_modified:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.09]]
else:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
elif candidate == CAR.ACURA_ILX:
stop_and_go = False
ret.mass = 3095. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.37
ret.steerRatio = 18.61 # 15.3 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.72
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate in (CAR.CRV, CAR.CRV_EU):
stop_and_go = False
ret.mass = 3572. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.62
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.89 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 1000], [0, 1000]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.CRV_5G:
stop_and_go = True
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 3410. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.66
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # 12.3 is spec end-to-end
if eps_modified:
# stock request input values: 0x0000, 0x00DB, 0x01BB, 0x0296, 0x0377, 0x0454, 0x0532, 0x0610, 0x067F
# stock request output values: 0x0000, 0x0500, 0x0A15, 0x0E6D, 0x1100, 0x1200, 0x129A, 0x134D, 0x1400
# modified request output values: 0x0000, 0x0500, 0x0A15, 0x0E6D, 0x1100, 0x1200, 0x1ACD, 0x239A, 0x2800
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560, 10000], [0, 2560, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.21], [0.07]]
else:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.64], [0.192]]
tire_stiffness_factor = 0.677
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.CRV_HYBRID:
stop_and_go = True
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 1667. + STD_CARGO_KG # mean of 4 models in kg
ret.wheelbase = 2.66
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # 12.3 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.677
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.FIT:
stop_and_go = False
ret.mass = 2644. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.53
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 13.06
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.75
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.HRV:
stop_and_go = False
ret.mass = 3125 * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.61
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.2
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]]
tire_stiffness_factor = 0.5
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.16], [0.025]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ACURA_RDX:
stop_and_go = False
ret.mass = 3935. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.68
ret.centerToFront = ret.wheelbase * 0.38
ret.steerRatio = 15.0 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 1000], [0, 1000]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ACURA_RDX_3G:
stop_and_go = True
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 4068. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.75
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 11.95 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
tire_stiffness_factor = 0.677
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ODYSSEY:
stop_and_go = False
ret.mass = 4471. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 3.00
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 14.35 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.28], [0.08]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ODYSSEY_CHN:
stop_and_go = False
ret.mass = 1849.2 + STD_CARGO_KG # mean of 4 models in kg
ret.wheelbase = 2.90
ret.centerToFront = ret.wheelbase * 0.41 # from CAR.ODYSSEY
ret.steerRatio = 14.35
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 32767], [0, 32767]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.28], [0.08]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate in (CAR.PILOT, CAR.PILOT_2019):
stop_and_go = False
ret.mass = 4204. * CV.LB_TO_KG + STD_CARGO_KG # average weight
ret.wheelbase = 2.82
ret.centerToFront = ret.wheelbase * 0.428
ret.steerRatio = 17.25 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.11]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.RIDGELINE:
stop_and_go = False
ret.mass = 4515. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 3.18
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.59 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.11]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.INSIGHT:
stop_and_go = True
ret.mass = 2987. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.7
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 15.0 # 12.58 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
raise ValueError("unsupported car %s" % candidate)
    # min speed to enable ACC. If the car can do stop and go, set the enabling
    # speed to a negative value, so it won't matter. Otherwise, add a 0.5 mph
    # margin to avoid conflicting with PCM ACC.
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 25.5 * CV.MPH_TO_MS
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
tire_stiffness_factor=tire_stiffness_factor)
ret.gasMaxBP = [0.] # m/s
ret.gasMaxV = [0.6] if ret.enableGasInterceptor else [0.] # max gas allowed
ret.brakeMaxBP = [5., 20.] # m/s
ret.brakeMaxV = [1., 0.8] # max brake allowed
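    # max brake is interpolated over speed: 1.0 at or below 5 m/s,
    # tapering to 0.8 at 20 m/s and above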
ret.stoppingControl = True
ret.startAccel = 0.5
ret.steerActuatorDelay = 0.1
ret.steerRateCost = 0.5
ret.steerLimitTimer = 0.8
return ret
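  # Usage sketch (illustrative): the params built above are constructed once at
  # startup, e.g. via get_params(CAR.CIVIC, fingerprint), and consumed by
  # planner/controls code through fields like CP.steerRatio and CP.minEnableSpeed.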
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
if self.cp_body:
self.cp_body.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam, self.cp_body)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid and (self.cp_body is None or self.cp_body.can_valid)
ret.yawRate = self.VM.yaw_rate(ret.steeringAngleDeg * CV.DEG_TO_RAD, ret.vEgo)
# FIXME: read sendcan for brakelights
brakelights_threshold = 0.02 if self.CS.CP.carFingerprint == CAR.CIVIC else 0.1
ret.brakeLights = bool(self.CS.brake_switch or
c.actuators.brake > brakelights_threshold)
buttonEvents = []
if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.unknown
if self.CS.cruise_buttons != 0:
be.pressed = True
but = self.CS.cruise_buttons
else:
be.pressed = False
but = self.CS.prev_cruise_buttons
if but == CruiseButtons.RES_ACCEL:
be.type = ButtonType.accelCruise
elif but == CruiseButtons.DECEL_SET:
be.type = ButtonType.decelCruise
elif but == CruiseButtons.CANCEL:
be.type = ButtonType.cancel
elif but == CruiseButtons.MAIN:
be.type = ButtonType.altButton3
buttonEvents.append(be)
if self.CS.cruise_setting != self.CS.prev_cruise_setting:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.unknown
if self.CS.cruise_setting != 0:
be.pressed = True
but = self.CS.cruise_setting
else:
be.pressed = False
but = self.CS.prev_cruise_setting
if but == 1:
be.type = ButtonType.altButton1
# TODO: more buttons?
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
# events
events = self.create_common_events(ret, pcm_enable=False)
if self.CS.brake_error:
events.add(EventName.brakeUnavailable)
if self.CS.brake_hold and self.CS.CP.openpilotLongitudinalControl:
events.add(EventName.brakeHold)
if self.CS.park_brake:
events.add(EventName.parkBrake)
if self.CP.enableCruise and ret.vEgo < self.CP.minEnableSpeed:
events.add(EventName.belowEngageSpeed)
    # the car's cruise can disable while the comma system is enabled: keep
    # braking if needed, or if the speed is very low
if self.CP.enableCruise and not ret.cruiseState.enabled \
and (c.actuators.brake <= 0. or not self.CP.openpilotLongitudinalControl):
      # non-loud alert if cruise disables below 25 mph as expected (+ a little margin)
if ret.vEgo < self.CP.minEnableSpeed + 2.:
events.add(EventName.speedTooLow)
else:
events.add(EventName.cruiseDisabled)
if self.CS.CP.minEnableSpeed > 0 and ret.vEgo < 0.001:
events.add(EventName.manualRestart)
cur_time = self.frame * DT_CTRL
enable_pressed = False
# handle button presses
for b in ret.buttonEvents:
# do enable on both accel and decel buttons
if b.type in [ButtonType.accelCruise, ButtonType.decelCruise] and not b.pressed:
self.last_enable_pressed = cur_time
enable_pressed = True
# do disable on button down
if b.type == "cancel" and b.pressed:
events.add(EventName.buttonCancel)
if self.CP.enableCruise:
      # KEEP THIS EVENT LAST! send enable event if button is pressed and there are
      # NO_ENTRY events, so controlsd will display alerts. Also, do not send enable
      # events too close together in time, so a no_entry will not be followed by another one.
# TODO: button press should be the only thing that triggers enable
if ((cur_time - self.last_enable_pressed) < 0.2 and
(cur_time - self.last_enable_sent) > 0.2 and
ret.cruiseState.enabled) or \
(enable_pressed and events.any(ET.NO_ENTRY)):
events.add(EventName.buttonEnable)
self.last_enable_sent = cur_time
elif enable_pressed:
events.add(EventName.buttonEnable)
ret.events = events.to_msg()
self.CS.out = ret.as_reader()
return self.CS.out
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
if c.hudControl.speedVisible:
hud_v_cruise = c.hudControl.setSpeed * CV.MS_TO_KPH
else:
hud_v_cruise = 255
pcm_accel = int(clip(c.cruiseControl.accelOverride, 0, 1) * 0xc6)
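    # clip accelOverride to [0, 1] and scale it onto the PCM gas command;
    # 0xc6 (198) appears to be the maximum command value the PCM accepts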
can_sends = self.CC.update(c.enabled, self.CS, self.frame,
c.actuators,
c.cruiseControl.speedOverride,
c.cruiseControl.override,
c.cruiseControl.cancel,
pcm_accel,
hud_v_cruise,
c.hudControl.lanesVisible,
hud_show_car=c.hudControl.leadVisible,
hud_alert=c.hudControl.visualAlert)
self.frame += 1
return can_sends
| 43.940972 | 144 | 0.654326 |

860589ec69c6aef2f6ac55e687ee0ca53be43059 | 178,837 | py | Python | pytorch/test/quantization/test_quantized_op.py | zhou3968322/dl-code-read | aca204a986dabe2755becff0f42de1082299d791 | ["MIT"] | null | null | null |
from __future__ import division
from builtins import round
import itertools
import numpy as np
import sys
import unittest
import torch
from torch import _VF
import torch.jit
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair
from hypothesis import settings, HealthCheck
from hypothesis import assume, given, note
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_quantized import _quantize, _dequantize, _calculate_dynamic_qparams, \
override_quantized_engine, supported_qengines, override_qengines
np_dtype = {
torch.quint8 : np.uint8,
torch.qint8 : np.int8,
torch.qint32 : np.int32
}
# Make sure we won't have overflows from the vpmaddubsw instruction used in FBGEMM.
# On the current Intel x86 architecture, we need to use the vpmaddubsw instruction
# for 8-bit int multiplication. This instruction vertically multiplies each
# unsigned 8-bit integer from a with the corresponding signed 8-bit integer from
# b, producing intermediate signed 16-bit integers. This function modifies the
# weights to eliminate overflow on these signed 16-bit integers.
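# For example, 255 * 127 + 255 * 127 = 64770, which overflows the int16 maximum
# of 32767; the adjustment below prevents such input/weight pairs from occurring.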
def avoid_vpmaddubsw_overflow_linear(
batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 > (1 << 15) - 1:
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# Reference quantized Linear operator
def qlinear_ref(X_q, X_scale, X_zp, W_q, W_scale, W_zp, b_q, Y_scale, Y_zp):
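    # Zero-point expansion over the K input channels:
    #   sum_k (Xq - Xzp) * (Wq - Wzp)
    #     = sum_k Xq*Wq - W_zp * sum_k Xq - X_zp * sum_k Wq + K * X_zp * W_zp
    # which is why the row and column offsets are precomputed below.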
X_q = np.reshape(X_q, (-1, X_q.shape[X_q.ndim - 1]))
row_offsets_ref = X_q.sum(axis=1).astype(np.int32).reshape((-1, 1))
col_offsets_ref = W_q.sum(axis=1).astype(np.int32).reshape((1, -1))
assert X_q.ndim == 2
batch_size, input_channels = X_q.shape
Prod_XqWq_ref = (
np.matmul(X_q.astype(np.int32), W_q.astype(np.int32).T)
- W_zp * row_offsets_ref
- X_zp * col_offsets_ref
+ input_channels * X_zp * W_zp
)
if b_q is not None:
Prod_XqWq_ref += b_q
Y_q_ref = _quantize(Prod_XqWq_ref, Y_scale / (X_scale * W_scale), Y_zp)
return Y_q_ref
"""Computes the output shape given pooling parameters."""
def pool_output_shape(input_size, kernel_size, padding, stride,
dilation, ceiling_mode=False):
if stride is None:
stride = kernel_size
output_size = (
(input_size + 2 * padding - dilation * (kernel_size - 1) - 1
+ (stride - 1 if ceiling_mode else 0)) // stride + 1)
if (padding > 0 and
((output_size - 1) * stride >= input_size + padding)):
output_size += 1
return output_size
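# For example, pool_output_shape(10, kernel_size=3, padding=1, stride=2,
# dilation=1) = (10 + 2 - 2 - 1) // 2 + 1 = 5.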
"""
Util for creating a random tensor and quantization params when Hypothesis
is undesirable.
"""
def _get_random_tensor_and_q_params(shapes, rand_scale, torch_type):
X = (torch.rand(*shapes, dtype=torch.float) - 0.5) * rand_scale
# Calculate reasonable quantization params
min_val = torch.min(X)
max_val = torch.max(X)
if torch_type == torch.qint32:
X_zero_point = int(torch.randint(-1 * (2 ** 31), 2 ** 31 - 1, (1,)))
num_bins = 2 ** 32
X_scale = float(max_val - min_val) / num_bins
elif torch_type == torch.qint8:
X_zero_point = int(torch.randint(-128, 127, (1,)))
num_bins = 2 ** 8
X_scale = float(max_val - min_val) / num_bins
else: # torch.quint8
X_zero_point = 127
num_bins = 2 ** 8
X_scale = float(max_val - min_val) / num_bins
if X_scale == 0:
X_scale = 1e-10
return X, X_scale, X_zero_point
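# Illustrative call: _get_random_tensor_and_q_params((2, 3), 2.0, torch.quint8)
# yields X roughly in [-1, 1], X_zero_point = 127 and X_scale = (max - min) / 256.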
class TestQuantizedOps(TestCase):
"""Helper function to test quantized activation functions."""
def _test_activation_function(self, X, fn_name, test_configs):
r"""
        When writing a unit test for an activation function, instead of
        implementing test routines that apply only to that activation function,
        use this helper, _test_activation_function, which provides general testing.
        To use the helper, a test config must be provided.
        A test config is a list that contains metadata about the quantized activation
        functions that will be tested and how the tests need to be set up; it allows simpler and
        more concise unit tests to be written by specifying the configurations needed
        and calling the provided helper function _test_activation_function.
        Inside the list, each config (a dictionary) represents a suite of tests that assert the
        correctness of various quantization functions.
        You can check out test_qrelu, test_qrelu6, test_qsigmoid, and test_qhardsigmoid for
        how their test configs are specified.
        Here's a list of the fields that can be included in a test config:
        quantized_fn: a list of the quantized functions to be tested
        reference_fn: the original reference function to be called on the
        dequantized X
        inplace_kwarg: the additional inplace keyword argument to test in-place;
        each test entry must have at least the quantized_fn and reference_fn
        fields. If inplace_kwarg is missing, the quantized function is assumed
        to be either inplace by default or the test is not testing an in-place
        function.
        output_range: the output range the operator will map to. By default, if it is
        not specified, the range will not be controlled and will depend on Xmin and Xmax.
        change_zero_point: a boolean flag indicating if the zero point parameter should
        be determined based on torch_type during quantization (see sigmoid/hardsigmoid for
        examples). By default, if it is not specified, change_zero_point is assumed to be
        False and the zero point will just take on the default value from X.
"""
        # Retrieves the default parameters from X.
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
# Quantizes the reference to account for max error.
# q_min and q_max only depend on the initial torch_type.
q_min, q_max = torch.iinfo(torch_type).min, torch.iinfo(torch_type).max
for op_group in test_configs:
ref_op = op_group['reference_fn']
for q_op in op_group['quantized_fn']:
# Quantizes and dequantizes to account for max error.
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
dqY_hat = ref_op(dqX.clone())
# Retrieves the inplace keyword arguments
# some functions require inplace=True to test in-place.
inplace_kwarg = op_group.get('inplace_kwarg', dict())
# Adjusts output_scale if needed.
# The output_scale determines the quantization scale for functions that
                # have a constrained output range, e.g. sigmoid ranges from 0 to 1.
output_scale = scale
if 'output_range' in op_group:
(f_min, f_max) = op_group['output_range']
output_scale = (f_max - f_min) / (q_max - q_min + 1.0)
# Adjusts output_zero_point if needed (see explanation for the
# change_zero_point parameter above).
# output_zero_point determines the additional offset that will be
# added to a scaled value during quantization.
if op_group.get('change_zero_point', False):
output_zero_point = 0 if torch_type == torch.qint32 else q_min
else:
output_zero_point = zero_point
# Quantizes the dequantized version of Y_hat.
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale,
zero_point=output_zero_point,
dtype=torch_type)
# Finds qY using in-place or non-in-place quantized operators.
qY = q_op(qX, **inplace_kwarg)
self.assertEqual(qY, qY_hat, msg='{} - {} failed: ({} vs. {})'.format(
fn_name, q_op, qY, qY_hat
))
"""Tests the correctness of the quantized::relu op."""
@override_qengines
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_qrelu(self, X):
relu_test_configs = [
{
'quantized_fn': [
torch.relu,
torch.relu_,
torch.nn.functional.relu,
torch.nn.quantized.functional.relu,
],
'reference_fn': torch.nn.functional.relu
},
{
'quantized_fn': [
torch.nn.functional.relu,
torch.nn.quantized.functional.relu,
],
'reference_fn': torch.nn.functional.relu,
'inplace_kwarg': {
'inplace': True
}
}
]
self._test_activation_function(X, 'relu', relu_test_configs)
"""Tests the correctness of the quantized::relu6 op."""
@override_qengines
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_qrelu6(self, X):
relu6_test_configs = [
{
'quantized_fn': [
torch.ops.quantized.relu6,
torch.nn.quantized.ReLU6(inplace=False),
torch.nn.quantized.ReLU6(inplace=True)
],
'reference_fn': torch.nn.functional.relu6
}
]
self._test_activation_function(X, 'relu6', relu6_test_configs)
"""Tests the correctness of the quantized::sigmoid op."""
@override_qengines
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_qsigmoid(self, X):
sigmoid_test_configs = [
{
'quantized_fn': [
torch.sigmoid
],
'reference_fn': torch.sigmoid,
'output_range': (0.0, 1.0),
'change_zero_point': True
}
]
self._test_activation_function(X, 'sigmoid', sigmoid_test_configs)
"""Tests the correctness of the quantized::hardsigmoid op."""
@override_qengines
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_qhardsigmoid(self, X):
hardsigmoid_test_configs = [
{
'quantized_fn': [
torch.nn.quantized.functional.hardsigmoid
],
'reference_fn': torch.nn.functional.hardsigmoid,
'output_range': (0.0, 1.0),
'change_zero_point': True
}
]
self._test_activation_function(X, 'hardsigmoid', hardsigmoid_test_configs)
"""Tests the correctness of the quantized::relu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()),
alpha=st.floats(0.0, 1.0, allow_nan=False, allow_infinity=False))
def test_qrelu_leaky(self, X, alpha):
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
# torch.nn.functional
op = torch.nn.functional.leaky_relu
dqY = op(dqX, negative_slope=alpha)
qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = op(qX, negative_slope=alpha)
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
msg="F.leaky_relu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::elu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
alpha=st.floats(0.01, 10.0, allow_nan=False, allow_infinity=False))
def test_qelu(self, X, alpha):
X, (scale, zero_point, torch_type) = X
output_scale = 0.5
output_zero_point = 1
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate ELU(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = dqX.clone()
dqY_hat = torch.nn.functional.elu(dqX, alpha)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale, zero_point=output_zero_point,
dtype=torch_type)
qY = torch.nn.quantized.functional.elu(qX, output_scale, output_zero_point, alpha=alpha)
self.assertEqual(qY, qY_hat,
msg="F.elu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::celu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e2, 1e2, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(scale_max=9.999999747378752e-06)),
alpha=st.floats(0.01, 100.0, allow_nan=False, allow_infinity=False))
def test_qcelu(self, X, alpha):
X, (scale, zero_point, torch_type) = X
output_scale = 0.5
output_zero_point = 1
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate CELU(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = torch.nn.functional.celu(dqX, alpha)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale, zero_point=output_zero_point,
dtype=torch_type)
# test regular
qY = torch.ops.quantized.celu(qX, output_scale, output_zero_point, alpha=alpha)
self.assertEqual(qY, qY_hat,
msg="F.celu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::qlayer_norm op."""
@skipIfNoFBGEMM
def test_qlayer_norm(self):
# hypothesis is flaky for this test, create test cases manually
side_lens = (1, 8, 11)
torch_types = (torch.qint8, torch.quint8)
y_scales = (0.1, 4.23)
y_zero_points = (0, 1)
channels_last_list = (True, False)
affine_list = (True, False)
combined = [side_lens, torch_types, y_scales, y_zero_points,
channels_last_list, affine_list]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
side_len, torch_type, Y_scale, Y_zero_point, channels_last, \
affine = test_case
shapes = [side_len] * 4
# In the FP kernel, mean and variance are calculated in floating point.
# In the quantized kernel, they are calculated in integer arithmetic.
# Because of this, the numerics do not always match exactly which is
# expected and acceptable. We do two things to allow this failure
# in this test:
# 1. do not use Hypothesis to generate the input tensor. Hypothesis
# favors homogeneous inputs in its search strategies which isn't
# representative of the inputs we care about, and tends to maximize
# this particular numerics difference.
# 2. allow a small % of off by Y_scale errors. Even when the
# variance of the input is high, there can be off by one errors
# in the result if the input value happens to fall exactly on
# the bin boundary of the output scale.
#
# If we want the numerics to match we could switch to calculating
# mean+var in floating point in the future, at the cost of speed.
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
qX = torch.quantize_per_tensor(X, scale=X_scale,
zero_point=X_zero_point,
dtype=torch_type)
if channels_last:
qX = qX.contiguous(memory_format=torch.channels_last)
dqX = qX.dequantize()
                # Enforce non-homogeneous inputs: each layer must either be tiny
                # (< 5 rows) or have > 1% unique values
enough_unique_vals_in_each_layer = sum(
1 if (
dqX[i].shape[0] < 5 or
float(torch.unique(dqX[i]).shape[0]) / dqX[i].shape[0] > 0.01
) else 0
for i in range(dqX.shape[0])
) == dqX.shape[0]
assume(enough_unique_vals_in_each_layer)
# Initialize the weights non-randomly for reproducibility, to avoid
# flaky tests
if affine:
weight = torch.ones(*qX.size()[1:], dtype=torch.float) * 0.5
bias = torch.ones(*qX.size()[1:], dtype=torch.float) * 1
else:
weight = None
bias = None
epsilon = 1e-5
qY = torch.ops.quantized.layer_norm(
qX, qX.size()[1:], weight=weight, bias=bias, eps=epsilon,
output_scale=Y_scale, output_zero_point=Y_zero_point)
Y_hat = F.layer_norm(
dqX, dqX.size()[1:], weight=weight, bias=bias, eps=epsilon)
qY_hat = torch.quantize_per_tensor(
Y_hat, scale=Y_scale, zero_point=Y_zero_point, dtype=torch_type)
# Due to the numerics difference mentioned above between calculating
# the variance in float vs int, the results can still be slightly
# different.
dqY = qY.dequantize()
dqY_hat = qY_hat.dequantize()
diff = dqY - dqY_hat
# off-by-one errors are magnitude of Y_scale
num_diff = torch.sum(diff > Y_scale * 1.0001)
pct_diff = float(num_diff) / (diff.numel() + 1e-5)
num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)
self.assertTrue(pct_diff < 1e-6)
self.assertTrue(pct_diff_off_by_one < 0.01)
"""Tests the correctness of the quantized::qnnpack_tanh op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_qtanh(self, X):
# Note: QNNPACK is tested separately in TestQNNPackOps
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
Y = torch.tanh(X)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=torch_type)
# Quantize the reference to account for max error.
        # Note that the output scale denominator has a +1, because we use a
        # scale of 2.0/2^BITS in the implementations.
f_min, f_max = -1.0, 1.0
q_min, q_max = torch.iinfo(torch_type).min, torch.iinfo(torch_type).max
output_scale = (f_max - f_min) / (q_max - q_min + 1.0)
output_zero_point = int(round((q_max + q_min) / 2.0))
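        # e.g. for quint8: output_scale = 2.0 / 256 and
        # output_zero_point = round(255 / 2) = 128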
qY = torch.quantize_per_tensor(Y, scale=output_scale,
zero_point=output_zero_point,
dtype=torch_type)
qY_hat = torch.tanh(qX)
self.assertEqual(qY, qY_hat,
msg="TanH failed: {} vs. {}".format(qY, qY_hat))
"""Tests the correctness of the quantized::threshold op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
threshold=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
value=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False))
def test_qthreshold(self, X, threshold, value):
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate threshold(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = dqX.clone()
dqY_hat = torch.nn.functional.threshold(dqY_hat, threshold, value)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
'native': torch.threshold,
'nn.functional': torch.nn.functional.threshold,
'nn.quantized.functional': torch.nn.quantized.functional.threshold
}
for name, op in ops_under_test.items():
qY = op(qX, threshold, value)
self.assertEqual(qY, qY_hat, msg="{} qthreshold failed".format(name))
"""Tests the correctness of the quantized::clamp op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
elements=hu.floats(-1e6, 1e6, allow_nan=False),
qparams=hu.qparams()),
min_val=hu.floats(-1e6, 1e6, allow_nan=False),
max_val=hu.floats(-1e6, 1e6, allow_nan=False))
def test_qclamp(self, X, min_val, max_val):
X, (scale, zero_point, torch_type) = X
assume(min_val <= max_val)
Y = X.copy()
Y[Y < min_val] = min_val
Y[Y > max_val] = max_val
qY = torch.quantize_per_tensor(torch.from_numpy(Y), scale=scale,
zero_point=zero_point, dtype=torch_type)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
'ops.quantized': torch.ops.quantized.clamp,
}
for name, op in ops_under_test.items():
qY_hat = op(qX, min_val, max_val)
self.assertEqual(qY, qY_hat, msg="{} qclamp failed".format(name))
"""Tests the correctness of the quantized::hardtanh op."""
@skipIfNoFBGEMM
@given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
elements=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
min_val=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False),
max_val=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
def test_hardtanh(self, X, min_val, max_val):
with override_quantized_engine('fbgemm'):
X, (scale, zero_point, torch_type) = X
assume(min_val <= max_val)
Y = X.copy()
Y[Y < min_val] = min_val
Y[Y > max_val] = max_val
qY = torch.quantize_per_tensor(torch.from_numpy(Y), scale=scale,
zero_point=zero_point, dtype=torch_type)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
'nn.quantized.functional.hardtanh':
torch.nn.quantized.functional.hardtanh,
}
for name, op in ops_under_test.items():
qY_hat = op(qX, min_val, max_val)
self.assertEqual(qY, qY_hat, msg="{} hardtanh failed".format(name))
ops_under_test_inplace = {
'inplace nn.quantized.functional.hardtanh':
torch.nn.quantized.functional.hardtanh,
}
for name, op_ in ops_under_test_inplace.items():
qY_hat = qX.clone()
op_(qY_hat, min_val, max_val, inplace=True)
self.assertEqual(qY, qY_hat, msg="{} hardtanh failed".format(name))
"""Tests the correctness of the quantized::hardswish op."""
@override_qengines
def test_hardswish(self):
max_sides = (3, 5)
side_lens = (1, 7, 8)
torch_types = (torch.quint8, torch.qint8)
y_scales = (0.1, 4.23)
y_zero_points = (0, 1)
combined = [max_sides, side_lens, torch_types, y_scales, y_zero_points]
test_cases = itertools.product(*combined)
for test_case in test_cases:
max_side, side_len, torch_type, Y_scale, Y_zero_point = test_case
if torch.backends.quantized.engine == 'qnnpack' and torch_type != torch.quint8:
continue
shapes = [side_len] * max_side
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 2.0, torch_type)
qX = torch.quantize_per_tensor(X, scale=X_scale, zero_point=X_zero_point,
dtype=torch_type)
dqX = qX.dequantize()
dqY_hat = F.hardswish(dqX)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=Y_scale,
zero_point=Y_zero_point,
dtype=torch_type)
qY = torch.nn.quantized.functional.hardswish(
qX, scale=Y_scale, zero_point=Y_zero_point)
self.assertEqual(
qY, qY_hat,
msg="Hardswish failed: {} vs {}, {}".format(qY, qY_hat, torch.backends.quantized.engine))
"""Tests the correctness of the scalar addition."""
@unittest.skip("Failing on MacOS")
@given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
elements=hu.floats(-1e6, 1e6, allow_nan=False),
qparams=hu.qparams()),
b=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
def test_qadd_scalar_relu(self, A, b):
import copy
add_scalar = torch.ops.quantized.add_scalar
add_scalar_relu = torch.ops.quantized.add_scalar_relu
A, (scale, zero_point, dtype) = A
A = A.astype(np.float32)
qA = torch.quantize_per_tensor(torch.from_numpy(A), scale, zero_point, dtype)
C = qA.dequantize() + round(b / scale) * scale
C_relu = copy.deepcopy(C)
C_relu[C_relu < 0] = 0
C_hat = add_scalar(qA, b)
C_ref = torch.quantize_per_tensor(C, C_hat.q_scale(), C_hat.q_zero_point(), dtype)
C_relu_hat = add_scalar_relu(qA, b)
C_relu_ref = torch.quantize_per_tensor(
C_relu, C_relu_hat.q_scale(), C_relu_hat.q_zero_point(), dtype)
self.assertEqual(C_ref.dequantize(), C_hat.dequantize(),
msg="Scalar add results don't match:\
{} vs {}".format(C_ref.dequantize(), C_hat.dequantize()))
self.assertEqual(C_relu_ref.dequantize(), C_relu_hat.dequantize(),
msg="Scalar add relu results don't match:\
{} vs {}".format(C_relu_ref.dequantize(), C_relu_hat.dequantize()))
"""Tests the correctness of the add and add_relu op."""
def test_qadd_relu_same_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
add_relu = torch.ops.quantized.add_relu
add = torch.ops.quantized.add
add_out = torch.ops.quantized.add_out
add_relu_out = torch.ops.quantized.add_relu_out
# NB: This is a strange size so that we exercise both the vectorized
            # implementation (64-element chunks at a time) as well as the scalar
# implementation
A = torch.arange(-128, 130, dtype=torch.float)
B = torch.arange(-128, 130, dtype=torch.float)
scale = 2.0
zero_point = 127
qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
dtype=dtype)
# Add ReLU ground truth
C = (qA.dequantize() + qB.dequantize()).numpy()
qC = _quantize(C, scale, zero_point, dtype=np_dtype[dtype])
qC_hat = add(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized addition failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
add_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="Add.out failed")
# Add + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale, zero_point, dtype=np_dtype[dtype])
qCrelu_hat = add_relu(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
add_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="AddReLU.out failed")
"""Tests the correctness of the add and add_relu op."""
def test_qadd_relu_different_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
add_relu = torch.ops.quantized.add_relu
add = torch.ops.quantized.add
add_out = torch.ops.quantized.add_out
add_relu_out = torch.ops.quantized.add_relu_out
# NB: This is a strange size so that we exercise both the vectorized
            # implementation (64-element chunks at a time) as well as the scalar
# implementation
A = torch.arange(-128, 130, dtype=torch.float)
B = torch.arange(-128, 130, dtype=torch.float)
scale_A = 3.0
zero_point_A = 7
scale_B = 5.0
zero_point_B = 127
scale_C = 0.5
zero_point_C = 5
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
dtype=dtype)
# Add ground truth
C = (qA.dequantize() + qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype[dtype])
qC_hat = add(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized addition failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
add_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="Add.out failed")
# Add + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale_C, zero_point_C, dtype=np_dtype[dtype])
qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
add_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="AddReLU.out failed")
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_relu_same_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul_out
mul_relu_out = torch.ops.quantized.mul_relu_out
A = torch.arange(-100, 100, dtype=torch.float)
B = torch.arange(-100, 100, dtype=torch.float)
scale = 2.0
zero_point = 127
qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
dtype=dtype)
# mul ReLU ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale, zero_point, dtype=np_dtype[dtype])
qC_hat = mul(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized mulition failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
mul_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="mul.out failed")
# mul + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale, zero_point, dtype=np_dtype[dtype])
qCrelu_hat = mul_relu(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized mulition with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
mul_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="mulReLU.out failed")
# Scalar multiplication
for b in B:
C_ref = qA.dequantize().numpy() * b.item()
qC_hat = torch.ops.quantized.mul_scalar(qA, b.item())
self.assertEqual(C_ref, qC_hat.dequantize())
# Scalar multiplication + relu
for b in B:
C_ref = qA.dequantize().numpy() * b.item()
C_ref[C_ref < 0] = 0
qC_hat = torch.ops.quantized.mul_scalar_relu(qA, b.item())
self.assertEqual(C_ref, qC_hat.dequantize())
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_relu_different_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul_out
mul_relu_out = torch.ops.quantized.mul_relu_out
A = torch.arange(-100, 100, dtype=torch.float)
B = torch.arange(-100, 100, dtype=torch.float)
scale_A = 3.0
zero_point_A = 7
scale_B = 5.0
zero_point_B = 127
scale_C = 0.5
zero_point_C = 5
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
dtype=dtype)
# mul ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype[dtype])
qC_hat = mul(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized multiplication failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
mul_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="mul.out failed")
# mul + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale_C, zero_point_C, dtype=np_dtype[dtype])
qCrelu_hat = mul_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized multiplication with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
mul_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="mulReLU.out failed")
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_broadcast(self):
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul_out
mul_relu_out = torch.ops.quantized.mul_relu_out
# A = torch.arange(-25, 25, dtype=torch.float)
# B = torch.arange(-25, 25, dtype=torch.float)
A = torch.randn(8, 1, 6, 1)
B = torch.randn(7, 1, 5)
scale_A = 3.0
zero_point_A = 7
scale_B = 5.0
zero_point_B = 127
scale_C = 0.5
zero_point_C = 5
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
dtype=torch.quint8)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
dtype=torch.quint8)
# mul ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C)
qC_hat = mul(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized multiplication failed.")
"""Tests channel shuffle operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=2, max_side=32, max_numel=10**5),
qparams=hu.qparams(dtypes=[torch.quint8])),
groups=st.integers(2, 6))
def test_channel_shuffle(self, X, groups):
X, (scale, zero_point, torch_type) = X
channels = X.shape[-3]
iH, iW = X.shape[-2:]
assume(channels % groups == 0)
a = torch.from_numpy(X)
a = torch.rand(a.shape)
a_out = torch.nn.functional.channel_shuffle(a, groups)
a_ref = torch.quantize_per_tensor(a_out, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
a_hat = torch.nn.functional.channel_shuffle(qa, groups)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="torch.nn.functional.channel_shuffle results are off")
"""Tests max pool operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool2d(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
a = torch.from_numpy(X)
a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
"torch": torch.max_pool2d,
"nn.functional": torch.nn.functional.max_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.max_pool2d
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="{} results are off".format(name))
# Test the ops.quantized separately, because None is not treated.
a_hat = torch.ops.quantized.max_pool2d(
qa, kernel_size=_pair(kernel),
stride=_pair(kernel if stride is None else stride),
padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool2d results are off")
"""Tests max pool operation on NHWC quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool2d_nhwc(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Ensure we hit the vectorized paths
# 176 = 128 + 32 + 16
# 128 hits the interleaved path
# 32 hits the non-interleaved path
# 16 hits the scalar path
if X.shape[1] < 176:
            X = np.repeat(X, 176 // X.shape[1], 1)
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
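        # Build a channels-last tensor: lay the data out as NHWC in memory, then
        # permute so it is logically NCHW while keeping NHWC (channels-last) strides.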
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
a = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale, zero_point=zero_point,
dtype=torch_type).permute([0, 3, 1, 2])
self.assertTrue(qa.stride() != sorted(qa.stride()))
ops_under_test = {
"torch": torch.max_pool2d,
"nn.functional": torch.nn.functional.max_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.max_pool2d
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertTrue(a_hat.stride() != sorted(a_hat.stride()))
self.assertEqual(a_ref, a_hat.dequantize(),
msg="{} results are off".format(name))
# Test the ops.quantized separately, because None is not treated.
a_hat = torch.ops.quantized.max_pool2d(
qa, kernel_size=_pair(kernel),
stride=_pair(kernel if stride is None else stride),
padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool2d results are off")
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.quint8)),
kernel=st.sampled_from((3, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool2d(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
        Note: we currently cannot test divisor_override, because the quantized op
        will clamp the result within range, while the float op will not.
"""
X, (scale, zero_point, torch_type) = X
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X = qX.dequantize()
# Run reference on float tensor and then quantize the result for comparison
X_ref = torch.nn.functional.avg_pool2d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool2d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
qX_ref = torch.quantize_per_tensor(X_ref, scale=qX_hat.q_scale(), zero_point=qX_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), qX_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), qX_hat.int_repr()))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
qX_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.qint8)),
kernel=st.sampled_from((4, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool2d_nhwc(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
        Note: 1) we currently cannot test divisor_override, because the quantized op
        will clamp the result within range, while the float op will not.
        2) we cannot test qint32, since floating point precision is much lower than
        int32 precision for big numbers, which makes the test very flaky.
"""
X, (scale, zero_point, torch_type) = X
H, W = X.shape[-2:]
if X.shape[1] < 176:
            X = np.repeat(X, 176 // X.shape[1], 1)
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
zero_point=zero_point, dtype=torch_type).permute([0, 3, 1, 2])
X = qX.dequantize()
        # Run reference on the dequantized tensor and then quantize the result for comparison.
X_ref = torch.nn.functional.avg_pool2d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool2d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
qX_ref = torch.quantize_per_tensor(X_ref, scale=X_hat.q_scale(), zero_point=X_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), X_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
X_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.quint8)),
kernel=st.sampled_from((3, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool3d(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
        Note: we currently cannot test divisor_override, because the quantized op
        will clamp the result within range, while the float op will not.
"""
X, (scale, zero_point, torch_type) = X
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iD, iH, iW = X.shape[-3:]
oD = pool_output_shape(iD, kernel, padding, stride, dilation=1)
assume(oD > 0)
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X = qX.dequantize()
# Run reference on float tensor and then quantize the result for comparison
X_ref = torch.nn.functional.avg_pool3d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool3d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool3d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
qX_ref = torch.quantize_per_tensor(X_ref, scale=qX_hat.q_scale(), zero_point=qX_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), qX_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), qX_hat.int_repr()))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
qX_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.qint8)),
kernel=st.sampled_from((4, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool3d_nhwc(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
Note: 1) we currently cannot test the divisor_override, because quantized op will clamp the result
within range. However, the float op will not.
2) we cannot test the qint32, since the float point precision is much lower than int32 for big number,
which will make the test be very flaky.
"""
X, (scale, zero_point, torch_type) = X
D, H, W = X.shape[-3:]
if X.shape[1] < 176:
            X = np.repeat(X, 176 // X.shape[1], 1)
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iD, iH, iW = X.shape[-3:]
oD = pool_output_shape(iD, kernel, padding, stride, dilation=1)
assume(oD > 0)
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
zero_point=zero_point, dtype=torch_type).permute([0, 4, 1, 2, 3])
X = qX.dequantize()
        # Run reference on the float tensor and then quantize the result for comparison.
X_ref = torch.nn.functional.avg_pool3d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool3d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool3d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
qX_ref = torch.quantize_per_tensor(X_ref, scale=X_hat.q_scale(), zero_point=X_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), X_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
X_hat.q_zero_point()))
"""Tests adaptive average pool operation on NHWC quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams(dtypes=torch.qint8)),
output_size_h=st.integers(1, 10),
output_size_w=st.integers(1, 10))
def test_adaptive_avg_pool2d_nhwc(self, X, output_size_h, output_size_w):
X, (scale, zero_point, torch_type) = X
H, W = X.shape[-2:]
assume(output_size_h <= H)
assume(output_size_w <= W)
if output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_h, output_size_w)
if X.shape[1] < 176:
            X = np.repeat(X, 176 // X.shape[1], 1)
if X.ndim == 4:
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
X = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([0, 3, 1, 2])
else: # ndim == 3
X_nchw = np.ascontiguousarray(X.transpose([1, 2, 0]))
X = torch.from_numpy(X_nchw).permute([2, 0, 1])
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([2, 0, 1])
# Run reference on int_repr + round to avoid double rounding error.
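        # Averaging the integer representation and rounding once matches the
        # quantized kernel exactly; averaging dequantized floats and then
        # requantizing would round twice and could be off by one.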
X_ref = torch.nn.functional.adaptive_avg_pool2d(qX.int_repr().to(torch.double), output_size).round()
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.adaptive_avg_pool2d,
"nn.quantized.functional":
torch.nn.quantized.functional.adaptive_avg_pool2d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, output_size=output_size)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, X_hat.int_repr(), atol=1.0, rtol=0,
msg=error_message.format(name, X_ref, X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
X_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=5,
min_side=1, max_side=10),
qparams=hu.qparams(dtypes=torch.quint8)),
output_size_d=st.integers(1, 10),
output_size_h=st.integers(1, 10),
output_size_w=st.integers(1, 10))
def test_adaptive_avg_pool(self, X, output_size_d, output_size_h,
output_size_w):
X, (scale, zero_point, torch_type) = X
ndim = X.ndim
dim_to_check = []
if ndim <= 4:
dim_to_check.append(2)
if ndim >= 4:
dim_to_check.append(3)
D, H, W = X.shape[-3:]
assume(output_size_d <= D)
assume(output_size_h <= H)
assume(output_size_w <= W)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
for dim in dim_to_check:
if dim == 2:
if output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_h, output_size_w)
elif dim == 3:
if output_size_d == output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_d, output_size_h, output_size_w)
# Run reference on int_repr + round to avoid double rounding error.
ref_op = getattr(torch.nn.functional, 'adaptive_avg_pool{}d'.format(dim))
X_ref = ref_op(qX.int_repr().to(torch.float), output_size).round()
ops_under_test = {
"nn.functional":
getattr(torch.nn.functional, 'adaptive_avg_pool{}d'.format(dim)),
"nn.quantized.functional":
getattr(torch.nn.quantized.functional, 'adaptive_avg_pool{}d'.format(dim))
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, output_size=output_size)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
X_ref, qX_hat.int_repr(), atol=1.0,
rtol=0, msg=error_message.format(name, X_ref, qX_hat))
self.assertEqual(
scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale,
qX_hat.q_scale()))
self.assertEqual(
zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
qX_hat.q_zero_point()))
"""Tests adaptive average pool operation on NHWC quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=5,
min_side=1, max_side=10),
qparams=hu.qparams(dtypes=torch.qint8)),
output_size_d=st.integers(1, 10),
output_size_h=st.integers(1, 10),
output_size_w=st.integers(1, 10))
def test_adaptive_avg_pool3d_ndhwc(self, X, output_size_d, output_size_h,
output_size_w):
X, (scale, zero_point, torch_type) = X
D, H, W = X.shape[-3:]
assume(output_size_d <= D)
assume(output_size_h <= H)
assume(output_size_w <= W)
if output_size_d == output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_d, output_size_h, output_size_w)
if X.shape[1] < 176:
            X = np.repeat(X, 176 // X.shape[1], 1)
if X.ndim == 5:
X_ncdhw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
X = torch.from_numpy(X_ncdhw).permute([0, 4, 1, 2, 3])
qX = torch.quantize_per_tensor(torch.from_numpy(X_ncdhw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([0, 4, 1, 2, 3])
else: # ndim == 4
X_ncdhw = np.ascontiguousarray(X.transpose([1, 2, 3, 0]))
X = torch.from_numpy(X_ncdhw).permute([3, 0, 1, 2])
qX = torch.quantize_per_tensor(torch.from_numpy(X_ncdhw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([3, 0, 1, 2])
# Run reference on int_repr + round to avoid double rounding error.
X_ref = torch.nn.functional.adaptive_avg_pool3d(
qX.int_repr().to(torch.double), output_size).round()
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.adaptive_avg_pool3d,
"nn.quantized.functional":
torch.nn.quantized.functional.adaptive_avg_pool3d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, output_size=output_size)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, X_hat.int_repr(), atol=1.0, rtol=0,
msg=error_message.format(name, X_ref, X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
X_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
k=st.integers(1, 10),
dim=st.integers(1, 4),
largest=st.booleans(),
sorted=st.booleans())
def test_qtopk(self, X, k, dim, largest, sorted):
X, (scale, zero_point, torch_type) = X
qX = torch.quantize_per_tensor(torch.from_numpy(X), scale, zero_point, torch_type)
assume(dim < X.ndim)
assume(k < X.shape[dim])
unquantized_out = torch.topk(qX.dequantize(), k, dim=dim, largest=largest, sorted=sorted)
quantized_out = torch.topk(qX, k, dim=dim, largest=largest, sorted=sorted)
assert(len(unquantized_out) == len(quantized_out))
torch.testing.assert_allclose(quantized_out[0].dequantize(), unquantized_out[0])
torch.testing.assert_allclose(quantized_out[1], unquantized_out[1])
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
k=st.integers(1, 10),
dim=st.integers(1, 4),
largest=st.booleans(),
sorted=st.booleans())
def test_qtopk_nhwc(self, X, k, dim, largest, sorted):
# X is NHWC, we permute to view as NCHW but keep NHWC in memory
X, (scale, zero_point, torch_type) = X
qX = torch.quantize_per_tensor(torch.from_numpy(X), scale, zero_point, torch_type).permute([0, 3, 1, 2])
X = np.transpose(X, [0, 3, 1, 2])
assume(dim < X.ndim)
assume(k < X.shape[dim])
unquantized_out = torch.topk(qX.dequantize(), k, dim=dim, largest=largest, sorted=sorted)
quantized_out = torch.topk(qX, k, dim=dim, largest=largest, sorted=sorted)
assert(len(unquantized_out) == len(quantized_out))
torch.testing.assert_allclose(quantized_out[0].dequantize(), unquantized_out[0])
torch.testing.assert_allclose(quantized_out[1], unquantized_out[1])
"""Tests quantize concatenation (both fused and not)."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
num=st.integers(1, 4),
dim=st.integers(1, 4),
relu=st.booleans())
def test_cat(self, X, num, dim, relu):
tensors_q = []
tensors_ref = []
X, (scale, zero_point, torch_type) = X
assume(dim < X.ndim)
X = torch.from_numpy(X)
new_shape = np.array(X.shape)
new_shape[dim] = 0
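        # new_shape tracks the accumulated size along the concat dimension so
        # the output tensor for the out= variant below can be preallocated with
        # the right shape.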
for idx in range(num):
tensors_q.append(torch.quantize_per_tensor(X, scale, zero_point,
torch_type))
tensors_ref.append(X)
new_shape[dim] += tensors_ref[-1].shape[dim]
cat_ref = torch.cat(tensors_ref, dim=dim)
cat_ref = torch.quantize_per_tensor(cat_ref, scale, zero_point, torch_type)
cat_ref = cat_ref.dequantize()
if relu:
cat_ref = F.relu(cat_ref)
q_cat_op = torch.ops.quantized.cat_relu
q_cat_out_op = torch.ops.quantized.cat_relu_out
else:
q_cat_op = torch.ops.quantized.cat
q_cat_out_op = torch.ops.quantized.cat_out
cat_q = q_cat_op(tensors_q, dim=dim, scale=scale,
zero_point=zero_point)
cat_q = cat_q.dequantize()
np.testing.assert_equal(cat_ref.numpy(), cat_q.numpy())
cat_q_out = torch._empty_affine_quantized(
list(new_shape), scale=scale,
zero_point=zero_point, dtype=torch_type)
q_cat_out_op(tensors_q, dim=dim, out=cat_q_out)
cat_q_out = cat_q_out.dequantize()
np.testing.assert_equal(cat_ref.numpy(), cat_q_out.numpy())
# Test the cat on per-channel quantized tensor.
ch_axis = 1
scales = torch.from_numpy(np.array([1.0] * X.shape[ch_axis]))
scales = scales.to(torch.float64)
zero_points = torch.from_numpy(np.array([0] * X.shape[ch_axis]))
zero_points = zero_points.to(torch.long)
tensors_q[0] = torch.quantize_per_channel(
X, scales, zero_points, axis=ch_axis, dtype=torch_type)
with self.assertRaisesRegex(RuntimeError, "supported.*cat"):
cat_q = q_cat_op(tensors_q, dim=ch_axis, scale=scale,
zero_point=zero_point)
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=5, max_side=10),
qparams=hu.qparams()),
size=st.sampled_from((1, 3, 5, 10)),
mode=st.sampled_from(("bilinear", "nearest")),
scale_factor=st.sampled_from((None, 1.5, 2.0)),
align_corners=st.sampled_from((True, False)),
nhwc_layout=st.sampled_from((True, False)))
def test_interpolate(self, X, size, mode, scale_factor, align_corners, nhwc_layout):
"""
This test cover upsample_nearest2d and upsample_bilinear2d
"""
X, (scale, zero_point, torch_type) = X
H, W = X.shape[-2:]
if scale_factor is not None:
size = None
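        # F.interpolate accepts either size or scale_factor (not both), and
        # align_corners is only valid for the (bi)linear modes, so the sampled
        # combinations are normalized here.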
if mode == "nearest":
align_corners = None
if nhwc_layout:
if X.shape[1] < 176:
                X = np.repeat(X, 176 // X.shape[1], 1)
            X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
            X = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
            qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
                                           zero_point=zero_point,
                                           dtype=torch_type).permute([0, 3, 1, 2])
else:
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X_ref = torch.nn.functional.interpolate(
qX.int_repr().to(torch.float), size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
ops_under_test = {
"nn.functional": torch.nn.functional.interpolate,
"nn.quantized.functional": torch.nn.quantized.functional.interpolate
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
msg="{} results are off: qX_hat={} X_ref={}"
.format(name, qX_hat.int_repr(), X_ref))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
qX_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
min_side=5, max_side=10),
qparams=hu.qparams()),
size=st.sampled_from((1, 3, 5, 5, 10)),
scale_factor=st.sampled_from((None, 1.5, 2.0)),
align_corners=st.sampled_from((True, False)),
nhwc_layout=st.sampled_from((True, False)))
def test_interpolate3d(self, X, size, scale_factor, align_corners, nhwc_layout):
"""
This test cover upsample_nearest2d and upsample_bilinear2d
"""
X, (scale, zero_point, torch_type) = X
D, H, W = X.shape[-3:]
mode = "nearest"
if scale_factor is not None:
size = None
if mode == "nearest":
align_corners = None
if nhwc_layout:
if X.shape[1] < 176:
                X = np.repeat(X, 176 // X.shape[1], 1)
            X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
            X = torch.from_numpy(X_nchw).permute([0, 4, 1, 2, 3])
            qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
                                           zero_point=zero_point,
                                           dtype=torch_type).permute([0, 4, 1, 2, 3])
else:
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X_ref = torch.nn.functional.interpolate(
qX.int_repr().to(torch.float), size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
ops_under_test = {
"nn.functional": torch.nn.functional.interpolate,
"nn.quantized.functional": torch.nn.quantized.functional.interpolate
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
msg="{} results are off: qX_hat={}, X_ref={}"
.format(name, qX_hat.int_repr(), X_ref))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', scale,
qX_hat.q_zero_point()))
"""Tests quantize concatenation (both fused and not)."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
relu=st.booleans())
def test_cat_nhwc(self, X, relu):
# X is NHWC
X, (scale, zero_point, torch_type) = X
# Tile out X so # channels is > 64
        X = np.repeat(X, 70 // X.shape[3], 3)
X = torch.from_numpy(np.ascontiguousarray(X))
        Y = X.clone()
        # Here we quantize and get quantized tensors in NHWC for both dims and
        # strides. The permute switches it so that the tensor looks like NCHW
        # but is laid out in memory as NHWC.
qX = torch.quantize_per_tensor(X, scale, zero_point, torch_type).permute([0, 3, 1, 2])
qY = torch.quantize_per_tensor(Y, scale, zero_point, torch_type).permute([0, 3, 1, 2])
ref = torch.cat([qX.dequantize(), qY.dequantize()], dim=1)
if relu:
ref[ref < 0] = 0.0
ref = torch.quantize_per_tensor(ref, scale=scale, zero_point=zero_point, dtype=torch_type)
if relu:
out = torch.ops.quantized.cat_relu(
[qX, qY], dim=1, scale=scale, zero_point=zero_point)
else:
out = torch.ops.quantized.cat([qX, qY], dim=1, scale=scale, zero_point=zero_point)
torch.testing.assert_allclose(out.dequantize(), ref.dequantize())
self.assertNotEqual(out.stride(), sorted(out.stride()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=3,
min_side=1, max_side=2),
qparams=hu.qparams()),
dim=st.integers(1, 2))
def test_mean(self, X, dim):
X, (scale, zero_point, torch_type) = X
qX = torch.quantize_per_tensor(torch.tensor(X).float(), scale, zero_point, torch_type)
Y = torch.mean(qX.dequantize(), dim)
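        # Requantize the float reference with the input qparams so it goes
        # through the same rounding the quantized mean kernel applies.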
Y = torch.quantize_per_tensor(Y, scale, zero_point, torch_type).dequantize()
qY = torch.mean(qX, dim)
self.assertEqual(Y, qY.dequantize())
"""Tests the correctness of the quantized equal op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()),
X2=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()),
X_per_channel=st.booleans(),
X2_per_channel=st.booleans())
def test_equal(self, X, X2, X_per_channel, X2_per_channel):
X, X_params = X
(scale, zero_point, torch_type) = X_params
X2, X2_params = X2
(scale2, zero_point2, torch_type2) = X2_params
X = torch.from_numpy(X)
if X_per_channel:
X_scheme = 'per_channel'
channels = X.shape[-1]
qX = torch.quantize_per_channel(
X,
scales=torch.tensor([scale] * channels),
zero_points=torch.tensor([zero_point] * channels),
dtype=torch_type,
axis=X.ndim - 1)
else:
X_scheme = 'per_tensor'
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X2 = torch.from_numpy(X2)
if X2_per_channel:
X2_scheme = 'per_channel'
channels = X2.shape[-1]
qX2 = torch.quantize_per_channel(
X2,
scales=torch.tensor([scale2] * channels),
zero_points=torch.tensor([zero_point2] * channels),
dtype=torch_type2,
axis=X2.ndim - 1)
else:
X2_scheme = 'per_tensor'
qX2 = torch.quantize_per_tensor(X2, scale=scale2, zero_point=zero_point2,
dtype=torch_type2)
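        # Reference semantics for quantized equality: tensors are equal iff all
        # quantization metadata (scheme, shape, dtype, scale/zero_point or the
        # per-channel equivalents) matches and the raw integer values match.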
def equal_ref(qX, qX2):
if qX.qscheme() != qX2.qscheme():
return False
if qX.shape != qX2.shape:
return False
if qX.dtype != qX2.dtype:
return False
if qX.qscheme() == torch.per_tensor_affine:
if qX.q_scale() != qX2.q_scale():
return False
if qX.q_zero_point() != qX2.q_zero_point():
return False
elif qX.qscheme() == torch.per_channel_affine:
if (qX.q_per_channel_scales() !=
qX2.q_per_channel_scales()).any():
return False
if (qX.q_per_channel_zero_points() !=
qX2.q_per_channel_zero_points()).any():
return False
else:
raise NotImplementedError("Don't know what to do with",
qX.qscheme())
if (qX.int_repr().to(float) != qX2.int_repr().to(float)).any():
return False
return True
self.assertEqual(qX.equal(qX), equal_ref(qX, qX))
self.assertEqual(qX.equal(qX2), equal_ref(qX, qX2))
@skipIfNoFBGEMM
def test_group_norm(self):
# hypothesis is flaky for this test, create test cases manually
batches_list = (1, 7)
num_groups_list = (1, 2)
channels_per_groups = (1, 2)
elements_per_channels = (8, 17)
torch_types = (torch.qint8, torch.quint8)
y_scales = (0.1, 4.23)
y_zero_points = (0, 1)
channels_last_list = [True, False]
affine_list = [True, False]
combined = [batches_list, num_groups_list, channels_per_groups, elements_per_channels,
torch_types, y_scales, y_zero_points, channels_last_list, affine_list]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
batches, num_groups, channels_per_group, elements_per_channel, \
torch_type, Y_scale, Y_zero_point, channels_last, \
affine = test_case
num_channels = num_groups * channels_per_group
                # minimum rank for channels_last
shapes = (batches, num_channels, elements_per_channel, 1)
# In the FP kernel, sums and sums of squares are calculated in floating point.
# In the int8 and uint8 versions of the quantized kernel, they are
# calculated in integer arithmetic (which is exact).
# Because of this, the numerics do not always match exactly which is
# expected and acceptable. We do the following to allow this failure
# in this test:
# 1. do not use Hypothesis to generate the input tensor. Hypothesis
# favors homogeneous inputs in its search strategies which isn't
# representative of the inputs we care about, and tends to maximize
# this particular numerics difference.
# 2. allow a small % of off by Y_scale errors. Even when the
# variance of the input is high, there can be off by one errors
# in the result if the input value happens to fall exactly on
# the bin boundary of the output scale.
#
# If we want the numerics to match we could switch to calculating
# mean+var in floating point in the future, at the cost of speed.
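                # For example, with Y_scale = 0.1 a float result landing exactly
                # on a bin boundary may round to either neighboring integer, so
                # dqY and dqY_hat can differ by exactly one Y_scale step.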
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
# Initialize the weights non-randomly for reproducibility
if affine:
weight = torch.ones(num_channels).float() * 0.5
bias = torch.ones(num_channels).float()
for i in range(num_channels):
weight[i] *= i
bias[i] *= i
else:
weight = None
bias = None
eps = 0.001
qX = torch.quantize_per_tensor(X, X_scale, X_zero_point, torch_type)
if channels_last:
qX = qX.contiguous(memory_format=torch.channels_last)
dqX = qX.dequantize()
# Enforce non-homogeneous inputs
for batch_idx in range(batches):
for group_idx in range(num_groups):
ch_start = group_idx * channels_per_group
ch_end = ch_start + channels_per_group
group_vals = dqX[batch_idx][ch_start:ch_end]
assume(
float(torch.unique(group_vals).shape[0]) / group_vals.numel() > 0.01
or group_vals.numel() < 5)
qY = torch.ops.quantized.group_norm(qX, num_groups, weight, bias, eps, Y_scale, Y_zero_point)
dqY_hat = F.group_norm(dqX, num_groups=num_groups, weight=weight, bias=bias, eps=eps)
qY_hat = torch.quantize_per_tensor(dqY_hat, Y_scale, Y_zero_point, torch_type)
# Due to the numerics difference mentioned above between calculating
# the variance in float vs int, the results can still be slightly
# different.
dqY = qY.dequantize()
dqY_hat = qY_hat.dequantize()
diff = dqY - dqY_hat
# off-by-one errors are magnitude of Y_scale
num_diff = torch.sum(diff > Y_scale * 1.0001)
pct_diff = float(num_diff) / (diff.numel() + 1e-5)
num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)
self.assertTrue(pct_diff < 1e-6)
self.assertTrue(pct_diff_off_by_one < 0.01)
@skipIfNoFBGEMM
def test_instance_norm(self):
max_sides = (4, 5)
side_lens = (2, 8, 11)
torch_types = (torch.qint8, torch.quint8)
y_scales = (0.1, 4.23)
y_zero_points = (0, 1)
channels_last_list = (True, False)
affine_list = (True, False)
combined = [side_lens, torch_types, y_scales, y_zero_points, channels_last_list, affine_list]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
side_len, torch_type, Y_scale, Y_zero_point, channels_last, affine = test_case
shapes = [side_len] * 4
# In the FP kernel, sums and sums of squares are calculated in floating point.
# In the int8 and uint8 versions of the quantized kernel, they are
# calculated in integer arithmetic (which is exact).
# Because of this, the numerics do not always match exactly which is
# expected and acceptable. We do the following to allow this failure
# in this test:
# 1. do not use Hypothesis to generate the input tensor. Hypothesis
# favors homogeneous inputs in its search strategies which isn't
# representative of the inputs we care about, and tends to maximize
# this particular numerics difference.
# 2. allow a small % of off by Y_scale errors. Even when the
# variance of the input is high, there can be off by one errors
# in the result if the input value happens to fall exactly on
# the bin boundary of the output scale.
#
# If we want the numerics to match we could switch to calculating
# mean+var in floating point in the future, at the cost of speed.
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
num_channels = shapes[1]
if affine:
weight = torch.rand(num_channels).float() * 0.5
bias = torch.rand(num_channels).float()
for i in range(num_channels):
weight[i] *= i
bias[i] *= i
else:
weight = None
bias = None
eps = 0.001
qX = torch.quantize_per_tensor(X, X_scale, X_zero_point, torch_type)
if channels_last:
qX = qX.contiguous(memory_format=torch.channels_last)
dqX = qX.dequantize()
# Enforce non-homogeneous inputs
batches = shapes[0]
for batch_idx in range(batches):
for ch_idx in range(num_channels):
ch_vals = dqX[batch_idx][ch_idx]
assume(
float(torch.unique(ch_vals).shape[0]) / ch_vals.numel() > 0.01
                        or ch_vals.numel() < 5)
qY = torch.ops.quantized.instance_norm(qX, weight, bias, eps, Y_scale, Y_zero_point)
dqY_hat = F.instance_norm(dqX, weight=weight, bias=bias, eps=eps)
qY_hat = torch.quantize_per_tensor(dqY_hat, Y_scale, Y_zero_point, torch_type)
# Due to the numerics difference mentioned above between calculating
# the variance in float vs int, the results can still be slightly
# different.
dqY = qY.dequantize()
dqY_hat = qY_hat.dequantize()
diff = dqY - dqY_hat
# off-by-one errors are magnitude of Y_scale
num_diff = torch.sum(diff > Y_scale * 1.0001)
pct_diff = float(num_diff) / (diff.numel() + 1e-5)
num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)
self.assertTrue(pct_diff < 1e-6)
self.assertTrue(pct_diff_off_by_one < 0.01)
@skipIfNoFBGEMM
def test_batch_norm_relu(self):
# hypothesis too slow for this test, create test cases manually
max_sides = (3, 4, 5)
side_lens = (1, 8, 11)
torch_types = (torch.qint8, torch.quint8)
combined = [max_sides, side_lens, torch_types]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
max_side, side_len, torch_type = test_case
Y_zero_point = 1
Y_scale = 0.5
shapes = [side_len] * max_side
X, scale_x, zero_point_x = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
dtype_x = torch_type
c = X.shape[1]
mean = torch.rand(c).float()
var = torch.rand(c).float()
weight = torch.rand(c).float()
bias = torch.rand(c).float()
eps = 0.001
qx = torch.quantize_per_tensor(X, scale_x, zero_point_x, dtype_x)
if len(X.shape) == 3:
qy = torch.ops.quantized.batch_norm1d_relu(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
elif len(X.shape) == 4:
qy = torch.ops.quantized.batch_norm2d_relu(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
else:
qy = torch.ops.quantized.batch_norm3d_relu(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
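                # With training=False and momentum=0, F.batch_norm is a pure
                # per-channel affine transform of the dequantized input; apply
                # relu manually and quantize with the output qparams to get an
                # exact integer reference.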
float_ref = F.batch_norm(qx.dequantize(), weight=weight, bias=bias,
running_mean=mean, running_var=var,
training=False, momentum=0, eps=eps).numpy()
float_ref_relu = float_ref.copy()
float_ref_relu[float_ref < 0] = 0
quantize_ref = torch.quantize_per_tensor(
torch.from_numpy(float_ref_relu), Y_scale, Y_zero_point, dtype_x)
self.assertEqual(
qy.int_repr().numpy(),
quantize_ref.int_repr().numpy(),
msg="{} vs {}".format(qy, quantize_ref))
@skipIfNoFBGEMM
def test_batch_norm(self):
# hypothesis too slow for this test, create test cases manually
max_sides = (3, 4, 5)
side_lens = (1, 8, 11)
torch_types = (torch.qint8, torch.quint8)
combined = [max_sides, side_lens, torch_types]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
max_side, side_len, torch_type = test_case
Y_zero_point = 1
Y_scale = 0.5
shapes = [side_len] * max_side
X, scale_x, zero_point_x = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
dtype_x = torch_type
c = X.shape[1]
mean = torch.rand(c).float()
var = torch.rand(c).float()
weight = torch.rand(c).float()
bias = torch.rand(c).float()
eps = 0.001
qx = torch.quantize_per_tensor(X, scale_x, zero_point_x, dtype_x)
if len(X.shape) == 3:
qy = torch.ops.quantized.batch_norm1d(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
if len(X.shape) == 4:
qy = torch.ops.quantized.batch_norm2d(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
if len(X.shape) == 5:
qy = torch.ops.quantized.batch_norm3d(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
float_ref = F.batch_norm(qx.dequantize(), weight=weight, bias=bias,
running_mean=mean, running_var=var, training=False,
momentum=0, eps=eps)
quantize_ref = torch.quantize_per_tensor(float_ref, Y_scale, Y_zero_point, dtype_x)
self.assertEqual(
qy.int_repr().numpy(), quantize_ref.int_repr().numpy(),
msg="{} vs {}".format(qy, quantize_ref))
@override_qengines
def test_empty_batch(self):
scale = 1.0
zero_point = 0
X = torch.ones((0, 2, 4, 4), dtype=torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
# relu
qY = torch.nn.functional.relu(qX)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized relu with batch size 0 failed.")
# tanh
qY = torch.tanh(qX)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized tanh with batch size 0 failed.")
# sigmoid
qY = torch.sigmoid(qX)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized sigmoid with batch size 0 failed.")
# interpolate
op = torch.nn.quantized.functional.interpolate
for mode in ["nearest", "bilinear"]:
qY = op(qX, scale_factor=2, mode=mode)
np.testing.assert_equal(qY.size(), (0, 2, 8, 8),
"Quantized interpolate with batch size 0 failed.")
# avg_pool
kernel = (2, 2)
stride = (1, 1)
padding = (0, 0)
op = torch.nn.quantized.functional.avg_pool2d
qY = op(qX, kernel, stride, padding)
np.testing.assert_equal(qY.size(), (0, 2, 3, 3),
"Quantized avg_pool2d with batch size 0 failed.")
# adaptive_avg_pool
op = torch.nn.quantized.functional.adaptive_avg_pool2d
qY = op(qX, (3, 3))
np.testing.assert_equal(qY.size(), (0, 2, 3, 3),
"Quantized adaptive_avg_pool2d with batch size 0 failed.")
# max_pool
dilation = (1, 1)
qY = torch.ops.quantized.max_pool2d(qX, kernel, stride, padding, dilation, ceil_mode=False)
oH = pool_output_shape(4, 2, 0, 1, 1)
oW = pool_output_shape(4, 2, 0, 1, 1)
np.testing.assert_equal(qY.size(), (0, 2, oH, oW),
"Quantized maxpool2d with batch size 0 failed.")
# hardtanh
qY = torch.nn.quantized.functional.hardtanh(qX, -1, 6)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized hardtanh with batch size 0 failed.")
# mul
qY = torch.ops.quantized.mul(qX, qX, 1.0, 0)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized mul with batch size 0 failed.")
# add
qY = torch.ops.quantized.add(qX, qX, 1.0, 0)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized addition with batch size 0 failed.")
# conv
w = torch.randn((2, 2, 2, 2), dtype=torch.float)
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
bias_float = torch.ones(2, dtype=torch.float)
strides = [1, 1]
pads = [0, 0]
dilations = [1, 1]
w_packed = torch.ops.quantized.conv2d_prepack(qw, bias_float, strides, pads, dilations, 1)
result = torch.ops.quantized.conv2d(qX, w_packed, 1.0, 0)
self.assertEqual(result.shape, (0, 2, 3, 3))
# linear
X = torch.ones((0, 2), dtype=torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
w = torch.randn((2, 2), dtype=torch.float)
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
result = torch.ops.quantized.linear(qX, w_packed, 1.0, 0)
self.assertEqual(result.shape, (0, 2))
# dynamic linear
result = torch.ops.quantized.linear_dynamic(X, w_packed)
self.assertEqual(result.shape, (0, 2))
class TestDynamicQuantizedLinear(TestCase):
"""Tests the correctness of the dynamic quantized linear and linear_relu op."""
@override_qengines
@given(
batch_size=st.integers(1, 4),
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_multi_dim_input=st.booleans(),
use_channelwise=st.booleans(),
reduce_range=st.booleans())
def test_qlinear(self, batch_size, input_channels, output_channels,
use_bias, use_relu, use_multi_dim_input, use_channelwise, reduce_range):
if torch.backends.quantized.engine == 'qnnpack':
use_relu = False
reduce_range = False
qlinear_prepack = torch.ops.quantized.linear_prepack
if use_relu:
qlinear_dynamic = torch.ops.quantized.linear_relu_dynamic
else:
qlinear_dynamic = torch.ops.quantized.linear_dynamic
if use_multi_dim_input:
batch_size *= 3 # Test the multi-dim input tensor
X_scale = 1.0
X_zp = 0
X_value_min = 0
X_value_max = 255
if reduce_range:
X_value_max = 127
X_q0 = np.round(np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min) + X_value_min).astype(np.uint8)
X_q0[0, 0] = X_value_min
X_q0[0, 1] = X_value_max
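        # Pinning the min and max values guarantees that the dynamic-range
        # observer inside the op sees the full [X_value_min, X_value_max] range,
        # which keeps the qparams it derives deterministic.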
# W_scale = 1.0
# W_zp = 0
W_scales = np.ones(output_channels)
        W_zps = np.zeros(output_channels).astype(np.int64)
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8)
W_q0[0, 0] = W_value_min
W_q0[1, 0] = W_value_max
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
if torch.backends.quantized.engine == 'fbgemm':
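            # avoid_vpmaddubsw_overflow_linear (defined earlier in this file)
            # nudges the quantized values so the u8 * s8 -> s16 vpmaddubsw
            # instruction used by the FBGEMM kernels cannot saturate; otherwise
            # the exact comparison below could fail.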
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X_fp32 = torch.from_numpy(_dequantize(X_q0, X_scale, X_zp)).to(dtype=torch.float)
if use_multi_dim_input:
X_fp32 = X_fp32.view(3, int(batch_size / 3), input_channels)
# W_scale, W_zp = _calculate_dynamic_qparams(W_fp32, torch.qint8)
# We currently only check the case where W_scale = 1.0, W_zp = 0.
if use_channelwise:
W_fp32 = torch.from_numpy(_dequantize(W_q0, W_scales.reshape(
(-1, 1)), W_zps.reshape((-1, 1)))).to(dtype=torch.float)
W_q = torch.quantize_per_channel(W_fp32, scales=torch.from_numpy(W_scales),
zero_points=torch.from_numpy(W_zps), axis=0, dtype=torch.qint8)
b_fp32 = torch.from_numpy(
_dequantize(b_q0, X_scale * W_scales, 0)
).to(dtype=torch.float) if use_bias else None
else:
W_fp32 = torch.from_numpy(_dequantize(
W_q0, W_scales[0], W_zps[0])).to(dtype=torch.float)
W_q = torch.quantize_per_tensor(W_fp32, scale=W_scales[0], zero_point=(
W_zps[0].astype(int).item()), dtype=torch.qint8)
b_fp32 = torch.from_numpy(
_dequantize(b_q0, X_scale * int(W_scales[0].item()), 0)
).to(dtype=torch.float) if use_bias else None
        # Observe X_fp32 and determine X_scale and X_zero_point; this should
        # match the internals of dynamic linear.
X_scale, X_zp = _calculate_dynamic_qparams(X_fp32, torch.quint8, reduce_range)
X_q = torch.quantize_per_tensor(X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8)
# Weight prepacking operator for dynamic quantized Linear
W_prepack = qlinear_prepack(W_q, b_fp32)
# Dynamic quantized Linear operator with prepacked weight
Y_fp32 = qlinear_dynamic(X_q.dequantize(), W_prepack, reduce_range)
# Y_fp32 = qlinear_dynamic(X_fp32, W_prepack, b_fp32)
Y_fp32_ref = F.linear(X_q.dequantize(), W_q.dequantize(), b_fp32)
# Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
# if use_multi_dim_input:
# Y_fp32_ref = Y_fp32_ref.view(3, int(batch_size / 3), output_channels)
if use_relu:
Y_fp32_ref[Y_fp32_ref < 0.0] = 0.0
self.assertEqual(Y_fp32, Y_fp32_ref,
msg="torch.ops.quantized.linear_dynamic results are off")
class TestDynamicQuantizedRNNOp(TestCase):
"""Tests the correctness of the dynamic quantized lstm/gru."""
def _get_rnn_inputs(self, seq_len, num_batches, input_size, hidden_size, num_directions):
# For Input (seq_len, batch, input_size)
X = torch.randn(seq_len, num_batches, input_size)
s, z = _calculate_dynamic_qparams(X, torch.quint8, reduce_range=True)
Xq = torch.quantize_per_tensor(X, s, z, torch.quint8)
# For H and C: (num_layers(1) * num_directions, batch, hidden_size)
if num_directions == 1:
H = torch.randn(num_directions, num_batches, hidden_size)
C = torch.randn(num_directions, num_batches, hidden_size)
else:
H = torch.zeros(num_directions, num_batches, hidden_size)
C = torch.zeros(num_directions, num_batches, hidden_size)
s, z = _calculate_dynamic_qparams(H, torch.quint8, reduce_range=True)
Hq = torch.quantize_per_tensor(H, s, z, torch.quint8)
s, z = _calculate_dynamic_qparams(C, torch.quint8, reduce_range=True)
Cq = torch.quantize_per_tensor(C, s, z, torch.quint8)
return Xq, Hq, Cq
def _get_rnn_weights_and_bias(self, input_size, hidden_size, num_directions, per_channel_quant, rnn_type):
hidden_mult_map = {'LSTM': 4, 'LSTMCell': 4, 'GRU': 3, 'GRUCell': 3, 'RNNTanh': 2, 'RNNReLU': 2}
hidden_mult = hidden_mult_map[rnn_type]
weights1 = torch.randn(hidden_mult * hidden_size, input_size)
weights2 = torch.randn(hidden_mult * hidden_size, hidden_size)
scale1 = 0.1 * torch.ones([weights1.size()[0]])
scale2 = 0.3 * torch.ones([weights2.size()[0]])
zero_point1 = torch.zeros(scale1.size()).to(int)
zero_point2 = torch.zeros(scale2.size()).to(int)
b1 = torch.zeros(hidden_mult * hidden_size)
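        # The same zero bias tensor is reused for both the input-hidden and
        # hidden-hidden projections (returned twice below).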
if per_channel_quant:
Wq1 = torch.quantize_per_channel(weights1, scale1, zero_point1, 0, torch.qint8)
Wq2 = torch.quantize_per_channel(weights2, scale2, zero_point2, 0, torch.qint8)
else:
Wq1 = torch.quantize_per_tensor(weights1, float(scale1[0]), int(zero_point1[0]), torch.qint8)
Wq2 = torch.quantize_per_tensor(weights2, float(scale2[0]), int(zero_point2[0]), torch.qint8)
return Wq1, Wq2, b1, b1
@given(
num_batches=st.integers(1, 4),
input_size=st.integers(16, 32),
hidden_size=st.integers(4, 8),
num_directions=st.integers(1, 2),
per_channel_quant=st.booleans())
@override_qengines
def test_qlstmGRU(self, num_batches, input_size, hidden_size,
num_directions, per_channel_quant):
        # We test only for a seq length of 1 and a single layer, as dynamic
        # quantization occurs multiple times within the LSTM/GRU op and we do
        # not model the quantization between the multiple calls of the linear
        # op inside it.
seq_len = 1
for rnn_type in ['LSTM', 'GRU']:
for dtype in [torch.qint8, torch.float16]:
# Fp16 quantization is not supported for qnnpack
if torch.backends.quantized.engine == 'qnnpack' and dtype == torch.float16:
continue
Xq, Hq, Cq = self._get_rnn_inputs(seq_len, num_batches, input_size, hidden_size, num_directions)
Wq1, Wq2, b1, b2 = self._get_rnn_weights_and_bias(input_size,
hidden_size,
num_directions,
per_channel_quant,
rnn_type)
if dtype == torch.qint8:
packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(packed_ih, packed_hh, b1, b2, True)
W_ref1 = Wq1.dequantize()
W_ref2 = Wq2.dequantize()
else:
packed_ih = torch.ops.quantized.linear_prepack_fp16(Wq1.dequantize(), b1)
packed_hh = torch.ops.quantized.linear_prepack_fp16(Wq2.dequantize(), b2)
cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(packed_ih, packed_hh)
W_ref1 = Wq1.dequantize().to(torch.float16).to(torch.float32)
W_ref2 = Wq2.dequantize().to(torch.float16).to(torch.float32)
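                    # Positional arguments to _VF.lstm/_VF.gru below are, in
                    # order: input, hidden state(s), flat weights, has_biases,
                    # num_layers, dropout, train, bidirectional, batch_first.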
if rnn_type == 'LSTM':
if num_directions > 1:
result_ref = _VF.lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
[W_ref1, W_ref2, b1, b2, W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
num_directions > 1,
False)
result_dynamic = torch.quantized_lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
([cell_params, cell_params]),
True,
1,
0,
False,
True,
False,
dtype=torch.qint8,
use_dynamic=True)
else:
result_ref = _VF.lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
[W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
num_directions > 1,
False)
result_dynamic = torch.quantized_lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
([cell_params]),
True,
1,
0,
False,
num_directions > 1,
False,
dtype=torch.qint8,
use_dynamic=True)
if rnn_type == 'GRU':
if num_directions > 1:
result_ref = _VF.gru(Xq.dequantize(),
Hq.dequantize(),
[W_ref1, W_ref2, b1, b2, W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
True,
False)
result_dynamic = torch.quantized_gru(Xq.dequantize(),
Hq.dequantize(),
([cell_params, cell_params]),
True,
1,
0,
False,
True,
False)
else:
result_ref = _VF.gru(Xq.dequantize(),
Hq.dequantize(),
[W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
False,
False)
result_dynamic = torch.quantized_gru(Xq.dequantize(),
Hq.dequantize(),
([cell_params]),
True,
1,
0,
False,
False,
False)
self.assertEqual(result_ref[0], result_dynamic[0], msg="torch.quantized_lstm results are off")
@given(
num_batches=st.integers(1, 4),
input_size=st.integers(16, 32),
hidden_size=st.integers(4, 8),
per_channel_quant=st.booleans())
@override_qengines
def test_qrnncell(self, num_batches, input_size, hidden_size, per_channel_quant):
        # We test only for a seq length of 1 and a single layer, as dynamic
        # quantization occurs multiple times within the cell ops and we do not
        # model the quantization between the multiple linear calls inside them.
seq_len = 1
for rnn_type in ['LSTMCell', 'GRUCell', 'RNNTanh', 'RNNReLU']:
for dtype in [torch.qint8, torch.float16]:
# Fp16 quantization is not supported for qnnpack
if torch.backends.quantized.engine == 'qnnpack' and dtype == torch.float16:
continue
Xq, Hq, Cq = self._get_rnn_inputs(seq_len, num_batches, input_size, hidden_size, 1)
Wq1, Wq2, b1, b2 = self._get_rnn_weights_and_bias(input_size, hidden_size, 1, per_channel_quant, rnn_type)
if dtype == torch.qint8:
packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
W_ref1 = Wq1.dequantize()
W_ref2 = Wq2.dequantize()
else:
packed_ih = torch.ops.quantized.linear_prepack_fp16(Wq1.dequantize(), b1)
packed_hh = torch.ops.quantized.linear_prepack_fp16(Wq2.dequantize(), b2)
W_ref1 = Wq1.dequantize().to(torch.float16).to(torch.float32)
W_ref2 = Wq2.dequantize().to(torch.float16).to(torch.float32)
state = {'LSTMCell': (Hq.dequantize()[0], Cq.dequantize()[0]),
'GRUCell': Hq.dequantize()[0],
'RNNTanh': Hq.dequantize()[0],
'RNNReLU': Hq.dequantize()[0]}
fn_dict = {'LSTMCell': torch._VF.lstm_cell,
'GRUCell': torch._VF.gru_cell,
'RNNTanh': torch._VF.rnn_tanh_cell,
'RNNReLU': torch._VF.rnn_relu_cell}
qfn_dict = {'LSTMCell': torch.ops.quantized.quantized_lstm_cell_dynamic,
'GRUCell': torch.ops.quantized.quantized_gru_cell_dynamic,
'RNNTanh': torch.ops.quantized.quantized_rnn_tanh_cell_dynamic,
'RNNReLU': torch.ops.quantized.quantized_rnn_relu_cell_dynamic}
W_ref_dict = {torch.float16: (Wq1.dequantize().to(torch.float16).to(torch.float32),
Wq2.dequantize().to(torch.float16).to(torch.float32)),
torch.qint8: (Wq1.dequantize(), Wq2.dequantize())}
result_ref = fn_dict[rnn_type](Xq.dequantize()[0], state[rnn_type], W_ref1, W_ref2, b1, b2)
result_dynamic = qfn_dict[rnn_type](Xq.dequantize()[0], state[rnn_type], packed_ih, packed_hh, b1, b2)
self.assertEqual(result_ref[0], result_dynamic[0], msg="torch.quantized_rnncell results are off")
@skipIfNoFBGEMM
@given(
batch_size=st.integers(1, 4),
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
)
def test_qlinear_legacy(self, batch_size, input_channels, output_channels):
X_scale = 1.0
X_zp = 0
X_value_min = 0
X_value_max = 255
X_q0 = np.round(np.random.rand(batch_size, input_channels) * (
X_value_max - X_value_min) + X_value_min
).astype(np.uint8)
X_q0[0, 0] = X_value_min
X_q0[0, 1] = X_value_max
W_scale = 1.0
W_zp = 0
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8)
W_q0[0, 0] = W_value_min
W_q0[1, 0] = W_value_max
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) * (b_value_max - b_value_min) +
b_value_min
).astype(np.int32)
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X_fp32 = torch.from_numpy(_dequantize(X_q0, X_scale, X_zp)).to(dtype=torch.float)
W_fp32 = torch.from_numpy(_dequantize(W_q0, W_scale, W_zp)).to(dtype=torch.float)
b_fp32 = torch.from_numpy(
_dequantize(b_q0, X_scale * W_scale, 0)
).to(dtype=torch.float)
W_scale, W_zp = _calculate_dynamic_qparams(W_fp32, torch.qint8)
W_q = torch.quantize_per_tensor(W_fp32, scale=W_scale, zero_point=W_zp, dtype=torch.qint8)
        # Observe X_fp32 and determine X_scale and X_zero_point; this should
        # match the internals of dynamic linear.
X_scale, X_zp = _calculate_dynamic_qparams(X_fp32, torch.quint8)
X_q = torch.quantize_per_tensor(X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8)
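        # fbgemm_linear_quantize_weight returns the int8-quantized weight, the
        # per-column offsets, and the (scale, zero_point) it chose; the packed
        # matrix plus col_offsets are what the legacy fbgemm_linear_int8_weight
        # op consumes below.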
W_int8, col_offsets, W_scale, W_zp = torch.fbgemm_linear_quantize_weight(W_q.dequantize())
W_prepack = torch.fbgemm_pack_quantized_matrix(W_int8.clone(), W_int8.size(1), W_int8.size(0))
# Quantized Linear operator with prepacked weight
Y_fp32 = torch.fbgemm_linear_int8_weight(
X_q.dequantize(), W_q.dequantize(), W_prepack, col_offsets,
W_scale, W_zp, b_fp32)
Y_fp32_ref = F.linear(X_q.dequantize(), W_q.dequantize(), b_fp32)
# Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
self.assertEqual(Y_fp32, Y_fp32_ref,
msg="torch.ops.quantized.fbgemm_linear_dynamic results are off")
class TestQuantizedLinear(TestCase):
"""Tests the correctness of the quantized linear and linear_relu op."""
@given(batch_size=st.integers(1, 4),
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_multi_dim_input=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qlinear(self, batch_size, input_channels, output_channels, use_bias,
use_relu, use_multi_dim_input, use_channelwise):
decimal_val = 4
if torch.backends.quantized.engine == 'qnnpack':
use_multi_dim_input = False
# QNNPACK supports uint8 in the kernels. In the op we shift the int8
# weight values to uint8 to be on par with fbgemm. However, this causes
# some rounding issues in rare cases. So, we relax the check to allow
# off by one results.
decimal_val = 0
qlinear_prepack = torch.ops.quantized.linear_prepack
if use_relu:
qlinear = torch.ops.quantized.linear_relu
else:
qlinear = torch.ops.quantized.linear
if use_multi_dim_input:
batch_size *= 3 # Test the multi-dim input tensor
X_scale = 1.5
X_zp = 5
X_value_min = 0
X_value_max = 225
X_q0 = np.round(
np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min)
+ X_value_min
).astype(np.uint8)
W_scales = np.random.rand(output_channels)
        W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(np.int64)
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8)
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X = torch.from_numpy(_dequantize(
X_q0, X_scale, X_zp)).to(dtype=torch.float)
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zp, dtype=torch.quint8)
if use_channelwise:
W = torch.from_numpy(_dequantize(W_q0, W_scales.reshape(
(-1, 1)), W_zps.reshape((-1, 1)))).to(dtype=torch.float)
W_q = torch.quantize_per_channel(W, scales=torch.from_numpy(W_scales),
zero_points=torch.from_numpy(W_zps), axis=0, dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * W_scales, 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_channel(b, scales=torch.from_numpy(X_scale * W_scales),
zero_points=torch.zeros(output_channels, dtype=torch.long),
axis=0, dtype=torch.qint32) if use_bias else None
else:
W = torch.from_numpy(_dequantize(
W_q0, W_scales[0], W_zps[0])).to(dtype=torch.float)
W_q = torch.quantize_per_tensor(W, scale=W_scales[0], zero_point=(
W_zps[0].astype(int).item()), dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * (W_scales[0].item()), 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_tensor(
b, scale=X_scale * (W_scales[0].item()), zero_point=0, dtype=torch.qint32) if use_bias else None
        # Y_scale must be large enough that the requantized output fits in
        # uint8: roughly X_scale * W_scale * input_channels * X_value_max *
        # W_value_max should not exceed Y_scale * 255 (the uint8 max).
        Y_scale = 125.1234
Y_zp = 5
# Weight prepacking operator for quantized Linear
float_bias = b if use_bias else None
W_prepack = qlinear_prepack(W_q, float_bias)
if use_multi_dim_input:
X_q = X_q.view(3, int(batch_size / 3), input_channels)
# Quantized Linear operator with prepacked weight
Y_q = qlinear(X_q, W_prepack, Y_scale, Y_zp)
if not use_channelwise:
# Test the per-tensor quantization only
# Reference quantized Linear operator
Y_q_ref = qlinear_ref(X_q0, X_scale, X_zp, W_q0,
W_scales[0], W_zps[0], b_q0, Y_scale, Y_zp)
if use_relu:
Y_q_ref[Y_q_ref < Y_zp] = Y_zp
if use_multi_dim_input:
Y_q_ref = np.reshape(
Y_q_ref, (3, int(batch_size / 3), output_channels))
# Assert equal
np.testing.assert_array_almost_equal(Y_q_ref, Y_q.int_repr().numpy(), decimal=decimal_val)
# Test both per-tensor and per-channel quantization
# Reference quantized result from PyTorch Linear operator
W_fp32 = W_q.dequantize().to(dtype=torch.float)
X_fp32 = X_q.dequantize().to(dtype=torch.float)
b_fp32 = b_q.dequantize().to(dtype=torch.float) if use_bias else None
Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
if use_relu:
Y_fp32_ref[Y_fp32_ref < 0.0] = 0.0
Y_q_ref2 = torch.quantize_per_tensor(
Y_fp32_ref, Y_scale, Y_zp, torch.quint8)
# Assert equal
np.testing.assert_array_almost_equal(
Y_q_ref2.int_repr().numpy(), Y_q.int_repr().numpy(), decimal=decimal_val)
"""Tests the correctness of the quantized::linear_unpack op."""
@given(W=hu.tensor(shapes=hu.array_shapes(2, 2,),
qparams=hu.qparams(dtypes=torch.qint8)),
use_channelwise=st.booleans())
@override_qengines
def test_qlinear_unpack(self, W, use_channelwise):
W, (W_scale, W_zp, torch_type) = W
if use_channelwise:
output_channels = W.shape[0]
W_scales = torch.rand(output_channels).to(torch.double)
W_zps = torch.round(torch.rand(output_channels)
* 100 - 50).to(torch.int64)
qlinear_prepack = torch.ops.quantized.linear_prepack
qlinear_unpack = torch.ops.quantized.linear_unpack
W = torch.from_numpy(W)
if use_channelwise:
W_q = torch.quantize_per_channel(
W, W_scales, W_zps, 0, dtype=torch_type)
else:
W_q = torch.quantize_per_tensor(W, scale=W_scale, zero_point=W_zp,
dtype=torch_type)
# Weight prepacking operator for quantized Linear
W_prepack = qlinear_prepack(W_q)
# Weight unpack operator for quantized Linear (Used for serialization)
W_q_origin = qlinear_unpack(W_prepack)[0]
# Assert equal
np.testing.assert_equal(W_q.int_repr(), W_q_origin.int_repr().numpy())
if use_channelwise:
np.testing.assert_array_almost_equal(np.float32(W_q.q_per_channel_scales().numpy()),
np.float32(
W_q_origin.q_per_channel_scales().numpy()),
decimal=4)
np.testing.assert_equal(W_q.q_per_channel_zero_points(
).numpy(), W_q_origin.q_per_channel_zero_points().numpy())
else:
np.testing.assert_equal(np.float32(
W_q.q_scale()), np.float32(W_q_origin.q_scale()))
np.testing.assert_equal(
W_q.q_zero_point(), W_q_origin.q_zero_point())
@unittest.skipIf(sys.platform == "darwin", "Known test failure on Mac.")
class TestQuantizedEmbeddingBag(TestCase):
def _test_embedding_bag_unpack_fn(self, pack_fn, unpack_fn, num_embeddings, embedding_dim, bit_rate):
weights = torch.from_numpy((np.random.random_sample((
num_embeddings, embedding_dim)) + 1).astype(np.float32))
w_packed = pack_fn(weights)
w_unpacked = unpack_fn(w_packed)
# compare against C2 to ensure numerical equivalency.
from caffe2.python import core, workspace
conversion_op = "FloatToFused8BitRowwiseQuantized"
if bit_rate == 4:
conversion_op = "FloatToFused4BitRowwiseQuantized"
def get_c2_weights(weights):
workspace.ResetWorkspace()
workspace.FeedBlob("weights", weights)
workspace.RunOperatorOnce(
core.CreateOperator(
conversion_op, ["weights"], ["quantized_weights"]
)
)
emb_q = workspace.FetchBlob("quantized_weights")
if bit_rate == 4:
workspace.RunOperatorOnce(
core.CreateOperator(
"Fused4BitRowwiseQuantizedToFloat", ["quantized_weights"], ["dequantized_weights"]
)
)
dequantized_data = torch.from_numpy(workspace.FetchBlob("dequantized_weights"))
else:
dequantized_data = torch.ops._caffe2.Fused8BitRowwiseQuantizedToFloat(
torch.tensor(emb_q)
)
return torch.from_numpy(emb_q), dequantized_data
w_packed_c2, w_unpacked_c2 = get_c2_weights(weights)
# Compare packed weights against C2.
np.testing.assert_equal(w_packed.numpy(), w_packed_c2.numpy())
# Compare unpacked weights against C2
np.testing.assert_equal(w_unpacked.numpy(), w_unpacked_c2.numpy())
""" Tests the correctness of the embedding_bag_8bit pack/unpack op against C2 """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),)
def test_embedding_bag_byte_unpack(self, num_embeddings, embedding_dim):
pack_fn = torch.ops.quantized.embedding_bag_byte_prepack
unpack_fn = torch.ops.quantized.embedding_bag_byte_unpack
self._test_embedding_bag_unpack_fn(pack_fn, unpack_fn, num_embeddings, embedding_dim, bit_rate=8)
""" Tests the correctness of the embedding_bag_4bit pack/unpack op against C2 """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),)
def test_embedding_bag_4bit_unpack(self, num_embeddings, embedding_dim):
pack_fn = torch.ops.quantized.embedding_bag_4bit_prepack
unpack_fn = torch.ops.quantized.embedding_bag_4bit_unpack
self._test_embedding_bag_unpack_fn(pack_fn, unpack_fn, num_embeddings, embedding_dim, bit_rate=4)
def embedding_bag_rowwise_offsets_run(
self, bit_rate, num_embeddings,
embedding_dim, num_offsets, enable_per_sample_weights,
include_last_offset, atol, rtol):
pt_op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_byte_prepack
if bit_rate == 4:
pt_op = torch.ops.quantized.embedding_bag_4bit_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_4bit_prepack
weights = torch.from_numpy((np.random.random_sample((
num_embeddings, embedding_dim)) + 1).astype(np.float32))
max_segments = 5
max_segment_length = 20
num_lengths = np.random.randint(1, max_segments + 1)
lengths = np.random.randint(0, max_segment_length + 1,
size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):
"""
Convert lengths to offsets
"""
tt = np.zeros((t.shape[0] + 1,), dtype=offset_type)
tt[1:] = t
tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type))
if use_begin_offset:
return tt[:-1]
return tt[1:]
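        # For example (hypothetical values): lengths [2, 3, 1] give begin
        # offsets [0, 2, 5]; with use_begin_offset=False, end offsets [2, 5, 6].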
offsets = lengths_to_offsets(lengths)
indices = torch.from_numpy(np.random.randint(
low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
q_weights = pt_prepack_op(weights)
per_sample_weights = torch.from_numpy(np.random.uniform(
low=0.01, high=0.5, size=[len(indices)]).astype(np.float32)) if \
enable_per_sample_weights else None
if include_last_offset:
offsets = torch.cat(
(offsets, torch.tensor([indices.size(0)], dtype=torch.long)), 0
)
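            # i.e. the offsets tensor gains a trailing entry equal to the
            # total number of indices, marking the end of the last bag.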
# Reference result will be the floating point torch.nn.EmbeddingBag.
def get_reference_result(
num_embeddings, embedding_dim,
include_last_offset, weights, per_sample_weights,
indices, offsets):
embedding_bag = torch.nn.EmbeddingBag(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
include_last_offset=include_last_offset, _weight=weights,
scale_grad_by_freq=False, mode='sum'
)
return embedding_bag(indices, offsets,
per_sample_weights=per_sample_weights)
reference_result = get_reference_result(
num_embeddings, embedding_dim, include_last_offset, weights,
per_sample_weights, indices, offsets)
result = pt_op(
q_weights,
indices,
offsets,
mode=0,
per_sample_weights=per_sample_weights,
include_last_offset=include_last_offset,
)
torch.testing.assert_allclose(reference_result, result, atol=atol,
rtol=rtol)
""" Tests the correctness of the embedding_bag_8bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_offsets=st.integers(1, 20),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans())
def test_embedding_bag_byte_rowwise_offsets(self, num_embeddings,
embedding_dim, num_offsets,
enable_per_sample_weights,
include_last_offset):
self.embedding_bag_rowwise_offsets_run(
8, num_embeddings, embedding_dim, num_offsets,
enable_per_sample_weights, include_last_offset,
atol=0.005, rtol=1e-3)
""" Tests the correctness of the embedding_bag_4bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_offsets=st.integers(1, 20),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans())
def test_embedding_bag_4bit_rowwise_offsets(self, num_embeddings,
embedding_dim, num_offsets,
enable_per_sample_weights,
include_last_offset):
self.embedding_bag_rowwise_offsets_run(4, num_embeddings,
embedding_dim, num_offsets,
enable_per_sample_weights,
include_last_offset, atol=0.1,
rtol=1e-2)
class TestQuantizedConv(unittest.TestCase):
def _test_qconv_unpack_impl(
self, qconv_prepack_fn, qconv_unpack_fn, inputs, strides, pads,
channelwise
):
(X_data, W_data, bias_data, groups) = inputs
(X, (X_scale, X_zero_point, X_qtype)) = X_data
(W, (W_scale, W_zero_point, W_qtype)) = W_data
(bias, (bias_scale, bias_zero_point, bias_qtype)) = bias_data
if channelwise:
output_channels = W.shape[0]
W_scale = torch.tensor([W_scale] * output_channels)
W_zero_point = torch.tensor([W_zero_point] * output_channels)
W = torch.from_numpy(W).float()
bias = torch.from_numpy(bias).float()
if channelwise:
W_q = torch.quantize_per_channel(
W, scales=W_scale, zero_points=W_zero_point, axis=0,
dtype=W_qtype)
else:
W_q = torch.quantize_per_tensor(
W, scale=W_scale, zero_point=W_zero_point, dtype=W_qtype)
if isinstance(strides, int):
dilations = [1]
else:
dilations = (1,) * len(strides)
W_packed = qconv_prepack_fn(W_q, bias, strides, pads, dilations, groups)
(W_unpacked, bias) = qconv_unpack_fn(W_packed)
# Assert equal
np.testing.assert_equal(W_q.int_repr().numpy(),
W_unpacked.int_repr().numpy())
if channelwise:
np.testing.assert_array_almost_equal(
np.float32(W_q.q_per_channel_scales().numpy()),
np.float32(W_unpacked.q_per_channel_scales().numpy()),
decimal=4)
np.testing.assert_equal(W_q.q_per_channel_zero_points(
).numpy(), W_unpacked.q_per_channel_zero_points().numpy())
else:
np.testing.assert_equal(np.float32(
W_q.q_scale()), np.float32(W_unpacked.q_scale()))
np.testing.assert_equal(
W_q.q_zero_point(), W_unpacked.q_zero_point())
def _make_qconv_tensors(
self, batch_size,
input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels, strides, pads, dilations,
X_scale, X_zero_point, W_scale, W_zero_point,
use_bias, use_channelwise
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
# Padded input size should be at least as big as dilated kernel
kernels = _single(kernels)
strides = _single(strides)
pads = _single(pads)
dilations = _single(dilations)
for i in range(len(kernels)):
assume(input_feature_map_shape[i] + 2 * pads[i]
>= dilations[i] * (kernels[i] - 1) + 1)
W_scale = W_scale * output_channels
W_zero_point = W_zero_point * output_channels
        # Truncate the replicated W_scale and W_zero_point lists to exactly
        # output_channels entries
W_scale = W_scale[:output_channels]
W_zero_point = W_zero_point[:output_channels]
        # For testing, we use small values for weights and for activations
        # so that no overflow occurs in the vpmaddubsw instruction. If an
        # overflow occurs in the qconv implementation but not in the
        # reference, we can't exactly match the results with the reference.
        # Please see the comment in the qconv implementation file
        # aten/src/ATen/native/quantized/cpu/qconv.cpp for more details.
(W_value_min, W_value_max) = (-5, 5)
# the operator expects them in the format
# (output_channels, input_channels/groups,
# kernel_d, kernel_h, kernel_w)
W_init = torch.randint(
W_value_min,
W_value_max,
(output_channels, input_channels_per_group,) + kernels,
)
b_init = torch.randint(0, 10, (output_channels,))
(X_value_min, X_value_max) = (0, 4)
X_init = torch.randint(
X_value_min,
X_value_max,
(batch_size, input_channels,) + input_feature_map_shape,
)
X = X_scale * (X_init - X_zero_point).float()
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernels)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)
W = W_scales_tensor.reshape(*W_shape) * (
W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
b = X_scale * W_scales_tensor * b_init.float()
else:
W = W_scale[0] * (W_init - W_zero_point[0]).float()
b = X_scale * W_scale[0] * b_init.float()
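        # The float bias is scaled by X_scale * W_scale above; this mirrors the
        # usual qconv bias convention (the bias is requantized internally with
        # scale X_scale * W_scale and zero_point 0), so the integer b_init
        # round-trips exactly.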
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)
if use_channelwise:
W_q = torch.quantize_per_channel(
W, W_scales_tensor, W_zero_points_tensor.long(), 0,
dtype=torch.qint8)
else:
W_q = torch.quantize_per_tensor(
W, scale=W_scale[0], zero_point=W_zero_point[0],
dtype=torch.qint8)
bias_float = b if use_bias else None
return (X, W), (X_q, W_q), bias_float
def _test_qconv_impl(
self, qconv_fn, qconv_prepack_fn, conv_op, batch_size,
input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels, strides, pads, dilations,
X_scale, X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_relu, use_channelwise
):
(X, W), (X_q, W_q), bias_float = self._make_qconv_tensors(
batch_size, input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels,
strides, pads, dilations, X_scale, X_zero_point, W_scale,
W_zero_point, use_bias, use_channelwise)
# Assign weights
conv_op.weight = torch.nn.Parameter(W, requires_grad=False)
conv_op.bias = torch.nn.Parameter(
bias_float, requires_grad=False) if use_bias else None
result_ref = conv_op(X)
if use_relu:
relu = torch.nn.ReLU()
result_ref = relu(result_ref)
# Quantize reference results for comparison
result_ref_q = torch.quantize_per_tensor(
result_ref, scale=Y_scale, zero_point=Y_zero_point,
dtype=torch.quint8)
W_prepack = qconv_prepack_fn(
W_q, bias_float, strides, pads, dilations, groups)
Y_q = qconv_fn(
X_q,
W_prepack,
Y_scale,
Y_zero_point,
)
# Make sure the results match
# assert_array_almost_equal compares using the following formula:
# abs(desired-actual) < 1.5 * 10**(-decimal)
# (https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html)
# We use decimal = 0 to ignore off-by-1 differences between
# reference and test. Off-by-1 differences arise due to the order of
# round and zero_point addition operation, i.e., if addition
# followed by round is used by reference and round followed by
# addition is used by test, the results may differ by 1.
# For example, the result of round(2.5) + 1 is 3 while
# round(2.5 + 1) is 4 assuming the rounding mode is
# round-to-nearest, ties-to-even.
np.testing.assert_array_almost_equal(
result_ref_q.int_repr().numpy(), Y_q.int_repr().numpy(), decimal=0)
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 3),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_relu=st.sampled_from([False]),
use_channelwise=st.booleans())
@override_qengines
def test_qconv2d(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
dilations = (dilation, dilation)
qconv = torch.ops.quantized.conv2d
if use_relu:
qconv = torch.ops.quantized.conv2d_relu
qconv_prepack = torch.ops.quantized.conv2d_prepack
conv_op = torch.nn.Conv2d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu, use_channelwise)
"""Tests the correctness of the quantized::qconv_unpack op."""
@given(
inputs=hu.tensor_conv(
spatial_dim=2, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 4),
output_channels_per_group_range=(1, 4), feature_map_range=(4, 8),
kernel_range=(1, 4), max_groups=4,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride_h=st.integers(1, 3), stride_w=st.integers(1, 3),
pad_h=st.integers(1, 2), pad_w=st.integers(1, 2),
channelwise=st.booleans())
@override_qengines
def test_qconv_unpack(
self, inputs, stride_h, stride_w, pad_h, pad_w, channelwise
):
qconv_prepack = torch.ops.quantized.conv2d_prepack
qconv_unpack = torch.ops.quantized.conv2d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs, (stride_h, stride_w),
(pad_h, pad_w), channelwise)
@given(
inputs=hu.tensor_conv(
spatial_dim=1, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 4),
output_channels_per_group_range=(1, 4), feature_map_range=(4, 8),
kernel_range=(1, 4), max_groups=4,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride=st.integers(1, 3),
pad=st.integers(1, 2),
channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
def test_qconv1d_unpack(
self, inputs, stride, pad, channelwise, qengine
):
if qengine not in supported_qengines:
return
if qengine == 'qnnpack':
channelwise = False
with override_quantized_engine(qengine):
qconv_prepack = torch.ops.quantized.conv1d_prepack
qconv_unpack = torch.ops.quantized.conv1d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs, [stride],
[pad], channelwise)
"""Tests the correctness of quantized 1D convolution op."""
@given(batch_size=st.integers(1, 6),
input_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
output_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
groups=st.integers(1, 3),
length=st.integers(4, 16),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qconv1d(
self,
batch_size,
input_channels_per_group,
output_channels_per_group,
groups,
length,
kernel,
stride,
pad,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
if torch.backends.quantized.engine == 'qnnpack':
use_channelwise = False
true_conv1d = torch.nn.Conv1d(
input_channels,
output_channels,
kernel,
stride,
pad,
dilation,
groups,
)
qconv_prepack = torch.ops.quantized.conv1d_prepack
qconv = torch.ops.quantized.conv1d
if use_relu:
qconv = torch.ops.quantized.conv1d_relu
self._test_qconv_impl(
qconv, qconv_prepack, true_conv1d, batch_size,
input_channels_per_group, (length, ),
output_channels_per_group, groups, kernel, [stride], [pad],
[dilation], X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu, use_channelwise
)
@given(batch_size=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
D=st.integers(4, 8),
H=st.integers(4, 8),
W=st.integers(4, 8),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
groups=st.integers(1, 3),
kernel_d=st.integers(1, 4),
kernel_h=st.integers(1, 4),
kernel_w=st.integers(1, 4),
stride_d=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("fbgemm",)))
def test_qconv3d(
self,
batch_size,
input_channels_per_group,
D,
H,
W,
output_channels_per_group,
groups,
kernel_d,
kernel_h,
kernel_w,
stride_d,
stride_h,
stride_w,
pad_d,
pad_h,
pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
qengine
):
if qengine not in supported_qengines:
return
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_d, kernel_h, kernel_w)
strides = (stride_d, stride_h, stride_w)
pads = (pad_d, pad_h, pad_w)
dilations = (dilation, dilation, dilation)
with override_quantized_engine(qengine):
qconv = torch.ops.quantized.conv3d
if use_relu:
qconv = torch.ops.quantized.conv3d_relu
qconv_prepack = torch.ops.quantized.conv3d_prepack
conv_op = torch.nn.Conv3d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (D, H, W), output_channels_per_group,
groups, kernels, strides, pads, dilations, X_scale,
X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_relu, use_channelwise)
"""Tests the correctness of the quantized::qconv3d_unpack op."""
@given(
inputs=hu.tensor_conv(
spatial_dim=3, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 3),
output_channels_per_group_range=(1, 3), feature_map_range=(3, 6),
kernel_range=(1, 3), max_groups=3,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride_d=st.integers(1, 2), stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(1, 2), pad_h=st.integers(1, 2),
pad_w=st.integers(1, 2),
channelwise=st.booleans(),
qengine=st.sampled_from(("fbgemm",)))
def test_qconv3d_unpack(
self, inputs, stride_d, stride_h, stride_w, pad_d, pad_h, pad_w,
channelwise, qengine
):
if qengine not in supported_qengines:
return
with override_quantized_engine(qengine):
qconv3d_prepack = torch.ops.quantized.conv3d_prepack
qconv3d_unpack = torch.ops.quantized.conv3d_unpack
self._test_qconv_unpack_impl(
qconv3d_prepack, qconv3d_unpack, inputs,
(stride_d, stride_h, stride_w), (pad_d, pad_h, pad_w),
channelwise)
class TestPadding(TestCase):
@given(batch_size=st.integers(1, 64),
channels=st.integers(1, 64),
width=st.integers(16, 128),
qtype=st.sampled_from(hu._ALL_QINT_TYPES))
def test_reflection_pad1d(self, batch_size, channels, width, qtype):
padding = width // 4
x = torch.arange(batch_size * channels * width).to(torch.float)
x = x.resize(batch_size, channels, width)
# Per-Tensor test
scale, zp = _calculate_dynamic_qparams(x, qtype)
qx = torch.quantize_per_tensor(x, scale, zp, qtype)
padding_op = torch.nn.ReflectionPad1d(padding)
y_ref = padding_op(x)
qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
qy_hat = padding_op(qx)
self.assertEqual(qy_ref, qy_hat)
@unittest.skipUnless('qnnpack' in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK")
class TestQNNPackOps(TestCase):
"""Tests the correctness of the quantized::qnnpack_relu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0)))
def test_qnnpack_relu(self, X):
with override_quantized_engine('qnnpack'):
X, (scale, zero_point, torch_type) = X
relu = torch.nn.functional.relu
X = torch.from_numpy(X)
Y = X.clone()
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch_type)
qY_hat = relu(qX)
Y[Y < 0] = 0
qY = torch.quantize_per_tensor(Y, scale=scale, zero_point=zero_point, dtype=torch_type)
self.assertEqual(qY, qY_hat)
"""Tests the correctness of the quantized::qnnpack_tanh op."""
@skipIfNoFBGEMM
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_qnnpack_tanh(self, X):
# Note: In QNNPACK the output scale and zero_point can only be
# 2.0/256, 128 respectively, as it uses a LUT with 256 bins.
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=torch_type)
# Floating point reference
Y = torch.tanh(X)
qY = torch.quantize_per_tensor(Y, scale=1.0 / 128, zero_point=128,
dtype=torch.quint8)
with override_quantized_engine('fbgemm'):
qYserver = torch.tanh(qX)
with override_quantized_engine('qnnpack'):
qY_hat = torch.tanh(qX)
self.assertEqual(qY, qY_hat,
msg="QNNPACK TanH failed (FP ref)!")
self.assertEqual(qYserver, qY_hat,
msg="QNNPACK TanH failed (FBGEMM ref)!")
"""Tests the correctness of the quantized::qnnpack_sigmoid op."""
@skipIfNoFBGEMM
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_qnnpack_sigmoid(self, X):
# Note: In QNNPACK the output scale and zero_point can only be
# 1.0/256, 0 respectively, as it uses a LUT with 256 bins.
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X).to(torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=torch_type)
# Floating point reference
Y = torch.sigmoid(X)
qY = torch.quantize_per_tensor(Y, scale=1.0 / 256, zero_point=0,
dtype=torch.quint8)
with override_quantized_engine('fbgemm'):
qYserver = torch.sigmoid(qX)
with override_quantized_engine('qnnpack'):
qY_hat = torch.sigmoid(qX)
self.assertEqual(qY, qY_hat,
msg="QNNPACK Sigmoid failed (FP ref)!")
self.assertEqual(qYserver, qY_hat,
msg="QNNPACK Sigmoid failed (FBGEMM ref)!")
@skipIfNoFBGEMM
def test_qnnpack_sigmoid_sweep(self):
# Input parameters
f_min = -4.0
f_max = 4.0
scale = (f_max - f_min) / 256.0
zero_point = 128
dtype = torch.quint8
step = scale / 2.0
x = np.arange(f_min, f_max + step, step)
X = torch.from_numpy(x).to(torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=dtype)
dqX = qX.dequantize()
# Floating point reference
Y = torch.sigmoid(dqX)
qY = torch.quantize_per_tensor(Y, scale=1.0 / 256, zero_point=0,
dtype=torch.quint8)
with override_quantized_engine('fbgemm'):
qYserver = torch.sigmoid(qX)
with override_quantized_engine('qnnpack'):
qY_hat = torch.sigmoid(qX)
self.assertEqual(qY, qY_hat,
msg="QNNPACK Sigmoid failed (FP ref)!")
self.assertEqual(qYserver, qY_hat,
msg="QNNPACK Sigmoid failed (FBGEMM ref)!")
"""Tests the correctness of the quantized::add (qnnpack) op."""
@settings(suppress_health_check=(HealthCheck.filter_too_much,))
@given(A=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams(dtypes=torch.quint8)),
zero_point=st.sampled_from([0, 2, 5, 15, 127]),
scale_A=st.sampled_from([0.001, 0.057, 0.889, 12.3]),
scale_B=st.sampled_from([0.008, 0.0821, 0.67, 7]),
scale_C=st.sampled_from([0.003, 0.07821, 0.457, 7.34]),)
def test_qnnpack_add(self, A, zero_point, scale_A, scale_B, scale_C):
with override_quantized_engine('qnnpack'):
A_temp = A
A, (scale_a, zero_point_A, torch_type) = A_temp
B, (scale_b, zero_point_B, torch_type) = A_temp
A = torch.from_numpy(A)
B = torch.from_numpy(B)
assume(scale_A // scale_C >= 2**-14)
assume(scale_A // scale_C < 2**8)
assume(scale_B // scale_C >= 2**-14)
assume(scale_B // scale_C < 2**8)
zero_point_C = 127
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
dtype=torch.quint8)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
dtype=torch.quint8)
# Add ground truth
C = (qA.dequantize() + qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C)
qC_qnnp = torch.ops.quantized.add(qA, qB, scale_C, zero_point_C)
np.testing.assert_equal(qC, qC_qnnp.int_repr(),
"Quantized addition failed.")
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = torch.quantize_per_tensor(torch.from_numpy(Crelu), scale_C,
zero_point_C, dtype=torch.quint8)
qCrelu_hat = torch.ops.quantized.add_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qCrelu.int_repr().numpy(), qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
"""Tests the correctness of quantized::qnnpack_maxpool2d op."""
@given(A=hu.tensor(shapes=hu.array_shapes(4, 4, 3, 5),
qparams=hu.qparams(dtypes=torch.quint8)),
kernel=st.sampled_from([2, 4]),
stride=st.sampled_from([1, 2]),
padding=st.sampled_from([1, 2]))
def test_qnnpack_maxpool2d(self, A, kernel, stride, padding):
import torch.nn.functional as F
with override_quantized_engine('qnnpack'):
A, (scale, zero_point, torch_type) = A
X = torch.from_numpy(A)
np_type = np.uint8
dilation = 1
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation)
assume(oW > 0)
k = (kernel, kernel)
s = (stride, stride)
d = (dilation, dilation)
p = (padding, padding)
q_max_pool = torch.ops.quantized.max_pool2d
a = scale * (X - zero_point).to(dtype=torch.float)
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
a_ref = qa.dequantize()
a_pool = F.max_pool2d(a_ref, kernel_size=k, stride=s, padding=p,
dilation=d)
a_pool_nhwc = a_pool.permute([0, 2, 3, 1])
qa_pool = q_max_pool(qa, k, s, p, d, ceil_mode=False)
qa_pool_int = qa_pool.dequantize()
np.testing.assert_equal(a_pool.numpy(), qa_pool_int.numpy())
@given(batch_size=st.integers(1, 5),
channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(4, 10),
width=st.integers(4, 10),
kernel=st.integers(2, 5),
stride=st.integers(1, 2),
padding=st.integers(1, 2),
scale=st.floats(0.2, 1.6),
zero_point=st.integers(0, 25)
)
def test_avg_pool2d(
self,
batch_size,
channels,
height,
width,
kernel,
stride,
padding,
scale,
zero_point
):
with override_quantized_engine('qnnpack'):
import torch.nn.functional as F
X_init = torch.from_numpy(np.random.randint(
0, 50, (batch_size, channels, height, width)))
X = scale * (X_init - zero_point).to(dtype=torch.float)
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, 1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, 1)
assume(oW > 0)
k = (kernel, kernel)
s = (stride, stride)
p = (padding, padding)
q_avg_pool = torch.nn.quantized.functional.avg_pool2d
x_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
a_pool = F.avg_pool2d(x_q.dequantize().to(torch.float), kernel_size=k, stride=s, padding=p)
qa_pool = q_avg_pool(x_q, k, s, p)
# Quantize Ref Output
a_pool_q = torch.quantize_per_tensor(a_pool, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
np.testing.assert_array_almost_equal(a_pool_q.int_repr().numpy(),
qa_pool.int_repr().numpy(), decimal=0)
@given(batch_size=st.integers(1, 5),
channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(4, 20),
width=st.integers(4, 20),
output_height=st.integers(2, 10),
output_width=st.integers(2, 10),
scale=st.floats(0.2, 1.6),
zero_point=st.integers(0, 25)
)
def test_adaptive_avg_pool2d(
self,
batch_size,
channels,
height,
width,
output_height,
output_width,
scale,
zero_point
):
with override_quantized_engine('qnnpack'):
# Check constraints
assume(height >= output_height)
assume(width >= output_width)
import torch.nn.functional as F
X_init = torch.from_numpy(np.random.randint(
0, 50, (batch_size, channels, height, width)))
X = scale * (X_init - zero_point).to(dtype=torch.float)
iH, iW = X.shape[-2:]
q_avg_pool = torch.nn.quantized.functional.adaptive_avg_pool2d
x_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
a_pool = F.adaptive_avg_pool2d(x_q.dequantize().to(torch.float), (output_height, output_width))
qa_pool = q_avg_pool(x_q, (output_height, output_width))
# Quantize Ref Output
a_pool_q = torch.quantize_per_tensor(a_pool, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
np.testing.assert_array_almost_equal(a_pool_q.int_repr().numpy(),
qa_pool.int_repr().numpy(), decimal=0)
@given(batch_size=st.integers(1, 5),
channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(4, 10),
width=st.integers(4, 10),
scale=st.floats(0.02, 2.6),
zero_point=st.integers(0, 25))
def test_mean(self, batch_size, channels, height, width, scale, zero_point):
with override_quantized_engine('qnnpack'):
dim = (2, 3)
X_init = torch.from_numpy(np.random.randint(
0, 50, (batch_size, channels, height, width)))
X = scale * (X_init - zero_point).to(dtype=torch.float)
qX = torch.quantize_per_tensor(X, scale, zero_point, torch.quint8)
Y = torch.mean(qX.dequantize(), dim)
Y = torch.quantize_per_tensor(Y, scale, zero_point, torch.quint8)
qY = torch.mean(qX, dim)
np.testing.assert_array_almost_equal(Y.int_repr().numpy(), qY.int_repr().numpy(), decimal=0)
"""Tests the correctness of the quantized::hardtanh op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
elements=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)),
min_val=hu.floats(-1e6, -9.999999974752427e-07, allow_nan=False, allow_infinity=False),
max_val=hu.floats(9.999999974752427e-07, 1e6, allow_nan=False, allow_infinity=False))
def test_hardtanh(self, X, min_val, max_val):
if 'qnnpack' not in torch.backends.quantized.supported_engines:
return
with override_quantized_engine('qnnpack'):
X, (scale, zero_point, torch_type) = X
assume(min_val <= max_val)
Y = X.copy()
Y[Y < min_val] = min_val
Y[Y > max_val] = max_val
qY = torch.quantize_per_tensor(torch.from_numpy(Y), scale=scale,
zero_point=zero_point, dtype=torch_type)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = torch.nn.quantized.functional.hardtanh(qX, min_val, max_val)
self.assertEqual(
qY, qY_hat,
msg="hardtanh failed:\nactual {}\nexpected {}".format(qY_hat, qY))
"""Tests the correctness of the tensor comparators."""
class TestComparatorOps(TestCase):
"""Tests the element-wise equality ops."""
@given(A=hu.tensor(shapes=((3, 4, 5),),
qparams=hu.qparams()),
B=hu.tensor(shapes=((5,), (1, 5), (1, 1, 5), (4, 5), (3, 4, 5)),
qparams=hu.qparams()))
def test_compare_tensor_tensor(self, A, B):
A, (scale_a, zero_point_a, dtype_a) = A
B, (scale_b, zero_point_b, dtype_b) = B
tA = torch.from_numpy(A)
tB = torch.from_numpy(B)
qA = torch.quantize_per_tensor(tA, scale=scale_a, zero_point=zero_point_a,
dtype=dtype_a)
qB = torch.quantize_per_tensor(tB, scale=scale_b, zero_point=zero_point_b,
dtype=dtype_b)
dqA = qA.dequantize()
dqB = qB.dequantize()
ops_under_test = ('__eq__', '__ne__', '__ge__', '__le__', '__gt__',
'__lt__', 'eq', 'ne', 'ge', 'le', 'gt', 'lt')
for op in ops_under_test:
result_ref = getattr(dqA, op)(dqB)
result = getattr(qA, op)(qB)
self.assertEqual(result_ref, result,
msg="'tensor.{}(tensor)'' failed".format(op))
# Reversed broadcasting.
result_ref = getattr(dqB, op)(dqA)
result = getattr(qB, op)(qA)
self.assertEqual(result_ref, result,
msg="'tensor.{}(tensor)'' failed".format(op))
@given(A=hu.tensor(shapes=((3, 4, 5),),
qparams=hu.qparams()),
b=hu.floats(allow_infinity=False, allow_nan=False))
def test_compare_tensor_scalar(self, A, b):
A, (scale_a, zero_point_a, dtype_a) = A
tA = torch.from_numpy(A)
qA = torch.quantize_per_tensor(tA, scale=scale_a, zero_point=zero_point_a,
dtype=dtype_a)
dqA = qA.dequantize()
ops_under_test_reversible = ('__eq__', '__ne__', '__ge__', '__le__',
'__gt__', '__lt__')
ops_under_test_nonreversible = ('eq', 'ne', 'ge', 'le', 'gt', 'lt')
for op in ops_under_test_reversible:
result_ref = getattr(dqA, op)(b)
result = getattr(qA, op)(b)
note("result_ref 1: {}".format(result_ref))
note("result 1: {}".format(result))
self.assertEqual(result_ref, result,
msg="'tensor.{}(scalar)'' failed".format(op))
# Reversed broadcasting.
result_ref = getattr(b, op)(dqA)
result = getattr(b, op)(qA)
note("result_ref 2: {}".format(result_ref))
note("result 2: {}".format(result))
self.assertEqual(result_ref, result,
msg="'scalar.{}(tensor)'' failed".format(op))
for op in ops_under_test_nonreversible:
result_ref = getattr(dqA, op)(b)
result = getattr(qA, op)(b)
note("result_ref 3: {}".format(result_ref))
note("result 3: {}".format(result))
self.assertEqual(result_ref, result,
msg="'tensor.{}(scalar)'' failed".format(op))
avg_line_length: 46.754771 | max_line_length: 124 | alphanum_fraction: 0.555422
hexsha: 933bb5cf4efa7bb1148bec519683973ebc68c2f0 | size: 19,544 | ext: py | lang: Python
max_stars_repo_path: examples/frameworks/pytorch/pytorch_matplotlib.py | max_stars_repo_name: noklam/trains | max_stars_repo_head_hexsha: 70536544ed5e2b9aac8576ef2eaaef31c99ca670 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 8 | stars events: 2019-04-24T18:55:50.000Z to 2022-03-04T13:38:42.000Z
max_issues_repo_path: examples/frameworks/pytorch/pytorch_matplotlib.py | max_issues_repo_name: aliceUnhinged613/trains | max_issues_repo_head_hexsha: 8ec6bba4d91104a2bdd2e537bec21078529540e0 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | issues events: 2020-07-05T08:28:40.000Z to 2020-08-11T13:32:49.000Z
max_forks_repo_path: examples/frameworks/pytorch/pytorch_matplotlib.py | max_forks_repo_name: aliceUnhinged613/trains | max_forks_repo_head_hexsha: 8ec6bba4d91104a2bdd2e537bec21078529540e0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 6 | forks events: 2021-03-06T03:18:14.000Z to 2021-12-14T02:40:12.000Z
# TRAINS - Example of Pytorch and matplotlib integration and reporting
#
"""
Neural Transfer Using PyTorch
=============================
**Author**: `Alexis Jacq <https://alexis-jacq.github.io>`_
**Edited by**: `Winston Herring <https://github.com/winston6>`_
Introduction
------------
This tutorial explains how to implement the `Neural-Style algorithm <https://arxiv.org/abs/1508.06576>`__
developed by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge.
Neural-Style, or Neural-Transfer, allows you to take an image and
reproduce it with a new artistic style. The algorithm takes three images,
an input image, a content-image, and a style-image, and changes the input
to resemble the content of the content-image and the artistic style of the style-image.
.. figure:: /_static/img/neural-style/neuralstyle.png
:alt: content1
"""
######################################################################
# Underlying Principle
# --------------------
#
# The principle is simple: we define two distances, one for the content
# (:math:`D_C`) and one for the style (:math:`D_S`). :math:`D_C` measures how different the content
# is between two images while :math:`D_S` measures how different the style is
# between two images. Then, we take a third image, the input, and
# transform it to minimize both its content-distance with the
# content-image and its style-distance with the style-image. Now we can
# import the necessary packages and begin the neural transfer.
#
# Importing Packages and Selecting a Device
# -----------------------------------------
# Below is a list of the packages needed to implement the neural transfer.
#
# -  ``torch``, ``torch.nn``, ``numpy`` (indispensable packages for
# neural networks with PyTorch)
# - ``torch.optim`` (efficient gradient descents)
# - ``PIL``, ``PIL.Image``, ``matplotlib.pyplot`` (load and display
# images)
# - ``torchvision.transforms`` (transform PIL images into tensors)
# - ``torchvision.models`` (train or load pre-trained models)
# - ``copy`` (to deep copy the models; system package)
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import torchvision.models as models
import copy
from trains import Task
task = Task.init(project_name='examples', task_name='pytorch with matplotlib example', task_type=Task.TaskTypes.testing)
######################################################################
# Next, we need to choose which device to run the network on and import the
# content and style images. Running the neural transfer algorithm on large
# images takes longer and will go much faster when running on a GPU. We can
# use ``torch.cuda.is_available()`` to detect if there is a GPU available.
# Next, we set the ``torch.device`` for use throughout the tutorial. Also the ``.to(device)``
# method is used to move tensors or modules to a desired device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
######################################################################
# Loading the Images
# ------------------
#
# Now we will import the style and content images. The original PIL images have values between 0 and 255, but when
# transformed into torch tensors, their values are converted to be between
# 0 and 1. The images also need to be resized to have the same dimensions.
# An important detail to note is that neural networks from the
# torch library are trained with tensor values ranging from 0 to 1. If you
# try to feed the networks with 0 to 255 tensor images, then the activated
# feature maps will be unable to sense the intended content and style.
# However, pre-trained networks from the Caffe library are trained with 0
# to 255 tensor images.
#
#
# .. Note::
# Here are links to download the images required to run the tutorial:
# `picasso.jpg <https://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg>`__ and
# `dancing.jpg <https://pytorch.org/tutorials/_static/img/neural-style/dancing.jpg>`__.
# Download these two images and add them to a directory
# with name ``images`` in your current working directory.
# desired size of the output image
imsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu
loader = transforms.Compose([
transforms.Resize(imsize), # scale imported image
transforms.ToTensor()]) # transform it into a torch tensor
def image_loader(image_name):
image = Image.open(image_name)
# fake batch dimension required to fit network's input dimensions
image = loader(image).unsqueeze(0)
return image.to(device, torch.float)
style_img = image_loader(os.path.join("..", "..", "reporting", "data_samples", "picasso.jpg"))
content_img = image_loader(os.path.join("..", "..", "reporting", "data_samples", "dancing.jpg"))
assert style_img.size() == content_img.size(), \
"we need to import style and content images of the same size"
######################################################################
# Now, let's create a function that displays an image by reconverting a
# copy of it to PIL format and displaying the copy using
# ``plt.imshow``. We will try displaying the content and style images
# to ensure they were imported correctly.
unloader = transforms.ToPILImage() # reconvert into PIL image
plt.ion()
def imshow(tensor, title=None):
    image = tensor.cpu().clone()  # clone the tensor so we don't modify the original
image = image.squeeze(0) # remove the fake batch dimension
image = unloader(image)
plt.imshow(image)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
plt.figure()
imshow(style_img, title='Style Image')
plt.figure()
imshow(content_img, title='Content Image')
######################################################################
# Loss Functions
# --------------
# Content Loss
# ~~~~~~~~~~~~
#
# The content loss is a function that represents a weighted version of the
# content distance for an individual layer. The function takes the feature
# maps :math:`F_{XL}` of a layer :math:`L` in a network processing input :math:`X` and returns the
# weighted content distance :math:`w_{CL}.D_C^L(X,C)` between the image :math:`X` and the
# content image :math:`C`. The feature maps of the content image (:math:`F_{CL}`) must be
# known by the function in order to calculate the content distance. We
# implement this function as a torch module with a constructor that takes
# :math:`F_{CL}` as an input. The distance :math:`\|F_{XL} - F_{CL}\|^2` is the mean square error
# between the two sets of feature maps, and can be computed using ``nn.MSELoss``.
#
# We will add this content loss module directly after the convolution
# layer(s) that are being used to compute the content distance. This way
# each time the network is fed an input image the content losses will be
# computed at the desired layers and because of auto grad, all the
# gradients will be computed. Now, in order to make the content loss layer
# transparent we must define a ``forward`` method that computes the content
# loss and then returns the layer's input. The computed loss is saved as a
# parameter of the module.
#
class ContentLoss(nn.Module):
def __init__(self, target, ):
super(ContentLoss, self).__init__()
# we 'detach' the target content from the tree used
# to dynamically compute the gradient: this is a stated value,
# not a variable. Otherwise the forward method of the criterion
# will throw an error.
self.target = target.detach()
def forward(self, input):
self.loss = F.mse_loss(input, self.target)
return input
######################################################################
# .. Note::
# **Important detail**: although this module is named ``ContentLoss``, it
# is not a true PyTorch Loss function. If you want to define your content
# loss as a PyTorch Loss function, you have to create a PyTorch autograd function
# to recompute/implement the gradient manually in the ``backward``
# method.
######################################################################
# Style Loss
# ~~~~~~~~~~
#
# The style loss module is implemented similarly to the content loss
# module. It will act as a transparent layer in a
# network that computes the style loss of that layer. In order to
# calculate the style loss, we need to compute the gram matrix :math:`G_{XL}`. A gram
# matrix is the result of multiplying a given matrix by its transposed
# matrix. In this application the given matrix is a reshaped version of
# the feature maps :math:`F_{XL}` of a layer :math:`L`. :math:`F_{XL}` is reshaped to form :math:`\hat{F}_{XL}`, a :math:`K`\ x\ :math:`N`
# matrix, where :math:`K` is the number of feature maps at layer :math:`L` and :math:`N` is the
# length of any vectorized feature map :math:`F_{XL}^k`. For example, the first line
# of :math:`\hat{F}_{XL}` corresponds to the first vectorized feature map :math:`F_{XL}^1`.
#
# Finally, the gram matrix must be normalized by dividing each element by
# the total number of elements in the matrix. This normalization is to
# counteract the fact that :math:`\hat{F}_{XL}` matrices with a large :math:`N` dimension yield
# larger values in the Gram matrix. These larger values will cause the
# first layers (before pooling layers) to have a larger impact during the
# gradient descent. Style features tend to be in the deeper layers of the
# network so this normalization step is crucial.
#
def gram_matrix(input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
    features = input.view(a * b, c * d)  # resize F_XL into \hat F_XL
G = torch.mm(features, features.t()) # compute the gram product
# we 'normalize' the values of the gram matrix
    # by dividing by the number of elements in each feature map.
return G.div(a * b * c * d)
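# A quick shape sanity check (a minimal sketch with made-up sizes):
#   feats = torch.randn(1, 8, 4, 4)   # one image, 8 feature maps of 4x4
#   gram_matrix(feats).shape          # -> torch.Size([8, 8])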
######################################################################
# Now the style loss module looks almost exactly like the content loss
# module. The style distance is also computed using the mean square
# error between :math:`G_{XL}` and :math:`G_{SL}`.
#
class StyleLoss(nn.Module):
def __init__(self, target_feature):
super(StyleLoss, self).__init__()
self.target = gram_matrix(target_feature).detach()
def forward(self, input):
G = gram_matrix(input)
self.loss = F.mse_loss(G, self.target)
return input
######################################################################
# Importing the Model
# -------------------
#
# Now we need to import a pre-trained neural network. We will use a 19
# layer VGG network like the one used in the paper.
#
# PyTorch's implementation of VGG is a module divided into two child
# ``Sequential`` modules: ``features`` (containing convolution and pooling layers),
# and ``classifier`` (containing fully connected layers). We will use the
# ``features`` module because we need the output of the individual
# convolution layers to measure content and style loss. Some layers have
# different behavior during training than evaluation, so we must set the
# network to evaluation mode using ``.eval()``.
#
cnn = models.vgg19(pretrained=True).features.to(device).eval()
######################################################################
# Additionally, VGG networks are trained on images with each channel
# normalized by mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225].
# We will use them to normalize the image before sending it into the network.
#
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
# create a module to normalize input image so we can easily put it in a
# nn.Sequential
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
# .view the mean and std to make them [C x 1 x 1] so that they can
# directly work with image Tensor of shape [B x C x H x W].
# B is batch size. C is number of channels. H is height and W is width.
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
# normalize img
return (img - self.mean) / self.std
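# For instance, an input batch of shape [1, 3, H, W] broadcasts against the
# [3, 1, 1] mean and std, so each color channel is normalized independently.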
######################################################################
# A ``Sequential`` module contains an ordered list of child modules. For
# instance, ``vgg19.features`` contains a sequence (Conv2d, ReLU, MaxPool2d,
# Conv2d, ReLU...) aligned in the right order of depth. We need to add our
# content loss and style loss layers immediately after the convolution
# layer they are detecting. To do this we must create a new ``Sequential``
# module that has content loss and style loss modules correctly inserted.
#
# desired depth layers to compute style/content losses :
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img,
content_layers=content_layers_default,
style_layers=style_layers_default):
cnn = copy.deepcopy(cnn)
# normalization module
normalization = Normalization(normalization_mean, normalization_std).to(device)
    # just in order to have iterable access to the lists of content/style
# losses
content_losses = []
style_losses = []
# assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
# to put in modules that are supposed to be activated sequentially
model = nn.Sequential(normalization)
i = 0 # increment every time we see a conv
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, nn.ReLU):
name = 'relu_{}'.format(i)
# The in-place version doesn't play very nicely with the ContentLoss
# and StyleLoss we insert below. So we replace with out-of-place
# ones here.
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, nn.BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
model.add_module(name, layer)
if name in content_layers:
# add content loss:
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss:
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_losses.append(style_loss)
# now we trim off the layers after the last content and style losses
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_losses, content_losses
######################################################################
# Next, we select the input image. You can use a copy of the content image
# or white noise.
#
input_img = content_img.clone()
# if you want to use white noise instead uncomment the below line:
# input_img = torch.randn(content_img.data.size(), device=device)
# add the original input image to the figure:
plt.figure()
imshow(input_img, title='Input Image')
######################################################################
# Gradient Descent
# ----------------
#
# As Leon Gatys, the author of the algorithm, suggested `here <https://discuss.pytorch.org/t/pytorch-tutorial-for-neural-transfert-of-artistic-style/336/20?u=alexis-jacq>`__, we will use
# L-BFGS algorithm to run our gradient descent. Unlike training a network,
# we want to train the input image in order to minimise the content/style
# losses. We will create a PyTorch L-BFGS optimizer ``optim.LBFGS`` and pass
# our image to it as the tensor to optimize.
#
def get_input_optimizer(input_img):
# this line to show that input is a parameter that requires a gradient
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
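# Note that, unlike ordinary training, the parameter handed to the optimizer
# is the image itself; the VGG weights are never passed in and stay frozen.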
######################################################################
# Finally, we must define a function that performs the neural transfer. For
# each iteration of the networks, it is fed an updated input and computes
# new losses. We will run the ``backward`` methods of each loss module to
# dynamically compute their gradients. The optimizer requires a "closure"
# function, which reevaluates the module and returns the loss.
#
# We still have one final constraint to address. The network may try to
# optimize the input with values that exceed the 0 to 1 tensor range for
# the image. We can address this by correcting the input values to be
# between 0 to 1 each time the network is run.
#
def run_style_transfer(cnn, normalization_mean, normalization_std,
content_img, style_img, input_img, num_steps=300,
style_weight=1000000, content_weight=1):
"""Run the style transfer."""
print('Building the style transfer model..')
model, style_losses, content_losses = get_style_model_and_losses(cnn,
normalization_mean, normalization_std, style_img,
content_img)
optimizer = get_input_optimizer(input_img)
print('Optimizing..')
run = [0]
while run[0] <= num_steps:
def closure():
# correct the values of updated input image
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
run[0] += 1
if run[0] % 50 == 0:
print("run {}:".format(run))
print('Style Loss : {:4f} Content Loss: {:4f}'.format(
style_score.item(), content_score.item()))
print()
return style_score + content_score
optimizer.step(closure)
# a last correction...
input_img.data.clamp_(0, 1)
return input_img
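# If you also want the result on disk (an optional extra, not part of the
# original tutorial flow), the same PIL round-trip used by imshow works:
#   unloader(output.cpu().clone().squeeze(0)).save('output.jpg')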
######################################################################
# Finally, we can run the algorithm.
#
output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,
content_img, style_img, input_img)
plt.figure()
imshow(output, title='Output Image')
# sphinx_gallery_thumbnail_number = 4
plt.ioff()
plt.show()
avg_line_length: 40.463768 | max_line_length: 186 | alphanum_fraction: 0.647667
hexsha: 0511512cf9c1f20d93b313eeb13302178d0be3fe | size: 947 | ext: py | lang: Python
max_stars_repo_path: myclasses/ClassHomework5.py | max_stars_repo_name: gurmeetkhehra/python-practice | max_stars_repo_head_hexsha: abeb5586f8c1e673fd8ff312a4ae0941f2a0194b | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | stars events: null
max_issues_repo_path: myclasses/ClassHomework5.py | max_issues_repo_name: gurmeetkhehra/python-practice | max_issues_repo_head_hexsha: abeb5586f8c1e673fd8ff312a4ae0941f2a0194b | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo_path: myclasses/ClassHomework5.py | max_forks_repo_name: gurmeetkhehra/python-practice | max_forks_repo_head_hexsha: abeb5586f8c1e673fd8ff312a4ae0941f2a0194b | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | forks events: null
# 5. Write a Python class Car with all attributes private. Write a method to return all individual attributes.
class Car:
    def __init__(self, brand, model, year, color):
        # Name-mangled ("private") attributes, as the exercise asks for.
        self.__brand = brand
        self.__model = model
        self.__year = year
        self.__color = color
    def get_car_details(self):
        print(self.__brand)
        print(self.__model)
        print(self.__color)
        print(self.__year)
toyota = Car('Toyota', 'Camry', 2018, 'white')
toyota.get_car_details()  # the method prints itself; print(...) around it would only print None
class ElectricCar(Car):
    def __init__(self, brand, model, year, color, battery, charged_capacity):
        super().__init__(brand, model, year, color)
        self.__battery = battery
        self.__charged_capacity = charged_capacity
    def get_car_details(self):
        super().get_car_details()
        print(self.__battery)
        print(self.__charged_capacity)
tesla = ElectricCar('Tesla', 'X', 2019, 'Red', 5000, 100)
tesla.get_car_details()
avg_line_length: 27.852941 | max_line_length: 106 | alphanum_fraction: 0.654699
hexsha: 8953f544bfd4760e29f4de01a9f1ac2eba5e0594 | size: 3,195 | ext: py | lang: Python
max_stars_repo_path: gui/qt/qrwindow.py | max_stars_repo_name: stratisproject/electrum | max_stars_repo_head_hexsha: c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 26 | stars events: 2017-06-09T04:13:13.000Z to 2021-11-15T11:35:30.000Z
max_issues_repo_path: gui/qt/qrwindow.py | max_issues_repo_name: stratisproject/electrum | max_issues_repo_head_hexsha: c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 29 | issues events: 2017-05-07T05:08:06.000Z to 2021-02-19T13:15:03.000Z
max_forks_repo_path: gui/qt/qrwindow.py | max_forks_repo_name: stratisproject/electrum | max_forks_repo_head_hexsha: c60fa543418c31ce7f5dcf5aa717d82a5c47e216 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 21 | forks events: 2017-05-31T14:24:20.000Z to 2021-01-30T17:35:43.000Z
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import platform
from decimal import Decimal
from urllib import quote
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
from electrum_stratis_gui.qt.qrcodewidget import QRCodeWidget
from electrum_stratis.i18n import _
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
column_index = 4
class QR_Window(QWidget):
def __init__(self, win):
QWidget.__init__(self)
self.win = win
self.setWindowTitle('Electrum - '+_('Payment Request'))
self.setMinimumSize(800, 250)
self.address = ''
self.label = ''
self.amount = 0
self.setFocusPolicy(QtCore.Qt.NoFocus)
main_box = QHBoxLayout()
self.qrw = QRCodeWidget()
main_box.addWidget(self.qrw, 1)
vbox = QVBoxLayout()
main_box.addLayout(vbox)
self.address_label = QLabel("")
#self.address_label.setFont(QFont(MONOSPACE_FONT))
vbox.addWidget(self.address_label)
self.label_label = QLabel("")
vbox.addWidget(self.label_label)
self.amount_label = QLabel("")
vbox.addWidget(self.amount_label)
vbox.addStretch(1)
self.setLayout(main_box)
def set_content(self, address, amount, message, url):
address_text = "<span style='font-size: 18pt'>%s</span>" % address if address else ""
self.address_label.setText(address_text)
if amount:
amount = self.win.format_amount(amount)
amount_text = "<span style='font-size: 21pt'>%s</span> <span style='font-size: 16pt'>%s</span> " % (amount, self.win.base_unit())
else:
amount_text = ''
self.amount_label.setText(amount_text)
label_text = "<span style='font-size: 21pt'>%s</span>" % message if message else ""
self.label_label.setText(label_text)
self.qrw.setData(url)
avg_line_length: 33.989362 | max_line_length: 141 | alphanum_fraction: 0.694836
hexsha: f4eda96fc27599aff40f0ee7dfbcf3f031ad9ef8 | size: 1,994 | ext: py | lang: Python
max_stars_repo_path: benchmarks/experimental/benchmark_dataset.py | max_stars_repo_name: gautham-kollu/fairscale | max_stars_repo_head_hexsha: 9dc1b92ff0897f150f8d0259966ef477ef891883 | max_stars_repo_licenses: ["MIT", "Apache-2.0", "BSD-3-Clause"] | max_stars_count: null | stars events: null
max_issues_repo_path: benchmarks/experimental/benchmark_dataset.py | max_issues_repo_name: gautham-kollu/fairscale | max_issues_repo_head_hexsha: 9dc1b92ff0897f150f8d0259966ef477ef891883 | max_issues_repo_licenses: ["MIT", "Apache-2.0", "BSD-3-Clause"] | max_issues_count: null | issues events: null
max_forks_repo_path: benchmarks/experimental/benchmark_dataset.py | max_forks_repo_name: gautham-kollu/fairscale | max_forks_repo_head_hexsha: 9dc1b92ff0897f150f8d0259966ef477ef891883 | max_forks_repo_licenses: ["MIT", "Apache-2.0", "BSD-3-Clause"] | max_forks_count: null | forks events: null
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.utils.data import Dataset
# TODO(sidgoyal): Refactor benchmarks to remove this file eventually.
def collate_sentences_lm(samples):
if len(samples) == 0:
return {}
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = torch.stack([s["source"] for s in samples], 0)
tgt_tokens = torch.stack([s["target"] for s in samples], 0)
ntokens = len(samples) * len(samples[0]["target"])
src_lengths = torch.LongTensor([len(samples[0]["source"])] * len(samples))
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"input": src_tokens,
"target": tgt_tokens,
}
return batch
class BenchmarkLMDataset(Dataset):
"""
    Dataset to benchmark a translation-like seq2seq task.
Args:
vocab_size (int, optional): size of the vocabulary (default 10000).
max_source_positions (int, optional): max number of tokens in the
source sentence (default: 1024).
total_samples (int, optional): the total number of rows in the
dataset (default: 10000).
"""
def __init__(
self,
vocab_size=10000,
max_source_positions=1024,
total_samples=10000,
):
self.vocab_size = vocab_size
self.max_source_positions = max_source_positions
self.total_samples = total_samples
self.sizes = [self.max_source_positions] * self.total_samples
def __getitem__(self, index):
length = self.sizes[index]
source = torch.randint(1, self.vocab_size, (length,))
target = source.clone()
return {
"id": index,
"source": source,
"target": target,
}
def __len__(self):
return self.total_samples
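# Minimal usage sketch (an assumption, not part of the original file): wiring
# BenchmarkLMDataset and collate_sentences_lm into a standard PyTorch
# DataLoader and checking the batch shapes produced by the collate function.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = BenchmarkLMDataset(vocab_size=100, max_source_positions=16,
                                 total_samples=64)
    loader = DataLoader(dataset, batch_size=8,
                        collate_fn=collate_sentences_lm)
    batch = next(iter(loader))
    # input/target are stacked per-sample tensors: (batch, seq_len).
    assert batch["input"].shape == (8, 16)
    assert batch["ntokens"] == 8 * 16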
| 29.761194
| 78
| 0.629388
|
77736aa1fe9c94bea589eb6e7574d81227dec2e6
| 1,153
|
py
|
Python
|
recipe_scrapers/thevintagemixer.py
|
riki900/recipes
|
7895802f6cf80d14db8465e2f3d3874cec922b5d
|
[
"MIT"
] | null | null | null |
recipe_scrapers/thevintagemixer.py
|
riki900/recipes
|
7895802f6cf80d14db8465e2f3d3874cec922b5d
|
[
"MIT"
] | null | null | null |
recipe_scrapers/thevintagemixer.py
|
riki900/recipes
|
7895802f6cf80d14db8465e2f3d3874cec922b5d
|
[
"MIT"
] | null | null | null |
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string
class TheVintageMixer(AbstractScraper):
@classmethod
    def host(cls):
return 'thevintagemixer.com'
def title(self):
return self.soup.find(
'div',
{'class': 'wprm-recipe-name'}
).get_text()
def total_time(self):
return get_minutes(self.soup.find(
'meta', {'itemprop': 'totalTime'}).parent
)
def yields(self):
return 0 # Servings do not exist in this site.
def ingredients(self):
ingredients = self.soup.findAll(
'li', {'itemprop': "recipeIngredient"}
)
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients
if len(normalize_string(ingredient.get_text())) > 0
]
def instructions(self):
instructions = self.soup.findAll(
'div',
{'itemprop': 'recipeInstructions'}
)
return '\n'.join([
normalize_string(instruction.get_text())
for instruction in instructions
])
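# Usage sketch (assumed recipe_scrapers API; the URL is hypothetical):
# the library's scrape_me() helper selects this class whenever the page
# host matches host() above.
#
#     from recipe_scrapers import scrape_me
#     scraper = scrape_me('https://thevintagemixer.com/example-recipe/')
#     print(scraper.title())
#     print(scraper.total_time(), 'minutes')
#     for ingredient in scraper.ingredients():
#         print('-', ingredient)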
| 25.065217
| 63
| 0.575889
|
0eee58f996a06392582f59bf111614aa5707cc46
| 19,146
|
py
|
Python
|
tests/tensorflow/test_nn.py
|
xnuohz/dgl
|
115ac0b9a3dbd806cc52f2a428048b79502f2350
|
[
"Apache-2.0"
] | 1
|
2020-06-04T07:57:12.000Z
|
2020-06-04T07:57:12.000Z
|
tests/tensorflow/test_nn.py
|
hetong007/dgl
|
1bfc3118e4a542821c1415e376c026fe1dfd0b59
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow/test_nn.py
|
hetong007/dgl
|
1bfc3118e4a542821c1415e376c026fe1dfd0b59
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import layers
import networkx as nx
import pytest
import dgl
import dgl.nn.tensorflow as nn
import dgl.function as fn
import backend as F
from test_utils.graph_cases import get_cases, random_graph, random_bipartite, random_dglgraph
from test_utils import parametrize_dtype
from copy import deepcopy
import numpy as np
import scipy as sp
def _AXWb(A, X, W, b):
X = tf.matmul(X, W)
Y = tf.reshape(tf.matmul(A, tf.reshape(X, (X.shape[0], -1))), X.shape)
return Y + b
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv(out_dim):
g = dgl.DGLGraph(nx.path_graph(3)).to(F.ctx())
ctx = F.ctx()
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=True, ctx=ctx)))
conv = nn.GraphConv(5, out_dim, norm='none', bias=True)
print(conv)
# test#1: basic
h0 = F.ones((3, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
# test#2: more-dim
h0 = F.ones((3, 5, 5))
h1 = conv(g, h0)
assert len(g.ndata) == 0
assert len(g.edata) == 0
assert F.allclose(h1, _AXWb(adj, h0, conv.weight, conv.bias))
    conv = nn.GraphConv(5, out_dim)
    # test#3: basic, default normalization
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    # test#4: more-dim
    h0 = F.ones((3, 5, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
# test rest_parameters
# old_weight = deepcopy(conv.weight.data)
# conv.reset_parameters()
# new_weight = conv.weight.data
# assert not F.allclose(old_weight, new_weight)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv2(idtype, g, norm, weight, bias, out_dim):
g = g.astype(idtype).to(F.ctx())
conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
ext_w = F.randn((5, out_dim))
nsrc = g.number_of_src_nodes()
ndst = g.number_of_dst_nodes()
h = F.randn((nsrc, 5))
h_dst = F.randn((ndst, out_dim))
if weight:
h_out = conv(g, h)
else:
h_out = conv(g, h, weight=ext_w)
assert h_out.shape == (ndst, out_dim)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree', 'dglgraph']))
@pytest.mark.parametrize('norm', ['none', 'both', 'right'])
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_graph_conv2_bi(idtype, g, norm, weight, bias, out_dim):
g = g.astype(idtype).to(F.ctx())
conv = nn.GraphConv(5, out_dim, norm=norm, weight=weight, bias=bias)
ext_w = F.randn((5, out_dim))
nsrc = g.number_of_src_nodes()
ndst = g.number_of_dst_nodes()
h = F.randn((nsrc, 5))
h_dst = F.randn((ndst, out_dim))
if weight:
h_out = conv(g, (h, h_dst))
else:
h_out = conv(g, (h, h_dst), weight=ext_w)
assert h_out.shape == (ndst, out_dim)
def test_simple_pool():
ctx = F.ctx()
g = dgl.DGLGraph(nx.path_graph(15)).to(F.ctx())
sum_pool = nn.SumPooling()
avg_pool = nn.AvgPooling()
max_pool = nn.MaxPooling()
sort_pool = nn.SortPooling(10) # k = 10
print(sum_pool, avg_pool, max_pool, sort_pool)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = sum_pool(g, h0)
assert F.allclose(F.squeeze(h1, 0), F.sum(h0, 0))
h1 = avg_pool(g, h0)
assert F.allclose(F.squeeze(h1, 0), F.mean(h0, 0))
h1 = max_pool(g, h0)
assert F.allclose(F.squeeze(h1, 0), F.max(h0, 0))
h1 = sort_pool(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 * 5 and h1.ndim == 2
# test#2: batched graph
g_ = dgl.DGLGraph(nx.path_graph(5)).to(F.ctx())
bg = dgl.batch([g, g_, g, g_, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = sum_pool(bg, h0)
truth = tf.stack([F.sum(h0[:15], 0),
F.sum(h0[15:20], 0),
F.sum(h0[20:35], 0),
F.sum(h0[35:40], 0),
F.sum(h0[40:55], 0)], 0)
assert F.allclose(h1, truth)
h1 = avg_pool(bg, h0)
truth = tf.stack([F.mean(h0[:15], 0),
F.mean(h0[15:20], 0),
F.mean(h0[20:35], 0),
F.mean(h0[35:40], 0),
F.mean(h0[40:55], 0)], 0)
assert F.allclose(h1, truth)
h1 = max_pool(bg, h0)
truth = tf.stack([F.max(h0[:15], 0),
F.max(h0[15:20], 0),
F.max(h0[20:35], 0),
F.max(h0[35:40], 0),
F.max(h0[40:55], 0)], 0)
assert F.allclose(h1, truth)
h1 = sort_pool(bg, h0)
assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.ndim == 2
def test_glob_att_pool():
g = dgl.DGLGraph(nx.path_graph(10)).to(F.ctx())
gap = nn.GlobalAttentionPooling(layers.Dense(1), layers.Dense(10))
print(gap)
# test#1: basic
h0 = F.randn((g.number_of_nodes(), 5))
h1 = gap(g, h0)
assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.ndim == 2
# test#2: batched graph
bg = dgl.batch([g, g, g, g])
h0 = F.randn((bg.number_of_nodes(), 5))
h1 = gap(bg, h0)
assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.ndim == 2
@pytest.mark.parametrize('O', [1, 2, 8])
def test_rgcn(O):
etype = []
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True).to(F.ctx())
# 5 etypes
R = 5
for i in range(g.number_of_edges()):
etype.append(i % 5)
B = 2
I = 10
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis_low = nn.RelGraphConv(I, O, R, "basis", B, low_mem=True)
rgc_basis_low.weight = rgc_basis.weight
rgc_basis_low.w_comp = rgc_basis.w_comp
rgc_basis_low.loop_weight = rgc_basis.loop_weight
h = tf.random.normal((100, I))
r = tf.constant(etype)
h_new = rgc_basis(g, h, r)
h_new_low = rgc_basis_low(g, h, r)
assert list(h_new.shape) == [100, O]
assert list(h_new_low.shape) == [100, O]
assert F.allclose(h_new, h_new_low)
if O % B == 0:
rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True)
rgc_bdd_low.weight = rgc_bdd.weight
rgc_bdd_low.loop_weight = rgc_bdd.loop_weight
h = tf.random.normal((100, I))
r = tf.constant(etype)
h_new = rgc_bdd(g, h, r)
h_new_low = rgc_bdd_low(g, h, r)
assert list(h_new.shape) == [100, O]
assert list(h_new_low.shape) == [100, O]
assert F.allclose(h_new, h_new_low)
# with norm
norm = tf.zeros((g.number_of_edges(), 1))
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis_low = nn.RelGraphConv(I, O, R, "basis", B, low_mem=True)
rgc_basis_low.weight = rgc_basis.weight
rgc_basis_low.w_comp = rgc_basis.w_comp
rgc_basis_low.loop_weight = rgc_basis.loop_weight
h = tf.random.normal((100, I))
r = tf.constant(etype)
h_new = rgc_basis(g, h, r, norm)
h_new_low = rgc_basis_low(g, h, r, norm)
assert list(h_new.shape) == [100, O]
assert list(h_new_low.shape) == [100, O]
assert F.allclose(h_new, h_new_low)
if O % B == 0:
rgc_bdd = nn.RelGraphConv(I, O, R, "bdd", B)
rgc_bdd_low = nn.RelGraphConv(I, O, R, "bdd", B, low_mem=True)
rgc_bdd_low.weight = rgc_bdd.weight
rgc_bdd_low.loop_weight = rgc_bdd.loop_weight
h = tf.random.normal((100, I))
r = tf.constant(etype)
h_new = rgc_bdd(g, h, r, norm)
h_new_low = rgc_bdd_low(g, h, r, norm)
assert list(h_new.shape) == [100, O]
assert list(h_new_low.shape) == [100, O]
assert F.allclose(h_new, h_new_low)
# id input
rgc_basis = nn.RelGraphConv(I, O, R, "basis", B)
rgc_basis_low = nn.RelGraphConv(I, O, R, "basis", B, low_mem=True)
rgc_basis_low.weight = rgc_basis.weight
rgc_basis_low.w_comp = rgc_basis.w_comp
rgc_basis_low.loop_weight = rgc_basis.loop_weight
h = tf.constant(np.random.randint(0, I, (100,))) * 1
r = tf.constant(etype) * 1
h_new = rgc_basis(g, h, r)
h_new_low = rgc_basis_low(g, h, r)
assert list(h_new.shape) == [100, O]
assert list(h_new_low.shape) == [100, O]
assert F.allclose(h_new, h_new_low)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gat_conv(g, idtype, out_dim, num_heads):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gat = nn.GATConv(5, out_dim, num_heads)
feat = F.randn((g.number_of_src_nodes(), 5))
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
# test residual connection
gat = nn.GATConv(5, out_dim, num_heads, residual=True)
h = gat(g, feat)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
@pytest.mark.parametrize('num_heads', [1, 4])
def test_gat_conv_bi(g, idtype, out_dim, num_heads):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gat = nn.GATConv(5, out_dim, num_heads)
feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = gat(g, feat)
assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
_, a = gat(g, feat, get_attention=True)
assert a.shape == (g.number_of_edges(), num_heads, 1)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 10])
def test_sage_conv(idtype, g, aggre_type, out_dim):
g = g.astype(idtype).to(F.ctx())
sage = nn.SAGEConv(5, out_dim, aggre_type)
feat = F.randn((g.number_of_src_nodes(), 5))
h = sage(g, feat)
assert h.shape[-1] == out_dim
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv_bi(idtype, g, aggre_type, out_dim):
g = g.astype(idtype).to(F.ctx())
dst_dim = 5 if aggre_type != 'gcn' else 10
sage = nn.SAGEConv((10, dst_dim), out_dim, aggre_type)
feat = (F.randn((g.number_of_src_nodes(), 10)), F.randn((g.number_of_dst_nodes(), dst_dim)))
h = sage(g, feat)
assert h.shape[-1] == out_dim
assert h.shape[0] == g.number_of_dst_nodes()
@parametrize_dtype
@pytest.mark.parametrize('aggre_type', ['mean', 'pool', 'gcn'])
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sage_conv_bi_empty(idtype, aggre_type, out_dim):
# Test the case for graphs without edges
g = dgl.heterograph({('_U', '_E', '_V'): ([], [])}, {'_U': 5, '_V': 3}).to(F.ctx())
g = g.astype(idtype).to(F.ctx())
sage = nn.SAGEConv((3, 3), out_dim, 'gcn')
feat = (F.randn((5, 3)), F.randn((3, 3)))
h = sage(g, feat)
assert h.shape[-1] == out_dim
assert h.shape[0] == 3
for aggre_type in ['mean', 'pool', 'lstm']:
sage = nn.SAGEConv((3, 1), out_dim, aggre_type)
feat = (F.randn((5, 3)), F.randn((3, 1)))
h = sage(g, feat)
assert h.shape[-1] == out_dim
assert h.shape[0] == 3
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_sgc_conv(g, idtype, out_dim):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
# not cached
sgc = nn.SGConv(5, out_dim, 3)
feat = F.randn((g.number_of_nodes(), 5))
h = sgc(g, feat)
assert h.shape[-1] == out_dim
# cached
sgc = nn.SGConv(5, out_dim, 3, True)
h_0 = sgc(g, feat)
h_1 = sgc(g, feat + 1)
assert F.allclose(h_0, h_1)
assert h_0.shape[-1] == out_dim
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo'], exclude=['zero-degree']))
def test_appnp_conv(g, idtype):
ctx = F.ctx()
g = g.astype(idtype).to(ctx)
appnp = nn.APPNPConv(10, 0.1)
feat = F.randn((g.number_of_nodes(), 5))
h = appnp(g, feat)
assert h.shape[-1] == 5
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv(g, idtype, aggregator_type):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
gin = nn.GINConv(
tf.keras.layers.Dense(12),
aggregator_type
)
feat = F.randn((g.number_of_src_nodes(), 5))
h = gin(g, feat)
assert h.shape == (g.number_of_dst_nodes(), 12)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite']))
@pytest.mark.parametrize('aggregator_type', ['mean', 'max', 'sum'])
def test_gin_conv_bi(g, idtype, aggregator_type):
g = g.astype(idtype).to(F.ctx())
gin = nn.GINConv(
tf.keras.layers.Dense(12),
aggregator_type
)
feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
h = gin(g, feat)
assert h.shape == (g.number_of_dst_nodes(), 12)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['homo', 'block-bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv(g, idtype, out_dim):
g = g.astype(idtype).to(F.ctx())
edge_conv = nn.EdgeConv(out_dim)
h0 = F.randn((g.number_of_src_nodes(), 5))
h1 = edge_conv(g, h0)
assert h1.shape == (g.number_of_dst_nodes(), out_dim)
@parametrize_dtype
@pytest.mark.parametrize('g', get_cases(['bipartite'], exclude=['zero-degree']))
@pytest.mark.parametrize('out_dim', [1, 2])
def test_edge_conv_bi(g, idtype, out_dim):
g = g.astype(idtype).to(F.ctx())
ctx = F.ctx()
edge_conv = nn.EdgeConv(out_dim)
h0 = F.randn((g.number_of_src_nodes(), 5))
x0 = F.randn((g.number_of_dst_nodes(), 5))
h1 = edge_conv(g, (h0, x0))
assert h1.shape == (g.number_of_dst_nodes(), out_dim)
def myagg(alist, dsttype):
rst = alist[0]
for i in range(1, len(alist)):
rst = rst + (i + 1) * alist[i]
return rst
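# Illustrative note (a behavioral assumption about HeteroGraphConv): the
# aggregator receives one tensor per relation feeding the destination node
# type, so with two incoming relations
#     myagg([tf.ones(2), tf.ones(2)], 'game')  # -> tensor([3., 3.])
# i.e. the i-th relation's output is weighted by (i + 1).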
@parametrize_dtype
@pytest.mark.parametrize('agg', ['sum', 'max', 'min', 'mean', 'stack', myagg])
def test_hetero_conv(agg, idtype):
g = dgl.heterograph({
('user', 'follows', 'user'): ([0, 0, 2, 1], [1, 2, 1, 3]),
('user', 'plays', 'game'): ([0, 0, 0, 1, 2], [0, 2, 3, 0, 2]),
('store', 'sells', 'game'): ([0, 0, 1, 1], [0, 3, 1, 2])},
idtype=idtype, device=F.ctx())
conv = nn.HeteroGraphConv({
'follows': nn.GraphConv(2, 3, allow_zero_in_degree=True),
'plays': nn.GraphConv(2, 4, allow_zero_in_degree=True),
'sells': nn.GraphConv(3, 4, allow_zero_in_degree=True)},
agg)
uf = F.randn((4, 2))
gf = F.randn((4, 4))
sf = F.randn((2, 3))
h = conv(g, {'user': uf, 'store': sf, 'game': gf})
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
block = dgl.to_block(g.to(F.cpu()), {'user': [0, 1, 2, 3], 'game': [0, 1, 2, 3], 'store': []}).to(F.ctx())
h = conv(block, ({'user': uf, 'game': gf, 'store': sf}, {'user': uf, 'game': gf, 'store': sf[0:0]}))
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
h = conv(block, {'user': uf, 'game': gf, 'store': sf})
assert set(h.keys()) == {'user', 'game'}
if agg != 'stack':
assert h['user'].shape == (4, 3)
assert h['game'].shape == (4, 4)
else:
assert h['user'].shape == (4, 1, 3)
assert h['game'].shape == (4, 2, 4)
# test with mod args
class MyMod(tf.keras.layers.Layer):
def __init__(self, s1, s2):
super(MyMod, self).__init__()
self.carg1 = 0
self.carg2 = 0
self.s1 = s1
self.s2 = s2
def call(self, g, h, arg1=None, *, arg2=None):
if arg1 is not None:
self.carg1 += 1
if arg2 is not None:
self.carg2 += 1
return tf.zeros((g.number_of_dst_nodes(), self.s2))
mod1 = MyMod(2, 3)
mod2 = MyMod(2, 4)
mod3 = MyMod(3, 4)
conv = nn.HeteroGraphConv({
'follows': mod1,
'plays': mod2,
'sells': mod3},
agg)
mod_args = {'follows' : (1,), 'plays' : (1,)}
mod_kwargs = {'sells' : {'arg2' : 'abc'}}
h = conv(g, {'user' : uf, 'game': gf, 'store' : sf}, mod_args=mod_args, mod_kwargs=mod_kwargs)
assert mod1.carg1 == 1
assert mod1.carg2 == 0
assert mod2.carg1 == 1
assert mod2.carg2 == 0
assert mod3.carg1 == 0
assert mod3.carg2 == 1
@pytest.mark.parametrize('out_dim', [1, 2])
def test_dense_cheb_conv(out_dim):
for k in range(3, 4):
ctx = F.ctx()
g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1, random_state=42))
g = g.to(ctx)
adj = tf.sparse.to_dense(tf.sparse.reorder(g.adjacency_matrix(transpose=True, ctx=ctx)))
cheb = nn.ChebConv(5, out_dim, k, None, bias=True)
dense_cheb = nn.DenseChebConv(5, out_dim, k, bias=True)
# init cheb modules
feat = F.ones((100, 5))
out_cheb = cheb(g, feat, [2.0])
dense_cheb.W = tf.reshape(cheb.linear.weights[0], (k, 5, out_dim))
if cheb.linear.bias is not None:
dense_cheb.bias = cheb.linear.bias
out_dense_cheb = dense_cheb(adj, feat, 2.0)
print(out_cheb - out_dense_cheb)
assert F.allclose(out_cheb, out_dense_cheb)
if __name__ == '__main__':
    # Only tests whose parameters are plain values can be invoked directly;
    # the remaining tests depend on the g/idtype pytest fixtures and should
    # be run through pytest (e.g. `pytest tests/tensorflow/test_nn.py`).
    test_graph_conv(2)
    # test_set2set()
    test_glob_att_pool()
    test_simple_pool()
    # test_set_trans()
    test_rgcn(8)
    # test_tagconv()
    # test_gat_conv()    # needs pytest fixtures
    # test_sage_conv()   # needs pytest fixtures
    # test_sgc_conv()    # needs pytest fixtures
    # test_appnp_conv()  # needs pytest fixtures
    # test_gin_conv()    # needs pytest fixtures
    # test_edge_conv()   # needs pytest fixtures
    # test_agnn_conv()
    # test_gated_graph_conv()
    # test_nn_conv()
    # test_gmm_conv()
    # test_dense_graph_conv()
    # test_dense_sage_conv()
    test_dense_cheb_conv(2)
    # test_sequential()
| 34.684783
| 110
| 0.595007
|
84c5414277bc1734a47f55cab27ea5504f745f74
| 310
|
py
|
Python
|
src/rfidam/setup.py
|
larioandr/thesis-models
|
ecbc8c01aaeaa69034d6fe1d8577ab655968ea5f
|
[
"MIT"
] | 1
|
2021-01-17T15:49:03.000Z
|
2021-01-17T15:49:03.000Z
|
src/rfidam/setup.py
|
larioandr/thesis-models
|
ecbc8c01aaeaa69034d6fe1d8577ab655968ea5f
|
[
"MIT"
] | null | null | null |
src/rfidam/setup.py
|
larioandr/thesis-models
|
ecbc8c01aaeaa69034d6fe1d8577ab655968ea5f
|
[
"MIT"
] | 1
|
2021-03-07T15:31:06.000Z
|
2021-03-07T15:31:06.000Z
|
from setuptools import setup
setup(
name='rfidam',
version='1.0',
py_modules=['rfidam'],
install_requires=[
'Click',
'numpy>=1.19.2',
],
    tests_require=[
'pytest',
],
entry_points='''
[console_scripts]
rfidam=rfidam.main:main
'''
)
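# Install/usage sketch (assumed workflow, not part of the original file):
#
#     pip install .    # installs the package plus the console script
#     rfidam --help    # resolved via the [console_scripts] entry point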
| 15.5
| 31
| 0.522581
|
765af8f31a422794d675590ce89e84015b8c7c07
| 10,487
|
py
|
Python
|
nova/cmd/baremetal_deploy_helper.py
|
melwitt/nova
|
6c8706b70c3bb386e01742116306a0a7942956be
|
[
"Apache-2.0"
] | null | null | null |
nova/cmd/baremetal_deploy_helper.py
|
melwitt/nova
|
6c8706b70c3bb386e01742116306a0a7942956be
|
[
"Apache-2.0"
] | null | null | null |
nova/cmd/baremetal_deploy_helper.py
|
melwitt/nova
|
6c8706b70c3bb386e01742116306a0a7942956be
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Bare-Metal Deployment Service."""
import os
import sys
import threading
import time
import cgi
import Queue
import re
import socket
import stat
from wsgiref import simple_server
from nova import config
from nova import context as nova_context
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
QUEUE = Queue.Queue()
LOG = logging.getLogger(__name__)
# All functions are called from deploy() directly or indirectly.
# They are split out so that individual steps can be stubbed in tests.
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (portal_address, portal_port),
run_as_root=True,
check_exit_code=[0])
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
check_exit_code=[0])
# Ensure the login complete
time.sleep(3)
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
check_exit_code=[0])
def make_partitions(dev, root_mb, swap_mb):
"""Create partitions for root and swap on a disk device."""
# Lead in with 1MB to allow room for the partition table itself, otherwise
# the way sfdisk adjusts doesn't shift the partition up to compensate, and
# we lose the space.
# http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/raring/util-linux/
# raring/view/head:/fdisk/sfdisk.c#L1940
stdin_command = ('1,%d,83;\n,%d,82;\n0,0;\n0,0;\n' % (root_mb, swap_mb))
utils.execute('sfdisk', '-uM', dev, process_input=stdin_command,
run_as_root=True,
check_exit_code=[0])
# avoid "device is busy"
time.sleep(3)
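# Worked example (values chosen for illustration): with root_mb=512 and
# swap_mb=256, the sfdisk stdin built in make_partitions() above is
#     "1,512,83;\n,256,82;\n0,0;\n0,0;\n"
# i.e. partition 1 starts at 1 MiB and is 512 MiB of type 83 (Linux);
# partition 2 follows with 256 MiB of type 82 (swap); entries 3 and 4
# stay empty.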
def is_block_device(dev):
"""Check whether a device is block or not."""
s = os.stat(dev)
return stat.S_ISBLK(s.st_mode)
def dd(src, dst):
"""Execute dd from src to dst."""
utils.execute('dd',
'if=%s' % src,
'of=%s' % dst,
'bs=1M',
'oflag=direct',
run_as_root=True,
check_exit_code=[0])
def mkswap(dev, label='swap1'):
"""Execute mkswap on a device."""
utils.execute('mkswap',
'-L', label,
dev,
run_as_root=True,
check_exit_code=[0])
def block_uuid(dev):
"""Get UUID of a block device."""
out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
run_as_root=True,
check_exit_code=[0])
return out.strip()
def switch_pxe_config(path, root_uuid):
"""Switch a pxe config from deployment mode to service mode."""
with open(path) as f:
lines = f.readlines()
root = 'UUID=%s' % root_uuid
rre = re.compile(r'\$\{ROOT\}')
dre = re.compile('^default .*$')
with open(path, 'w') as f:
for line in lines:
line = rre.sub(root, line)
line = dre.sub('default boot', line)
f.write(line)
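# Illustrative before/after (hypothetical pxelinux entry, not from the
# source): switch_pxe_config() turns a deployment-mode config such as
#     default deploy
#     append initrd=ramdisk root=${ROOT} ...
# into the service-mode form
#     default boot
#     append initrd=ramdisk root=UUID=<root_uuid> ...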
def notify(address, port):
"""Notify a node that it becomes ready to reboot."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((address, port))
s.send('done')
finally:
s.close()
def get_dev(address, port, iqn, lun):
"""Returns a device path for given parameters."""
dev = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s" \
% (address, port, iqn, lun)
return dev
def get_image_mb(image_path):
"""Get size of an image in Megabyte."""
mb = 1024 * 1024
image_byte = os.path.getsize(image_path)
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
return image_mb
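# Worked example of the round-up: a 1-byte image gives
# int((1 + 1048576 - 1) / 1048576) == 1, while an exact 2 MB image
# (2097152 bytes) gives exactly 2 -- sizes never round down.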
def work_on_disk(dev, root_mb, swap_mb, image_path):
"""Creates partitions and write an image to the root partition."""
root_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
if not is_block_device(dev):
LOG.warn("parent device '%s' not found", dev)
return
make_partitions(dev, root_mb, swap_mb)
if not is_block_device(root_part):
LOG.warn("root device '%s' not found", root_part)
return
if not is_block_device(swap_part):
LOG.warn("swap device '%s' not found", swap_part)
return
dd(image_path, root_part)
mkswap(swap_part)
root_uuid = block_uuid(root_part)
return root_uuid
def deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb):
"""All-in-one function to deploy a node."""
dev = get_dev(address, port, iqn, lun)
image_mb = get_image_mb(image_path)
if image_mb > root_mb:
root_mb = image_mb
discovery(address, port)
login_iscsi(address, port, iqn)
try:
root_uuid = work_on_disk(dev, root_mb, swap_mb, image_path)
    except processutils.ProcessExecutionError as err:
        # Log the command output on failure, then re-raise: without the
        # re-raise, root_uuid would be unbound when used below.
        LOG.error("Cmd     : %s" % err.cmd)
        LOG.error("StdOut  : %s" % err.stdout)
        LOG.error("StdErr  : %s" % err.stderr)
        raise
finally:
logout_iscsi(address, port, iqn)
switch_pxe_config(pxe_config_path, root_uuid)
    # Give the node time to start netcat on the port after it POSTs the
    # request.
time.sleep(3)
notify(address, 10000)
class Worker(threading.Thread):
"""Thread that handles requests in queue."""
def __init__(self):
super(Worker, self).__init__()
self.setDaemon(True)
self.stop = False
self.queue_timeout = 1
def run(self):
while not self.stop:
try:
# Set timeout to check self.stop periodically
(node_id, params) = QUEUE.get(block=True,
timeout=self.queue_timeout)
except Queue.Empty:
pass
else:
# Requests comes here from BareMetalDeploy.post()
LOG.info(_('start deployment for node %(node_id)s, '
'params %(params)s') % locals())
context = nova_context.get_admin_context()
try:
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYING})
deploy(**params)
except Exception:
LOG.exception(_('deployment to node %s failed') % node_id)
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYFAIL})
else:
LOG.info(_('deployment to node %s done') % node_id)
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYDONE})
class BareMetalDeploy(object):
"""WSGI server for bare-metal deployment."""
def __init__(self):
self.worker = Worker()
self.worker.start()
def __call__(self, environ, start_response):
method = environ['REQUEST_METHOD']
if method == 'POST':
return self.post(environ, start_response)
else:
start_response('501 Not Implemented',
[('Content-type', 'text/plain')])
return 'Not Implemented'
def post(self, environ, start_response):
LOG.info("post: environ=%s", environ)
inpt = environ['wsgi.input']
length = int(environ.get('CONTENT_LENGTH', 0))
x = inpt.read(length)
q = dict(cgi.parse_qsl(x))
try:
node_id = q['i']
deploy_key = q['k']
address = q['a']
port = q.get('p', '3260')
iqn = q['n']
lun = q.get('l', '1')
except KeyError as e:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
return "parameter '%s' is not defined" % e
context = nova_context.get_admin_context()
d = db.bm_node_get(context, node_id)
if d['deploy_key'] != deploy_key:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
            return 'deploy key does not match'
params = {'address': address,
'port': port,
'iqn': iqn,
'lun': lun,
'image_path': d['image_path'],
'pxe_config_path': d['pxe_config_path'],
'root_mb': int(d['root_mb']),
'swap_mb': int(d['swap_mb']),
}
# Restart worker, if needed
if not self.worker.isAlive():
self.worker = Worker()
self.worker.start()
LOG.info("request is queued: node %s, params %s", node_id, params)
QUEUE.put((node_id, params))
# Requests go to Worker.run()
start_response('200 OK', [('Content-type', 'text/plain')])
return ''
def main():
config.parse_args(sys.argv)
logging.setup("nova")
global LOG
LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')
app = BareMetalDeploy()
srv = simple_server.make_server('', 10000, app)
srv.serve_forever()
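# Request sketch (hypothetical values; the parameter names match post()
# above): the deploy ramdisk notifies this helper with a form-encoded POST.
#
#     import urllib
#     params = urllib.urlencode({'i': '42', 'k': 'DEPLOYKEY',
#                                'a': '192.0.2.10', 'p': '3260',
#                                'n': 'iqn.2012-01.example:node42', 'l': '1'})
#     urllib.urlopen('http://deploy-helper:10000/', params)  # POST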
| 32.568323
| 79
| 0.575093
|
36d98648a0c36bd93bf3b48ca406dee51349fcc6
| 824
|
py
|
Python
|
authentication/auth.py
|
morfat/djangorest_start
|
093b6ea878ec51bfc10b99f0801f989d09bc3f88
|
[
"MIT"
] | 1
|
2017-01-27T13:24:57.000Z
|
2017-01-27T13:24:57.000Z
|
authentication/auth.py
|
morfat/djangorest_start
|
093b6ea878ec51bfc10b99f0801f989d09bc3f88
|
[
"MIT"
] | null | null | null |
authentication/auth.py
|
morfat/djangorest_start
|
093b6ea878ec51bfc10b99f0801f989d09bc3f88
|
[
"MIT"
] | null | null | null |
from users.models import User
class CustomBackend(object):
"""authenticate when given email,phone number or secret key and password """
def get_by_email(self,email,password):
try:
user = User.objects.get(email=email)
if password:
if user.check_password(password):
return user
else:
return user
except User.DoesNotExist:
pass
def authenticate(self, email=None, password=None, **kwargs):
if email:
return self.get_by_email(email, password)
return None
def get_user(self, pk):
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
return None
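# Usage sketch: a custom backend only takes effect once it is listed in
# settings.AUTHENTICATION_BACKENDS (the dotted path below assumes this file
# lives at authentication/auth.py, as the record path indicates).
#
#     # settings.py
#     AUTHENTICATION_BACKENDS = [
#         'authentication.auth.CustomBackend',
#         'django.contrib.auth.backends.ModelBackend',
#     ]
#
#     # anywhere in view code
#     from django.contrib.auth import authenticate
#     user = authenticate(email='user@example.com', password='secret')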
| 23.542857
| 80
| 0.525485
|