text stringlengths 4 1.02M | meta dict |
|---|---|
# Activate the km3pipe "johannes" matplotlib style when this module is imported.
from ..style import use

use("johannes")
| {
"content_hash": "265b325c826dd99909b410a210cf8123",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 23,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.7073170731707317,
"repo_name": "tamasgal/km3pipe",
"id": "b7ba7b207cefed7a3f1cf04019f74b46483bbd1a",
"size": "41",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "km3pipe/style/johannes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "981"
},
{
"name": "Makefile",
"bytes": "8994"
},
{
"name": "Python",
"bytes": "832178"
},
{
"name": "Shell",
"bytes": "1351"
}
],
"symlink_target": ""
} |
"""
Installs and configures MariaDB
"""
import uuid
import logging
from packstack.installer import validators
from packstack.installer import utils
from packstack.installer.utils import split_hosts
from packstack.modules.common import filtered_hosts
from packstack.modules.ospluginutils import (getManifestTemplate,
appendManifestFile)
#------------------ oVirt installer initialization ------------------
PLUGIN_NAME = "MariaDB"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
    """Register the MariaDB option group with the installer controller.

    Declares the host, admin-user and admin-password options. Each
    DEPRECATES entry maps an old CONFIG_MYSQL_* name onto its new
    CONFIG_MARIADB_* replacement so existing answer files keep working.
    """
    params = [
        {"CMD_OPTION": "mariadb-host",
         "USAGE": ("The IP address of the server on which to install MariaDB "
                   "or IP address of DB server to use if MariaDB "
                   "installation was not selected"),
         "PROMPT": "Enter the IP address of the MariaDB server",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_MARIADB_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False,
         "DEPRECATES": ['CONFIG_MYSQL_HOST']},

        {"CMD_OPTION": "mariadb-user",
         "USAGE": "Username for the MariaDB admin user",
         "PROMPT": "Enter the username for the MariaDB admin user",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "root",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_MARIADB_USER",
         "USE_DEFAULT": True,
         "NEED_CONFIRM": False,
         "CONDITION": False,
         "DEPRECATES": ['CONFIG_MYSQL_USER']},

        # A random 16-hex-char password is generated when initConfig runs,
        # used unless the operator supplies one.
        {"CMD_OPTION": "mariadb-pw",
         "USAGE": "Password for the MariaDB admin user",
         "PROMPT": "Enter the password for the MariaDB admin user",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": uuid.uuid4().hex[:16],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_MARIADB_PW",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False,
         "DEPRECATES": ['CONFIG_MYSQL_PW']},
    ]

    group = {"GROUP_NAME": "MARIADB",
             "DESCRIPTION": "MariaDB Config parameters",
             # PRE_CONDITION always yields 'yes', so the group is always active.
             "PRE_CONDITION": lambda x: 'yes',
             "PRE_CONDITION_MATCH": "yes",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
def initSequences(controller):
    """Register the MariaDB installation sequence with the controller."""
    step = {'title': 'Adding MariaDB manifest entries',
            'functions': [create_manifest]}
    controller.addSequence("Installing MariaDB", [], [], [step])
#-------------------------- step functions --------------------------
def create_manifest(config, messages):
    """Assemble the per-host MariaDB puppet manifest and queue it.

    Renders the install/noinstall base template, appends one DB template
    per enabled OpenStack service, then one firewall snippet per allowed
    host, and finally registers the manifest in the 'pre' stage.
    """
    installing = config['CONFIG_MARIADB_INSTALL'] == 'y'
    suffix = 'install' if installing else 'noinstall'
    if installing:
        db_host = config['CONFIG_MARIADB_HOST']
    else:
        db_host = config['CONFIG_CONTROLLER_HOST']

    manifestfile = "%s_mariadb.pp" % db_host
    manifestdata = [getManifestTemplate('mariadb_%s.pp' % suffix)]

    def append_for(module, suffix):
        # Modules have to be appended to the existing mysql.pp
        # otherwise pp will fail for some of them saying that
        # Mysql::Config definition is missing.
        manifestdata.append(
            getManifestTemplate("mariadb_%s_%s.pp" % (module, suffix)))

    append_for("keystone", suffix)
    for mod in ['nova', 'cinder', 'glance', 'neutron', 'heat']:
        if config['CONFIG_%s_INSTALL' % mod.upper()] == 'y':
            append_for(mod, suffix)

    # One firewall.pp rendering per host that may reach the DB; the
    # FIREWALL_* keys are the template's inputs and are rewritten per host.
    config['FIREWALL_SERVICE_NAME'] = "mariadb"
    config['FIREWALL_PORTS'] = "'3306'"
    config['FIREWALL_CHAIN'] = "INPUT"
    config['FIREWALL_PROTOCOL'] = 'tcp'
    for allowed_host in filtered_hosts(config, exclude=False, dbhost=True):
        config['FIREWALL_ALLOWED'] = "'%s'" % allowed_host
        config['FIREWALL_SERVICE_ID'] = "mariadb_%s" % allowed_host
        manifestdata.append(getManifestTemplate("firewall.pp"))

    appendManifestFile(manifestfile, "\n".join(manifestdata), 'pre')
| {
"content_hash": "1813b175edcd6b5058efb66a82d935c2",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 35.459016393442624,
"alnum_prop": 0.5887656033287101,
"repo_name": "Bitp-Spbsu/openstack-packstack",
"id": "eca7f24a229d98066956c3bd1d7d45d5a15615b7",
"size": "4351",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packstack/plugins/mariadb_003.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "187904"
},
{
"name": "Diff",
"bytes": "21134"
},
{
"name": "HTML",
"bytes": "164"
},
{
"name": "Pascal",
"bytes": "923"
},
{
"name": "Puppet",
"bytes": "102034"
},
{
"name": "Python",
"bytes": "393275"
},
{
"name": "Ruby",
"bytes": "16469"
},
{
"name": "Shell",
"bytes": "3016"
}
],
"symlink_target": ""
} |
import os
import logging
import pkgutil
import shutil
import sys
import xmlrpclib
import time
from lib.core.packages import choose_package
from lib.common.exceptions import CuckooError, CuckooPackageError
from lib.common.abstracts import Package, Auxiliary
from lib.common.constants import PATHS
from lib.core.config import Config
from lib.core.startup import init_logging
from modules import auxiliary
# Suppress DEBUG-level records globally; use the root logger everywhere else.
logging.disable(level=logging.DEBUG)
log = logging.getLogger()
class Analyzer(object):
    """Drives a single analysis run inside the Android guest.

    Loads analysis.conf, selects and starts an analysis package plus the
    auxiliary modules, polls the package until it finishes or the timeout
    hits, then tears everything down.
    """

    def __init__(self):
        # Parsed analysis.conf; populated in prepare().
        self.config = None
        # File path (for category "file") or URL being analyzed.
        self.target = None

    def complete(self):
        """End analysis."""
        log.info("Analysis completed")

    def get_options(self):
        """Get analysis options.
        @return: options dict.
        """
        # The analysis package can be provided with some options in the
        # following format:
        # option1=value1,option2=value2,option3=value3
        #
        # Here we parse such options and provide a dictionary that will be made
        # accessible to the analysis package.
        options = {}
        if self.config.options:
            try:
                # Split the options by comma.
                fields = self.config.options.strip().split(",")
            except ValueError as e:
                log.warning("Failed parsing the options: %s", e)
            else:
                for field in fields:
                    # Split the name and the value of the option.
                    try:
                        key, value = field.strip().split("=")
                    except ValueError as e:
                        log.warning("Failed parsing option (%s): %s", field, e)
                    else:
                        # If the parsing went good, we add the option to the
                        # dictionary.
                        options[key.strip()] = value.strip()
        return options

    def prepare(self):
        """Initialize logging and load the analysis configuration."""
        # Initialize logging.
        init_logging()
        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")
        # We update the target according to its category. If it's a file, then
        # we store the path.
        if self.config.category == "file":
            self.target = os.path.join("/data/local/tmp", str(self.config.file_name))
            # Hook definitions are placed next to the sample so the on-device
            # instrumentation can find them.
            shutil.copyfile("config/hooks.json", "/data/local/tmp/hooks.json")
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def run(self):
        """Execute the full analysis; returns True once it has completed."""
        self.prepare()
        log.info("Starting analyzer from: {0}".format(os.getcwd()))
        log.info("Storing results at: {0}".format(PATHS["root"]))
        log.info("Target is: {0}".format(self.target))

        # If no analysis package was specified at submission, we try to select
        # one automatically.
        if not self.config.package:
            log.info("No analysis package specified, trying to detect it automagically")
            # If the analysis target is a file, we choose the package according
            # to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type, self.config.file_name)
            # If it's an URL, we'll just use the default Internet Explorer
            # package.
            else:
                package = "default_browser"
            # If we weren't able to automatically determine the proper package,
            # we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file type: {0}".format(self.config.file_type))
            log.info("Automatically selected analysis package \"%s\"", package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package
        # Try to import the analysis package.
        # NOTE: the level=-1 argument to __import__ is Python 2 only.
        try:
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\", does not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()
        # Enumerate the abstract's subclasses; the just-imported package is
        # expected to be the only subclass registered.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class (package={0}): {1}".format(package_name, e))
        # Initialize the analysis package.
        pack = package_class(self.get_options())

        # Initialize Auxiliary modules
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
            if ispkg:
                continue
            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled = []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            # NOTE(review): if module() itself raises, `aux` is unbound (or
            # stale from the previous iteration) in the handlers below —
            # confirm intended behavior.
            try:
                aux = module()
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            aux.__class__.__name__)
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            aux.__class__.__name__, e)
                continue
            finally:
                # NOTE(review): `finally` also runs when the handlers above
                # `continue`, so failed modules are logged as "Started" too.
                log.info("Started auxiliary module %s",
                         aux.__class__.__name__)
            aux_enabled.append(aux)

        # Start analysis package. If for any reason, the execution of the
        # analysis package fails, we have to abort the analysis.
        try:
            pack.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function encountered "
                              "an unhandled exception: "
                              "{1}".format(package_name, e))

        # Poll the package roughly once per second until the configured
        # timeout (in seconds) elapses or the package asks to stop.
        time_counter = 0
        while True:
            time_counter += 1
            if time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis")
                break
            try:
                # The analysis packages are provided with a function that
                # is executed at every loop's iteration. If such function
                # returns False, it means that it requested the analysis
                # to be terminate.
                if not pack.check():
                    log.info("The analysis package requested the "
                             "termination of the analysis...")
                    break
            # If the check() function of the package raised some exception
            # we don't care, we can still proceed with the analysis but we
            # throw a warning.
            except Exception as e:
                log.warning("The package \"%s\" check function raised "
                            "an exception: %s", package_name, e)
            finally:
                # Zzz.
                time.sleep(1)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            pack.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        for aux in aux_enabled:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()
        return True
if __name__ == "__main__":
success = False
error = ""
try:
# Initialize the main analyzer class.
analyzer = Analyzer()
# Run it and wait for the response.
success = analyzer.run()
# This is not likely to happen.
except KeyboardInterrupt:
error = "Keyboard Interrupt"
# If the analysis process encountered a critical error, it will raise a
# CuckooError exception, which will force the termination of the analysis
# weill notify the agent of the failure. Also catched unexpected
# exceptions.
except Exception as e:
# Store the error.
error = str(e)
# Just to be paranoid.
if len(log.handlers) > 0:
log.critical(error)
else:
sys.stderr.write("{0}\n".format(e))
# Once the analysis is completed or terminated for any reason, we report
# back to the agent, notifying that it can report back to the host.
finally:
# Establish connection with the agent XMLRPC server.
server = xmlrpclib.Server("http://127.0.0.1:8000")
server.complete(success, error, PATHS["root"])
| {
"content_hash": "692a13ceac066708cef4e736abe9ec3c",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 112,
"avg_line_length": 38.59375,
"alnum_prop": 0.5577935222672065,
"repo_name": "mburakergenc/Malware-Detection-using-Machine-Learning",
"id": "b31b54ec9c1fbc1a9f1da21d4c11b05eabb957fd",
"size": "10117",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cuckoo/analyzer/android/analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "120655"
},
{
"name": "CSS",
"bytes": "57002"
},
{
"name": "HTML",
"bytes": "14690306"
},
{
"name": "JavaScript",
"bytes": "134909"
},
{
"name": "Jupyter Notebook",
"bytes": "167644"
},
{
"name": "Makefile",
"bytes": "4676"
},
{
"name": "Mako",
"bytes": "1078"
},
{
"name": "Python",
"bytes": "1576528"
},
{
"name": "Shell",
"bytes": "34027"
},
{
"name": "Visual Basic",
"bytes": "1101"
}
],
"symlink_target": ""
} |
import os
import csv
import shutil
import pandas as pd
import tensorflow as tf
import numpy as np
from keras.applications import VGG16
from keras.layers import Dense, Flatten, Dropout, ELU
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
import math
import cv2
import pickle
from scipy import misc, random
from sklearn.model_selection import train_test_split
from training import CommaAI, SimpleConvnet, Nvidia, Udacity, Basic, BasicELU
from zimpy.camera_preprocessor import preprocess_image, predict_images
from zimpy.generators.csv_image_provider import batch_generator, load_image
from zimpy.serializers.trained_data_serializer import TrainedDataSerializer
flags = tf.app.flags
FLAGS = flags.FLAGS

# command line flags
flags.DEFINE_string('network_arch', 'commaai', "The network architecture to train on.")
flags.DEFINE_integer('epochs', 1, "The number of epochs.")
flags.DEFINE_integer('batch_size', 32, "The batch size.")
flags.DEFINE_integer('samples_per_epoch', None, "The number of samples per epoch during training.")
flags.DEFINE_integer('algo_mode', 5, "The algorithm to train against.")
flags.DEFINE_boolean('repickle', True, "Whether to regenerage the train.p file of training camera images.")
flags.DEFINE_boolean('use_weights', False, "Whether to use prior trained weights.")
flags.DEFINE_float('dropout_prob', 0.5, "Percentage of neurons to misfire during training.")
flags.DEFINE_float('lr', 0.0001, "Optimizer learning rate.")

# Module-level training state; main() rebinds these via `global`.
train_samples_seen = []
X_train, y_train, X_val, y_val = None, None, None, None
img_rows, img_cols = None, None
def move_training_images(classifier):
    """Archive the current driving log and image directory.

    Renames ./driving_log.csv and ./IMG by appending the classifier's
    uuid, so the next recording session starts from a clean slate.
    """
    suffix = '_' + classifier.uuid
    for path in ('./driving_log.csv', './IMG'):
        shutil.move(path, path + suffix)
def load_track_csv():
    """Load training samples from ./driving_log.csv.

    Returns (X_train, y_train, X_val, y_val): each X entry is a
    'left:center:right:zscore' string and each y the raw steering angle.
    1% of the data is held out for validation.
    """
    samples, angles = [], []

    # Only look at latest driving_log.csv
    log_path = './driving_log.csv'
    if os.path.isfile(log_path):
        df = pd.read_csv(log_path)

        # compute z score for each steering angle
        zcol = 'steering_zscore'
        df[zcol] = (df['steering'] - df['steering'].mean()) / df['steering'].std(ddof=0)
        print(list(df.columns.values))

        for _, row in df.iterrows():
            center = row['center'].strip()
            left = row['left'].strip()
            right = row['right'].strip()
            angle = float(row['steering'])
            zscore = float(row[zcol])

            # Skip rows whose camera frames are missing on disk.
            if not (os.path.isfile(center) and os.path.isfile(left) and os.path.isfile(right)):
                continue

            # casts absolute path to relative to remain env agnostic
            left, center, right = ['IMG/' + os.path.split(p)[1] for p in (left, center, right)]
            # single string in memory
            samples.append('{}:{}:{}:{}'.format(left, center, right, zscore))
            angles.append(angle)

    # Split some of the training data into a validation dataset
    X_train, X_val, y_train, y_val = train_test_split(
        samples,
        angles,
        test_size=0.01,
        random_state=0)
    return np.array(X_train), np.array(y_train), np.array(X_val), np.array(y_val)
def main(_):
    """Train the architecture selected by FLAGS.algo_mode.

    Modes: 5 -> Nvidia (66x200 input, with validation generator),
    6 -> Basic, 7 -> BasicELU (both 40x80, no validation data).
    """
    global X_train, y_train, X_val, y_val
    # fits the model on batches with real-time data augmentation:
    train_mode = FLAGS.algo_mode
    if train_mode == 5:
        # Nvidia architecture input size.
        output_shape = (66, 200, 3)
        # output_shape = (160, 320, 3)
        X_train, y_train, X_val, y_val = load_track_csv()
        # train model
        clf = Nvidia()
        model = clf.get_model(input_shape=output_shape, output_shape=output_shape, use_weights=FLAGS.use_weights,
                              dropout_prob=FLAGS.dropout_prob)
        samples_per_epoch = len(X_train)
        if FLAGS.samples_per_epoch is not None:
            print('overriding samples per epoch from {} to {}'.format(samples_per_epoch, FLAGS.samples_per_epoch))
            samples_per_epoch = FLAGS.samples_per_epoch
        history = model.fit_generator(
            batch_generator(X_train, y_train, 'train set', FLAGS.epochs, batch_size=FLAGS.batch_size,
                            output_shape=output_shape),
            nb_epoch=FLAGS.epochs,
            samples_per_epoch=samples_per_epoch,
            nb_val_samples=len(X_val),
            classifier=clf,
            validation_data=batch_generator(X_val, y_val, 'validation set', num_epochs=FLAGS.epochs,
                                            batch_size=FLAGS.batch_size, output_shape=output_shape),
            verbose=2)
    elif train_mode == 6:
        output_shape = (40, 80, 3)
        X_train, y_train, X_val, y_val = load_track_csv()
        print('population: ', len(X_train))
        # train model
        clf = Basic()
        model = clf.get_model(input_shape=output_shape, output_shape=output_shape, use_weights=FLAGS.use_weights)
        samples_per_epoch = len(X_train)
        if FLAGS.samples_per_epoch is not None:
            print('overriding samples per epoch from {} to {}'.format(samples_per_epoch, FLAGS.samples_per_epoch))
            samples_per_epoch = FLAGS.samples_per_epoch
        history = model.fit_generator(
            batch_generator(X=X_train, Y=y_train, label='train set', num_epochs=FLAGS.epochs, flip_images=True,
                            batch_size=FLAGS.batch_size,
                            output_shape=output_shape,
                            classifier=clf),
            nb_epoch=FLAGS.epochs,
            samples_per_epoch=samples_per_epoch,
            validation_data=None,
            verbose=2)
    elif train_mode == 7:
        output_shape = (40, 80, 3)
        X_train, y_train, X_val, y_val = load_track_csv()
        print('population: ', len(X_train))
        # train model
        clf = BasicELU()
        model = clf.get_model(input_shape=output_shape, output_shape=output_shape, use_weights=FLAGS.use_weights,
                              dropout_prob=FLAGS.dropout_prob)
        samples_per_epoch = len(X_train)
        if FLAGS.samples_per_epoch is not None:
            print('overriding samples per epoch from {} to {}'.format(samples_per_epoch, FLAGS.samples_per_epoch))
            samples_per_epoch = FLAGS.samples_per_epoch
        history = model.fit_generator(
            batch_generator(X=X_train, Y=y_train, label='train set', num_epochs=FLAGS.epochs, flip_images=True,
                            batch_size=FLAGS.batch_size,
                            output_shape=output_shape),
            nb_epoch=FLAGS.epochs,
            samples_per_epoch=samples_per_epoch,
            validation_data=None,
            verbose=2)
    # NOTE(review): placement reconstructed — at function level these two
    # lines raise NameError for algo_mode values other than 5/6/7; confirm
    # against the original repository layout.
    print(history.history)
    clf.save()
    # move_training_images(clf)
# parses flags and calls the `main` function above (tf.app.run handles
# flag parsing before dispatching to main).
if __name__ == '__main__':
    tf.app.run()
| {
"content_hash": "a537a39d81e2aca52a743bb7e1f1f44f",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 114,
"avg_line_length": 37.86021505376344,
"alnum_prop": 0.6137460948594149,
"repo_name": "matthewzimmer/carnd-behavioral-cloning",
"id": "21bf517a93f5f3e0b9e9949b8c5d3f0ac1ab09de",
"size": "7042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "training_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "953158"
},
{
"name": "Python",
"bytes": "131210"
},
{
"name": "Ruby",
"bytes": "3232"
}
],
"symlink_target": ""
} |
"""Copies & compiles the data required to start the reinforcement loop."""
import sys
sys.path.insert(0, '.') # nopep8
import asyncio
import glob
import os
from absl import app, flags
from ml_perf import utils
# Board size selects which GCS subdirectory to pull from.
N = os.environ.get('BOARD_SIZE', '19')

flags.DEFINE_string('src_dir', 'gs://minigo-pub/ml_perf/',
                    'Directory on GCS to copy source data from. Files will be '
                    'copied from subdirectories of src_dir corresponding to '
                    'the BOARD_SIZE environment variable (defaults to 19).')
flags.DEFINE_string('dst_dir', 'ml_perf/',
                    'Local directory to write to. Files will be written to '
                    'subdirectories of dst_dir corresponding to the BOARD_SIZE '
                    'environment variable (defaults to 19).')
flags.DEFINE_boolean('use_tpu', False,
                     'Set to true to generate models that can run on Cloud TPU')

FLAGS = flags.FLAGS
def freeze_graph(path, batch=0):
    """Invoke freeze_graph.py on the model at `path` and wait for it."""
    args = ['python', 'freeze_graph.py',
            '--model_path={}'.format(path),
            '--trt_batch={}'.format(batch),
            '--use_tpu={}'.format(FLAGS.use_tpu)]
    utils.wait(utils.checked_run(None, *args))
def main(unused_argv):
    """Fetch checkpoints and target models from GCS, then freeze them."""
    try:
        # Pull the required training checkpoints and models from GCS.
        for subdir in ('checkpoint', 'target'):
            remote = os.path.join(FLAGS.src_dir, subdir, N)
            local = os.path.join(FLAGS.dst_dir, subdir)
            utils.ensure_dir_exists(local)
            utils.wait(utils.checked_run(
                None, 'gsutil', '-m', 'cp', '-r', remote, local))

        # Freeze the target model.
        freeze_graph(os.path.join(FLAGS.dst_dir, 'target', N, 'target'), 2048)

        # Freeze every training checkpoint found in the work dir.
        index_glob = os.path.join(
            FLAGS.dst_dir, 'checkpoint', N, 'work_dir', '*.index')
        for index_path in glob.glob(index_glob):
            freeze_graph(os.path.splitext(index_path)[0], 2048)
    finally:
        asyncio.get_event_loop().close()
# absl.app parses the flags defined above before calling main().
if __name__ == '__main__':
    app.run(main)
| {
"content_hash": "eab05253ee75272c5c479a176818154b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 108,
"avg_line_length": 32.728813559322035,
"alnum_prop": 0.6229932677369239,
"repo_name": "mlperf/training_results_v0.6",
"id": "3f491777923a7f52ff34c80d933c52df0f892997",
"size": "2507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "NVIDIA/benchmarks/minigo/implementations/tensorflow/minigo/ml_perf/get_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
__author__ = 'Brian Quinlan (brian@sweetapp.com)'

import collections
import logging
import threading
import time

# return_when sentinels for wait(); _AS_COMPLETED is internal to
# as_completed().
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'

# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'

_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]

# Human-readable state names used by Future.__repr__; both cancelled
# states render as "cancelled".
_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}

# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
    """Base class for all future-related exceptions."""
class CancelledError(Error):
    """The Future was cancelled."""
class TimeoutError(Error):
    """The operation exceeded the given deadline.

    Note: intentionally shadows the builtin TimeoutError within this module.
    """
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
    """Waiter used by as_completed().

    Every completion appends under self.lock and sets the event, so the
    iterator in as_completed() can atomically swap out finished_futures.
    """

    def __init__(self):
        super().__init__()
        self.lock = threading.Lock()

    def add_result(self, future):
        with self.lock:
            super().add_result(future)
            self.event.set()

    def add_exception(self, future):
        with self.lock:
            super().add_exception(future)
            self.event.set()

    def add_cancelled(self, future):
        with self.lock:
            super().add_cancelled(future)
            self.event.set()
class _FirstCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_COMPLETED).

    Any single completion — result, exception or cancellation — wakes
    the waiter immediately.
    """

    def _notify(self, adder, future):
        adder(future)
        self.event.set()

    def add_result(self, future):
        self._notify(super().add_result, future)

    def add_exception(self, future):
        self._notify(super().add_exception, future)

    def add_cancelled(self, future):
        self._notify(super().add_cancelled, future)
class _AllCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).

    Fires once the pending-call count reaches zero, or — when
    stop_on_exception is true — as soon as any future raises.
    """

    def __init__(self, num_pending_calls, stop_on_exception):
        self.num_pending_calls = num_pending_calls
        self.stop_on_exception = stop_on_exception
        self.lock = threading.Lock()
        super().__init__()

    def _count_down(self):
        # Decrement under the lock; the last completion sets the event.
        with self.lock:
            self.num_pending_calls -= 1
            if not self.num_pending_calls:
                self.event.set()

    def add_result(self, future):
        super().add_result(future)
        self._count_down()

    def add_exception(self, future):
        super().add_exception(future)
        if self.stop_on_exception:
            self.event.set()
        else:
            self._count_down()

    def add_cancelled(self, future):
        super().add_cancelled(future)
        self._count_down()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
    """Create the waiter matching `return_when` and register it on every
    future in `fs`. Raises ValueError for an unknown condition."""
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    elif return_when in (FIRST_EXCEPTION, ALL_COMPLETED):
        # Count futures that can still transition; the waiter fires when
        # they have all completed (or, for FIRST_EXCEPTION, on the first
        # raised exception).
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
        waiter = _AllCompletedWaiter(
            pending_count,
            stop_on_exception=(return_when == FIRST_EXCEPTION))
    else:
        raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)
    return waiter
def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.

    Args:
        fs: The sequence of Futures (possibly created by different Executors)
            to iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.

    Returns:
        An iterator that yields the given Futures as they complete (finished
        or cancelled). If any given Futures are duplicated, they will be
        returned once.

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.time()

    # De-duplicate so each future is yielded at most once.
    fs = set(fs)
    # Snapshot the already-done futures and install the waiter while
    # holding every future's condition, so no completion is missed.
    with _AcquireFutures(fs):
        finished = set(
            f for f in fs
            if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    try:
        yield from finished

        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.time()
                if wait_timeout < 0:
                    raise TimeoutError(
                        '%d (of %d) futures unfinished' % (
                            len(pending), len(fs)))

            # Block until at least one more future completes (or timeout).
            waiter.event.wait(wait_timeout)

            # Swap the waiter's list out under its lock so workers can keep
            # appending while we yield the batch.
            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()

            for future in finished:
                yield future
                pending.remove(future)
    finally:
        # Always deregister the waiter, even if the caller abandons the
        # generator or a timeout fires mid-iteration.
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)
# Return type of wait(): the futures that were done when the wait
# completed, and those that were not.
DoneAndNotDoneFutures = collections.namedtuple(
    'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different Executors)
            to wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED -   Return when all futures finish or are cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    # Fast paths: while holding every future's condition, check whether the
    # requested condition is already satisfied without installing a waiter.
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    # Block (outside the conditions) until the waiter fires or the timeout
    # elapses, then deregister the waiter from every future.
    waiter.event.wait(timeout)
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
    """Represents the result of an asynchronous computation."""
    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        self._condition = threading.Condition()  # guards every field below
        self._state = PENDING
        self._result = None
        self._exception = None
        self._waiters = []  # _Waiter objects installed by wait()/as_completed()
        self._done_callbacks = []
    def _invoke_callbacks(self):
        # Called without holding _condition; a failing callback is logged and
        # never propagated to the code that completed the future.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)
    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<Future at %s state=%s raised %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<Future at %s state=%s returned %s>' % (
                        hex(id(self)),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<Future at %s state=%s>' % (
                hex(id(self)),
                _STATE_TO_DESCRIPTION_MAP[self._state])
    def cancel(self):
        """Cancel the future if possible.
        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                # Already cancelled; report success idempotently.
                return True
            self._state = CANCELLED
            self._condition.notify_all()
        # Callbacks run outside the lock to avoid deadlocks if they
        # touch this future again.
        self._invoke_callbacks()
        return True
    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING
    def done(self):
        """Return True of the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
    def __get_result(self):
        # Private helper: both call sites hold _condition and have already
        # verified that the state is FINISHED.
        if self._exception:
            raise self._exception
        else:
            return self._result
    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.
        Args:
            fn: A callable that will be called with this future as its only
                argument when the future completes or is cancelled. The callable
                will always be called by a thread in the same process in which
                it was added. If the future has already completed or been
                cancelled then the callable will be called immediately. These
                callables are called in the order that they were added.
        """
        with self._condition:
            if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
                self._done_callbacks.append(fn)
                return
        # Future already done: invoke immediately, outside the lock.
        fn(self)
    def result(self, timeout=None):
        """Return the result of the call that the future represents.
        Args:
            timeout: The number of seconds to wait for the result if the future
                isn't done. If None, then there is no limit on the wait time.
        Returns:
            The result of the call that the future represents.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
            Exception: If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            # Wait once; if the state still isn't terminal afterwards,
            # treat it as a timeout.
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()
    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.
        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.
        Returns:
            The exception raised by the call that the future represents or None
            if the call completed without raising.
        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given
                timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            # Same single-wait pattern as result().
            self._condition.wait(timeout)
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            else:
                raise TimeoutError()
    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.
        Should only be used by Executor implementations and unit tests.
        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (though calls
        to as_completed() or wait()) are notified and False is returned.
        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.
        This method should be called by Executor implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.
        Returns:
            False if the Future was cancelled, True otherwise.
        Raises:
            RuntimeError: if this method was already called or if set_result()
                or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')
    def set_result(self, result):
        """Sets the return value of work associated with the future.
        Should only be used by Executor implementations and unit tests.
        """
        # NOTE(review): no guard against completing the same future twice or
        # completing a cancelled one -- the executor is trusted to call this
        # exactly once per future.
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()
    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.
        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()
class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""
    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.
        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.
        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()
    def map(self, fn, *iterables, timeout=None):
        """Returns an iterator equivalent to map(fn, iter).
        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.
        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if timeout is not None:
            # Use a monotonic clock for the deadline: time.time() is subject
            # to system clock adjustments, which could silently stretch or
            # shrink the effective timeout.
            end_time = timeout + time.monotonic()
        # Submit all calls up front so they can run concurrently.
        fs = [self.submit(fn, *args) for args in zip(*iterables)]
        # Yield must be hidden in closure so that the futures are submitted
        # before the first iterator value is required.
        def result_iterator():
            try:
                for future in fs:
                    if timeout is None:
                        yield future.result()
                    else:
                        yield future.result(end_time - time.monotonic())
            finally:
                # Cancel whatever the caller did not consume (the iterator
                # was abandoned, or a result() call raised/timed out).
                for future in fs:
                    future.cancel()
        return result_iterator()
    def shutdown(self, wait=True):
        """Clean-up the resources associated with the Executor.
        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.
        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                executor have been reclaimed.
        """
        pass
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always wait for outstanding work when leaving the context; never
        # suppress exceptions raised inside the with-block.
        self.shutdown(wait=True)
        return False
| {
"content_hash": "6eea9d46a64e321d43fc5c12382b8444",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 80,
"avg_line_length": 34.15384615384615,
"alnum_prop": 0.5867628992628993,
"repo_name": "IronLanguages/ironpython3",
"id": "b2598339bbfa7668595112e52fe18a775719cbb3",
"size": "19639",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Src/StdLib/Lib/concurrent/futures/_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6855"
},
{
"name": "C",
"bytes": "239473"
},
{
"name": "C#",
"bytes": "12619304"
},
{
"name": "C++",
"bytes": "28403"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "13157428"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "84504"
},
{
"name": "Python",
"bytes": "29490541"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "4872"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from django.conf import settings
from django.test import SimpleTestCase
from django.utils._os import upath
from django.utils.translation import activate, get_language
# Locate the directory two levels above this file, then walk two more levels
# up to the test-suite root; the extra 'other' locale tree lives under it.
here = os.path.split(os.path.split(os.path.abspath(upath(__file__)))[0])[0]
pdir = os.path.dirname(os.path.dirname(os.path.abspath(here)))
extended_locale_paths = settings.LOCALE_PATHS + [
    os.path.join(pdir, 'i18n', 'other', 'locale'),
]
class MultipleLocaleActivationTestCase(SimpleTestCase):
    """
    Tests for template rendering when multiple locales are activated during the
    lifetime of the same process.
    """
    def setUp(self):
        # Remember the locale that was active before the test started.
        self._old_language = get_language()
    def tearDown(self):
        # Restore the pre-test locale so tests do not leak activation state.
        activate(self._old_language)
| {
"content_hash": "557b146d47f5f584824e2a18ec5c0bd1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 28.814814814814813,
"alnum_prop": 0.7197943444730077,
"repo_name": "cloudera/hue",
"id": "b8c083d5368445bd70eb1e608e0523222b3391e9",
"size": "778",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Django-1.11.29/tests/template_tests/syntax_tests/i18n/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
# Package metadata collected into one mapping, then handed to setup().
_SETUP_KWARGS = {
    'name': 'bisectcloud',
    'version': '1.0',
    'description': 'Django application.',
    'long_description': '',
    'author': '',
    'author_email': '',
    'license': '',
    'url': '',
    'include_package_data': True,
    'classifiers': [],
    'packages': find_packages(exclude=['tests']),
    'install_requires': [],
}
setup(**_SETUP_KWARGS)
| {
"content_hash": "933b4e9fcaabf19164a47402498557f0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 48,
"avg_line_length": 22.176470588235293,
"alnum_prop": 0.5941644562334217,
"repo_name": "AutomatedTester/bisectcloud",
"id": "46f519374a96e683e30a5806257fa1d95dc6d716",
"size": "377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4587"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "40595"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
# Human-readable descriptions of the four UUID variant families, returned
# by UUID.variant.
RESERVED_NCS = 'reserved for NCS compatibility'
RFC_4122 = 'specified in RFC 4122'
RESERVED_MICROSOFT = 'reserved for Microsoft compatibility'
RESERVED_FUTURE = 'reserved for future definition'
class UUID(object):
    """Instances of the UUID class represent UUIDs as specified in RFC 4122.
    UUID objects are immutable, hashable, and usable as dictionary keys.
    Converting a UUID to a string with str() yields something in the form
    '12345678-1234-1234-1234-123456789abc'.  The UUID constructor accepts
    five possible forms: a similar string of hexadecimal digits, or a tuple
    of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
    48-bit values respectively) as an argument named 'fields', or a string
    of 16 bytes (with all the integer fields in big-endian order) as an
    argument named 'bytes', or a string of 16 bytes (with the first three
    fields in little-endian order) as an argument named 'bytes_le', or a
    single 128-bit integer as an argument named 'int'.
    UUIDs have these read-only attributes:
        bytes   the UUID as a 16-byte string (containing the six
                integer fields in big-endian byte order)
        bytes_le the UUID as a 16-byte string (with time_low, time_mid,
                and time_hi_version in little-endian byte order)
        fields  a tuple of the six integer fields of the UUID,
                which are also available as six individual attributes
                and two derived attributes:
            time_low                the first 32 bits of the UUID
            time_mid                the next 16 bits of the UUID
            time_hi_version         the next 16 bits of the UUID
            clock_seq_hi_variant    the next 8 bits of the UUID
            clock_seq_low           the next 8 bits of the UUID
            node                    the last 48 bits of the UUID
            time                    the 60-bit timestamp
            clock_seq               the 14-bit sequence number
        hex     the UUID as a 32-character hexadecimal string
        int     the UUID as a 128-bit integer
        urn     the UUID as a URN as specified in RFC 4122
        variant the UUID variant (one of the constants RESERVED_NCS,
                RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
        version the UUID version number (1 through 5, meaningful only
                when the variant is RFC_4122)
    """
    def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
                 int=None, version=None):
        r"""Create a UUID from either a string of 32 hexadecimal digits,
        a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
        in little-endian order as the 'bytes_le' argument, a tuple of six
        integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
        8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
        the 'fields' argument, or a single 128-bit integer as the 'int'
        argument.  When a string of hex digits is given, curly braces,
        hyphens, and a URN prefix are all optional.  For example, these
        expressions all yield the same UUID:
        UUID('{12345678-1234-5678-1234-567812345678}')
        UUID('12345678123456781234567812345678')
        UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
        UUID(bytes='\x12\x34\x56\x78'*4)
        UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
                      '\x12\x34\x56\x78\x12\x34\x56\x78')
        UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
        UUID(int=0x12345678123456781234567812345678)
        Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
        be given.  The 'version' argument is optional; if given, the resulting
        UUID will have its variant and version set according to RFC 4122,
        overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
        """
        if [hex, bytes, bytes_le, fields, int].count(None) != 4:
            raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
        if hex is not None:
            # Strip optional URN prefix, braces and hyphens, then parse the
            # remaining 32 hex digits as one 128-bit integer.
            hex = hex.replace('urn:', '').replace('uuid:', '')
            hex = hex.strip('{}').replace('-', '')
            if len(hex) != 32:
                raise ValueError('badly formed hexadecimal UUID string')
            int = long(hex, 16)
        if bytes_le is not None:
            if len(bytes_le) != 16:
                raise ValueError('bytes_le is not a 16-char string')
            # Byte-swap time_low, time_mid and time_hi_version into
            # big-endian order, then fall through to the 'bytes' case.
            bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
                     bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
                     bytes_le[8:])
        if bytes is not None:
            if len(bytes) != 16:
                raise ValueError('bytes is not a 16-char string')
            # Each byte becomes two hex digits; parse the whole string at once.
            int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
        if fields is not None:
            if len(fields) != 6:
                raise ValueError('fields is not a 6-tuple')
            (time_low, time_mid, time_hi_version,
             clock_seq_hi_variant, clock_seq_low, node) = fields
            if not 0 <= time_low < 1<<32L:
                raise ValueError('field 1 out of range (need a 32-bit value)')
            if not 0 <= time_mid < 1<<16L:
                raise ValueError('field 2 out of range (need a 16-bit value)')
            if not 0 <= time_hi_version < 1<<16L:
                raise ValueError('field 3 out of range (need a 16-bit value)')
            if not 0 <= clock_seq_hi_variant < 1<<8L:
                raise ValueError('field 4 out of range (need an 8-bit value)')
            if not 0 <= clock_seq_low < 1<<8L:
                raise ValueError('field 5 out of range (need an 8-bit value)')
            if not 0 <= node < 1<<48L:
                raise ValueError('field 6 out of range (need a 48-bit value)')
            # Pack the six fields into the single 128-bit integer form.
            clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
            int = ((time_low << 96L) | (time_mid << 80L) |
                   (time_hi_version << 64L) | (clock_seq << 48L) | node)
        if int is not None:
            if not 0 <= int < 1<<128L:
                raise ValueError('int is out of range (need a 128-bit value)')
        if version is not None:
            if not 1 <= version <= 5:
                raise ValueError('illegal version number')
            # Set the variant to RFC 4122.
            int &= ~(0xc000 << 48L)
            int |= 0x8000 << 48L
            # Set the version number.
            int &= ~(0xf000 << 64L)
            int |= version << 76L
        # Assign through __dict__ to bypass the immutability guard
        # implemented in __setattr__.
        self.__dict__['int'] = int
    def __cmp__(self, other):
        # Python 2 three-way comparison: UUIDs order by their 128-bit value.
        if isinstance(other, UUID):
            return cmp(self.int, other.int)
        return NotImplemented
    def __hash__(self):
        return hash(self.int)
    def __int__(self):
        return self.int
    def __repr__(self):
        return 'UUID(%r)' % str(self)
    def __setattr__(self, name, value):
        # UUIDs are immutable; every attribute assignment is rejected.
        raise TypeError('UUID objects are immutable')
    def __str__(self):
        hex = '%032x' % self.int
        return '%s-%s-%s-%s-%s' % (
            hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
    # All attributes below are derived on demand from self.int, which is the
    # only piece of state a UUID instance carries.
    def get_bytes(self):
        bytes = ''
        for shift in range(0, 128, 8):
            bytes = chr((self.int >> shift) & 0xff) + bytes
        return bytes
    bytes = property(get_bytes)
    def get_bytes_le(self):
        # First three fields little-endian, remainder unchanged.
        bytes = self.bytes
        return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
                bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
    bytes_le = property(get_bytes_le)
    def get_fields(self):
        return (self.time_low, self.time_mid, self.time_hi_version,
                self.clock_seq_hi_variant, self.clock_seq_low, self.node)
    fields = property(get_fields)
    def get_time_low(self):
        return self.int >> 96L
    time_low = property(get_time_low)
    def get_time_mid(self):
        return (self.int >> 80L) & 0xffff
    time_mid = property(get_time_mid)
    def get_time_hi_version(self):
        return (self.int >> 64L) & 0xffff
    time_hi_version = property(get_time_hi_version)
    def get_clock_seq_hi_variant(self):
        return (self.int >> 56L) & 0xff
    clock_seq_hi_variant = property(get_clock_seq_hi_variant)
    def get_clock_seq_low(self):
        return (self.int >> 48L) & 0xff
    clock_seq_low = property(get_clock_seq_low)
    def get_time(self):
        # Reassemble the 60-bit timestamp from its three fields, masking off
        # the version bits stored in time_hi_version.
        return (((self.time_hi_version & 0x0fffL) << 48L) |
                (self.time_mid << 32L) | self.time_low)
    time = property(get_time)
    def get_clock_seq(self):
        # Reassemble the 14-bit sequence number, masking off the variant bits.
        return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
                self.clock_seq_low)
    clock_seq = property(get_clock_seq)
    def get_node(self):
        return self.int & 0xffffffffffff
    node = property(get_node)
    def get_hex(self):
        return '%032x' % self.int
    hex = property(get_hex)
    def get_urn(self):
        return 'urn:uuid:' + str(self)
    urn = property(get_urn)
    def get_variant(self):
        # The variant is encoded in the top bits of clock_seq_hi_variant:
        # 0xx -> NCS, 10x -> RFC 4122, 110 -> Microsoft, 111 -> future.
        if not self.int & (0x8000 << 48L):
            return RESERVED_NCS
        elif not self.int & (0x4000 << 48L):
            return RFC_4122
        elif not self.int & (0x2000 << 48L):
            return RESERVED_MICROSOFT
        else:
            return RESERVED_FUTURE
    variant = property(get_variant)
    def get_version(self):
        # The version bits are only meaningful for RFC 4122 UUIDs.
        if self.variant == RFC_4122:
            return int((self.int >> 76L) & 0xf)
        # (implicitly returns None for all other variants)
    version = property(get_version)
def _find_mac(command, args, hw_identifiers, get_index):
    """Run *command* with *args* and scan its output for a hardware address.

    hw_identifiers is a collection of lowercase keywords that mark the line
    containing the address; get_index maps the keyword's word index to the
    index of the word holding the address.  Returns the address as an int,
    or None if the command is unavailable or prints no matching line.
    """
    import os
    # Look for the tool in the usual administrative directories as well as
    # on the default PATH ('' means just the bare command name).
    for dir in ['', '/sbin/', '/usr/sbin']:
        executable = os.path.join(dir, command)
        if not os.path.exists(executable):
            continue
        try:
            # LC_ALL to get English output, 2>/dev/null to
            # prevent output on stderr
            cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
            pipe = os.popen(cmd)
        except IOError:
            continue
        for line in pipe:
            words = line.lower().split()
            for i in range(len(words)):
                if words[i] in hw_identifiers:
                    # Address words look like 'aa:bb:cc:dd:ee:ff'; strip the
                    # colons and parse as hex.
                    return int(words[get_index(i)].replace(':', ''), 16)
    return None
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig.

    Falls back to arp and lanscan; returns the address as an int, or None
    if every probe fails.
    """
    # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
    for args in ('', '-a', '-av'):
        mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
        if mac:
            return mac
    import socket
    ip_addr = socket.gethostbyname(socket.gethostname())
    # Try getting the MAC addr from arp based on our IP address (Solaris).
    mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
    if mac:
        return mac
    # This might work on HP-UX.
    mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
    if mac:
        return mac
    return None
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe.

    Returns the address as an int, or (implicitly) None when no physical
    address line is found.
    """
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        # Prefer the real system directory when ctypes can report it;
        # the hard-coded paths above are only fallbacks.
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        dirs.insert(0, buffer.value.decode('mbcs'))
    except:
        pass
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except IOError:
            continue
        for line in pipe:
            value = line.split(':')[-1].strip().lower()
            # Physical addresses are printed as six hyphen-separated hex pairs.
            if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
                return int(value.replace('-', ''), 16)
    # Implicitly returns None when no address was found.
def _netbios_getnode():
    """Get the hardware address on Windows using NetBIOS calls.
    See http://support.microsoft.com/kb/118623 for details."""
    import win32wnet, netbios
    # Enumerate the available LAN adapter numbers.
    ncb = netbios.NCB()
    ncb.Command = netbios.NCBENUM
    ncb.Buffer = adapters = netbios.LANA_ENUM()
    adapters._pack()
    if win32wnet.Netbios(ncb) != 0:
        # Enumeration failed; implicitly return None.
        return
    adapters._unpack()
    for i in range(adapters.length):
        # Reset the adapter, then query its status to read the MAC address.
        ncb.Reset()
        ncb.Command = netbios.NCBRESET
        ncb.Lana_num = ord(adapters.lana[i])
        if win32wnet.Netbios(ncb) != 0:
            continue
        ncb.Reset()
        ncb.Command = netbios.NCBASTAT
        ncb.Lana_num = ord(adapters.lana[i])
        ncb.Callname = '*'.ljust(16)
        ncb.Buffer = status = netbios.ADAPTER_STATUS()
        if win32wnet.Netbios(ncb) != 0:
            continue
        status._unpack()
        # Fold the six address bytes into a single 48-bit integer.
        bytes = map(ord, status.adapter_address)
        return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
                (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
# The broad except clauses below are deliberate: this whole section is
# best-effort probing, and any failure just leaves the fallbacks at None.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
    import ctypes, ctypes.util
    # The uuid_generate_* routines are provided by libuuid on at least
    # Linux and FreeBSD, and provided by libc on Mac OS X.
    for libname in ['uuid', 'c']:
        try:
            lib = ctypes.CDLL(ctypes.util.find_library(libname))
        except:
            continue
        if hasattr(lib, 'uuid_generate_random'):
            _uuid_generate_random = lib.uuid_generate_random
        if hasattr(lib, 'uuid_generate_time'):
            _uuid_generate_time = lib.uuid_generate_time
    # On Windows prior to 2000, UuidCreate gives a UUID containing the
    # hardware address.  On Windows 2000 and later, UuidCreate makes a
    # random UUID and UuidCreateSequential gives a UUID containing the
    # hardware address.  These routines are provided by the RPC runtime.
    # NOTE:  at least on Tim's WinXP Pro SP2 desktop box, while the last
    # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
    # to bear any relationship to the MAC address of any network device
    # on the box.
    try:
        lib = ctypes.windll.rpcrt4
    except:
        lib = None
    # getattr on a None lib simply yields None, keeping _UuidCreate unset.
    _UuidCreate = getattr(lib, 'UuidCreateSequential',
                          getattr(lib, 'UuidCreate', None))
except:
    pass
def _unixdll_getnode():
    """Get the hardware address on Unix using ctypes."""
    # Generate a time-based UUID via libuuid/libc and extract its node
    # field, which embeds the hardware address.
    _buffer = ctypes.create_string_buffer(16)
    _uuid_generate_time(_buffer)
    return UUID(bytes=_buffer.raw).node
def _windll_getnode():
    """Get the hardware address on Windows using ctypes."""
    _buffer = ctypes.create_string_buffer(16)
    # UuidCreateSequential/UuidCreate return 0 on success; on any other
    # status the function implicitly returns None.
    if _UuidCreate(_buffer) == 0:
        return UUID(bytes=_buffer.raw).node
def _random_getnode():
    """Get a random node ID, with eighth bit set as suggested by RFC 4122."""
    import random
    # Setting bit 0x010000000000 marks the value as not being a real,
    # globally-unique MAC address.
    return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None  # cached result of getnode(), so the slow probing runs only once
def getnode():
    """Get the hardware address as a 48-bit positive integer.
    The first time this runs, it may launch a separate program, which could
    be quite slow.  If all attempts to obtain the hardware address fail, we
    choose a random 48-bit number with its eighth bit set to 1 as recommended
    in RFC 4122.
    """
    global _node
    if _node is not None:
        return _node
    import sys
    if sys.platform == 'win32':
        getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
    else:
        getters = [_unixdll_getnode, _ifconfig_getnode]
    # _random_getnode is the final fallback, so the loop always ends with
    # some value cached in _node.
    for getter in getters + [_random_getnode]:
        try:
            _node = getter()
        except:
            # Probing is best-effort; any failure moves on to the next getter.
            continue
        if _node is not None:
            return _node
_last_timestamp = None  # last 100-ns timestamp used, to keep uuid1 values unique
def uuid1(node=None, clock_seq=None):
    """Generate a UUID from a host ID, sequence number, and the current time.
    If 'node' is not given, getnode() is used to obtain the hardware
    address.  If 'clock_seq' is given, it is used as the sequence number;
    otherwise a random 14-bit sequence number is chosen."""
    # When the system provides a version-1 UUID generator, use it (but don't
    # use UuidCreate here because its UUIDs don't conform to RFC 4122).
    # NOTE(review): ctypes is referenced unconditionally here; if the
    # module-level 'import ctypes' failed, this raises NameError -- confirm
    # whether any supported platform lacks ctypes.
    _buffer = ctypes.create_string_buffer(16)
    if _uuid_generate_time and node is clock_seq is None:
        _uuid_generate_time(_buffer)
        return UUID(bytes=_buffer.raw)
    global _last_timestamp
    import time
    nanoseconds = int(time.time() * 1e9)
    # 0x01b21dd213814000 is the number of 100-ns intervals between the
    # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
    timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
    # (Python 2: any int compares greater than None, so the very first call
    # skips this monotonicity bump.)
    if timestamp <= _last_timestamp:
        timestamp = _last_timestamp + 1
    _last_timestamp = timestamp
    if clock_seq is None:
        import random
        clock_seq = random.randrange(1<<14L) # instead of stable storage
    # Split the 60-bit timestamp and 14-bit clock sequence into the
    # RFC 4122 field layout.
    time_low = timestamp & 0xffffffffL
    time_mid = (timestamp >> 32L) & 0xffffL
    time_hi_version = (timestamp >> 48L) & 0x0fffL
    clock_seq_low = clock_seq & 0xffL
    clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
    if node is None:
        node = getnode()
    return UUID(fields=(time_low, time_mid, time_hi_version,
                        clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
    """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
    # Hash the namespace UUID's raw bytes followed by the name, then use
    # the 16-byte digest as the UUID, stamping version 3 onto it.
    from hashlib import md5
    return UUID(bytes=md5(namespace.bytes + name).digest()[:16], version=3)
def uuid4():
    """Generate a random UUID."""
    # When the system provides a version-4 UUID generator, use it.
    # NOTE(review): like uuid1(), this touches ctypes unconditionally and
    # would raise NameError if the module-level ctypes import failed.
    _buffer = ctypes.create_string_buffer(16)
    if _uuid_generate_random:
        _uuid_generate_random(_buffer)
        return UUID(bytes=_buffer.raw)
    # Otherwise, get randomness from urandom or the 'random' module.
    try:
        import os
        return UUID(bytes=os.urandom(16), version=4)
    except:
        # os.urandom may be unavailable on some platforms; fall back to the
        # (non-cryptographic) 'random' module.
        import random
        bytes = [chr(random.randrange(256)) for i in range(16)]
        return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
    """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
    # Hash the namespace UUID's raw bytes followed by the name, then keep
    # the first 16 of the 20 digest bytes, stamping version 5 onto them.
    from hashlib import sha1
    return UUID(bytes=sha1(namespace.bytes + name).digest()[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')    # domain names
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')    # URLs
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')    # ISO OIDs
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')   # X.500 DNs
| {
"content_hash": "9b659934a23139b10bc2e84b3e9d3887",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 79,
"avg_line_length": 37.35845588235294,
"alnum_prop": 0.6023716970919648,
"repo_name": "MalloyPower/parsing-python",
"id": "e1b2f4b881f8144dd80a65a0df4c60df1d28ba75",
"size": "20323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.6/Lib/uuid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
def _as_two_dim(x):
if x.ndim == 2:
return x
return x.reshape((len(x), -1))
@testing.parameterize(
    {'shape': (4, 3, 5)},
    {'shape': (4, 15)},
)
class TestBatchL2NormSquared(unittest.TestCase):
    """Forward/backward/double-backward checks for batch_l2_norm_squared."""

    def setUp(self):
        # Fresh random inputs and upstream gradients for every test case.
        def rand(shape):
            return np.random.uniform(-1, 1, shape).astype(np.float32)
        self.x = rand(self.shape)
        self.gy = rand(self.shape[0])
        self.ggx = rand(self.shape)

    def check_forward(self, x_data):
        out = functions.batch_l2_norm_squared(chainer.Variable(x_data))
        self.assertEqual(out.data.dtype, np.float32)
        actual = cuda.to_cpu(out.data)
        # Reference value: per-sample sum of squared elements.
        flat = _as_two_dim(self.x)
        expected = np.empty(len(self.x))
        for row, values in enumerate(flat):
            expected[row] = sum(v * v for v in values)
        testing.assert_allclose(expected, actual)

    def test_forward_cpu(self):
        self.check_forward(self.x)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x))

    def check_backward(self, x_data, y_grad):
        func = functions.batch_l2_norm_squared
        gradient_check.check_backward(func, x_data, y_grad, eps=1)

    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    def test_backward_gpu(self):
        x_gpu, gy_gpu = cuda.to_gpu(self.x), cuda.to_gpu(self.gy)
        self.check_backward(x_gpu, gy_gpu)

    def check_double_backward(self, x_data, y_grad, x_grad_grad):
        func = functions.batch_l2_norm_squared
        gradient_check.check_double_backward(
            func, x_data, y_grad, x_grad_grad, dtype=np.float64)

    def test_double_backward_cpu(self):
        self.check_double_backward(self.x, self.gy, self.ggx)

    @attr.gpu
    def test_double_backward_gpu(self):
        to_gpu = cuda.to_gpu
        self.check_double_backward(
            to_gpu(self.x), to_gpu(self.gy), to_gpu(self.ggx))
class TestBatchL2NormSquaredTypeError(unittest.TestCase):
    """The function must reject inputs that have no non-batch axis."""

    def test_invalid_shape(self):
        bad = chainer.Variable(np.zeros((4,), dtype=np.float32))
        with self.assertRaises(type_check.InvalidType):
            chainer.functions.batch_l2_norm_squared(bad)
# Standard Chainer test-module entry point: runs the tests in this file.
testing.run_module(__name__, __file__)
| {
"content_hash": "e514c1c324de4a0eddd18ea8d4581a4a",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 28.348314606741575,
"alnum_prop": 0.6341656757827983,
"repo_name": "anaruse/chainer",
"id": "a4bcded83bf88110d6bccdd341fe67a1d6077abb",
"size": "2523",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/math_tests/test_batch_l2_norm_squared.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3723858"
}
],
"symlink_target": ""
} |
import inspect
import time
import uuid
from oslo_utils import timeutils
from six import moves
from novadocker.virt.docker import client as docker_client
class MockClient(object):
    """In-memory fake of the docker-py client used by the unit tests.

    Containers, images, and image payloads are tracked in plain dicts, so
    no Docker daemon is required.
    """

    def __init__(self, endpoint=None):
        # Maps container id -> {'Id': ..., 'running': bool, 'Config': dict}.
        self._containers = {}
        self.name = None
        # Fake repository
        self._repository = {'image_with_cmd':
                            {'ContainerConfig':
                             {'Cmd': 'echo Test'}},
                            'image_without_cmd':
                            {'ContainerConfig':
                             {'Cmd': None}}}
        self._images = {'snap-1':
                        {'ContainerConfig':
                         {'Cmd': None}}}
        self._image_data = {'snap-1': 'dummy'}
        self._setup_decorators()

    def _setup_decorators(self):
        # Wrap every public method in docker_client.filter_data, mirroring
        # the decoration the real client applies to its methods.
        for name, member in inspect.getmembers(self, inspect.ismethod):
            if not name.startswith('_'):
                setattr(self, name, docker_client.filter_data(member))

    def _fake_id(self):
        # Two concatenated UUID4 hex strings: 64 chars, the length of a
        # full Docker container/image id.
        return uuid.uuid4().hex + uuid.uuid4().hex

    def _image_name(self, image_name):
        """Split full image name to host and image name."""
        if '/' in image_name:
            host, image_name = image_name.split('/', 1)
        return image_name

    def _is_image_exists(self, image_name):
        """Check whether Images is listed in self._repository."""
        # NOTE(review): names found in _repository are reported to exist
        # only if they are *also* in _images; any other name always
        # "exists".  Confirm this asymmetry is intentional for the tests.
        image_name = self._image_name(image_name)
        if image_name in self._repository:
            return image_name in self._images
        return True

    def _is_daemon_running(self):
        # The fake daemon is always reachable.
        return True

    def containers(self, all=True):
        # Returns docker-ps style metadata for every known container.
        # (Python 2 dict iteration via iterkeys(), consistent with the
        # rest of this module.)
        containers = []
        for container_id in self._containers.iterkeys():
            containers.append({
                'Status': 'Exit 0',
                'Created': int(time.time()),
                'Image': 'ubuntu:12.04',
                'Ports': '',
                'Command': 'bash ',
                'Id': container_id
            })
        return containers

    def create_container(self, image_name, **args):
        # Registers a new, stopped container.  Returns its id, or None if
        # the requested image does not "exist" in the fake repository.
        self.name = args['name']
        data = {
            'Hostname': args['hostname'],
            'User': '',
            'Memory': args['mem_limit'],
            'MemorySwap': 0,
            'AttachStdin': False,
            'AttachStdout': False,
            'AttachStderr': False,
            'PortSpecs': None,
            'Tty': True,
            'OpenStdin': True,
            'StdinOnce': False,
            'Env': None,
            'Cmd': [],
            'Dns': None,
            'Image': image_name,
            'Volumes': {},
            'VolumesFrom': '',
            'CpuShares': args['cpu_shares'],
            'NetworkDisabled': args['network_disabled']
        }
        # Caller-supplied kwargs override the defaults above.
        data.update(args)
        if not self._is_image_exists(data['Image']):
            return None
        container_id = self._fake_id()
        self._containers[container_id] = {
            'Id': container_id,
            'running': False,
            'Config': data
        }
        return container_id

    def start(self, container_id, binds=None, dns=None, devices=None):
        # Marks the container as running; False if it is unknown.
        if container_id not in self._containers:
            return False
        self._containers[container_id]['running'] = True
        return True

    def inspect_image(self, image_name):
        # Returns image metadata, or None if the image does not "exist".
        if not self._is_image_exists(image_name):
            return None
        image_name = self._image_name(image_name)
        if image_name in self._images:
            return self._images[image_name]
        # Names outside _images get a minimal default config.
        return {'ContainerConfig': {'Cmd': None}}

    def inspect_container(self, container_id):
        # Returns docker-inspect style metadata, or None (implicit) if the
        # container is unknown.
        if container_id not in self._containers:
            return
        container = self._containers[container_id]
        info = {
            'Args': [],
            'Config': container['Config'],
            'Created': str(timeutils.utcnow()),
            'Id': container_id,
            'Image': self._fake_id(),
            'NetworkSettings': {
                'Bridge': '',
                'Gateway': '',
                'IPAddress': '',
                'IPPrefixLen': 0,
                'PortMapping': None
            },
            'Path': 'bash',
            'ResolvConfPath': '/etc/resolv.conf',
            'State': {
                'ExitCode': 0,
                'Ghost': False,
                'Pid': 0,
                'Running': container['running'],
                'StartedAt': str(timeutils.utcnow())
            },
            'SysInitPath': '/tmp/docker',
            'Volumes': {},
        }
        return info

    def stop(self, container_id, timeout=None):
        # Marks the container as stopped; False if it is unknown.
        if container_id not in self._containers:
            return False
        self._containers[container_id]['running'] = False
        return True

    def kill(self, container_id):
        # Same effect as stop() in this mock.
        if container_id not in self._containers:
            return False
        self._containers[container_id]['running'] = False
        return True

    def remove_container(self, container_id, force=False):
        if container_id not in self._containers:
            return False
        # Docker doesn't allow to destroy a running container.
        if self._containers[container_id]['running']:
            return False
        del self._containers[container_id]
        return True

    def unpause(self, container_id):
        if container_id not in self._containers:
            return False
        self._containers[container_id]['paused'] = False
        return True

    def pause(self, container_id):
        if container_id not in self._containers:
            return False
        self._containers[container_id]['paused'] = True
        return True

    def commit(self, container_id, repository=None, tag=None):
        # Pretends to snapshot the container; only validates existence.
        if container_id not in self._containers:
            return False
        return True

    def get_container_logs(self, container_id):
        # Returns fixed placeholder text for any known container.
        if container_id not in self._containers:
            return False
        return '\n'.join([
            'Lorem ipsum dolor sit amet, consectetur adipiscing elit. ',
            'Vivamus ornare mi sit amet orci feugiat, nec luctus magna ',
            'vehicula. Quisque diam nisl, dictum vitae pretium id, ',
            'consequat eget sapien. Ut vehicula tortor non ipsum ',
            'consectetur, at tincidunt elit posuere. In ut ligula leo. ',
            'Donec eleifend accumsan mi, in accumsan metus. Nullam nec ',
            'nulla eu risus vehicula porttitor. Sed purus ligula, ',
            'placerat nec metus a, imperdiet viverra turpis. Praesent ',
            'dapibus ornare massa. Nam ut hendrerit nunc. Interdum et ',
            'malesuada fames ac ante ipsum primis in faucibus. ',
            'Fusce nec pellentesque nisl.'])

    def get_image(self, name):
        # Returns the raw image payload as a file-like object.
        if (name not in self._images or
            name not in self._image_data):
            raise Exception("Image not found - %s" % name)
        return moves.StringIO(self._image_data[name])

    def load_image(self, name, data):
        # Stores a raw image payload under the given name.
        self._image_data[name] = data

    def load_repository_file(self, name, path):
        # Intentionally a no-op in the mock.
        pass

    def ping(self):
        # The fake daemon always responds.
        return True
| {
"content_hash": "0cf754642ef5b4a047ca618b74e0995e",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 73,
"avg_line_length": 33.05990783410138,
"alnum_prop": 0.5294117647058824,
"repo_name": "thanhtien522/nova-docker",
"id": "fb47e1c385ba128ace7ba5761c63a9b8300db398",
"size": "7808",
"binary": false,
"copies": "1",
"ref": "refs/heads/test/kilo-cinder",
"path": "novadocker/tests/virt/docker/mock_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "174641"
},
{
"name": "Shell",
"bytes": "14583"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial schema for the sponsors app.

    Creates the Sponsor and MeetingSponsor tables; MeetingSponsor links a
    Sponsor to a Meeting, hence the dependency on the meetings app.
    """

    depends_on = (
        ("meetings", "0010_auto__add_field_meeting_meetup_id"),
    )

    def forwards(self, orm):
        """Apply the migration: create both tables."""
        # Adding model 'MeetingSponsor'
        db.create_table(u'sponsors_meetingsponsor', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('sponsor', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sponsors.Sponsor'])),
            ('meeting', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['meetings.Meeting'])),
            ('about', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal(u'sponsors', ['MeetingSponsor'])
        # Adding model 'Sponsor'
        db.create_table(u'sponsors_sponsor', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=80)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=80)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('logo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
        ))
        db.send_create_signal(u'sponsors', ['Sponsor'])

    def backwards(self, orm):
        """Reverse the migration: drop both tables."""
        # Deleting model 'MeetingSponsor'
        db.delete_table(u'sponsors_meetingsponsor')
        # Deleting model 'Sponsor'
        db.delete_table(u'sponsors_sponsor')

    # Frozen ORM state captured by South at the time this migration was
    # generated; do not edit by hand.
    models = {
        u'meetings.meeting': {
            'Meta': {'object_name': 'Meeting'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'blank': 'True'}),
            'live_stream': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'meetup_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'when': ('django.db.models.fields.DateTimeField', [], {}),
            'where': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['meetings.Venue']", 'null': 'True', 'blank': 'True'})
        },
        u'meetings.venue': {
            'Meta': {'object_name': 'Venue'},
            'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'directions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'embed_map': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'sponsors.meetingsponsor': {
            'Meta': {'object_name': 'MeetingSponsor'},
            'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['meetings.Meeting']"}),
            'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sponsors.Sponsor']"})
        },
        u'sponsors.sponsor': {
            'Meta': {'object_name': 'Sponsor'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '80'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['sponsors']
| {
"content_hash": "0eae57979f1668efac243b7c6c7a4ed0",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 138,
"avg_line_length": 59.86046511627907,
"alnum_prop": 0.5625485625485626,
"repo_name": "tanyaschlusser/chipy.org",
"id": "48ce377199298c7becd4db7dd687aa70aa907c65",
"size": "5172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chipy_org/apps/sponsors/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "199261"
},
{
"name": "HTML",
"bytes": "77240"
},
{
"name": "JavaScript",
"bytes": "36277"
},
{
"name": "Python",
"bytes": "158016"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2 as cv
import time
import faceAPI
from faceAPI import FaceRecognitionModel
debug = True  # flag for extra diagnostics (not read in this snippet)
onComputer = True  # True when running on a desktop rather than the Pi rig
fm = FaceRecognitionModel()
# fm.learnAll("./training-images/")
# fm.recognize("./test-images/max_1.jpg")
# fm.recognize("./test-images/will_1.jpg")
# fm.recognize("./test-images/arnold_1.jpeg")
#Set dimensions of frame as 320x240 for better performance on Pi
FRAME_WIDTH = 320
FRAME_HEIGHT = 240
cap = cv.VideoCapture(0)
# NOTE(review): cv.cv.CV_CAP_PROP_* is the OpenCV 2.x constant namespace;
# OpenCV 3+ renamed these to cv.CAP_PROP_FRAME_WIDTH/HEIGHT -- confirm the
# target OpenCV version.
cap.set(cv.cv.CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
cap.set(cv.cv.CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)
if not onComputer:
    # NOTE(review): CamControl is not imported anywhere in this file, so
    # this branch would raise NameError if onComputer were False -- verify
    # the missing import.
    camera = CamControl()
    camera.up(50, 9)
    time.sleep(1)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    cv.flip(frame, 1, frame)  # flip the image (in place, around the y-axis)
    # Exit the capture loop when the user presses 'q'.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
# Release capture
cap.release()
cv.destroyAllWindows()
"content_hash": "fdfc73f5e8cf862ca41bb8504989a1b1",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 64,
"avg_line_length": 22.564102564102566,
"alnum_prop": 0.7,
"repo_name": "xamgeis/project_vulcan",
"id": "432adbcdc08c75bd312ae99d72fd2dfdf426f243",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "52269"
},
{
"name": "Python",
"bytes": "123317"
},
{
"name": "Shell",
"bytes": "5035"
}
],
"symlink_target": ""
} |
"""## Casting
TensorFlow provides several operations that you can use to cast tensor data
types in your graph.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
## Shapes and Shaping
TensorFlow provides several operations that you can use to determine the shape
of a tensor and change the shape of a tensor.
@@shape
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
## Slicing and Joining
TensorFlow provides several operations to slice or extract parts of a tensor,
or join multiple tensors together.
@@slice
@@split
@@tile
@@pad
@@concat
@@pack
@@unpack
@@reverse_sequence
@@reverse
@@transpose
@@gather
@@dynamic_partition
@@dynamic_stitch
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# pylint: disable=wildcard-import
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.ops.constant_op import constant
from tensorflow.python.ops.gen_array_ops import *
# The "slice" op defined below shadows Python's builtin 'slice', so keep a
# reference to the builtin for later use in this module.
_baseslice = slice
# Aliases for some automatically-generated names.
listdiff = gen_array_ops.list_diff
# pylint: disable=undefined-variable,protected-access
def _SliceHelper(tensor, slice_spec):
  """Overload for Tensor.__getitem__.

  Currently the size of the slice must be statically known in each dimension,
  i.e. the "stop" of the slice must not be omitted.

  TODO(mrry): Support slices where the sizes are not specified.
  TODO(mrry): Support negative indices in slices with numpy/Python semantics.

  Args:
    tensor: An ops.Tensor object.
    slice_spec: The arguments to Tensor.__getitem__.

  Returns:
    The appropriate slice of "tensor", based on "slice_spec".

  Raises:
    ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice, or Ellipsis.
  """
  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]
  begin, size, squeeze_dims = [], [], []
  for dim, item in enumerate(slice_spec):
    if isinstance(item, int):
      # A bare integer selects one element and removes that dimension.
      if item < 0:
        raise NotImplementedError("Negative indices are currently unsupported")
      begin.append(item)
      size.append(1)
      squeeze_dims.append(dim)
    elif isinstance(item, _baseslice):
      if item.step not in (None, 1):
        raise NotImplementedError(
            "Steps other than 1 are not currently supported")
      start = 0 if item.start is None else item.start
      if start < 0:
        raise NotImplementedError(
            "Negative start indices are not currently supported")
      begin.append(start)
      if item.stop is not None and item.stop < 0:
        raise NotImplementedError(
            "Negative stop indices are not currently supported")
      # NOTE(mrry): If the stop is not specified, Python substitutes
      # sys.maxsize, which is typically (2 ** 63) - 1. Since Slice currently
      # supports signed DT_INT32 arguments, we use -1 to specify that all
      # elements should be captured.
      if item.stop is None or item.stop == sys.maxsize:
        size.append(-1)
      else:
        if start > item.stop:
          raise ValueError("Stop must be at least start")
        size.append(item.stop - start)
    elif item is Ellipsis:
      raise NotImplementedError("Ellipsis is not currently supported")
    else:
      raise TypeError("Bad slice index %s of type %s" % (item, type(item)))
  sliced = slice(tensor, begin, size)
  # Integer indices collapse their dimensions via squeeze.
  return squeeze(sliced, squeeze_dims=squeeze_dims) if squeeze_dims else sliced
def slice(input_, begin, size, name=None):
  """Extracts a slice from a tensor.

  The slice starts at the offsets given by `begin` and takes `size[i]`
  elements along dimension i.  A `size[i]` of -1 means "all remaining
  elements in that dimension", i.e. `size[i] = input.dim_size(i) - begin[i]`.
  `begin` is zero-based; `size` is one-based.  The operation requires
  `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`.

  For example:

  ```
  # 'input' is [[[1, 1, 1], [2, 2, 2]],
  #             [[3, 3, 3], [4, 4, 4]],
  #             [[5, 5, 5], [6, 6, 6]]]
  tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
  tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
                                              [4, 4, 4]]]
  tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
                                             [[5, 5, 5]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    size: An `int32` or `int64` `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input`.
  """
  result = gen_array_ops._slice(input_, begin, size, name=name)
  return result
# Install _SliceHelper as the implementation of Tensor.__getitem__.
ops.Tensor._override_operator("__getitem__", _SliceHelper)
def pack(values, name="pack"):
  """Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.

  Stacks the tensors in `values` along a new leading dimension, so the
  result has shape `[len(values)] + values[0].shape` and satisfies
  `output[i, ...] = values[i][...]`.  This is the inverse of `unpack`; the
  NumPy equivalent is `np.asarray([x, y, z])`.

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A packed `Tensor` with the same type as `values`.
  """
  return gen_array_ops._pack(values, name=name)
def unpack(value, num=None, name="unpack"):
  """Unpacks the outer dimension of a rank-`R` tensor into rank-`(R-1)` tensors.

  The ith output is the slice `value[i, ...]`, with shape `value.shape[1:]`.
  When `num` is None it is inferred from `value`'s static shape; if the
  first dimension is unknown, `ValueError` is raised.  This is the inverse
  of `pack`; the NumPy equivalent is `list(x)`.

  Args:
    value: A rank `R > 0` `Tensor` to be unpacked.
    num: An `int`. The first dimension of value. Automatically inferred if
      `None` (the default).
    name: A name for the operation (optional).

  Returns:
    The list of `Tensor` objects unpacked from `value`.

  Raises:
    ValueError: If `num` is unspecified and cannot be inferred.
  """
  if num is None:
    value = ops.convert_to_tensor(value)
    static_shape = value.get_shape()
    num = static_shape[0].value
    if num is None:
      raise ValueError("Cannot infer num from shape %s" % static_shape)
  return gen_array_ops._unpack(value, num=num, name=name)
def concat(concat_dim, values, name="concat"):
  """Concatenates tensors along one dimension.

  All inputs must have the same rank and matching dimensions everywhere
  except `concat_dim`, where the output's size is the sum of the inputs'
  sizes along that dimension.

  For example:

  ```python
  t1 = [[1, 2, 3], [4, 5, 6]]
  t2 = [[7, 8, 9], [10, 11, 12]]
  tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
  tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]

  # tensor t3 with shape [2, 3]
  # tensor t4 with shape [2, 3]
  tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]
  tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]
  ```

  Args:
    concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.
    values: A list of `Tensor` objects or a single `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` resulting from concatenation of the input tensors.
  """
  if not isinstance(values, list):
    values = [values]
  # TODO(mrry): Change to return values?
  if len(values) == 1:
    # Degenerate case of one tensor: no concat op is needed.
    return identity(values[0], name=name)
  return gen_array_ops._concat(concat_dim=concat_dim, values=values,
                               name=name)
@ops.RegisterShape("Pack")
def _PackShape(op):
  # The output is [num_inputs] followed by the merged common input shape.
  merged = op.inputs[0].get_shape()
  for tensor in op.inputs[1:]:
    merged = merged.merge_with(tensor.get_shape())
  return [tensor_shape.TensorShape([len(op.inputs)]).concatenate(merged)]
@ops.RegisterShape("Unpack")
def _UnpackShape(op):
  # Each of the `num` outputs drops the input's leading dimension.
  shape_rest = op.inputs[0].get_shape()[1:]
  return [shape_rest] * op.get_attr("num")
@ops.RegisterShape("Concat")
def _ConcatShape(op):
  """Shape function for the Concat op.

  If the concat dimension is statically known, merges the non-concat
  dimensions of all inputs and sums the concat dimension; otherwise
  returns an unknown shape with the inputs' common rank.
  """
  concat_dim = tensor_util.ConstantValue(op.inputs[0])
  if concat_dim is None:
    # Return an unknown shape with the same rank as the inputs, or an
    # unknown rank if no input's rank is known.
    rank = None
    for value in op.inputs[1:]:
      if rank is not None:
        value.get_shape().assert_has_rank(rank)
      else:
        rank = value.get_shape().ndims
    # NOTE(review): relies on Python 2's max(None, 1) == 1 when every
    # input rank is unknown -- confirm before porting to Python 3.
    return [tensor_shape.unknown_shape(ndims=max(rank, 1))]
  else:
    # Merge all the non-concat dims, and sum the concat dim to make an
    # output shape.
    concat_dim = int(concat_dim)
    output_shape = op.inputs[1].get_shape()
    # TODO(irving): Remove once !kAllowLegacyScalars.
    if output_shape.ndims == 0:
      output_shape = tensor_shape.TensorShape([1])
    for value in op.inputs[2:]:
      value_shape = value.get_shape()
      if value_shape.ndims is not None and concat_dim >= value_shape.ndims:
        if value_shape.ndims == 0 and concat_dim == 0:
          # Let concat handle scalars
          # TODO(irving): Remove once !kAllowLegacyScalars.
          value_shape = tensor_shape.TensorShape([1])
        else:
          raise ValueError("concat_dim is out of range (values rank = %d)" %
                           value_shape.ndims)
      before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])
      at = output_shape[concat_dim] + value_shape[concat_dim]
      after = output_shape[
          concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])
      output_shape = before.concatenate(at).concatenate(after)
    return [output_shape]
def sparse_mask(a, mask_indices, name=None):
  """Masks elements of `IndexedSlices`.

  Returns an `IndexedSlices` containing only the slices of `a` whose
  indices are *not* listed in `mask_indices`.  For example, if `a.indices`
  is `[12, 26, 37, 45]`, then `tf.sparse_mask(a, [12, 45])` yields an
  `IndexedSlices` whose indices are `[26, 37]`.

  Args:
    * `a`: An `IndexedSlices` instance.
    * `mask_indices`: Indices of elements to mask.
    * `name`: A name for the operation (optional).

  Returns:
    The masked `IndexedSlices` instance.
  """
  with ops.op_scope([a, mask_indices], name, "sparse_mask") as name:
    # listdiff yields the surviving indices and their positions in `a`.
    kept_indices, positions = listdiff(a.indices, mask_indices)
    kept_values = gather(a.values, positions, name=name)
    return ops.IndexedSlices(kept_values, kept_indices, a.dense_shape)
def split(split_dim, num_split, value, name="split"):
  """Splits a tensor into `num_split` tensors along one dimension.

  `value.shape[split_dim]` must be evenly divisible by `num_split`.

  For example:

  ```python
  # 'value' is a tensor with shape [5, 30]
  # Split 'value' into 3 tensors along dimension 1
  split0, split1, split2 = tf.split(1, 3, value)
  tf.shape(split0) ==> [5, 10]
  ```

  Args:
    split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
      Must be in the range `[0, rank(value))`.
    num_split: A 0-D `int32` `Tensor`. The number of ways to split.
    value: The `Tensor` to split.
    name: A name for the operation (optional).

  Returns:
    `num_split` `Tensor` objects resulting from splitting `value`.
  """
  return gen_array_ops._split(
      split_dim=split_dim, num_split=num_split, value=value, name=name)
@ops.RegisterShape("Reverse")
def _ReverseShape(op):
  # Reverse preserves its input's shape, but supports at most 8 dimensions.
  input_shape = op.inputs[0].get_shape()
  return [input_shape.with_rank_at_most(8)]
def transpose(a, perm=None, name="transpose"):
  """Transposes `a`. Permutes the dimensions according to `perm`.

  Dimension i of the result corresponds to input dimension `perm[i]`.  When
  `perm` is omitted it defaults to (n-1...0), where n is the rank of the
  input, i.e. a regular matrix transpose for 2-D tensors.

  For example:

  ```python
  # 'x' is [[1 2 3]
  #         [4 5 6]]
  tf.transpose(x) ==> [[1 4]
                       [2 5]
                       [3 6]]

  # 'perm' is more useful for n-dimensional tensors, for n > 2, e.g. use
  # perm=[0, 2, 1] to transpose the matrices in dimension-0 of a 3-D tensor.
  ```

  Args:
    a: A `Tensor`.
    perm: A permutation of the dimensions of `a`.
    name: A name for the operation (optional).

  Returns:
    A transposed `Tensor`.
  """
  with ops.op_scope([a], name, "transpose") as name:
    if perm is not None:
      return gen_array_ops.transpose(a, perm, name=name)
    # Default permutation: reverse [0, rank) to obtain (n-1, ..., 0).
    dims = gen_math_ops._range(0, gen_array_ops.rank(a), 1)
    ret = gen_array_ops.transpose(
        a, gen_array_ops.reverse(dims, [True]), name=name)
    # NOTE(mrry): Setting the shape explicitly because
    # reverse is not handled by the shape function.
    input_shape = ret.op.inputs[0].get_shape().dims
    if input_shape is not None:
      ret.set_shape(input_shape[::-1])
    return ret
def zeros(shape, dtype=types.float32, name=None):
  """Creates a tensor with all elements set to zero.

  For example:

  ```python
  tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.op_scope([shape], name, "zeros") as name:
    if not isinstance(shape, list):
      # Dynamic shape: broadcast a scalar zero with `fill`.
      shape = ops.convert_to_tensor(shape, name="shape")
      output = fill(shape, constant(0, dtype=dtype), name=name)
    else:
      output = constant(0, shape=shape, dtype=dtype, name=name)
  assert output.dtype.base_dtype == types.as_dtype(dtype).base_dtype
  return output
def zeros_like(tensor, dtype=None, name=None):
  """Creates a tensor with all elements set to zero.

  The result has the same shape as `tensor` and, unless `dtype` overrides
  it, the same type.  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.op_scope([tensor], name, "zeros_like") as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    out_dtype = tensor.dtype if dtype is None else dtype
    return zeros(shape(tensor), dtype=out_dtype, name=name)
def ones_like(tensor, dtype=None, name=None):
  """Creates a tensor with all elements set to 1.

  The result has the same shape as `tensor` and, unless `dtype` overrides
  it, the same type.  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to 1.
  """
  with ops.op_scope([tensor], name, "ones_like") as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    out_dtype = tensor.dtype if dtype is None else dtype
    return ones(shape(tensor), dtype=out_dtype, name=name)
def zeros_initializer(shape, dtype=types.float32):
  """An adaptor for zeros() to match the Initializer spec.

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.

  Returns:
    A `Tensor` of zeros with the given shape and dtype.
  """
  return zeros(shape, dtype)
def ones(shape, dtype=types.float32, name=None):
  """Creates a tensor with all elements set to 1.

  For example:

  ```python
  tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to 1.
  """
  with ops.op_scope([shape], name, "ones") as name:
    if not isinstance(shape, list):
      # Dynamic shape: broadcast a scalar one with `fill`.
      shape = ops.convert_to_tensor(shape, name="shape")
      output = fill(shape, constant(1, dtype=dtype), name=name)
    else:
      output = constant(1, shape=shape, dtype=dtype, name=name)
  assert output.dtype.base_dtype == types.as_dtype(dtype).base_dtype
  return output
def placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a tensor that will be always fed.

  **Important**: This tensor will produce an error if evaluated. Its value
  must be fed using the `feed_dict` optional argument to `Session.run()`,
  `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.placeholder(float, shape=(1024, 1024))
  y = tf.matmul(x, x)
  with tf.Session() as sess:
    print sess.run(y)  # ERROR: will fail because x was not fed.
    rand_array = np.random.rand(1024, 1024)
    print sess.run(y, feed_dict={x: rand_array})  # Will succeed.
  ```

  Args:
    dtype: The type of elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a tensor of any shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that may be used as a handle for feeding a value, but not
    evaluated directly.
  """
  shape = tensor_shape.as_shape(shape)
  # Only a fully-defined shape is recorded in the op's attr; a partially
  # known one is stored there as [] (unknown) and re-attached below.
  dim_list = shape.as_list() if shape.is_fully_defined() else []
  ret = gen_array_ops._placeholder(dtype=dtype, shape=dim_list, name=name)
  # Restore the (possibly partial) static shape for shape inference.
  ret.set_shape(shape)
  return ret
@ops.RegisterShape("Placeholder")
def _PlaceholderShape(op):
  """Shape function for Placeholder: use the recorded shape attr, if any."""
  given_shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape"))
  if not given_shape:
    # An empty attr means the shape was not specified at creation time.
    return [tensor_shape.unknown_shape()]
  return [tensor_shape.TensorShape(given_shape)]
@ops.RegisterShape("CheckNumerics")
@ops.RegisterShape("Identity")
@ops.RegisterShape("RefIdentity")
@ops.RegisterShape("StopGradient")
def _UnchangedShape(op):
  """These ops pass their single input through, so the shape is unchanged."""
  passthrough = op.inputs[0].get_shape()
  return [passthrough]
@ops.RegisterShape("Rank")
@ops.RegisterShape("Size")
def _ScalarShape(unused_op):
  """Rank and Size always produce a scalar, regardless of the input."""
  return [tensor_shape.scalar()]
@ops.RegisterShape("Slice")
def _SliceShape(op):
  """Shape function for array_ops.slice.

  Inputs are (input, begin, sizes).  The output shape is `sizes`, with any
  -1 entry replaced by "everything past begin[i]" when that is computable
  at graph-construction time.
  """
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  sizes_shape = op.inputs[2].get_shape().with_rank_at_most(1)
  # begin and sizes must have the same length, and that length is the
  # rank of the input.
  rank_vector_shape = begin_shape.merge_with(sizes_shape)
  ndims = rank_vector_shape.num_elements()
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  # ConstantValue returns None when the tensor is not statically known.
  begin_value = tensor_util.ConstantValue(op.inputs[1])
  sizes_value = tensor_util.ConstantValue(op.inputs[2])
  if sizes_value is not None:
    returned_dims = []
    for i, slice_size in enumerate(sizes_value.ravel()):
      if slice_size != -1:
        # Explicit size: taken verbatim.
        returned_dims.append(slice_size)
      elif begin_value is not None:
        # -1 means "to the end": the remaining extent past begin[i].
        returned_dims.append(input_shape[i] - begin_value[i])
      else:
        # -1 with unknown begin: this dimension's size is unknown.
        returned_dims.append(None)
    return [tensor_shape.TensorShape(returned_dims)]
  else:
    # Sizes not statically known: report the best known rank, if any.
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    elif ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    else:
      return [tensor_shape.unknown_shape()]
@ops.RegisterShape("Gather")
def _GatherShape(op):
  """Shape function for array_ops.gather: indices shape + params tail."""
  indices_shape = op.inputs[1].get_shape()
  params_tail = op.inputs[0].get_shape()[1:]
  return [indices_shape.concatenate(params_tail)]
@ops.RegisterShape("Unique")
def _UniqueShape(op):
  """Shape function for array_ops.Unique.

  The first output (unique values) is a vector of data-dependent length;
  the second (indices) matches the rank-1 input.
  """
  vector_in = op.inputs[0].get_shape()
  vector_in.assert_has_rank(1)
  return [tensor_shape.vector(None), vector_in]
@ops.RegisterShape("Diag")
def _DiagShape(op):
  """Shape function for array_ops.diag.

  The input has rank k <= 3 and the output has rank 2k: the output shape
  is the input shape concatenated with itself.

  Args:
    op: A Diag Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  in_shape = op.inputs[0].get_shape().with_rank_at_most(3)
  return [in_shape.concatenate(in_shape)]
@ops.RegisterShape("ExpandDims")
def _ExpandDimsShape(op):
  """Determine shape for expand op's output tensor.

  Args:
    op: Operation for which to determine shape.
        op.inputs[0] is the input tensor.
        op.inputs[1] is the dimension in which to expand.

  Returns:
    Shape of op's output tensor.

  Raises:
    ValueError: If dim is outside of [-rank - 1, rank], where rank is the
      number of dimensions in the input tensor.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    return [tensor_shape.unknown_shape()]
  dim = tensor_util.ConstantValue(op.inputs[1])
  input_ndims = input_shape.ndims
  if dim is None:
    # The expansion axis is not statically known; only the output rank is
    # (one more than the input rank).  Previously this fell through and
    # compared None against ints, raising a spurious error.
    return [tensor_shape.unknown_shape(ndims=input_ndims + 1)]
  # Valid axes are [-input_ndims - 1, input_ndims]; the message now matches
  # the check (it previously reported the lower bound as -input_ndims).
  if dim < -input_ndims - 1 or dim > input_ndims:
    raise ValueError(
        "dim %d not in [%d, %d]." % (dim, -input_ndims - 1, input_ndims))
  if dim < 0:
    # Normalize a negative axis to its positive equivalent.
    dim += (input_ndims + 1)
  result_shape = list(input_shape.dims)
  result_shape.insert(dim, 1)
  return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Squeeze")
def _SqueezeShape(op):
  """Determine shape for squeeze op's output tensor.

  Args:
    op: Operation for which to determine shape.

  Returns:
    Shape of op's output tensor.

  Raises:
    ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),
      where rank is the number of dimensions in the input tensor. Or, if
      squeeze_dims includes a dimension for which input shape has a value
      not equal to 1.
  """
  input_shape = op.inputs[0].get_shape()
  if input_shape.dims is None:
    # Unknown input rank: nothing can be inferred about the output.
    return [tensor_shape.unknown_shape()]
  # An absent attr is treated the same as an empty list ("squeeze all 1s").
  squeeze_dims = op.get_attr("squeeze_dims") or []
  wrapped_squeeze_dims = []
  input_ndims = input_shape.ndims
  # Validate each requested axis and normalize negatives to positives.
  for i, squeeze_dim in enumerate(squeeze_dims):
    if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:
      raise ValueError(
          "squeeze_dims[%d]=%d not in [%d, %d)." % (
              i, squeeze_dim, -input_ndims, input_ndims))
    if squeeze_dim < 0:
      squeeze_dim += input_ndims
    wrapped_squeeze_dims.append(squeeze_dim)
  result_shape = []
  for i, dim in enumerate([d.value for d in input_shape.dims]):
    is_explicit_match = i in wrapped_squeeze_dims
    # A dimension is a squeeze candidate if it was named explicitly, or if
    # no dims were named at all (in which case every size-1 dim goes).
    if is_explicit_match or not wrapped_squeeze_dims:
      if dim is None:
        # Can't tell whether this dim is 1, so the result is unknown.
        return [tensor_shape.unknown_shape()]
      if dim != 1:
        if is_explicit_match:
          # Explicitly asked to squeeze a dim that is not 1: error.
          raise ValueError(
              "Can not squeeze dim[%d], expected a dimension of 1, got %d." % (
                  i, dim))
        # Implicit candidate that is not 1: keep it.
        result_shape.append(dim)
    else:
      result_shape.append(dim)
  return [tensor_shape.TensorShape(result_shape)]
@ops.RegisterShape("Reshape")
def _ReshapeShape(op):
  """Shape function for Reshape op.

  Handles three cases: the new shape is fully specified; it contains one
  -1 that can be solved from a fully-known input; or it contains a -1
  that must remain unknown.
  """
  input_shape = op.inputs[0].get_shape()
  new_shape_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  new_shape = tensor_util.ConstantValue(op.inputs[1])
  if new_shape is None:
    # Attempt to infer the rank of the output from the length of
    # new_shape.
    return [tensor_shape.unknown_shape(ndims=new_shape_shape.num_elements())]
  new_shape = np.reshape(new_shape, -1).tolist()
  if -1 not in new_shape:
    # The new shape is fully defined.
    return [tensor_shape.TensorShape(new_shape)]
  elif input_shape.is_fully_defined():
    # We know the input shape, so we can calculate the missing
    # dimension in the new_shape.
    num_elements = 1
    for dim in input_shape.dims:
      num_elements *= dim.value
    known_elements = 1
    unknown_index = None
    for i, dim in enumerate(new_shape):
      if dim == -1:
        unknown_index = i
      else:
        known_elements *= dim
    if known_elements == 0:
      # A zero-sized dimension makes the -1 ambiguous (0 / 0).
      raise ValueError("cannot infer the missing input size for "
                       "an empty tensor unless all specified "
                       "input sizes are non-zero")
    if num_elements % known_elements != 0:
      raise ValueError("input has %s elements, which isn't divisible by %d" %
                       (num_elements, known_elements))
    # The missing dimension is whatever makes the element counts match.
    new_shape[unknown_index] = num_elements // known_elements
    return [tensor_shape.TensorShape(new_shape)]
  else:
    # We don't know the input shape, but we know n-1 of the dimensions
    # in the new shape.
    new_shape[new_shape.index(-1)] = None
    return [tensor_shape.TensorShape(new_shape)]
@ops.RegisterShape("BroadcastGradientArgs")
def _BroadcastGradientArgsShape(op):
  """Shape function for the BroadcastGradientArgs op.

  Both outputs are vectors of data-dependent length.
  """
  # TODO(mrry): Implement ConstantValue for BroadcastGradientArgs?
  for shape_input in op.inputs[:2]:
    shape_input.get_shape().assert_has_rank(1)
  return [tensor_shape.vector(None), tensor_shape.vector(None)]
@ops.RegisterShape("Fill")
def _FillShape(op):
  """Shape function for the Fill op.

  This op takes a vector of dimensions and a scalar, and produces a
  tensor with the given dimensions.

  Args:
    op: A Fill Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  dims_input_shape = op.inputs[0].get_shape().with_rank_at_most(1)
  op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())
  fill_dims = tensor_util.ConstantValue(op.inputs[0])
  if fill_dims is not None:
    return [tensor_shape.TensorShape(fill_dims.tolist())]
  # Dimensions not statically known: infer the output rank from the
  # length of the dimensions vector, if that much is known.
  return [tensor_shape.unknown_shape(ndims=dims_input_shape.num_elements())]
@ops.RegisterShape("InvertPermutation")
def _InvertPermutationShape(op):
  """Shape function for InvertPermutation: output matches the rank-1 input."""
  perm_shape = op.inputs[0].get_shape().with_rank(1)
  return [perm_shape]
@ops.RegisterShape("ListDiff")
def _ListDiffShape(op):
  """Shape function for ListDiff: two vectors of data-dependent length."""
  for list_input in op.inputs[:2]:
    list_input.get_shape().assert_has_rank(1)
  # TODO(mrry): Indicate that the length falls within an interval?
  return [tensor_shape.vector(None)] * 2
@ops.RegisterShape("Pad")
def _PadShape(op):
  """Shape function for the Pad op.

  This op has two inputs:

  * input: A rank-N tensor.
  * paddings: An N-by-2 matrix, in which the i^th row contains the
    number of padding elements to add before and after `input` in the
    i^th dimension.

  It has one output, which has the same rank as input, and additional
  elements according to the values in paddings.

  Args:
    op: A Pad Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the input shapes are incompatible.
  """
  paddings_shape = op.inputs[1].get_shape().with_rank(2)
  input_shape = op.inputs[0].get_shape()
  if input_shape.ndims == 0 and paddings_shape[0].value == 1:
    # TODO(irving): Remove once !kAllowLegacyScalars.
    # Legacy behavior: a scalar padded with one row of paddings is
    # treated as a length-1 vector.
    input_shape = tensor_shape.TensorShape([1])
  else:
    # The number of padding rows fixes the input rank (and vice versa).
    input_shape = input_shape.with_rank(paddings_shape[0].value)
  paddings_shape = paddings_shape.merge_with(
      tensor_shape.matrix(input_shape.ndims, 2))
  paddings = tensor_util.ConstantValue(op.inputs[1])
  if paddings is None:
    # Padding amounts unknown: only the rank of the output is known.
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  else:
    output_dims = []
    for i, dim in enumerate(input_shape.dims):
      if paddings[i, 0] < 0 or paddings[i, 1] < 0:
        raise ValueError("paddings must be non-negative")
      # Each output dim grows by the before + after padding amounts.
      output_dims.append(dim + paddings[i, 0] + paddings[i, 1])
    return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("ReverseSequence")
def _ReverseSequenceShape(op):
  """Shape function for the ReverseSequence op.

  This op has two inputs:

  * input: A rank-N tensor with size B in the 0th dimension.
  * seq_lens: A vector of length B.

  It has one output, with the same size as input.

  Args:
    op: A ReverseSequence Operation.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the input shapes are incompatible.
  """
  input_shape = op.inputs[0].get_shape()
  seq_lens_shape = op.inputs[1].get_shape().with_rank(1)
  # The batch dimension of the input must agree with the length of
  # seq_lens; merging propagates whichever side is known.
  batch_size = input_shape[0].merge_with(seq_lens_shape[0])
  input_shape = tensor_shape.TensorShape([batch_size]).concatenate(
      input_shape[1:])
  seq_dim = op.get_attr("seq_dim")
  if seq_dim >= input_shape.ndims:
    raise ValueError("seq_dim must be < input.dims() (%d vs %d)" %
                     (seq_dim, input_shape.ndims))
  return [input_shape]
@ops.RegisterShape("Shape")
def _ShapeShape(op):
  """Shape function for the Shape op: a vector with one entry per input dim."""
  input_rank = op.inputs[0].get_shape().ndims
  return [tensor_shape.vector(input_rank)]
@ops.RegisterShape("Transpose")
def _TransposeShape(op):
  """Shape function for the Transpose op.

  This op takes two inputs:

  * input: a rank-N tensor of arbitrary shape.
  * shuffle: a length-N vector.

  Its output is the rank-N tensor computed by permuting the dimensions
  of input according to shuffle.

  Args:
    op: A Transpose op.

  Returns:
    A single-element list containing the shape of the output.

  Raises:
    ValueError: If the shapes of input and shuffle are incompatible.
    IndexError: If shuffle contains an index that is >= the rank of input.
  """
  input_shape = op.inputs[0].get_shape()
  # The permutation vector's length must equal the input rank.
  perm_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(
      input_shape.ndims))
  perm = tensor_util.ConstantValue(op.inputs[1])
  if perm is None:
    # Unknown permutation: only the rank carries over.
    return [tensor_shape.unknown_shape(ndims=perm_shape[0].value)]
  return [tensor_shape.TensorShape([input_shape[axis]
                                    for axis in perm.tolist()])]
@ops.RegisterShape("Split")
def _SplitShape(op):
  """Shape function for the Split op.

  Each of the num_split outputs matches the input shape except along
  split_dim, where the size is divided evenly by num_split.
  """
  split_dim = tensor_util.ConstantValue(op.inputs[0])
  # One output per way of the split.
  num_split = len(op.outputs)
  input_shape = op.inputs[1].get_shape()
  if split_dim is None:
    # Split axis unknown: every output keeps only the input's rank.
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split
  else:
    split_dim = int(split_dim)
    input_shape = input_shape.with_rank_at_least(split_dim + 1)
    # The split dimension must divide evenly into num_split pieces.
    if not (input_shape[split_dim] % num_split).is_compatible_with(0):
      raise ValueError(
          "Number of ways to split should evenly divide the split "
          "dimension but got split_dim %d (size = %d) and num_split %d" %
          (split_dim, input_shape[split_dim].value, num_split))
    prefix = input_shape[:split_dim]
    size_in_split_dim = input_shape[split_dim] // num_split
    suffix = input_shape[split_dim + 1:]
    output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)
    return [output_shape] * num_split
@ops.RegisterShape("Tile")
def _TileShape(op):
  """Shape function for the Tile op.

  This op has two inputs:

  * input: A rank-N tensor.
  * multiples: A length-N vector, in which the i^th element contains
    the factor by which `input` will be tiled in the i^th dimension.

  It has one output, which has the same rank as input, and additional
  elements according to the values in multiples

  Args:
    op: A Tile Operation.

  Returns:
    A single-element list containing the shape of the output.
  """
  multiples_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  input_shape = op.inputs[0].get_shape().with_rank(
      multiples_shape.num_elements())
  multiples = tensor_util.ConstantValue(op.inputs[1])
  if multiples is None:
    # Factors unknown: only the rank is preserved.
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  factors = multiples.ravel()
  # Each output dimension is the input dimension scaled by its factor.
  output_dims = [dim * factors[i]
                 for i, dim in enumerate(input_shape.dims)]
  return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("TileGrad")
def _TileGradShape(op):
  """Shape function for the TileGrad op: divides each dim by its factor."""
  multiples_shape = op.inputs[1].get_shape().with_rank_at_most(1)
  input_shape = op.inputs[0].get_shape().with_rank(
      multiples_shape.num_elements())
  multiples = tensor_util.ConstantValue(op.inputs[1])
  if multiples is None:
    # Factors unknown: only the rank is preserved.
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
  output_dims = [dim // multiples[i]
                 for i, dim in enumerate(input_shape.dims)]
  return [tensor_shape.TensorShape(output_dims)]
@ops.RegisterShape("Where")
def _WhereShape(op):
  """Shape function for Where: an unknown number of rank-N coordinates."""
  input_rank = op.inputs[0].get_shape().ndims
  return [tensor_shape.matrix(None, input_rank)]
@ops.RegisterShape("ZerosLike")
def _ZerosLikeShape(op):
  """Shape function for the ZerosLike op: the output mirrors the input."""
  mirrored = op.inputs[0].get_shape()
  return [mirrored]
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
  """Computes the Levenshtein distance between sequences.

  This operation takes variable-length sequences (`hypothesis` and `truth`),
  each provided as a `SparseTensor`, and computes the Levenshtein distance.
  You can normalize the edit distance by length of `truth` by setting
  `normalize` to true.

  For example, given the following input:

  ```python
  # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
  #   (0,0) = ["a"]
  #   (1,0) = ["b"]
  hypothesis = tf.SparseTensor(
      [[0, 0, 0],
       [1, 0, 0]],
      ["a", "b"],
      (2, 1, 1))

  # 'truth' is a tensor of shape `[2, 2]` with variable-length values:
  #   (0,0) = []
  #   (0,1) = ["a"]
  #   (1,0) = ["b", "c"]
  #   (1,1) = ["a"]
  truth = tf.SparseTensor(
      [[0, 1, 0],
       [1, 0, 0],
       [1, 0, 1],
       [1, 1, 0]],
      ["a", "b", "c", "a"],
      (2, 2, 2))

  normalize = True
  ```

  This operation would return the following:

  ```python
  # 'output' is a tensor of shape `[2, 2]` with edit distances normalized
  # by 'truth' lengths.
  output ==> [[inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
              [0.5, 1.0]]  # (1,0): addition, (1,1): no hypothesis
  ```

  Args:
    hypothesis: A `SparseTensor` containing hypothesis sequences.
    truth: A `SparseTensor` containing truth sequences.
    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
      length of `truth.`
    name: A name for the operation (optional).

  Returns:
    A dense `Tensor` with rank `R - 1`, where R is the rank of the
    `SparseTensor` inputs `hypothesis` and `truth`.

  Raises:
    TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
  """
  # Both inputs must already be SparseTensors; no implicit conversion.
  if not isinstance(hypothesis, ops.SparseTensor):
    raise TypeError("Hypothesis must be a SparseTensor")
  if not isinstance(truth, ops.SparseTensor):
    raise TypeError("Truth must be a SparseTensor")
  return gen_array_ops._edit_distance(hypothesis.indices,
                                      hypothesis.values,
                                      hypothesis.shape,
                                      truth.indices,
                                      truth.values,
                                      truth.shape,
                                      normalize=normalize,
                                      name=name)
@ops.RegisterShape("EditDistance")
def _EditDistanceShape(op):
  """Shape function for the EditDistance op.

  When both dense-shape inputs are statically known, the output shape is
  their elementwise max with the final (time) dimension dropped.
  """
  hyp_shape = tensor_util.ConstantValue(op.inputs[2])
  truth_shape = tensor_util.ConstantValue(op.inputs[5])
  if hyp_shape is None or truth_shape is None:
    return [tensor_shape.unknown_shape()]
  if len(hyp_shape) != len(truth_shape):
    raise ValueError(
        "Inconsistent ranks in hypothesis and truth. Saw shapes: %s and %s" %
        (str(hyp_shape), str(truth_shape)))
  result_dims = [max(h, t) for h, t in zip(hyp_shape[:-1], truth_shape[:-1])]
  return [tensor_shape.TensorShape(result_dims)]
# The remaining ops do not change the shape of their inputs.
@ops.RegisterShape("Quantize")
@ops.RegisterShape("Dequantize")
def _QuantizeDequantizeShape(op):
  """Shape function for Quantize/Dequantize: output shape equals input shape.

  Merging inputs 1 and 2 (min_range / max_range) with scalar() enforces,
  as a side effect, that both are scalars.
  """
  unused_min_range = op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
  unused_max_range = op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
  return common_shapes.unchanged_shape(op)
| {
"content_hash": "9868fe4b82a3ded6ae4457865daf03d5",
"timestamp": "",
"source": "github",
"line_count": 1211,
"max_line_length": 82,
"avg_line_length": 32.35177539223782,
"alnum_prop": 0.6476338761549849,
"repo_name": "brodyh/tensorflow",
"id": "28138fbf39f65591c9037a49b10e058926f99b34",
"size": "39178",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/array_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "165934"
},
{
"name": "C++",
"bytes": "4901913"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "637241"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "45213"
},
{
"name": "Python",
"bytes": "2473570"
},
{
"name": "Shell",
"bytes": "7535"
},
{
"name": "TypeScript",
"bytes": "237446"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from datetime import timedelta
from math import pow, sqrt
import datetime
import json
from core.utils import get_config
from django.conf import settings
from django.core.cache import cache
from django.db.models import Q
import pytz
class MapJSONGenerator(object):
    """Builds a JSON representation of a Map for the map display.

    Instantiated with a map and the requesting user.  The icon thresholds
    below are read from per-user config once, at construction time.
    """
    def __init__(self, map, user):
        self.map = map
        self.user = user
        # Kill-count thresholds that select the PvP / NPC activity icons.
        self.pvp_threshold = int(get_config("MAP_PVP_THRESHOLD", user).value)
        self.npc_threshold = int(get_config("MAP_NPC_THRESHOLD", user).value)
        # Minutes for which a system stays "interesting" after being marked.
        self.interest_time = int(get_config("MAP_INTEREST_TIME", user).value)
    def _get_interest_path(self):
        """Get all MapSystems contained in a path to a system of interest.

        Memoized on the instance (self._interest_path) so repeated calls
        during one JSON build hit the database only once.
        """
        try:
            return self._interest_path
        except AttributeError:
            # Systems marked interesting within the last interest_time minutes.
            threshold = (datetime.datetime.now(pytz.utc) -
                         timedelta(minutes=self.interest_time))
            systems = []
            for system in self.map.systems.filter(
                    interesttime__gt=threshold).iterator():
                systems.extend(self.get_path_to_map_system(system))
            self._interest_path = systems
            return systems
    @staticmethod
    def get_cache_key(map_inst):
        # Cache key for the per-map system list (see get_systems_json).
        return '%s_map' % map_inst.pk
    @staticmethod
    def get_path_to_map_system(system):
        """
        Returns a list of MapSystems on the route between the map root and
        the provided MapSystem.

        The list runs child-first (the given system is element 0); walking
        stops at the root or at a collapsed parent wormhole.
        """
        systemlist = []
        parent = system
        while parent:
            systemlist.append(parent)
            if parent.parentsystem and not parent.parent_wormhole.collapsed:
                parent = parent.parentsystem
            else:
                parent = None
        return systemlist
    def get_system_icon(self, system):
        """Get URL to system background icon.

        Takes a MapSystem and returns the appropriate icon to
        display on the map as a relative URL.

        Priority order: active fleet > PvP kills > NPC kills > unscanned
        for over a day > partially scanned > no icon (None).
        """
        pvp_threshold = self.pvp_threshold
        npc_threshold = self.npc_threshold
        static_prefix = "%s" % (settings.STATIC_URL + "images/")
        # An ongoing fleet in the system takes precedence over everything.
        if system.system.stfleets.filter(ended__isnull=True).exists():
            return static_prefix + "farm.png"
        if system.system.shipkills + system.system.podkills > pvp_threshold:
            return static_prefix + "pvp.png"
        if system.system.npckills > npc_threshold:
            return static_prefix + "carebears.png"
        # unscanned for >24h
        if not system.system.signatures.filter(modified_time__gte=(datetime.datetime.now(pytz.utc)-datetime.timedelta(days=1))).filter(sigtype__isnull=False).exists():
            return static_prefix + "scan.png"
        # partially scanned
        if system.system.signatures.filter(sigtype__isnull=True).exists():
            return static_prefix + "isis_scan.png"
        return None
    def system_to_dict(self, system, level_x, level_y):
        """Get dict representation of a system.

        Takes a MapSystem and X, Y data.
        Returns the dict of information to be passed to the map JS as JSON.
        """
        system_obj = system.system
        is_wspace = system_obj.is_wspace()
        system_dict = {
            'sysID': system_obj.pk,
            'Name': system_obj.name,
            'LevelX': level_x,
            'LevelY': level_y,
            'SysClass': system_obj.sysclass,
            'Friendly': system.friendlyname,
            # True when the system itself was recently marked interesting.
            'interest':
                system.interesttime and
                system.interesttime > datetime.datetime.now(pytz.utc) -
                timedelta(minutes=self.interest_time),
            # True when the system lies on a path to an interesting system.
            'interestpath': system in self._get_interest_path(),
            'activePilots': len(system_obj.pilot_list),
            # NOTE(review): assumes pilot_list values are (something, name)
            # pairs keyed per pilot -- confirm against the cache writer.
            'pilot_list': [x[1][1] for x in system_obj.pilot_list.items()
                           if x[1][1] != "OOG Browser"],
            'iconImageURL': self.get_system_icon(system),
            'msID': system.pk,
            'backgroundImageURL': self.get_system_background(system),
            # Wormhole-space-only attributes default to None/False for k-space.
            'effect': system_obj.wsystem.effect if is_wspace else None,
            'importance': system_obj.importance,
            'shattered':
                system_obj.wsystem.is_shattered if is_wspace else False,
        }
        if system.parentsystem:
            # Non-root system: describe the wormhole linking it to its parent.
            parent_wh = system.parent_wormhole
            system_dict.update({
                'ParentID': system.parentsystem.pk,
                'WhToParent': parent_wh.bottom_type.name,
                'WhFromParent': parent_wh.top_type.name,
                'WhMassStatus': parent_wh.mass_status,
                'WhTimeStatus': parent_wh.time_status,
                'WhTotalMass': parent_wh.max_mass,
                'WhJumpMass': parent_wh.jump_mass,
                'WhToParentBubbled': parent_wh.bottom_bubbled,
                'WhFromParentBubbled': parent_wh.top_bubbled,
                'whID': parent_wh.pk,
                'collapsed': bool(parent_wh.collapsed),
            })
        else:
            # Root system: emit empty/None wormhole fields for the JS side.
            system_dict.update({
                'ParentID': None,
                'WhToParent': "",
                'WhFromParent': "",
                'WhTotalMass': None,
                'WhJumpMass': None,
                'WhMassStatus': None,
                'WhTimeStatus': None,
                'WhToParentBubbled': None,
                'WhFromParentBubbled': None,
                'whID': None,
                'collapsed': False,
            })
        return system_dict
    @staticmethod
    def get_system_background(system):
        """
        Takes a MapSystem and returns the appropriate background icon
        as a relative URL or None.

        Raises ValueError for importance values other than 0, 1 or 2.
        """
        importance = system.system.importance
        if importance == 0:
            return None
        elif importance == 1:
            image = 'skull.png'
        elif importance == 2:
            image = 'mark.png'
        else:
            raise ValueError
        return "{0}images/{1}".format(settings.STATIC_URL, image)
    def get_systems_json(self):
        """Returns a JSON string representing the systems in a map.

        The computed system list is cached per map for 15 seconds; the
        current user's location icons are overlaid after the cache read so
        they are never shared between users.
        """
        cache_key = self.get_cache_key(self.map)
        systems = cache.get(cache_key)
        if systems is None:
            systems = self.create_syslist()
            cache.set(cache_key, systems, 15)
        user_locations_dict = cache.get('user_%s_locations' % self.user.pk)
        if user_locations_dict:
            user_img = "%s/images/mylocation.png" % (settings.STATIC_URL,)
            user_locations = [i[1][0] for i in user_locations_dict.items()]
            for system in systems:
                # Only mark a location when no higher-priority icon is set.
                if (system['sysID'] in user_locations and
                        system['iconImageURL'] is None):
                    system['iconImageURL'] = user_img
        return json.dumps(systems, sort_keys=True)
    def create_syslist(self):
        """
        Return list of system dictionaries with appropriate x/y levels
        for map display.
        """
        # maps system ids to child/parent system ids
        children = defaultdict(list)
        # maps system ids to objects
        systems = dict()
        # maps system ids to priorities
        priorities = dict()
        for system in (self.map.systems.all()
                       .select_related('system', 'parentsystem',
                                       'parent_wormhole')
                       .iterator()):
            children[system.parentsystem_id].append(system.pk)
            systems[system.pk] = system
            priorities[system.pk] = system.display_order_priority
        # sort children by priority
        for l in children.values():
            l.sort(key=priorities.__getitem__)
        # actual map layout generation
        layout_gen = LayoutGenerator(children)
        system_positions = layout_gen.get_layout()
        # generate list of system dictionaries for conversion to JSON
        syslist = []
        for sys_id in layout_gen.processed:
            sys_obj = systems[sys_id]
            x, y = system_positions[sys_id]
            syslist.append(self.system_to_dict(sys_obj, x, y))
        return syslist
def get_wormhole_type(system1, system2):
    """Gets the one-way wormhole types between system1 and system2.

    Maps system1's class to a WormholeType "source" code (K/H/NH or the
    numeric class string), then queries for types whose destination matches
    system2's class, falling back to generic "K" or wandering "W" sources
    when no exact match exists.
    """
    from Map.models import WormholeType
    source = "K"
    # Set the source and destination for system1 > system2
    # NOTE(review): `is_wspace` is referenced here without calling it,
    # while MapJSONGenerator.system_to_dict calls `is_wspace()` as a
    # method.  If it is a method on this type too, the condition is
    # always truthy -- confirm whether system1 exposes it as a property.
    if system1.is_wspace:
        source = str(system1.sysclass)
        if system1.sysclass == 7:
            source = "H"
        if system1.sysclass in [8, 9, 10, 11]:
            source = "NH"
    destination = system2.sysclass
    sourcewh = None
    # Highsec: prefer H-sourced holes, fall back to generic known-space K.
    if source == "H":
        if WormholeType.objects.filter(
                source="H", destination=destination).count() == 0:
            sourcewh = WormholeType.objects.filter(
                source="K", destination=destination).all()
        else:
            sourcewh = WormholeType.objects.filter(
                source="H", destination=destination).all()
    # Null/low: prefer NH-sourced holes, fall back to K.
    if source == "NH":
        if WormholeType.objects.filter(
                source="NH", destination=destination).count() == 0:
            sourcewh = WormholeType.objects.filter(
                source="K", destination=destination).all()
        else:
            sourcewh = WormholeType.objects.filter(
                source="NH", destination=destination).all()
    # Class 5/6 w-space: include Z-sourced plus wandering (W) holes.
    if source == "5" or source == "6":
        if WormholeType.objects.filter(
                source="Z", destination=destination).count() != 0:
            sourcewh = (WormholeType.objects
                        .filter(Q(source="Z") | Q(source='W'))
                        .filter(destination=destination).all())
    # Default: match on the literal source code, plus wandering holes.
    if sourcewh is None:
        sourcewh = (WormholeType.objects
                    .filter(Q(source=source) | Q(source='W'))
                    .filter(destination=destination).all())
    return sourcewh
def get_possible_wh_types(system1, system2):
    """Takes two systems and gets the possible wormhole types between them.

    For example, given system1 as highsec and system2 as C2, it should
    return R943 and B274.  system1 is the source and system2 is the
    destination.  Results are returned as querysets because some
    combinations have multiple possibilities.

    Returns a dict in the format {system1: [R943,], system2: [B274,]}.
    """
    return {
        # system1 > system2 and the reverse direction, respectively.
        'system1': get_wormhole_type(system1, system2),
        'system2': get_wormhole_type(system2, system1),
    }
def convert_signature_id(sigid):
    """Standardize a signature ID to the canonical XXX-XXX form.

    Spaces and dashes are stripped and the remainder upper-cased; a
    six-character core is re-hyphenated as XXX-XXX.  Any other length is
    returned as the original string, upper-cased but otherwise untouched.
    """
    core = sigid.replace(' ', '').replace('-', '').upper()
    if len(core) != 6:
        return sigid.upper()
    return "%s-%s" % (core[:3], core[3:])
class RouteFinder(object):
    """Provides methods for finding distances between systems.

    Has methods for getting the shortest stargate jump route length,
    the light-year distance, and the shortest stargate route
    as a list of KSystem objects.

    The stargate graph is built once from the SystemJump table and kept
    pickled in the Django cache under 'route_graph' (timeout 0 = forever).
    """
    def __init__(self):
        from django.core.cache import cache
        if not cache.get('route_graph'):
            # First use on this deployment: build and cache the graph.
            self._cache_graph()
        else:
            # cPickle is Python 2 only; the cached value is a pickled
            # networkx Graph.
            import cPickle
            self.graph = cPickle.loads(cache.get('route_graph'))
    @staticmethod
    def _get_ly_distance(sys1, sys2):
        """
        Gets the distance in light years between two systems.

        Straight-line Euclidean distance on the systems' x/y/z
        coordinates, divided by the number of meters in a light year.
        """
        x1 = sys1.x
        y1 = sys1.y
        z1 = sys1.z
        x2 = sys2.x
        y2 = sys2.y
        z2 = sys2.z
        # 9.4605284e+15 is one light year in meters.
        distance = sqrt(pow(x1 - x2, 2) +
                        pow(y1 - y2, 2) +
                        pow(z1 - z2, 2)) / 9.4605284e+15
        return distance
    def ly_distance(self, sys1, sys2):
        # Public wrapper around the static distance helper.
        return self._get_ly_distance(sys1, sys2)
    def route_as_ids(self, sys1, sys2):
        # Route as a list of system primary keys.
        return self._find_route(sys1, sys2)
    def route(self, sys1, sys2):
        # Route as KSystem objects; one query per hop.
        from Map.models import KSystem
        return [KSystem.objects.get(pk=sysid)
                for sysid in self._find_route(sys1, sys2)]
    def route_length(self, sys1, sys2):
        # Number of systems on the route, including both endpoints.
        return len(self._find_route(sys1, sys2))
    def _cache_graph(self):
        """Build the stargate graph from SystemJump rows and cache it."""
        from Map.models import KSystem
        from core.models import SystemJump
        from django.core.cache import cache
        import cPickle
        import networkx as nx
        if not cache.get('route_graph'):
            graph = nx.Graph()
            for from_system in KSystem.objects.all():
                for to_system in (SystemJump.objects
                                  .filter(fromsystem=from_system.pk)):
                    graph.add_edge(from_system.pk, to_system.tosystem)
            # Timeout 0: cache the pickled graph without expiry.
            cache.set('route_graph',
                      cPickle.dumps(graph, cPickle.HIGHEST_PROTOCOL), 0)
            self.graph = graph
    def _find_route(self, sys1, sys2):
        """
        Takes two system objects (can be KSystem or SystemData).
        Returns a list of system IDs that comprise the route.
        """
        import networkx as nx
        import cPickle
        # NOTE(review): an *empty* nx.Graph is also falsy, so this branch
        # re-loads/rebuilds in that case too -- confirm that is intended.
        if not self.graph:
            from django.core.cache import cache
            if not cache.get('route_graph'):
                self._cache_graph()
                self.graph = cPickle.loads(cache.get('route_graph'))
            else:
                self.graph = cPickle.loads(cache.get('route_graph'))
        return nx.shortest_path(self.graph, source=sys1.pk, target=sys2.pk)
class LayoutGenerator(object):
    """Computes x/y grid positions for the map's tree of systems."""
    def __init__(self, children):
        """Create new LayoutGenerator.

        children maps a system id (with None keying the root slot) to the
        ordered list of that system's child ids; it is expected to behave
        like a defaultdict(list) for missing keys.
        """
        self.children = children
        self.positions = None
        # Deepest occupied y per column; -1 marks an empty column.
        self.occupied = [-1]
        # Filled during layout: system ids in drawing order.
        self.processed = []
    def get_layout(self):
        """Create map layout.

        Returns a dictionary mapping each system id to its (x, y)
        position; the result is computed once and reused thereafter.
        """
        if self.positions is None:
            self.positions = {}
            self._place_node(self.children[None][0], 0, 0)
        return self.positions
    def _place_node(self, node_id, x, min_y):
        """Determine x, y position for a node.

        node_id: id of the node to be positioned
        x: x position (depth/column) of the node
        min_y: minimal y position of the node
               (can't be above parent nodes)

        Returns the node's y offset relative to min_y.
        """
        self.processed.append(node_id)
        # First visit to this depth: extend the per-column bookkeeping.
        if x >= len(self.occupied):
            self.occupied.append(-1)
        # Start at the first free row in this column, never above min_y.
        y = max(min_y, self.occupied[x] + 1)
        kids = self.children[node_id]
        if kids:
            # Laying out the first child may push this node further down.
            y += self._place_node(kids[0], x + 1, y)
        # The position is now final; record it.
        self.occupied[x] = y
        self.positions[node_id] = (x, y)
        # Remaining children hang below, starting at this node's row.
        for kid in kids[1:]:
            self._place_node(kid, x + 1, y)
        return y - min_y
| {
"content_hash": "28f7cebf49640cf28d4af4f9c7cccbb2",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 167,
"avg_line_length": 35.11479028697572,
"alnum_prop": 0.5754699189036273,
"repo_name": "nyrocron/eve-wspace",
"id": "a6205caab6d36a72b57630c0f1ac6b4804b98e95",
"size": "16537",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "evewspace/Map/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45196"
},
{
"name": "HTML",
"bytes": "152723"
},
{
"name": "JavaScript",
"bytes": "88653"
},
{
"name": "Nginx",
"bytes": "109"
},
{
"name": "Puppet",
"bytes": "6781"
},
{
"name": "Python",
"bytes": "1138095"
},
{
"name": "Shell",
"bytes": "2632"
}
],
"symlink_target": ""
} |
__all__ = ['Emitter', 'EmitterError']
from error import YAMLError
from events import *
class EmitterError(YAMLError):
    """Emitter-specific error type; subclasses the library-wide YAMLError."""
    pass
class ScalarAnalysis(object):
    """Plain record of facts about a scalar value and the output styles
    (plain/quoted/block) it may legally be emitted in."""
    def __init__(self, scalar, empty, multiline,
                 allow_flow_plain, allow_block_plain,
                 allow_single_quoted, allow_double_quoted,
                 allow_block):
        # The value itself and its basic structure.
        self.scalar, self.empty, self.multiline = scalar, empty, multiline
        # Which emission styles are permitted for this scalar.
        self.allow_flow_plain = allow_flow_plain
        self.allow_block_plain = allow_block_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block
class Emitter(object):
DEFAULT_TAG_PREFIXES = {
u'!' : u'!',
u'tag:yaml.org,2002:' : u'!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
# The stream should have the methods `write` and possibly `flush`.
self.stream = stream
# Encoding can be overridden by STREAM-START.
self.encoding = None
# Emitter is a state machine with a stack of states to handle nested
# structures.
self.states = []
self.state = self.expect_stream_start
# Current event and the event queue.
self.events = []
self.event = None
# The current indentation level and the stack of previous indents.
self.indents = []
self.indent = None
# Flow level.
self.flow_level = 0
# Contexts.
self.root_context = False
self.sequence_context = False
self.mapping_context = False
self.simple_key_context = False
# Characteristics of the last emitted character:
# - current position.
# - is it a whitespace?
# - is it an indention character
# (indentation space, '-', '?', or ':')?
self.line = 0
self.column = 0
self.whitespace = True
self.indention = True
# Whether the document requires an explicit document indicator
self.open_ended = False
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
self.best_indent = 2
if indent and 1 < indent < 10:
self.best_indent = indent
self.best_width = 80
if width and width > self.best_indent*2:
self.best_width = width
self.best_line_break = u'\n'
if line_break in [u'\r', u'\n', u'\r\n']:
self.best_line_break = line_break
# Tag prefixes.
self.tag_prefixes = None
# Prepared anchor and tag.
self.prepared_anchor = None
self.prepared_tag = None
# Scalar analysis and style.
self.analysis = None
self.style = None
def dispose(self):
    """Break the state machine's self-references (each queued state is
    a bound method holding the emitter) so the object can be freed."""
    self.state = None
    self.states = []
def emit(self, event):
    """Queue `event` and run the state machine for every buffered
    event that has enough lookahead available."""
    self.events.append(event)
    while not self.need_more_events():
        # Each state callable consumes self.event and sets the next
        # state; self.event is cleared between events.
        self.event = self.events.pop(0)
        self.state()
        self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
    """Return True while fewer than `count` events beyond the current
    one are buffered and the node starting at events[0] is still open.
    """
    level = 0
    # Scan the queued events after events[0]; `level` tracks nesting
    # of open documents/collections. Dropping below zero means the
    # node that begins at events[0] is complete within the buffer, so
    # no further lookahead is required.
    for event in self.events[1:]:
        if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
            level += 1
        elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
            level -= 1
        elif isinstance(event, StreamEndEvent):
            level = -1
        if level < 0:
            return False
    return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
    """Push the current indent and descend one nesting level.

    flow: entering a flow collection (top level starts at best_indent
        instead of column 0).
    indentless: nested block context that must not indent further
        (e.g. a sequence directly inside a mapping value).
    """
    self.indents.append(self.indent)
    if self.indent is None:
        # Top level: flow context starts at best_indent, block at 0.
        self.indent = self.best_indent if flow else 0
    elif not indentless:
        self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
if self.event.encoding and not getattr(self.stream, 'encoding', None):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError("expected StreamStartEvent, but got %s"
% self.event)
def expect_nothing(self):
raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
if isinstance(self.event, DocumentStartEvent):
if (self.event.version or self.event.tags) and self.open_ended:
self.write_indicator(u'...', True)
self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
if self.event.tags:
handles = self.event.tags.keys()
handles.sort()
for handle in handles:
prefix = self.event.tags[handle]
self.tag_prefixes[prefix] = handle
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
implicit = (first and not self.event.explicit and not self.canonical
and not self.event.version and not self.event.tags
and not self.check_empty_document())
if not implicit:
self.write_indent()
self.write_indicator(u'---', True)
if self.canonical:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
if self.open_ended:
self.write_indicator(u'...', True)
self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
raise EmitterError("expected DocumentStartEvent, but got %s"
% self.event)
def expect_document_end(self):
if isinstance(self.event, DocumentEndEvent):
self.write_indent()
if self.event.explicit:
self.write_indicator(u'...', True)
self.write_indent()
self.flush_stream()
self.state = self.expect_document_start
else:
raise EmitterError("expected DocumentEndEvent, but got %s"
% self.event)
def expect_document_root(self):
self.states.append(self.expect_document_end)
self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
simple_key=False):
self.root_context = root
self.sequence_context = sequence
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
self.expect_alias()
elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
self.process_anchor(u'&')
self.process_tag()
if isinstance(self.event, ScalarEvent):
self.expect_scalar()
elif isinstance(self.event, SequenceStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_sequence():
self.expect_flow_sequence()
else:
self.expect_block_sequence()
elif isinstance(self.event, MappingStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_mapping():
self.expect_flow_mapping()
else:
self.expect_block_mapping()
else:
raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
if self.event.anchor is None:
raise EmitterError("anchor is not specified for alias")
self.process_anchor(u'*')
self.state = self.states.pop()
def expect_scalar(self):
self.increase_indent(flow=True)
self.process_scalar()
self.indent = self.indents.pop()
self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
self.write_indicator(u'[', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(u']', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(u',', False)
self.write_indent()
self.write_indicator(u']', False)
self.state = self.states.pop()
else:
self.write_indicator(u',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
self.write_indicator(u'{', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(u'}', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(u',', False)
self.write_indent()
self.write_indicator(u'}', False)
self.state = self.states.pop()
else:
self.write_indicator(u',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
self.write_indicator(u':', False)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
if self.canonical or self.column > self.best_width:
self.write_indent()
self.write_indicator(u':', True)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
indentless = (self.mapping_context and not self.indention)
self.increase_indent(flow=False, indentless=indentless)
self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
if not first and isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
self.write_indicator(u'-', True, indention=True)
self.states.append(self.expect_block_sequence_item)
self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
self.increase_indent(flow=False)
self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
if not first and isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
if self.check_simple_key():
self.states.append(self.expect_block_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator(u'?', True, indention=True)
self.states.append(self.expect_block_mapping_value)
self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
self.write_indicator(u':', False)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
def expect_block_mapping_value(self):
self.write_indent()
self.write_indicator(u':', True, indention=True)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
return (isinstance(self.event, SequenceStartEvent) and self.events
and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
return (isinstance(self.event, MappingStartEvent) and self.events
and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
    """Return True if the upcoming document is a single empty,
    untagged, unanchored plain scalar (so '---' may be omitted)."""
    if not isinstance(self.event, DocumentStartEvent) or not self.events:
        return False
    # Peek one event ahead: the document body must be exactly one
    # implicit empty scalar for the document to count as empty.
    event = self.events[0]
    return (isinstance(event, ScalarEvent) and event.anchor is None
            and event.tag is None and event.implicit and event.value == u'')
def check_simple_key(self):
length = 0
if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
and self.event.tag is not None:
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
if isinstance(self.event, ScalarEvent):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
return (length < 128 and (isinstance(self.event, AliasEvent)
or (isinstance(self.event, ScalarEvent)
and not self.analysis.empty and not self.analysis.multiline)
or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
if self.event.anchor is None:
self.prepared_anchor = None
return
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
if self.prepared_anchor:
self.write_indicator(indicator+self.prepared_anchor, True)
self.prepared_anchor = None
def process_tag(self):
tag = self.event.tag
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
if ((not self.canonical or tag is None) and
((self.style == '' and self.event.implicit[0])
or (self.style != '' and self.event.implicit[1]))):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
tag = u'!'
self.prepared_tag = None
else:
if (not self.canonical or tag is None) and self.event.implicit:
self.prepared_tag = None
return
if tag is None:
raise EmitterError("tag is not specified")
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
self.prepared_tag = None
def choose_scalar_style(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.event.style == '"' or self.canonical:
return '"'
if not self.event.style and self.event.implicit[0]:
if (not (self.simple_key_context and
(self.analysis.empty or self.analysis.multiline))
and (self.flow_level and self.analysis.allow_flow_plain
or (not self.flow_level and self.analysis.allow_block_plain))):
return ''
if self.event.style and self.event.style in '|>':
if (not self.flow_level and not self.simple_key_context
and self.analysis.allow_block):
return self.event.style
if not self.event.style or self.event.style == '\'':
if (self.analysis.allow_single_quoted and
not (self.simple_key_context and self.analysis.multiline)):
return '\''
return '"'
def process_scalar(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.style is None:
self.style = self.choose_scalar_style()
split = (not self.simple_key_context)
#if self.analysis.multiline and split \
# and (not self.style or self.style in '\'\"'):
# self.write_indent()
if self.style == '"':
self.write_double_quoted(self.analysis.scalar, split)
elif self.style == '\'':
self.write_single_quoted(self.analysis.scalar, split)
elif self.style == '>':
self.write_folded(self.analysis.scalar)
elif self.style == '|':
self.write_literal(self.analysis.scalar)
else:
self.write_plain(self.analysis.scalar, split)
self.analysis = None
self.style = None
# Analyzers.
def prepare_version(self, version):
    """Render a (major, minor) version pair as u'major.minor'.

    Only YAML 1.x can be emitted; any other major version raises
    EmitterError.
    """
    major, minor = version
    if major == 1:
        return u'%d.%d' % (major, minor)
    raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
def prepare_tag_handle(self, handle):
    """Validate a tag handle (u'!', u'!!' or u'!name!') and return it.

    A handle must be non-empty, start and end with '!', and contain
    only ASCII alphanumerics, '-' and '_' in between; anything else
    raises EmitterError.
    """
    if not handle:
        raise EmitterError("tag handle must not be empty")
    if handle[0] != u'!' or handle[-1] != u'!':
        raise EmitterError("tag handle must start and end with '!': %r"
                % (handle.encode('utf-8')))
    for ch in handle[1:-1]:
        valid = (u'a' <= ch <= u'z' or u'A' <= ch <= u'Z'
                 or u'0' <= ch <= u'9' or ch in u'-_')
        if not valid:
            raise EmitterError("invalid character %r in the tag handle: %r"
                    % (ch.encode('utf-8'), handle.encode('utf-8')))
    return handle
def prepare_tag_prefix(self, prefix):
if not prefix:
raise EmitterError("tag prefix must not be empty")
chunks = []
start = end = 0
if prefix[0] == u'!':
end = 1
while end < len(prefix):
ch = prefix[end]
if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(prefix[start:end])
return u''.join(chunks)
def prepare_tag(self, tag):
if not tag:
raise EmitterError("tag must not be empty")
if tag == u'!':
return tag
handle = None
suffix = tag
prefixes = self.tag_prefixes.keys()
prefixes.sort()
for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == u'!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix):]
chunks = []
start = end = 0
while end < len(suffix):
ch = suffix[end]
if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?:@&=+$,_.~*\'()[]' \
or (ch == u'!' and handle != u'!'):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(suffix[start:end])
suffix_text = u''.join(chunks)
if handle:
return u'%s%s' % (handle, suffix_text)
else:
return u'!<%s>' % suffix_text
def prepare_anchor(self, anchor):
    """Validate an anchor/alias name and return it unchanged.

    Anchors may only contain ASCII alphanumerics, '-' and '_'; an
    empty or otherwise invalid anchor raises EmitterError.
    """
    if not anchor:
        raise EmitterError("anchor must not be empty")
    for ch in anchor:
        valid = (u'a' <= ch <= u'z' or u'A' <= ch <= u'Z'
                 or u'0' <= ch <= u'9' or ch in u'-_')
        if not valid:
            raise EmitterError("invalid character %r in the anchor: %r"
                    % (ch.encode('utf-8'), anchor.encode('utf-8')))
    return anchor
def analyze_scalar(self, scalar):
# Empty scalar is a special case.
if not scalar:
return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
allow_flow_plain=False, allow_block_plain=True,
allow_single_quoted=True, allow_double_quoted=True,
allow_block=False)
# Indicators and special characters.
block_indicators = False
flow_indicators = False
line_breaks = False
special_characters = False
# Important whitespace combinations.
leading_space = False
leading_break = False
trailing_space = False
trailing_break = False
break_space = False
space_break = False
# Check document indicators.
if scalar.startswith(u'---') or scalar.startswith(u'...'):
block_indicators = True
flow_indicators = True
# First character or preceded by a whitespace.
preceeded_by_whitespace = True
# Last character or followed by a whitespace.
followed_by_whitespace = (len(scalar) == 1 or
scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
# The previous character is a space.
previous_space = False
# The previous character is a break.
previous_break = False
index = 0
while index < len(scalar):
ch = scalar[index]
# Check for indicators.
if index == 0:
# Leading indicators are special characters.
if ch in u'#,[]{}&*!|>\'\"%@`':
flow_indicators = True
block_indicators = True
if ch in u'?:':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == u'-' and followed_by_whitespace:
flow_indicators = True
block_indicators = True
else:
# Some indicators cannot appear within a scalar as well.
if ch in u',?[]{}':
flow_indicators = True
if ch == u':':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == u'#' and preceeded_by_whitespace:
flow_indicators = True
block_indicators = True
# Check for line breaks, special, and unicode characters.
if ch in u'\n\x85\u2028\u2029':
line_breaks = True
if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
unicode_characters = True
if not self.allow_unicode:
special_characters = True
else:
special_characters = True
# Detect important whitespace combinations.
if ch == u' ':
if index == 0:
leading_space = True
if index == len(scalar)-1:
trailing_space = True
if previous_break:
break_space = True
previous_space = True
previous_break = False
elif ch in u'\n\x85\u2028\u2029':
if index == 0:
leading_break = True
if index == len(scalar)-1:
trailing_break = True
if previous_space:
space_break = True
previous_space = False
previous_break = True
else:
previous_space = False
previous_break = False
# Prepare for the next character.
index += 1
preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
followed_by_whitespace = (index+1 >= len(scalar) or
scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
# Let's decide what styles are allowed.
allow_flow_plain = True
allow_block_plain = True
allow_single_quoted = True
allow_double_quoted = True
allow_block = True
# Leading and trailing whitespaces are bad for plain scalars.
if (leading_space or leading_break
or trailing_space or trailing_break):
allow_flow_plain = allow_block_plain = False
# We do not permit trailing spaces for block scalars.
if trailing_space:
allow_block = False
# Spaces at the beginning of a new line are only acceptable for block
# scalars.
if break_space:
allow_flow_plain = allow_block_plain = allow_single_quoted = False
# Spaces followed by breaks, as well as special character are only
# allowed for double quoted scalars.
if space_break or special_characters:
allow_flow_plain = allow_block_plain = \
allow_single_quoted = allow_block = False
# Although the plain scalar writer supports breaks, we never emit
# multiline plain scalars.
if line_breaks:
allow_flow_plain = allow_block_plain = False
# Flow indicators are forbidden for flow plain scalars.
if flow_indicators:
allow_flow_plain = False
# Block indicators are forbidden for block plain scalars.
if block_indicators:
allow_block_plain = False
return ScalarAnalysis(scalar=scalar,
empty=False, multiline=line_breaks,
allow_flow_plain=allow_flow_plain,
allow_block_plain=allow_block_plain,
allow_single_quoted=allow_single_quoted,
allow_double_quoted=allow_double_quoted,
allow_block=allow_block)
# Writers.
def flush_stream(self):
    """Flush the wrapped stream, if it exposes a flush() method."""
    stream = self.stream
    if hasattr(stream, 'flush'):
        stream.flush()
def write_stream_start(self):
    """Begin the output stream."""
    # Write BOM if needed. Only UTF-16 output gets a byte-order mark;
    # self.encoding is None when the stream encodes for itself.
    if self.encoding and self.encoding.startswith('utf-16'):
        self.stream.write(u'\uFEFF'.encode(self.encoding))
def write_stream_end(self):
self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
        whitespace=False, indention=False):
    """Write a syntax indicator (e.g. u'-', u':', u'[').

    A separating space is prepended when `need_whitespace` is set and
    the previously written character was not whitespace. `whitespace`
    and `indention` describe the character state after this write for
    the next call's benefit.
    """
    if need_whitespace and not self.whitespace:
        data = u' ' + indicator
    else:
        data = indicator
    self.column += len(data)
    self.whitespace = whitespace
    self.indention = self.indention and indention
    # Any emitted indicator closes an open-ended document.
    self.open_ended = False
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
def write_indent(self):
    """Break the line if required and pad with spaces up to the
    current indentation level."""
    target = self.indent or 0
    # A break is needed unless we are already at a fresh, correctly
    # indented position that ends in whitespace.
    needs_break = (not self.indention
                   or self.column > target
                   or (self.column == target and not self.whitespace))
    if needs_break:
        self.write_line_break()
    if self.column < target:
        self.whitespace = True
        padding = u' ' * (target - self.column)
        self.column = target
        if self.encoding:
            padding = padding.encode(self.encoding)
        self.stream.write(padding)
def write_line_break(self, data=None):
    """Emit a line break (the configured best_line_break unless `data`
    overrides it) and reset the column/whitespace bookkeeping."""
    if data is None:
        data = self.best_line_break
    self.line += 1
    self.column = 0
    self.whitespace = True
    self.indention = True
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
def write_version_directive(self, version_text):
data = u'%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
data = u'%%TAG %s %s' % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
def write_single_quoted(self, text, split=True):
self.write_indicator(u'\'', True)
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch is None or ch != u' ':
if start+1 == end and self.column > self.best_width and split \
and start != 0 and end != len(text):
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029':
if text[start] == u'\n':
self.write_line_break()
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch == u'\'':
data = u'\'\''
self.column += 2
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end + 1
if ch is not None:
spaces = (ch == u' ')
breaks = (ch in u'\n\x85\u2028\u2029')
end += 1
self.write_indicator(u'\'', False)
ESCAPE_REPLACEMENTS = {
u'\0': u'0',
u'\x07': u'a',
u'\x08': u'b',
u'\x09': u't',
u'\x0A': u'n',
u'\x0B': u'v',
u'\x0C': u'f',
u'\x0D': u'r',
u'\x1B': u'e',
u'\"': u'\"',
u'\\': u'\\',
u'\x85': u'N',
u'\xA0': u'_',
u'\u2028': u'L',
u'\u2029': u'P',
}
def write_double_quoted(self, text, split=True):
self.write_indicator(u'"', True)
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
or not (u'\x20' <= ch <= u'\x7E'
or (self.allow_unicode
and (u'\xA0' <= ch <= u'\uD7FF'
or u'\uE000' <= ch <= u'\uFFFD'))):
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
if ch in self.ESCAPE_REPLACEMENTS:
data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
elif ch <= u'\xFF':
data = u'\\x%02X' % ord(ch)
elif ch <= u'\uFFFF':
data = u'\\u%04X' % ord(ch)
else:
data = u'\\U%08X' % ord(ch)
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end+1
if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
and self.column+(end-start) > self.best_width and split:
data = text[start:end]+u'\\'
if start < end:
start = end
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_indent()
self.whitespace = False
self.indention = False
if text[start] == u' ':
data = u'\\'
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
end += 1
self.write_indicator(u'"', False)
def determine_block_hints(self, text):
    """Compute the header hints for a block scalar (after u'|'/u'>').

    The returned (possibly empty) string combines:
    - the explicit indentation indicator when the scalar starts with a
      space or break (the parser could not detect the content column);
    - u'-' (strip chomping) when there is no trailing break;
    - u'+' (keep chomping) when there are multiple trailing breaks.
    """
    hints = u''
    if text:
        if text[0] in u' \n\x85\u2028\u2029':
            # NOTE: `unicode` builtin — this module is Python 2 code.
            hints += unicode(self.best_indent)
        if text[-1] not in u'\n\x85\u2028\u2029':
            hints += u'-'
        elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
            hints += u'+'
    return hints
def write_folded(self, text):
hints = self.determine_block_hints(text)
self.write_indicator(u'>'+hints, True)
if hints[-1:] == u'+':
self.open_ended = True
self.write_line_break()
leading_space = True
spaces = False
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029':
if not leading_space and ch is not None and ch != u' ' \
and text[start] == u'\n':
self.write_line_break()
leading_space = (ch == u' ')
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
elif spaces:
if ch != u' ':
if start+1 == end and self.column > self.best_width:
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
else:
if ch is None or ch in u' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in u'\n\x85\u2028\u2029')
spaces = (ch == u' ')
end += 1
def write_literal(self, text):
hints = self.determine_block_hints(text)
self.write_indicator(u'|'+hints, True)
if hints[-1:] == u'+':
self.open_ended = True
self.write_line_break()
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029':
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
else:
if ch is None or ch in u'\n\x85\u2028\u2029':
data = text[start:end]
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in u'\n\x85\u2028\u2029')
end += 1
    def write_plain(self, text, split=True):
        # Emit `text` as a plain (unquoted) scalar; when `split` is true,
        # long lines may be broken at single spaces.
        if self.root_context:
            self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # separate from the previous token with a single space
            data = u' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        # text[start:end] is the current run of same-kind characters
        # (spaces, breaks, or regular text); flushed when the kind changes.
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # end of a run of spaces; possibly break the line here
                if ch != u' ':
                    if start+1 == end and self.column > self.best_width and split:
                        # single space past the preferred width: break
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # end of a run of line breaks
                if ch not in u'\n\x85\u2028\u2029':
                    if text[start] == u'\n':
                        # a lone '\n' would fold into a space on load; write
                        # an extra break so the content round-trips
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == u'\n':
                            self.write_line_break()
                        else:
                            # non-ASCII break character (NEL/LS/PS)
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                # end of a run of regular characters
                if ch is None or ch in u' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
            if ch is not None:
                spaces = (ch == u' ')
                breaks = (ch in u'\n\x85\u2028\u2029')
            end += 1
| {
"content_hash": "468c54073865f8aaf55e4f6cef8832aa",
"timestamp": "",
"source": "github",
"line_count": 1132,
"max_line_length": 85,
"avg_line_length": 37.99028268551237,
"alnum_prop": 0.5104290198814091,
"repo_name": "stitchfix/spinnaker",
"id": "c70771ac344a8d20b1caacb813d23363e2a17bc6",
"size": "43299",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pylib/yaml/emitter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7648"
},
{
"name": "Python",
"bytes": "735198"
},
{
"name": "Shell",
"bytes": "116671"
}
],
"symlink_target": ""
} |
'''
Kivy Language
=============
The Kivy language is a language dedicated to describing user interface and
interactions. You could compare this language to Qt's QML
(http://qt.nokia.com), but we included new concepts such as rule definitions
(which are somewhat akin to what you may know from CSS), templating and so on.
.. versionchanged:: 1.7.0
The Builder doesn't execute canvas expression in realtime anymore. It will
pack all the expressions that need to be executed first, and execute them
after dispatching input, and just before drawing the frame. If you want to
force the execution of canvas drawing, just call :meth:`Builder.sync`.
A experimental profiling tool of kv lang is also done, you can activate it
by setting the env `KIVY_PROFILE_LANG=1`. You will get an html file named
`builder_stats.html`.
Overview
--------
The language consists of several constructs that you can use:
Rules
A rule is similar to a CSS rule. A rule applies to specific widgets (or
classes thereof) in your widget tree and modifies them in a
certain way.
You can use rules to specify interactive behaviour or use them to add
graphical representations of the widgets they apply to.
You can target a specific class of widgets (similar to CSS'
concept of a *class*) by using the ``cls`` attribute (e.g.
``cls=MyTestWidget``).
A Root Widget
You can use the language to create your entire user interface.
A kv file must contain only one root widget at most.
Templates
*(introduced in version 1.0.5.)*
Templates will be used to populate parts of your application, such as a
list's content. If you want to design the look of an entry in a list
(icon on the left, text on the right), you will use a template
for that.
Syntax of a kv File
-------------------
.. highlight:: kv
A Kivy language file must have ``.kv`` as filename extension.
The content of the file must always start with the Kivy header, where `version`
must be replaced with the Kivy language version you're using. For now, use
1.0::
#:kivy `version`
# content here
The `content` can contain rule definitions, a root widget and templates::
# Syntax of a rule definition. Note that several Rules can share the same
# definition (as in CSS). Note the braces; They are part of the definition.
<Rule1,Rule2>:
# .. definitions ..
<Rule3>:
# .. definitions ..
# Syntax for creating a root widget
RootClassName:
# .. definitions ..
# Syntax for create a template
[TemplateName@BaseClass1,BaseClass2]:
# .. definitions ..
Regardless of whether it's a rule, root widget or template you're defining,
the definition should look like this::
# With the braces it's a rule; Without them it's a root widget.
<ClassName>:
prop1: value1
prop2: value2
canvas:
CanvasInstruction1:
canvasprop1: value1
CanvasInstruction2:
canvasprop2: value2
AnotherClass:
prop3: value1
Here `prop1` and `prop2` are the properties of `ClassName` and `prop3` is the
property of `AnotherClass`. If the widget doesn't have a property with
the given name, an :class:`~kivy.properties.ObjectProperty` will be
automatically created and added to the instance.
`AnotherClass` will be created and added as a child of the `ClassName`
instance.
- The indentation is important and must be consistent. The spacing must be a
multiple of the number of spaces used on the first indented line. Spaces
are encouraged; mixing tabs and spaces is not recommended.
- The value of a property must be given on a single line (for now at least).
- The `canvas` property is special: You can put graphics instructions in it
to create a graphical representation of the current class.
Here is a simple example of a kv file that contains a root widget::
#:kivy 1.0
Button:
text: 'Hello world'
.. versionchanged:: 1.7.0
The indentation is not limited to 4 spaces anymore. The spacing must be a
multiple of the number of spaces used on the first indented line.
Value Expressions and Reserved Keywords
---------------------------------------
When you specify a property's value, the value is evaluated as a python
expression. This expression can be static or dynamic, which means that
the value can use the values of other properties using reserved keywords.
self
The keyword self references the "current widget instance"::
Button:
text: 'My state is %s' % self.state
root
This keyword is available only in rule definitions, and represents the
root widget of the rule (the first instance of the rule)::
<Widget>:
custom: 'Hello world'
Button:
text: root.custom
app
This keyword always refers to your app instance, it's equivalent
to a call to :meth:`App.get_running_app` in python.::
Label:
text: app.name
args
This keyword is available in on_<action> callbacks. It refers to the
arguments passed to the callback.::
TextInput:
on_focus:
self.insert_text("I'm focused!") \
if args[1] else self.insert_text("I'm not focused.")
Furthermore, if a class definition contains an id, you can use it as a
keyword::
<Widget>:
Button:
id: btn1
Button:
text: 'The state of the other button is %s' % btn1.state
Please note that the `id` will not be available in the widget instance;
The `id` attribute will be not used.
Relation Between Values and Properties
--------------------------------------
When you use the Kivy language, you might notice that we do some work
behind the scenes to automatically make things work properly. You should
know that :doc:`api-kivy.properties` implement the *observer* software
design pattern: That means that you can bind your own function to be
called when the value of a property changes (i.e. you passively
`observe` the property for potential changes).
The Kivy language detects properties in your `value` expression and will
create callbacks to automatically update the property via your expression when
changes occur.
Here's a simple example that demonstrates this behaviour::
Button:
text: str(self.state)
In this example, the parser detects that `self.state` is a dynamic value (a
property). The :data:`~kivy.uix.button.Button.state` property of the button
can change at any moment (when the user touches it).
We now want this button to display its own state as text, even as the state
changes. To do this, we use the state property of the Button and use it in the
value expression for the button's `text` property, which controls what text is
displayed on the button (We also convert the state to a string representation).
Now, whenever the button state changes, the text property will be updated
automatically.
Remember: The value is a python expression! That means that you can do
something more interesting like::
Button:
text: 'Plop world' if self.state == 'normal' else 'Release me!'
The Button text changes with the state of the button. By default, the button
text will be 'Plop world', but when the button is being pressed, the text will
change to 'Release me!'.
Graphical Instructions
----------------------
The graphical instructions are a special part of the Kivy language. This
concerns the 'canvas' property definition::
Widget:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
size: self.size
pos: self.pos
All the classes added inside the canvas property must be derived from the
:class:`~kivy.graphics.Instruction` class. You cannot put any Widget class
inside the canvas property (as that would not make sense because a
widget is not a graphics instruction).
If you want to do theming, you'll have the same question as in CSS: You don't
know which rules have been executed before. In our case, the rules are executed
in processing order (i.e. top-down).
If you want to change how Buttons are rendered, you can create your own kv file
and put something like this::
<Button>:
canvas:
Color:
rgb: (1, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Rectangle:
pos: self.pos
size: self.texture_size
texture: self.texture
This will result in buttons having a red background, with the label in the
bottom left, in addition to all the preceding rules.
You can clear all the previous instructions by using the `Clear` command::
<Button>:
canvas:
Clear
Color:
rgb: (1, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Rectangle:
pos: self.pos
size: self.texture_size
texture: self.texture
Then, only your rules that follow the `Clear` command will be taken into
consideration.
.. _dynamic_classes:
Dynamic classes
---------------
Dynamic classes allow you to create new widgets on-the-fly, without any python
declaration in the first place. The syntax of the dynamic classes is similar to
the Rules, but you need to specify what are the bases classes you want to
subclasses.
The syntax look like:
.. code-block:: kv
# Simple inheritance
<NewWidget@Button>:
...
# Multiple inheritance
<NewWidget@Label,ButtonBehavior>:
...
The `@` character is used to separate the name from the classes you want to
subclass. The Python equivalent would have been:
.. code-block:: python
# Simple inheritance
class NewWidget(Button):
pass
# Multiple inheritance
class NewWidget(Label, ButtonBehavior):
pass
Any new properties, usually added in python code, should be declared first.
If the property doesn't exist in the dynamic classes, it will be automatically
created as an :class:`~kivy.properties.ObjectProperty`.
Let's illustrate the usage of these dynamic classes with an implementation of a
basic Image button. We could derive our class from Button; we just need
to add a property for the image filename:
.. code-block:: kv
<ImageButton@Button>:
source: None
Image:
source: root.source
pos: root.pos
size: root.size
# let's use the new classes in another rule:
<MainUI>:
BoxLayout:
ImageButton:
source: 'hello.png'
on_press: root.do_something()
ImageButton:
source: 'world.png'
on_press: root.do_something_else()
In Python you can create an instance of the dynamic class by:
.. code-block:: python
from kivy.factory import Factory
button_inst = Factory.ImageButton()
.. _template_usage:
Templates
---------
.. versionchanged:: 1.7.0
The template usage are now deprecated, please use Dynamic classes instead.
Syntax of template
~~~~~~~~~~~~~~~~~~
Using a template in Kivy requires 2 things :
    #. a context to pass to the template (available as ctx inside the template)
#. a kv definition of the template
Syntax of a template:
.. code-block:: kv
# With only one base class
[ClassName@BaseClass]:
# .. definitions ..
# With more than one base class
[ClassName@BaseClass1,BaseClass2]:
# .. definitions ..
For example, for a list, you'll need to create an entry with an image on
the left, and a label on the right. You can create a template to make
that definition easier to use.
So, we'll create a template that requires 2 entries in the context: an image
filename and a title:
.. code-block:: kv
[IconItem@BoxLayout]:
Image:
source: ctx.image
Label:
text: ctx.title
Then in Python, you can instantiate the template with:
.. code-block:: python
from kivy.lang import Builder
# create a template with hello world + an image
# the context values should be passed as kwargs to the Builder.template
# function
icon1 = Builder.template('IconItem', title='Hello world',
image='myimage.png')
# create a second template with another information
ctx = {'title': 'Another hello world',
'image': 'myimage2.png'}
icon2 = Builder.template('IconItem', **ctx)
# and use icon1 and icon2 as other widget.
Template example
~~~~~~~~~~~~~~~~
Most of the time, when you are creating a screen in kv lang, you have a lot of
repetition. In our example, we'll create a Toolbar, based on a BoxLayout, and
put several Images that will react to on_touch_down:
.. code-block:: kv
<MyToolbar>:
BoxLayout:
Image:
source: 'data/text.png'
size: self.texture_size
size_hint: None, None
on_touch_down: self.collide_point(*args[1].pos) and\
root.create_text()
Image:
source: 'data/image.png'
size: self.texture_size
size_hint: None, None
on_touch_down: self.collide_point(*args[1].pos) and\
root.create_image()
Image:
source: 'data/video.png'
size: self.texture_size
size_hint: None, None
on_touch_down: self.collide_point(*args[1].pos) and\
root.create_video()
We can see that the size and size_hint attribute are exactly the same.
More than that, the callback in on_touch_down and the image are changing.
Theses can be the variable part of the template that we can put into a context.
Let's try to create a template for the Image:
.. code-block:: kv
[ToolbarButton@Image]:
# This is the same as before
source: 'data/%s.png' % ctx.image
size: self.texture_size
size_hint: None, None
# Now, we are using the ctx for the variable part of the template
on_touch_down: self.collide_point(*args[1].pos) and self.callback()
The template can be used directly in the MyToolbar rule:
.. code-block:: kv
<MyToolbar>:
BoxLayout:
ToolbarButton:
image: 'text'
callback: root.create_text
ToolbarButton:
image: 'image'
callback: root.create_image
ToolbarButton:
image: 'video'
callback: root.create_video
That's all :)
Template limitations
~~~~~~~~~~~~~~~~~~~~
When you are creating a context:
#. you cannot use references other than "root":
.. code-block:: kv
<MyRule>:
Widget:
id: mywidget
value: 'bleh'
Template:
ctxkey: mywidget.value # << fail, this reference mywidget id
#. all the dynamic part will be not understood:
.. code-block:: kv
<MyRule>:
Template:
ctxkey: 'value 1' if root.prop1 else 'value2' # << even if
# root.prop1 is a property, the context will not update the
# context
Redefining a widget's style
---------------------------
Sometimes we would like to inherit from a widget in order to use its python
properties without also using its .kv defined style. For example, we would
like to inherit from a Label, but we would also like to define our own
canvas instructions instead of automatically using the canvas instructions
inherited from Label. We can achieve this by prepending a dash (-) before
the class name in the .kv style definition.
In myapp.py:
.. code-block:: python
class MyWidget(Label):
pass
and in my.kv:
.. code-block:: kv
<-MyWidget>:
canvas:
Color:
rgb: 1, 1, 1
Rectangle:
size: (32, 32)
MyWidget will now have a Color and Rectangle instruction in its canvas
without any of the instructions inherited from Label.
Lang Directives
---------------
You can use directive to control part of the lang files. Directive is done with
a comment line starting with:
.. code-block:: kv
#:<directivename> <options>
import <package>
~~~~~~~~~~~~~~~~
.. versionadded:: 1.0.5
Syntax:
.. code-block:: kv
#:import <alias> <package>
You can import a package by writing:
.. code-block:: kv
#:import os os
<Rule>:
Button:
text: os.getcwd()
Or more complex:
.. code-block:: kv
#:import ut kivy.utils
<Rule>:
canvas:
Color:
rgba: ut.get_random_color()
.. versionadded:: 1.0.7
You can directly import class from a module:
.. code-block:: kv
#: import Animation kivy.animation.Animation
<Rule>:
on_prop: Animation(x=.5).start(self)
set <key> <expr>
~~~~~~~~~~~~~~~~
.. versionadded:: 1.0.6
Syntax:
.. code-block:: kv
#:set <key> <expr>
Set a key that will be available anywhere in the kv. For example:
.. code-block:: kv
#:set my_color (.4, .3, .4)
#:set my_color_hl (.5, .4, .5)
<Rule>:
state: 'normal'
canvas:
Color:
rgb: my_color if self.state == 'normal' else my_color_hl
'''
# Public names exported by ``import *`` from this module.
__all__ = ('Builder', 'BuilderBase', 'BuilderException',
           'Parser', 'ParserException')
import codecs
import re
import sys
from re import sub, findall
from os import environ
from os.path import join
from copy import copy
from types import CodeType
from functools import partial
from collections import OrderedDict
from kivy.factory import Factory
from kivy.logger import Logger
from kivy.utils import QueryDict
from kivy.cache import Cache
from kivy import kivy_data_dir, require
from kivy.compat import PY2, iteritems, iterkeys
from kivy.context import register_context
import kivy.metrics as Metrics
# shortcut to the trace-level logger used throughout this module
trace = Logger.trace
# identifiers made available to every kv expression; filled below with the
# 'app' proxy and metric unit helpers, and extended by '#:set' / '#:import'
global_idmap = {}
# late import
Instruction = None
# register cache for creating new classtype (template)
Cache.register('kv.lang')
# precompile regexp expression
lang_str = re.compile('([\'"][^\'"]*[\'"])')  # quoted string literals
lang_key = re.compile('([a-zA-Z_]+)')  # bare identifiers
lang_keyvalue = re.compile('([a-zA-Z_][a-zA-Z0-9_.]*\.[a-zA-Z0-9_.]+)')  # dotted obj.prop references
lang_tr = re.compile('(_\()')  # calls to `_(` (conventionally translation)
# delayed calls are canvas expression triggered during a loop
_delayed_calls = []
# all the widget handlers, used to correctly unbind all the callbacks when the
# widget is deleted
_handlers = {}
class ProxyApp(object):
    # proxy app object
    # taken from http://code.activestate.com/recipes/496741-object-proxying/
    # Forwards every attribute access to the currently running App, resolving
    # it lazily so that `app` can be referenced in kv rules before the App
    # instance exists.
    __slots__ = ['_obj']
    def __init__(self):
        object.__init__(self)
        # the wrapped App instance; None until first access
        object.__setattr__(self, '_obj', None)
    def _ensure_app(self):
        # Resolve and cache the running App.  All reads/writes of `_obj` go
        # through object.__getattribute__/__setattr__ to bypass this class'
        # own forwarding overrides.
        app = object.__getattribute__(self, '_obj')
        if app is None:
            from kivy.app import App
            app = App.get_running_app()
            object.__setattr__(self, '_obj', app)
            # Clear cached application instance, when it stops
            app.bind(on_stop=lambda instance:
                     object.__setattr__(self, '_obj', None))
        return app
    def __getattribute__(self, name):
        # every attribute read is forwarded to the resolved App
        object.__getattribute__(self, '_ensure_app')()
        return getattr(object.__getattribute__(self, '_obj'), name)
    def __delattr__(self, name):
        object.__getattribute__(self, '_ensure_app')()
        delattr(object.__getattribute__(self, '_obj'), name)
    def __setattr__(self, name, value):
        object.__getattribute__(self, '_ensure_app')()
        setattr(object.__getattribute__(self, '_obj'), name, value)
    def __bool__(self):
        object.__getattribute__(self, '_ensure_app')()
        return bool(object.__getattribute__(self, '_obj'))
    def __str__(self):
        object.__getattribute__(self, '_ensure_app')()
        return str(object.__getattribute__(self, '_obj'))
    def __repr__(self):
        object.__getattribute__(self, '_ensure_app')()
        return repr(object.__getattribute__(self, '_obj'))
# default identifiers available in every kv expression: the application
# proxy and the metric unit conversion helpers
global_idmap['app'] = ProxyApp()
global_idmap['pt'] = Metrics.pt
global_idmap['inch'] = Metrics.inch
global_idmap['cm'] = Metrics.cm
global_idmap['mm'] = Metrics.mm
global_idmap['dp'] = Metrics.dp
global_idmap['sp'] = Metrics.sp
class ParserException(Exception):
    '''Exception raised when something wrong happened in a kv file.

    The message embeds a small annotated excerpt of the source around the
    offending line.
    '''

    def __init__(self, context, line, message):
        self.filename = context.filename or '<inline>'
        self.line = line
        source = context.sourcecode
        # show up to two lines of context on each side of the error
        first = max(0, line - 2)
        last = min(len(source), line + 3)
        excerpt = ['...']
        for idx in range(first, last):
            # mark the offending line with '>>'
            prefix = '>> ' if idx == line else '   '
            excerpt.append('%s%4d:%s' % (prefix, idx + 1, source[idx][1]))
        excerpt.append('...')
        message = 'Parser: File "%s", line %d:\n%s\n%s' % (
            self.filename, self.line + 1, '\n'.join(excerpt), message)
        super(ParserException, self).__init__(message)
class BuilderException(ParserException):
    '''Raised when the Builder fails to apply a rule to a widget.

    Carries the same annotated source excerpt as :class:`ParserException`.
    '''
    pass
class ParserRuleProperty(object):
    '''Represent a property inside a rule.

    Holds the raw value expression, its compiled form (`co_value`) and the
    list of dotted `obj.prop` keys that must be watched to keep the
    property up to date.
    '''
    __slots__ = ('ctx', 'line', 'name', 'value', 'co_value',
                 'watched_keys', 'mode', 'count')
    def __init__(self, ctx, line, name, value):
        super(ParserRuleProperty, self).__init__()
        #: Associated parser
        self.ctx = ctx
        #: Line of the rule
        self.line = line
        #: Name of the property
        self.name = name
        #: Value of the property
        self.value = value
        #: Compiled value (code object, or the evaluated constant)
        self.co_value = None
        #: Compilation mode: 'exec' for on_* handlers, 'eval' otherwise
        self.mode = None
        #: Watched keys (list of dotted-name components)
        self.watched_keys = None
        #: Stats
        self.count = 0
    def precompile(self):
        # Compile self.value once so that applying the rule later is cheap.
        name = self.name
        value = self.value
        # first, remove all the string from the value
        tmp = sub(lang_str, '', self.value)
        # detecting how to handle the value according to the key name
        mode = self.mode
        if self.mode is None:
            self.mode = mode = 'exec' if name[:3] == 'on_' else 'eval'
        if mode == 'eval':
            # if we don't detect any string/key in it, we can eval and give the
            # result
            if re.search(lang_key, tmp) is None:
                # constant expression: evaluate now and store the result
                self.co_value = eval(value)
                return
        # ok, we can compile.
        # prepend newlines so that runtime tracebacks point at the correct
        # kv source line
        value = '\n' * self.line + value
        self.co_value = compile(value, self.ctx.filename or '<string>', mode)
        # for exec mode, we don't need to watch any keys.
        if mode == 'exec':
            return
        # now, detect obj.prop
        # first, remove all the string from the value
        tmp = sub(lang_str, '', value)
        # detect key.value inside value, and split them
        wk = list(set(findall(lang_keyvalue, tmp)))
        if len(wk):
            self.watched_keys = [x.split('.') for x in wk]
        if findall(lang_tr, tmp):
            # the expression calls `_(` (conventionally the translation
            # function), so `_` itself must be watched as well
            if self.watched_keys:
                self.watched_keys += [['_']]
            else:
                self.watched_keys = [['_']]
    def __repr__(self):
        return '<ParserRuleProperty name=%r filename=%s:%d ' \
            'value=%r watched_keys=%r>' % (
            self.name, self.ctx.filename, self.line + 1,
            self.value, self.watched_keys)
class ParserRule(object):
    '''Represent a rule in terms of the Kivy internal language: one parsed
    node (widget rule, template, root widget or canvas section) together
    with its properties, handlers and children.
    '''
    __slots__ = ('ctx', 'line', 'name', 'children', 'id', 'properties',
                 'canvas_before', 'canvas_root', 'canvas_after',
                 'handlers', 'level', 'cache_marked', 'avoid_previous_rules')
    def __init__(self, ctx, line, name, level):
        super(ParserRule, self).__init__()
        #: Level of the rule in the kv
        self.level = level
        #: Associated parser
        self.ctx = ctx
        #: Line of the rule
        self.line = line
        #: Name of the rule
        self.name = name
        #: List of children to create
        self.children = []
        #: Id given to the rule
        self.id = None
        #: Properties associated to the rule
        self.properties = OrderedDict()
        #: Canvas normal
        self.canvas_root = None
        #: Canvas before
        self.canvas_before = None
        #: Canvas after
        self.canvas_after = None
        #: Handlers associated to the rule
        self.handlers = []
        #: Properties cache list: mark which class have already been checked
        self.cache_marked = []
        #: Indicate if any previous rules should be avoided.
        self.avoid_previous_rules = False
        # only top-level rules may be selectors (<...>, [...]) or the root
        if level == 0:
            self._detect_selectors()
        else:
            self._forbid_selectors()
    def precompile(self):
        # recursively precompile every property/handler expression of this
        # rule and of its children/canvas sections
        for x in self.properties.values():
            x.precompile()
        for x in self.handlers:
            x.precompile()
        for x in self.children:
            x.precompile()
        if self.canvas_before:
            self.canvas_before.precompile()
        if self.canvas_root:
            self.canvas_root.precompile()
        if self.canvas_after:
            self.canvas_after.precompile()
    def create_missing(self, widget):
        # check first if the widget class already been processed by this rule
        cls = widget.__class__
        if cls in self.cache_marked:
            return
        self.cache_marked.append(cls)
        # create a property on the widget for any rule property it lacks
        for name in self.properties:
            if not hasattr(widget, name):
                widget.create_property(name)
    def _forbid_selectors(self):
        # selector syntax ('<...>' rules, '[...]' templates) is only valid
        # at the top level of a kv file
        c = self.name[0]
        if c == '<' or c == '[':
            raise ParserException(
                self.ctx, self.line,
                'Selectors rules are allowed only at the first level')
    def _detect_selectors(self):
        c = self.name[0]
        if c == '<':
            self._build_rule()
        elif c == '[':
            self._build_template()
        else:
            # a plain name at the top level is the (single) root widget
            if self.ctx.root is not None:
                raise ParserException(
                    self.ctx, self.line,
                    'Only one root object is allowed by .kv')
            self.ctx.root = self
    def _build_rule(self):
        name = self.name
        if __debug__:
            trace('Builder: build rule for %s' % name)
        if name[0] != '<' or name[-1] != '>':
            raise ParserException(self.ctx, self.line,
                                  'Invalid rule (must be inside <>)')
        # if the very first name start with a -, avoid previous rules
        name = name[1:-1]
        if name[:1] == '-':
            self.avoid_previous_rules = True
            name = name[1:]
        # several comma-separated selectors may share one rule body: <A,B>
        rules = name.split(',')
        for rule in rules:
            crule = None
            if not len(rule):
                raise ParserException(self.ctx, self.line,
                                      'Empty rule detected')
            if '@' in rule:
                # new class creation ?
                # ensure the name is correctly written
                rule, baseclasses = rule.split('@', 1)
                if not re.match(lang_key, rule):
                    raise ParserException(self.ctx, self.line,
                                          'Invalid dynamic class name')
                # save the name in the dynamic classes dict.
                self.ctx.dynamic_classes[rule] = baseclasses
                crule = ParserSelectorName(rule)
            else:
                # classical selectors.
                if rule[0] == '.':
                    # '.name' selects by widget cls
                    crule = ParserSelectorClass(rule[1:])
                elif rule[0] == '#':
                    # '#name' selects by widget id
                    crule = ParserSelectorId(rule[1:])
                else:
                    crule = ParserSelectorName(rule)
            self.ctx.rules.append((crule, self))
    def _build_template(self):
        name = self.name
        if __debug__:
            trace('Builder: build template for %s' % name)
        if name[0] != '[' or name[-1] != ']':
            raise ParserException(self.ctx, self.line,
                                  'Invalid template (must be inside [])')
        item_content = name[1:-1]
        if not '@' in item_content:
            raise ParserException(self.ctx, self.line,
                                  'Invalid template name (missing @)')
        template_name, template_root_cls = item_content.split('@')
        self.ctx.templates.append((template_name, template_root_cls, self))
    def __repr__(self):
        return '<ParserRule name=%r>' % (self.name, )
class Parser(object):
    '''Create a Parser object to parse a Kivy language file or Kivy content.
    '''
    # property names that are allowed despite containing a '.'
    PROP_ALLOWED = ('canvas.before', 'canvas.after')
    # ordinals of the characters a class name may start with (A-Z)
    CLASS_RANGE = list(range(ord('A'), ord('Z') + 1))
    # ordinals of the characters allowed anywhere in a property name
    PROP_RANGE = (
        list(range(ord('A'), ord('Z') + 1)) +
        list(range(ord('a'), ord('z') + 1)) +
        list(range(ord('0'), ord('9') + 1)) + [ord('_')])
    __slots__ = ('rules', 'templates', 'root', 'sourcecode',
                 'directives', 'filename', 'dynamic_classes')
    def __init__(self, **kwargs):
        super(Parser, self).__init__()
        self.rules = []
        self.templates = []
        self.root = None
        self.sourcecode = []
        self.directives = []
        self.dynamic_classes = {}
        self.filename = kwargs.get('filename', None)
        content = kwargs.get('content', None)
        if content is None:
            raise ValueError('No content passed')
        self.parse(content)
    def execute_directives(self):
        # process the '#:' lines collected by strip_comments: 'kivy'
        # version requirement, '#:set' constants and '#:import' aliases
        for ln, cmd in self.directives:
            cmd = cmd.strip()
            if __debug__:
                trace('Parser: got directive <%s>' % cmd)
            if cmd[:5] == 'kivy ':
                version = cmd[5:].strip()
                if len(version.split('.')) == 2:
                    # accept '1.0' as shorthand for '1.0.0'
                    version += '.0'
                require(version)
            elif cmd[:4] == 'set ':
                try:
                    name, value = cmd[4:].strip().split(' ', 1)
                except:
                    Logger.exception('')
                    raise ParserException(self, ln, 'Invalid directive syntax')
                try:
                    value = eval(value)
                except:
                    Logger.exception('')
                    raise ParserException(self, ln, 'Invalid value')
                # '#:set' values become visible in every kv expression
                global_idmap[name] = value
            elif cmd[:7] == 'import ':
                package = cmd[7:].strip()
                l = package.split(' ')
                if len(l) != 2:
                    raise ParserException(self, ln, 'Invalid import syntax')
                alias, package = l
                try:
                    if package not in sys.modules:
                        try:
                            mod = __import__(package)
                        except ImportError:
                            # the last dotted component may be an attribute
                            # (class/function) of the parent module
                            mod = __import__('.'.join(package.split('.')[:-1]))
                        # resolve the whole thing
                        for part in package.split('.')[1:]:
                            mod = getattr(mod, part)
                    else:
                        mod = sys.modules[package]
                    global_idmap[alias] = mod
                except ImportError:
                    Logger.exception('')
                    raise ParserException(self, ln,
                                          'Unable to import package %r' %
                                          package)
            else:
                raise ParserException(self, ln, 'Unknown directive')
    def parse(self, content):
        '''Parse the contents of a Parser file and return a list
        of root objects.
        '''
        # Read and parse the lines of the file
        lines = content.splitlines()
        if not lines:
            return
        num_lines = len(lines)
        # work on (line_number, text) pairs so errors can be located
        lines = list(zip(list(range(num_lines)), lines))
        self.sourcecode = lines[:]
        if __debug__:
            trace('Parser: parsing %d lines' % num_lines)
        # Strip all comments
        self.strip_comments(lines)
        # Execute directives
        self.execute_directives()
        # Get object from the first level
        objects, remaining_lines = self.parse_level(0, lines)
        # Precompile rules tree
        for rule in objects:
            rule.precompile()
        # After parsing, there should be no remaining lines
        # or there's an error we did not catch earlier.
        if remaining_lines:
            ln, content = remaining_lines[0]
            raise ParserException(self, ln, 'Invalid data (not parsed)')
    def strip_comments(self, lines):
        '''Remove all comments from all lines in-place.
        Comments need to be on a single line and not at the end of a line.
        I.e., a comment line's first non-whitespace character must be a #.
        '''
        # extract directives
        for ln, line in lines[:]:
            stripped = line.strip()
            if stripped[:2] == '#:':
                # directive: remember it, then drop it like any comment
                self.directives.append((ln, stripped[2:]))
            if stripped[:1] == '#':
                lines.remove((ln, line))
            if not stripped:
                # blank lines are dropped as well
                lines.remove((ln, line))
    def parse_level(self, level, lines, spaces=0):
        '''Parse the current level (level * spaces) indentation.

        Returns (objects, remaining_lines): the rules found at this level
        and the lines belonging to an outer (lower-indent) level.
        '''
        indent = spaces * level if spaces > 0 else 0
        objects = []
        current_object = None
        current_property = None
        current_propobject = None
        i = 0
        while i < len(lines):
            line = lines[i]
            ln, content = line
            # Get the number of space
            tmp = content.lstrip(' \t')
            # Replace any tab with 4 spaces
            tmp = content[:len(content) - len(tmp)]
            # NOTE(review): the replacement below is a single space, though
            # the comment above says 4 spaces — confirm against upstream.
            tmp = tmp.replace('\t', ' ')
            # first indent designates the indentation
            if spaces == 0:
                spaces = len(tmp)
            count = len(tmp)
            if spaces > 0 and count % spaces != 0:
                raise ParserException(self, ln,
                                      'Invalid indentation, '
                                      'must be a multiple of '
                                      '%s spaces' % spaces)
            content = content.strip()
            rlevel = count // spaces if spaces > 0 else 0
            # Level finished
            if count < indent:
                return objects, lines[i - 1:]
            # Current level, create an object
            elif count == indent:
                x = content.split(':', 1)
                if not len(x[0]):
                    raise ParserException(self, ln, 'Identifier missing')
                if len(x) == 2 and len(x[1]):
                    raise ParserException(self, ln,
                                          'Invalid data after declaration')
                name = x[0]
                # if it's not a root rule, then we got some restriction
                # aka, a valid name, without point or everything else
                if count != 0:
                    if False in [ord(z) in Parser.PROP_RANGE for z in name]:
                        raise ParserException(self, ln, 'Invalid class name')
                current_object = ParserRule(self, ln, x[0], rlevel)
                current_property = None
                objects.append(current_object)
            # Next level, is it a property or an object ?
            elif count == indent + spaces:
                x = content.split(':', 1)
                if not len(x[0]):
                    raise ParserException(self, ln, 'Identifier missing')
                # It's a class, add to the current object as a children
                current_property = None
                name = x[0]
                if ord(name[0]) in Parser.CLASS_RANGE or name[0] == '+':
                    # capitalized (or '+'-prefixed) name: child widget rule,
                    # parsed recursively one level deeper
                    _objects, _lines = self.parse_level(
                        level + 1, lines[i:], spaces)
                    current_object.children = _objects
                    lines = _lines
                    i = 0
                # It's a property
                else:
                    if name not in Parser.PROP_ALLOWED:
                        if not all(ord(z) in Parser.PROP_RANGE for z in name):
                            raise ParserException(self, ln,
                                                  'Invalid property name')
                    if len(x) == 1:
                        raise ParserException(self, ln, 'Syntax error')
                    value = x[1].strip()
                    if name == 'id':
                        if len(value) <= 0:
                            raise ParserException(self, ln, 'Empty id')
                        if value in ('self', 'root'):
                            raise ParserException(
                                self, ln,
                                'Invalid id, cannot be "self" or "root"')
                        current_object.id = value
                    elif len(value):
                        # inline value: register as a property or handler
                        rule = ParserRuleProperty(self, ln, name, value)
                        if name[:3] == 'on_':
                            current_object.handlers.append(rule)
                        else:
                            current_object.properties[name] = rule
                    else:
                        # no value on this line: a multi-line value or a
                        # canvas section follows at a deeper indentation
                        current_property = name
                        current_propobject = None
            # Two more levels?
            elif count == indent + 2 * spaces:
                if current_property in (
                        'canvas', 'canvas.after', 'canvas.before'):
                    # canvas sections contain instruction rules, parsed as
                    # a nested level
                    _objects, _lines = self.parse_level(
                        level + 2, lines[i:], spaces)
                    rl = ParserRule(self, ln, current_property, rlevel)
                    rl.children = _objects
                    if current_property == 'canvas':
                        current_object.canvas_root = rl
                    elif current_property == 'canvas.before':
                        current_object.canvas_before = rl
                    else:
                        current_object.canvas_after = rl
                    current_property = None
                    lines = _lines
                    i = 0
                else:
                    # continuation of a multi-line property value
                    if current_propobject is None:
                        current_propobject = ParserRuleProperty(
                            self, ln, current_property, content)
                        if current_property[:3] == 'on_':
                            current_object.handlers.append(current_propobject)
                        else:
                            current_object.properties[current_property] = \
                                current_propobject
                    else:
                        current_propobject.value += '\n' + content
            # Too much indentation, invalid
            else:
                raise ParserException(self, ln,
                                      'Invalid indentation (too many levels)')
            # Check the next line
            i += 1
        return objects, []
def get_proxy(widget):
    '''Return the proxy reference of *widget* when it provides one,
    otherwise the widget itself (non-widget objects have no proxy_ref).'''
    missing = object()
    proxy = getattr(widget, 'proxy_ref', missing)
    return widget if proxy is missing else proxy
def custom_callback(__kvlang__, idmap, *largs, **kwargs):
    '''Execute the pre-compiled code object of an ``on_*`` kv rule.

    ``__kvlang__`` is the ParserRuleProperty carrying the compiled handler
    in ``co_value``; ``idmap`` is used as the exec globals so the handler
    sees ``self``, ``root``, ids, etc.  The positional event arguments are
    exposed to the handler as ``args``.  ``kwargs`` is accepted but unused.
    '''
    idmap['args'] = largs
    exec(__kvlang__.co_value, idmap)
def create_handler(iself, element, key, value, rule, idmap, delayed=False):
    '''Evaluate a kv property expression and bind it to its dependencies.

    Returns the initial value of ``value`` evaluated in ``idmap``; as a side
    effect, binds a re-evaluation callback to every watched key of ``rule``
    so that ``element.key`` is updated when a dependency changes.  When
    ``delayed`` is True the update is queued in ``_delayed_calls`` (flushed
    by ``BuilderBase.sync``) instead of being applied immediately.
    Bindings are recorded in the module-level ``_handlers`` dict keyed by
    ``iself.uid`` so they can be undone by ``BuilderBase.unbind_widget``.
    '''
    # NOTE(review): writing through locals() does not create a usable local
    # variable; presumably kept so the rule object shows up in this frame's
    # locals for tracebacks/debugging -- confirm before removing.
    locals()['__kvlang__'] = rule
    # create an handler
    uid = iself.uid
    if uid not in _handlers:
        _handlers[uid] = []
    # work on a private copy of the idmap so later mutations don't leak
    idmap = copy(idmap)
    idmap.update(global_idmap)
    idmap['self'] = iself.proxy_ref
    def call_fn(*args):
        # re-evaluate the expression and push the result onto the target
        if __debug__:
            trace('Builder: call_fn %s, key=%s, value=%r, %r' % (
                element, key, value, rule.value))
        rule.count += 1
        e_value = eval(value, idmap)
        if __debug__:
            trace('Builder: call_fn => value=%r' % (e_value, ))
        setattr(element, key, e_value)
    def delayed_call_fn(*args):
        # defer the update until BuilderBase.sync() flushes the queue
        _delayed_calls.append(call_fn)
    fn = delayed_call_fn if delayed else call_fn
    # bind every key.value
    if rule.watched_keys is not None:
        for k in rule.watched_keys:
            try:
                # walk the attribute chain (e.g. ['self', 'parent', 'x'])
                # down to the object owning the final attribute
                f = idmap[k[0]]
                for x in k[1:-1]:
                    f = getattr(f, x)
                if hasattr(f, 'bind'):
                    f.bind(**{k[-1]: fn})
                    # make sure _handlers doesn't keep widgets alive
                    _handlers[uid].append([get_proxy(f), k[-1], fn])
            except KeyError:
                continue
            except AttributeError:
                continue
    try:
        return eval(value, idmap)
    except Exception as e:
        raise BuilderException(rule.ctx, rule.line,
                               '{}: {}'.format(e.__class__.__name__, e))
class ParserSelector(object):
    '''Base class for kv rule selectors.

    Stores the selector key lower-cased; subclasses implement
    :meth:`match` to decide whether a widget is targeted by the rule.
    '''

    def __init__(self, key):
        self.key = key.lower()

    def match(self, widget):
        # Fix: `raise NotImplemented()` raised a TypeError because the
        # NotImplemented singleton is not callable -- the intended
        # exception is NotImplementedError.
        raise NotImplementedError()

    def __repr__(self):
        return '<%s key=%s>' % (self.__class__.__name__, self.key)
class ParserSelectorId(ParserSelector):
    '''Selector matching a widget whose `id` equals the selector key
    (comparison is case-insensitive; widgets without an id never match).'''

    def match(self, widget):
        widget_id = widget.id
        if widget_id:
            return widget_id.lower() == self.key
class ParserSelectorClass(ParserSelector):
    '''Selector matching a widget that carries the selector key in its
    `cls` list (kv "class" rules, e.g. ``<.mystyle>``).'''

    def match(self, widget):
        return self.key in widget.cls
class ParserSelectorName(ParserSelector):
    '''Selector matching a widget by class name, considering the whole
    ancestry up to (and including) `Widget`, but never `object`.'''

    # class-level cache: widget class -> list of lower-cased names of the
    # class and its relevant ancestors
    parents = {}

    def get_bases(self, cls):
        '''Yield the base classes of *cls*, depth-first, stopping before
        `object` and after `Widget`.'''
        for base in cls.__bases__:
            if base.__name__ == 'object':
                break
            yield base
            if base.__name__ == 'Widget':
                break
            for ancestor in self.get_bases(base):
                yield ancestor

    def match(self, widget):
        cache = ParserSelectorName.parents
        cls = widget.__class__
        if cls not in cache:
            names = [cls.__name__.lower()]
            names.extend(c.__name__.lower() for c in self.get_bases(cls))
            cache[cls] = names
        return self.key in cache[cls]
class BuilderBase(object):
    '''Builder is responsible for creating a :class:`Parser` for parsing a kv
    file, merging the results to its internal rules, templates, etc.

    By default, :class:`Builder` is the global Kivy instance used in widgets,
    that you can use to load other kv file in addition to the default one.
    '''

    # Class-level cache mapping (class, id, cls tuple) -> matching rules.
    # Fix: was declared as `_cache_match`, but match() and
    # _clear_matchcache() both use `_match_cache`, so calling match()
    # before any load raised AttributeError.
    _match_cache = {}

    def __init__(self):
        super(BuilderBase, self).__init__()
        self.files = []
        self.dynamic_classes = {}
        self.templates = {}
        self.rules = []
        # per-rule context used while a rule is being applied
        self.rulectx = {}

    def load_file(self, filename, **kwargs):
        '''Insert a file into the language builder.

        :parameters:
            `rulesonly`: bool, default to False
                If True, the Builder will raise an exception if you have a root
                widget inside the definition.
        '''
        if __debug__:
            trace('Builder: load file %s' % filename)
        with open(filename, 'r') as fd:
            kwargs['filename'] = filename
            data = fd.read()
            # remove bom ?
            if PY2:
                if data.startswith((codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)):
                    raise ValueError('Unsupported UTF16 for kv files.')
                if data.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
                    raise ValueError('Unsupported UTF32 for kv files.')
                if data.startswith(codecs.BOM_UTF8):
                    data = data[len(codecs.BOM_UTF8):]
            return self.load_string(data, **kwargs)

    def unload_file(self, filename):
        '''Unload all rules associated to a previously imported file.

        .. versionadded:: 1.0.8

        .. warning::

            This will not remove rule or template already applied/used on
            current widget. It will act only for the next widget creation or
            template invocation.
        '''
        # remove rules and templates
        self.rules = [x for x in self.rules if x[1].ctx.filename != filename]
        self._clear_matchcache()
        templates = {}
        for x, y in self.templates.items():
            if y[2] != filename:
                templates[x] = y
        self.templates = templates
        if filename in self.files:
            self.files.remove(filename)
        # unregister all the dynamic classes
        Factory.unregister_from_filename(filename)

    def load_string(self, string, **kwargs):
        '''Insert a string into the Language Builder

        :Parameters:
            `rulesonly`: bool, default to False
                If True, the Builder will raise an exception if you have a root
                widget inside the definition.
        '''
        kwargs.setdefault('rulesonly', False)
        self._current_filename = fn = kwargs.get('filename', None)
        # put a warning if a file is loaded multiple times
        if fn in self.files:
            Logger.warning(
                'Lang: The file {} is loaded multiples times, '
                'you might have unwanted behaviors.'.format(fn))
        try:
            # parse the string
            parser = Parser(content=string, filename=fn)
            # merge rules with our rules
            self.rules.extend(parser.rules)
            self._clear_matchcache()
            # add the template found by the parser into ours
            for name, cls, template in parser.templates:
                self.templates[name] = (cls, template, fn)
                Factory.register(name,
                                 cls=partial(self.template, name),
                                 is_template=True)
            # register all the dynamic classes
            for name, baseclasses in iteritems(parser.dynamic_classes):
                Factory.register(name, baseclasses=baseclasses, filename=fn)
            # create root object is exist
            if kwargs['rulesonly'] and parser.root:
                # Fix: the error message must carry the filename; the old
                # code looked up the 'rulesonly' flag instead and printed
                # "The file <True> ...".
                filename = kwargs.get('filename', '<string>')
                raise Exception('The file <%s> contain also non-rules '
                                'directives' % filename)
            # save the loaded files only if there is a root without
            # template/dynamic classes
            if fn and (parser.templates or
                       parser.dynamic_classes or parser.rules):
                self.files.append(fn)
            if parser.root:
                widget = Factory.get(parser.root.name)()
                self._apply_rule(widget, parser.root, parser.root)
                return widget
        finally:
            self._current_filename = None

    def template(self, *args, **ctx):
        '''Create a specialized template using a specific context.

        .. versionadded:: 1.0.5

        With template, you can construct custom widget from a kv lang
        definition by giving them a context. Check :ref:`Template usage
        <template_usage>`.
        '''
        # Prevent naming clash with whatever the user might be putting into the
        # ctx as key.
        name = args[0]
        if name not in self.templates:
            raise Exception('Unknown <%s> template name' % name)
        baseclasses, rule, fn = self.templates[name]
        key = '%s|%s' % (name, baseclasses)
        cls = Cache.get('kv.lang', key)
        if cls is None:
            # build (and cache) the mixed class from the '+'-separated bases
            rootwidgets = []
            for basecls in baseclasses.split('+'):
                rootwidgets.append(Factory.get(basecls))
            cls = type(name, tuple(rootwidgets), {})
            Cache.append('kv.lang', key, cls)
        widget = cls()
        # in previous versions, ``ctx`` is passed as is as ``template_ctx``
        # preventing widgets in it from be collected by the GC. This was
        # especially relevant to AccordionItem's title_template.
        proxy_ctx = {k: get_proxy(v) for k, v in ctx.items()}
        self._apply_rule(widget, rule, rule, template_ctx=proxy_ctx)
        return widget

    def apply(self, widget):
        '''Search all the rules that match the widget, and apply them.
        '''
        rules = self.match(widget)
        if __debug__:
            trace('Builder: Found %d rules for %s' % (len(rules), widget))
        if not rules:
            return
        for rule in rules:
            self._apply_rule(widget, rule, rule)

    def _clear_matchcache(self):
        BuilderBase._match_cache = {}

    def _apply_rule(self, widget, rule, rootrule, template_ctx=None):
        # widget: the current instanciated widget
        # rule: the current rule
        # rootrule: the current root rule (for children of a rule)

        # will collect reference to all the id in children
        assert(rule not in self.rulectx)
        self.rulectx[rule] = rctx = {
            'ids': {'root': widget.proxy_ref},
            'set': [], 'hdl': []}

        # extract the context of the rootrule (not rule!)
        assert(rootrule in self.rulectx)
        rctx = self.rulectx[rootrule]

        # if a template context is passed, put it as "ctx"
        if template_ctx is not None:
            rctx['ids']['ctx'] = QueryDict(template_ctx)

        # if we got an id, put it in the root rule for a later global usage
        if rule.id:
            # use only the first word as `id` discard the rest.
            rule.id = rule.id.split('#', 1)[0].strip()
            rctx['ids'][rule.id] = widget.proxy_ref
            # set id name as a attribute for root widget so one can in python
            # code simply access root_widget.id_name
            _ids = dict(rctx['ids'])
            _root = _ids.pop('root')
            _new_ids = _root.ids
            for _key in iterkeys(_ids):
                if _ids[_key] == _root:
                    # skip on self
                    continue
                _new_ids[_key] = _ids[_key]
            _root.ids = _new_ids

        # first, ensure that the widget have all the properties used in
        # the rule if not, they will be created as ObjectProperty.
        rule.create_missing(widget)

        # build the widget canvas
        if rule.canvas_before:
            with widget.canvas.before:
                self._build_canvas(widget.canvas.before, widget,
                                   rule.canvas_before, rootrule)
        if rule.canvas_root:
            with widget.canvas:
                self._build_canvas(widget.canvas, widget,
                                   rule.canvas_root, rootrule)
        if rule.canvas_after:
            with widget.canvas.after:
                self._build_canvas(widget.canvas.after, widget,
                                   rule.canvas_after, rootrule)

        # create children tree
        Factory_get = Factory.get
        Factory_is_template = Factory.is_template
        for crule in rule.children:
            cname = crule.name

            # depending if the child rule is a template or not, we are not
            # having the same approach
            cls = Factory_get(cname)

            if Factory_is_template(cname):
                # we got a template, so extract all the properties and
                # handlers, and push them in a "ctx" dictionnary.
                ctx = {}
                idmap = copy(global_idmap)
                idmap.update({'root': rctx['ids']['root']})
                if 'ctx' in rctx['ids']:
                    idmap.update({'ctx': rctx['ids']['ctx']})
                try:
                    for prule in crule.properties.values():
                        value = prule.co_value
                        if type(value) is CodeType:
                            value = eval(value, idmap)
                        ctx[prule.name] = value
                    for prule in crule.handlers:
                        value = eval(prule.value, idmap)
                        ctx[prule.name] = value
                except Exception as e:
                    raise BuilderException(prule.ctx, prule.line,
                                           '{}: {}'.format(e.__class__.__name__, e))

                # create the template with an explicit ctx
                child = cls(**ctx)
                widget.add_widget(child)

                # reference it on our root rule context
                if crule.id:
                    rctx['ids'][crule.id] = child

            else:
                # we got a "normal" rule, construct it manually
                # we can't construct it without __no_builder=True, because the
                # previous implementation was doing the add_widget() before
                # apply(), and so, we could use "self.parent".
                child = cls(__no_builder=True)
                widget.add_widget(child)
                self.apply(child)
                self._apply_rule(child, crule, rootrule)

        # append the properties and handlers to our final resolution task
        if rule.properties:
            rctx['set'].append((widget.proxy_ref,
                                list(rule.properties.values())))
        if rule.handlers:
            rctx['hdl'].append((widget.proxy_ref, rule.handlers))

        # if we are applying another rule that the root one, then it's done for
        # us!
        if rootrule is not rule:
            del self.rulectx[rule]
            return

        # normally, we can apply a list of properties with a proper context
        try:
            rule = None
            for widget_set, rules in reversed(rctx['set']):
                for rule in rules:
                    assert(isinstance(rule, ParserRuleProperty))
                    key = rule.name
                    value = rule.co_value
                    if type(value) is CodeType:
                        value = create_handler(widget_set, widget_set, key,
                                               value, rule, rctx['ids'])
                    setattr(widget_set, key, value)
        except Exception as e:
            if rule is not None:
                raise BuilderException(rule.ctx, rule.line,
                                       '{}: {}'.format(e.__class__.__name__, e))
            raise e

        # build handlers
        try:
            crule = None
            for widget_set, rules in rctx['hdl']:
                for crule in rules:
                    assert(isinstance(crule, ParserRuleProperty))
                    assert(crule.name.startswith('on_'))
                    key = crule.name
                    if not widget_set.is_event_type(key):
                        key = key[3:]
                    idmap = copy(global_idmap)
                    idmap.update(rctx['ids'])
                    idmap['self'] = widget_set.proxy_ref
                    widget_set.bind(**{key: partial(custom_callback,
                                                    crule, idmap)})
                    #hack for on_parent
                    if crule.name == 'on_parent':
                        Factory.Widget.parent.dispatch(widget_set.__self__)
        except Exception as e:
            if crule is not None:
                raise BuilderException(crule.ctx, crule.line,
                                       '{}: {}'.format(e.__class__.__name__, e))
            raise e

        # rule finished, forget it
        del self.rulectx[rootrule]

    def match(self, widget):
        '''Return a list of :class:`ParserRule` matching the widget.
        '''
        cache = BuilderBase._match_cache
        k = (widget.__class__, widget.id, tuple(widget.cls))
        if k in cache:
            return cache[k]
        rules = []
        for selector, rule in self.rules:
            if selector.match(widget):
                if rule.avoid_previous_rules:
                    del rules[:]
                rules.append(rule)
        cache[k] = rules
        return rules

    def sync(self):
        '''Execute all the waiting operations, such as the execution of all the
        expressions related to the canvas.

        .. versionadded:: 1.7.0
        '''
        l = set(_delayed_calls)
        del _delayed_calls[:]
        for func in l:
            try:
                func(None, None)
            except ReferenceError:
                continue

    def unbind_widget(self, uid):
        '''(internal) Unbind all the handlers created by the rules of the
        widget. The :data:`kivy.uix.widget.Widget.uid` is passed here instead of
        the widget itself, because we are using it in the widget destructor.

        .. versionadded:: 1.7.2
        '''
        if uid not in _handlers:
            return
        for f, k, fn in _handlers[uid]:
            try:
                f.unbind(**{k: fn})
            except ReferenceError:
                # proxy widget is already gone, that's cool :)
                pass
        del _handlers[uid]

    def _build_canvas(self, canvas, widget, rule, rootrule):
        global Instruction
        if Instruction is None:
            Instruction = Factory.get('Instruction')
        idmap = copy(self.rulectx[rootrule]['ids'])
        for crule in rule.children:
            name = crule.name
            if name == 'Clear':
                canvas.clear()
                continue
            instr = Factory.get(name)()
            if not isinstance(instr, Instruction):
                raise BuilderException(
                    crule.ctx, crule.line,
                    'You can add only graphics Instruction in canvas.')
            try:
                for prule in crule.properties.values():
                    key = prule.name
                    value = prule.co_value
                    if type(value) is CodeType:
                        value = create_handler(
                            widget, instr.proxy_ref,
                            key, value, prule, idmap, True)
                    setattr(instr, key, value)
            except Exception as e:
                raise BuilderException(prule.ctx, prule.line,
                                       '{}: {}'.format(e.__class__.__name__, e))
#: Main instance of a :class:`BuilderBase`.
Builder = register_context('Builder', BuilderBase)
# Load the default Kivy style rules; rulesonly=True forbids a root widget
# inside style.kv.
Builder.load_file(join(kivy_data_dir, 'style.kv'), rulesonly=True)
# Optional kv-language profiling: when the KIVY_PROFILE_LANG environment
# variable is set, an HTML report (builder_stats.html) is written at exit
# showing, for each line of each loaded kv file, how many times its
# property rules were evaluated.
if 'KIVY_PROFILE_LANG' in environ:
    import atexit
    import cgi

    def match_rule(fn, index, rule):
        # Yield every ParserRuleProperty of `rule` (searching children and
        # canvas sub-rules recursively) defined at line `index` of file `fn`.
        if rule.ctx.filename != fn:
            return
        for prop, prp in iteritems(rule.properties):
            if prp.line != index:
                continue
            yield prp
        for child in rule.children:
            for r in match_rule(fn, index, child):
                yield r
        if rule.canvas_root:
            for r in match_rule(fn, index, rule.canvas_root):
                yield r
        if rule.canvas_before:
            for r in match_rule(fn, index, rule.canvas_before):
                yield r
        if rule.canvas_after:
            for r in match_rule(fn, index, rule.canvas_after):
                yield r

    def dump_builder_stats():
        # Build the HTML report from the evaluation counters accumulated on
        # the rule properties, one table per loaded kv file.
        html = [
            '<!doctype html>'
            '<html><body>',
            '<style type="text/css">\n',
            'pre { margin: 0; }\n',
            '</style>']
        files = set([x[1].ctx.filename for x in Builder.rules])
        for fn in files:
            lines = open(fn).readlines()
            html += ['<h2>', fn, '</h2>', '<table>']
            count = 0
            for index, line in enumerate(lines):
                line = line.rstrip()
                line = cgi.escape(line)
                matched_prp = []
                for psn, rule in Builder.rules:
                    matched_prp += list(match_rule(fn, index, rule))
                # NOTE(review): summing over a set de-duplicates equal
                # counters from different properties on the same line --
                # presumably intentional, but confirm.
                count = sum(set([x.count for x in matched_prp]))
                color = (255, 155, 155) if count else (255, 255, 255)
                html += ['<tr style="background-color: rgb{}">'.format(color),
                         '<td>', str(index + 1), '</td>',
                         '<td>', str(count), '</td>',
                         '<td><pre>', line, '</pre></td>',
                         '</tr>']
            html += ['</table>']
        html += ['</body></html>']
        with open('builder_stats.html', 'w') as fd:
            fd.write(''.join(html))
        print('Profiling written at builder_stats.html')

    atexit.register(dump_builder_stats)
| {
"content_hash": "1ab1c1c2e918db40906eb86a97301001",
"timestamp": "",
"source": "github",
"line_count": 1808,
"max_line_length": 80,
"avg_line_length": 33.55641592920354,
"alnum_prop": 0.5462996538651722,
"repo_name": "wangjun/kivy",
"id": "23a9e24ad7174fd838fc1873cab8332c899d35b7",
"size": "60670",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kivy/lang.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1372"
},
{
"name": "C",
"bytes": "152467"
},
{
"name": "Emacs Lisp",
"bytes": "9603"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "Python",
"bytes": "2604470"
},
{
"name": "Shell",
"bytes": "4638"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
} |
""" Motion correction / motion correction with slice timing
Routines implementing motion correction and motion correction combined with
slice-timing.
See:
Roche, Alexis (2011) A four-dimensional registration algorithm with application
to joint correction of motion and slice timing in fMRI. *Medical Imaging, IEEE
Transactions on*; 30:1546--1554
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import warnings
import numpy as np
from nibabel import (Nifti1Image, io_orientation)
from nibabel.affines import apply_affine
from .externals.six import string_types
from .slicetiming import timefuncs
from .type_check import (check_type, check_type_and_shape)
from .optimizer import configure_optimizer, use_derivatives
from .affine import Rigid, Affine
from ._register import (_cspline_transform,
_cspline_sample3d,
_cspline_sample4d)
# Enable debug printing when the NIREG_DEBUG_PRINT env variable is set
VERBOSE = os.environ.get('NIREG_DEBUG_PRINT', False)
# Placeholder for an interleaved slice-order flag (unused in this module)
INTERLEAVED = None
# Default optimizer tolerances and limits
XTOL = 1e-5       # parameter change tolerance
FTOL = 1e-5       # objective change tolerance
GTOL = 1e-5       # gradient norm tolerance
STEPSIZE = 1e-6   # finite-difference step for numerical derivatives
SMALL = 1e-20     # floor to avoid division by a zero variance
MAXITER = 64      # max optimizer iterations
MAXFUN = None     # max function evaluations (None = optimizer default)
def interp_slice_times(Z, slice_times, tr):
    """Linearly interpolate slice acquisition times at slice indices `Z`.

    `Z` may be fractional and may exceed the number of slices; the slice
    timing pattern is assumed to repeat with period `tr`.
    """
    z = np.asarray(Z)
    nslices = len(slice_times)
    # wrap-around lookup table: first slice time shifted by one TR appended
    table = np.asarray(list(slice_times) + [slice_times[0] + tr])
    lower = np.floor(z).astype('int')
    frac = z - lower
    base = lower % nslices
    times = (1 - frac) * table[base] + frac * table[base + 1]
    # add back the whole slice-cycles stripped by the modulo
    return times + (lower - base)
def scanner_coords(xyz, affine, from_world, to_world):
    """Map grid coordinates `xyz` through to_world -> affine -> from_world
    and return the transformed X, Y and Z coordinate arrays."""
    composed = np.dot(from_world, np.dot(affine, to_world))
    mapped = apply_affine(composed, xyz)
    return mapped[:, 0], mapped[:, 1], mapped[:, 2]
def make_grid(dims, subsampling=(1, 1, 1), borders=(0, 0, 0)):
    """Return an (npoints, 3) array of voxel coordinates covering `dims`,
    optionally subsampled per axis and with border voxels stripped."""
    slices = [slice(border, dim - border, step)
              for dim, step, border in zip(dims, subsampling, borders)]
    grid = np.rollaxis(np.mgrid[slices], 0, 4)
    return np.reshape(grid, [np.prod(grid.shape[0:-1]), 3])
def guess_slice_axis_and_direction(slice_info, affine):
    """Return the (slice_axis, slice_direction) pair.

    Taken directly from `slice_info` when provided; otherwise inferred
    from the image `affine` via its voxel-to-world orientation.
    """
    if slice_info is not None:
        return int(slice_info[0]), int(slice_info[1])
    orient = io_orientation(affine)
    axis = int(np.where(orient[:, 0] == 2)[0])
    return axis, int(orient[axis, 1])
class Image4d(object):
    """
    Class to represent a sequence of 3d scans (possibly acquired on a
    slice-by-slice basis).

    Object remains empty until the data array is actually loaded in memory.

    Parameters
    ----------
    data : nd array or proxy (function that actually gets the array)
    """
    def __init__(self, data, affine, tr, slice_times, slice_info=None):
        """
        Configure fMRI acquisition time parameters.
        """
        self.affine = np.asarray(affine)
        self.tr = float(tr)
        # guess the slice axis and direction (z-axis)
        self.slice_axis, self.slice_direction =\
            guess_slice_axis_and_direction(slice_info, self.affine)
        # unformatted parameters
        self._slice_times = slice_times
        if isinstance(data, np.ndarray):
            # array supplied directly: cache it and finish initialization now
            self._data = data
            self._shape = data.shape
            self._get_data = None
            self._init_timing_parameters()
        else:
            # callable proxy: defer loading until first access
            self._data = None
            self._shape = None
            self._get_data = data
    def _load_data(self):
        # Resolve the proxy, cache the array/shape and finish timing setup
        self._data = self._get_data()
        self._shape = self._data.shape
        self._init_timing_parameters()
    def get_data(self):
        # Lazy accessor: load through the proxy on first use
        if self._data is None:
            self._load_data()
        return self._data
    def get_shape(self):
        # Lazy accessor mirroring get_data()
        if self._shape is None:
            self._load_data()
        return self._shape
    def _init_timing_parameters(self):
        # Number of slices
        nslices = self.get_shape()[self.slice_axis]
        self.nslices = nslices
        # Set slice times
        if isinstance(self._slice_times, (int, float)):
            # If a single value is provided, assume synchronous slices
            self.slice_times = np.zeros(nslices)
            self.slice_times.fill(self._slice_times)
        else:
            # Verify correctness of provided slice times
            if not len(self._slice_times) == nslices:
                raise ValueError(
                    "Incorrect slice times were provided. There are %d "
                    "slices in the volume, `slice_times` argument has length %d"
                    % (nslices, len(self._slice_times)))
            self.slice_times = np.asarray(self._slice_times)
        # Check that slice times are smaller than repetition time
        if np.max(self.slice_times) > self.tr:
            raise ValueError("slice times should be smaller than repetition time")
    def z_to_slice(self, z):
        """
        Account for the fact that slices may be stored in reverse
        order wrt the scanner coordinate system convention (slice 0 ==
        bottom of the head)
        """
        if self.slice_direction < 0:
            return self.nslices - 1 - z
        else:
            return z
    def scanner_time(self, zv, t):
        """
        tv = scanner_time(zv, t)
        zv, tv are grid coordinates; t is an actual time value.
        """
        corr = interp_slice_times(self.z_to_slice(zv),
                                  self.slice_times,
                                  self.tr)
        return (t - corr) / self.tr
    def free_data(self):
        # Drop the cached array only when it can be re-loaded via the proxy
        if self._get_data is not None:
            self._data = None
class Realign4dAlgorithm(object):
    '''Engine estimating within-run motion (optionally combined with slice
    timing correction) for an :class:`Image4d` sequence.

    One spatial transform per scan is optimized by minimizing the
    log-ratio between the mean temporal variance and the global
    spatio-temporal variance of the resampled sequence.
    '''

    def __init__(self,
                 im4d,
                 affine_class=Rigid,
                 transforms=None,
                 time_interp=True,
                 subsampling=(1, 1, 1),
                 refscan=0,
                 borders=(1, 1, 1),
                 optimizer='ncg',
                 optimize_template=True,
                 xtol=XTOL,
                 ftol=FTOL,
                 gtol=GTOL,
                 stepsize=STEPSIZE,
                 maxiter=MAXITER,
                 maxfun=MAXFUN):
        # Check arguments
        check_type_and_shape(subsampling, int, 3)
        check_type(refscan, int, accept_none=True)
        check_type_and_shape(borders, int, 3)
        check_type(xtol, float)
        check_type(ftol, float)
        check_type(gtol, float)
        check_type(stepsize, float)
        check_type(maxiter, int)
        check_type(maxfun, int, accept_none=True)
        # Get dimensional parameters
        self.dims = im4d.get_shape()
        self.nscans = self.dims[3]
        # Reduce borders if spatial image dimension too small to avoid
        # getting an empty volume of interest.  Fix: use floor division so
        # the borders stay integral under Python 3 ('/' yields floats and
        # would produce float-valued sampling grids).
        borders = [min(b, d // 2 - (not d % 2))
                   for (b, d) in zip(borders, self.dims[0:3])]
        self.xyz = make_grid(self.dims[0:3], subsampling, borders)
        masksize = self.xyz.shape[0]
        self.data = np.zeros([masksize, self.nscans], dtype='double')
        # Initialize space/time transformation parameters
        self.affine = im4d.affine
        self.inv_affine = np.linalg.inv(self.affine)
        if transforms is None:
            self.transforms = [affine_class() for scan in range(self.nscans)]
        else:
            self.transforms = transforms
        # Compute the 4d cubic spline transform
        self.time_interp = time_interp
        if time_interp:
            self.timestamps = im4d.tr * np.arange(self.nscans)
            self.scanner_time = im4d.scanner_time
            self.cbspline = _cspline_transform(im4d.get_data())
        else:
            self.cbspline = np.zeros(self.dims, dtype='double')
            for t in range(self.dims[3]):
                self.cbspline[:, :, :, t] =\
                    _cspline_transform(im4d.get_data()[:, :, :, t])
        # The reference scan conventionally defines the head
        # coordinate system
        self.optimize_template = optimize_template
        if not optimize_template and refscan is None:
            self.refscan = 0
        else:
            self.refscan = refscan
        # Set the minimization method
        self.set_fmin(optimizer, stepsize,
                      xtol=xtol,
                      ftol=ftol,
                      gtol=gtol,
                      maxiter=maxiter,
                      maxfun=maxfun)
        # Auxiliary arrays for realignment estimation
        self._res = np.zeros(masksize, dtype='double')
        self._res0 = np.zeros(masksize, dtype='double')
        self._aux = np.zeros(masksize, dtype='double')
        self.A = np.zeros((masksize, self.transforms[0].param.size),
                          dtype='double')
        self._pc = None

    def resample(self, t):
        """
        Resample a particular time frame on the (sub-sampled) working
        grid.

        x,y,z,t are "head" grid coordinates
        X,Y,Z,T are "scanner" grid coordinates
        """
        X, Y, Z = scanner_coords(self.xyz, self.transforms[t].as_affine(),
                                 self.inv_affine, self.affine)
        if self.time_interp:
            T = self.scanner_time(Z, self.timestamps[t])
            _cspline_sample4d(self.data[:, t],
                              self.cbspline,
                              X, Y, Z, T,
                              mx='reflect',
                              my='reflect',
                              mz='reflect',
                              mt='reflect')
        else:
            _cspline_sample3d(self.data[:, t],
                              self.cbspline[:, :, :, t],
                              X, Y, Z,
                              mx='reflect',
                              my='reflect',
                              mz='reflect')

    def resample_full_data(self):
        """Resample every scan on the full (non-subsampled) grid and
        return the motion-corrected 4D array."""
        if VERBOSE:
            print('Gridding...')
        xyz = make_grid(self.dims[0:3])
        res = np.zeros(self.dims)
        for t in range(self.nscans):
            if VERBOSE:
                print('Fully resampling scan %d/%d' % (t + 1, self.nscans))
            X, Y, Z = scanner_coords(xyz, self.transforms[t].as_affine(),
                                     self.inv_affine, self.affine)
            if self.time_interp:
                T = self.scanner_time(Z, self.timestamps[t])
                _cspline_sample4d(res[:, :, :, t],
                                  self.cbspline,
                                  X, Y, Z, T,
                                  mt='nearest')
            else:
                _cspline_sample3d(res[:, :, :, t],
                                  self.cbspline[:, :, :, t],
                                  X, Y, Z)
        return res

    def set_fmin(self, optimizer, stepsize, **kwargs):
        """
        Return the minimization function
        """
        self.stepsize = stepsize
        self.optimizer = optimizer
        self.optimizer_kwargs = kwargs
        self.optimizer_kwargs.setdefault('xtol', XTOL)
        self.optimizer_kwargs.setdefault('ftol', FTOL)
        self.optimizer_kwargs.setdefault('gtol', GTOL)
        self.optimizer_kwargs.setdefault('maxiter', MAXITER)
        self.optimizer_kwargs.setdefault('maxfun', MAXFUN)
        self.use_derivatives = use_derivatives(self.optimizer)

    def init_instant_motion(self, t):
        """
        Pre-compute and cache some constants (at fixed time) for
        repeated computations of the alignment energy.

        The idea is to decompose the average temporal variance via:

        V = (n-1)/n V* + (n-1)/n^2 (x-m*)^2

        with x the considered volume at time t, and m* the mean of all
        resampled volumes but x. Only the second term is variable when

        one volumes while the others are fixed. A similar
        decomposition is used for the global variance, so we end up
        with:

        V/V0 = [nV* + (x-m*)^2] / [nV0* + (x-m0*)^2]
        """
        fixed = list(range(self.nscans))
        fixed.remove(t)
        aux = self.data[:, fixed]
        if self.optimize_template:
            self.mu = np.mean(aux, 1)
        self.offset = self.nscans * np.mean((aux.T - self.mu) ** 2)
        self.mu0 = np.mean(aux)
        self.offset0 = self.nscans * np.mean((aux - self.mu0) ** 2)
        self._t = t
        self._pc = None

    def set_transform(self, t, pc):
        # Update the transform parameters of scan t and re-resample it
        self.transforms[t].param = pc
        self.resample(t)

    def _init_energy(self, pc):
        # Cache residuals/variances (and, for derivative-based optimizers,
        # the linearized model) for the candidate parameter vector pc.
        if pc is self._pc:
            return
        self.set_transform(self._t, pc)
        self._pc = pc
        self._res[:] = self.data[:, self._t] - self.mu[:]
        self._V = np.maximum(self.offset + np.mean(self._res ** 2), SMALL)
        self._res0[:] = self.data[:, self._t] - self.mu0
        self._V0 = np.maximum(self.offset0 + np.mean(self._res0 ** 2), SMALL)
        if self.use_derivatives:
            # linearize the data wrt the transform parameters
            # use the auxiliary array to save the current resampled data
            self._aux[:] = self.data[:, self._t]
            # Fix: one basis vector per transform parameter -- do not
            # hard-code 6 (np.eye(6)), which breaks for affine_class
            # exposing a different parameter count (e.g. full Affine).
            basis = np.eye(pc.size)
            for j in range(pc.size):
                self.set_transform(self._t, pc + self.stepsize * basis[j])
                self.A[:, j] = (self.data[:, self._t] - self._aux)\
                    / self.stepsize
            self.transforms[self._t].param = pc
            self.data[:, self._t] = self._aux[:]
            # pre-compute gradient and hessian of numerator and
            # denominator
            c = 2 / float(self.data.shape[0])
            self._dV = c * np.dot(self.A.T, self._res)
            self._dV0 = c * np.dot(self.A.T, self._res0)
            self._H = c * np.dot(self.A.T, self.A)

    def _energy(self):
        """
        The alignment energy is defined as the log-ratio between the
        average temporal variance in the sequence and the global
        spatio-temporal variance.
        """
        return np.log(self._V / self._V0)

    def _energy_gradient(self):
        return self._dV / self._V - self._dV0 / self._V0

    def _energy_hessian(self):
        return (1 / self._V - 1 / self._V0) * self._H\
            - np.dot(self._dV, self._dV.T) / np.maximum(self._V ** 2, SMALL)\
            + np.dot(self._dV0, self._dV0.T) / np.maximum(self._V0 ** 2, SMALL)

    def estimate_instant_motion(self, t):
        """
        Estimate motion parameters at a particular time.
        """
        if VERBOSE:
            print('Estimating motion at time frame %d/%d...'
                  % (t + 1, self.nscans))

        def f(pc):
            self._init_energy(pc)
            return self._energy()

        def fprime(pc):
            self._init_energy(pc)
            return self._energy_gradient()

        def fhess(pc):
            self._init_energy(pc)
            return self._energy_hessian()

        self.init_instant_motion(t)
        fmin, args, kwargs =\
            configure_optimizer(self.optimizer,
                                fprime=fprime,
                                fhess=fhess,
                                **self.optimizer_kwargs)

        # With scipy >= 0.9, some scipy minimization functions like
        # fmin_bfgs may crash due to the subroutine
        # `scalar_search_armijo` returning None as a stepsize when
        # unhappy about the objective function. This seems to have the
        # potential to occur in groupwise registration when using
        # strong image subsampling, i.e. at the coarser levels of the
        # multiscale pyramid. To avoid crashes, we insert a try/catch
        # instruction.
        try:
            pc = fmin(f, self.transforms[t].param, disp=VERBOSE,
                      *args, **kwargs)
            self.set_transform(t, pc)
        except Exception:
            # Fix: catch Exception rather than a bare `except:` so that
            # KeyboardInterrupt / SystemExit still propagate.
            warnings.warn('Minimization failed')

    def estimate_motion(self):
        """
        Optimize motion parameters for the whole sequence. All the
        time frames are initially resampled according to the current
        space/time transformation, the parameters of which are further
        optimized sequentially.
        """
        for t in range(self.nscans):
            if VERBOSE:
                print('Resampling scan %d/%d' % (t + 1, self.nscans))
            self.resample(t)
        # Set the template as the reference scan (will be overwritten
        # if template is to be optimized)
        # NOTE(review): the guard tests a 'template' attribute but
        # initializes self.mu -- presumably a one-shot template init;
        # confirm before changing.
        if not hasattr(self, 'template'):
            self.mu = self.data[:, self.refscan].copy()
        for t in range(self.nscans):
            self.estimate_instant_motion(t)
            if VERBOSE:
                print(self.transforms[t])

    def align_to_refscan(self):
        """
        The `motion_estimate` method aligns scans with an online
        template so that spatial transforms map some average head
        space to the scanner space. To conventionally redefine the
        head space as being aligned with some reference scan, we need
        to right compose each head_average-to-scanner transform with
        the refscan's 'to head_average' transform.
        """
        if self.refscan is None:
            return
        Tref_inv = self.transforms[self.refscan].inv()
        for t in range(self.nscans):
            self.transforms[t] = (self.transforms[t]).compose(Tref_inv)
def resample4d(im4d, transforms, time_interp=True):
    """
    Resample a 4D image according to the specified sequence of spatial
    transforms, using either 4D interpolation if `time_interp` is True
    and 3D interpolation otherwise.
    """
    algo = Realign4dAlgorithm(im4d, transforms=transforms,
                              time_interp=time_interp)
    resampled = algo.resample_full_data()
    im4d.free_data()
    return resampled
def adjust_subsampling(speedup, dims):
    """Derive integer per-axis subsampling factors from a global `speedup`,
    proportionally to each dimension and never smaller than 1."""
    dims = np.array(dims)
    scale = speedup * dims / np.prod(dims) ** (1 / 3.)
    return np.maximum(scale, [1, 1, 1]).astype('int')
def single_run_realign4d(im4d,
                         affine_class=Rigid,
                         time_interp=True,
                         loops=5,
                         speedup=5,
                         refscan=0,
                         borders=(1, 1, 1),
                         optimizer='ncg',
                         xtol=XTOL,
                         ftol=FTOL,
                         gtol=GTOL,
                         stepsize=STEPSIZE,
                         maxiter=MAXITER,
                         maxfun=MAXFUN):
    """
    Realign a single run in space and time.

    Parameters
    ----------
    im4d : Image4d instance
    speedup : int or sequence
      If a sequence, implement a multi-scale realignment

    Returns
    -------
    list of transforms, one per scan, mapping head space to scanner space.
    """
    # Accept either a scalar loop count or one count per pyramid level.
    # Fix: the membership test must use the *type* np.ndarray -- np.array
    # is a factory function and can never equal type(loops), so ndarray
    # inputs were silently wrapped as a single scalar.
    if not type(loops) in (list, tuple, np.ndarray):
        loops = [loops]
    repeats = len(loops)

    def format_arg(x):
        # Broadcast a scalar option to one value per realignment pass
        if not type(x) in (list, tuple, np.ndarray):
            x = [x for i in range(repeats)]
        else:
            if not len(x) == repeats:
                raise ValueError('inconsistent length in arguments')
        return x

    speedup = format_arg(speedup)
    optimizer = format_arg(optimizer)
    xtol = format_arg(xtol)
    ftol = format_arg(ftol)
    gtol = format_arg(gtol)
    stepsize = format_arg(stepsize)
    maxiter = format_arg(maxiter)
    maxfun = format_arg(maxfun)
    transforms = None
    opt_params = zip(loops, speedup, optimizer,
                     xtol, ftol, gtol,
                     stepsize, maxiter, maxfun)
    # One realignment pass per pyramid level, re-using the transforms
    # estimated at the previous (coarser) level as initialization
    for loops_, speedup_, optimizer_, xtol_, ftol_, gtol_,\
            stepsize_, maxiter_, maxfun_ in opt_params:
        subsampling = adjust_subsampling(speedup_, im4d.get_shape()[0:3])
        r = Realign4dAlgorithm(im4d,
                               transforms=transforms,
                               affine_class=affine_class,
                               time_interp=time_interp,
                               subsampling=subsampling,
                               refscan=refscan,
                               borders=borders,
                               optimizer=optimizer_,
                               xtol=xtol_,
                               ftol=ftol_,
                               gtol=gtol_,
                               stepsize=stepsize_,
                               maxiter=maxiter_,
                               maxfun=maxfun_)
        for loop in range(loops_):
            r.estimate_motion()
        r.align_to_refscan()
        transforms = r.transforms
        im4d.free_data()
    return transforms
def realign4d(runs,
              affine_class=Rigid,
              time_interp=True,
              align_runs=True,
              loops=5,
              between_loops=5,
              speedup=5,
              refscan=0,
              borders=(1, 1, 1),
              optimizer='ncg',
              xtol=XTOL,
              ftol=FTOL,
              gtol=GTOL,
              stepsize=STEPSIZE,
              maxiter=MAXITER,
              maxfun=MAXFUN):
    """
    Realign one or several runs in space and time.

    Parameters
    ----------
    runs : list of Image4d objects

    Returns
    -------
    transforms : list
        nested list of rigid transformations

    transforms map an 'ideal' 4d grid (conventionally aligned with the
    first scan of the first run) to the 'acquisition' 4d grid for each
    run
    """
    # Single-session case.
    # BUG FIX: ``np.array`` is a factory function, not a type, so the
    # original ``type(runs) in (list, tuple, np.array)`` never matched
    # ndarray inputs; use isinstance with np.ndarray.
    if not isinstance(runs, (list, tuple, np.ndarray)):
        runs = [runs]
    nruns = len(runs)
    if nruns == 1:
        align_runs = False

    # Correct motion and slice timing in each sequence separately.
    transforms = [single_run_realign4d(run,
                                       affine_class=affine_class,
                                       time_interp=time_interp,
                                       loops=loops,
                                       speedup=speedup,
                                       refscan=refscan,
                                       borders=borders,
                                       optimizer=optimizer,
                                       xtol=xtol,
                                       ftol=ftol,
                                       gtol=gtol,
                                       stepsize=stepsize,
                                       maxiter=maxiter,
                                       maxfun=maxfun) for run in runs]
    if not align_runs:
        return transforms, transforms, None

    # Correct between-session motion using the mean image of each
    # corrected run, and creating a fake time series with no temporal
    # smoothness. If the runs have different affines, a correction is
    # applied to the transforms associated with each run (except for
    # the first run) so that all images included in the fake series
    # have the same affine, namely that of the first run.
    def is_same_affine(a1, a2):
        # Elementwise closeness test; affines are small 4x4 arrays.
        return np.max(np.abs(a1 - a2)) < 1e-5

    mean_img_shape = list(runs[0].get_shape()[0:3]) + [nruns]
    mean_img_data = np.zeros(mean_img_shape)
    for i in range(nruns):
        if is_same_affine(runs[0].affine, runs[i].affine):
            transforms_i = transforms[i]
        else:
            # BUG FIX: the affine correction must be computed BEFORE
            # overwriting ``runs[i].affine``; the original assigned
            # ``runs[i].affine = runs[0].affine`` first, which made
            # ``aff_corr`` the identity and defeated the correction.
            aff_corr = Affine(np.dot(runs[0].affine,
                                     np.linalg.inv(runs[i].affine)))
            transforms_i = [aff_corr.compose(Affine(t.as_affine()))
                            for t in transforms[i]]
            runs[i].affine = runs[0].affine
        corr_run = resample4d(runs[i], transforms=transforms_i,
                              time_interp=time_interp)
        mean_img_data[..., i] = corr_run.mean(3)
        del corr_run
    # Fake series of mean images: tr=1, no slice timing.
    mean_img = Image4d(mean_img_data, affine=runs[0].affine,
                       tr=1.0, slice_times=0)
    transfo_mean = single_run_realign4d(mean_img,
                                        affine_class=affine_class,
                                        time_interp=False,
                                        loops=between_loops,
                                        speedup=speedup,
                                        borders=borders,
                                        optimizer=optimizer,
                                        xtol=xtol,
                                        ftol=ftol,
                                        gtol=gtol,
                                        stepsize=stepsize,
                                        maxiter=maxiter,
                                        maxfun=maxfun)
    # Compose within-run and between-run transformations for each run.
    ctransforms = [None for i in range(nruns)]
    for i in range(nruns):
        ctransforms[i] = [t.compose(transfo_mean[i]) for t in transforms[i]]
    return ctransforms, transforms, transfo_mean
class Realign4d(object):
    """Spatiotemporal realignment for one or several series of 3D images."""

    def __init__(self, images, tr, slice_times=None, slice_info=None,
                 affine_class=Rigid):
        """
        Spatiotemporal realignment class for series of 3D images.

        The algorithm performs simultaneous motion and slice timing
        correction for fMRI series or other data where slices are not
        acquired simultaneously.

        Parameters
        ----------
        images : image or list of images
            Single or multiple input 4d images representing one or
            several sessions.
        tr : float
            Inter-scan repetition time, i.e. the time elapsed between
            two consecutive scans. The unit in which `tr` is given is
            arbitrary although it needs to be consistent with the
            `slice_times` argument.
        slice_times : None or array-like
            If None, slices are assumed to be acquired simultaneously
            hence no slice timing correction is performed. If
            array-like, then the slice acquisition times.
        slice_info : None or tuple, optional
            None, or a tuple with slice axis as the first element and
            direction as the second, for instance (2, 1). If None, then
            guess the slice axis, and direction, as the closest to the z
            axis, as estimated from the affine.
        affine_class : class, optional
            Transformation class used to model motion (default: Rigid).
        """
        self._init(images, tr, slice_times, slice_info, affine_class)

    def _init(self, images, tr, slice_times, slice_info, affine_class):
        """
        Generic initialization method, shared with subclasses.
        """
        # No slice timing information: use a dummy TR and disable 4D
        # (spatio-temporal) interpolation.
        if slice_times is None:
            tr = 1.0
            slice_times = 0.0
            time_interp = False
        else:
            time_interp = True
        self.slice_times = slice_times
        self.tr = tr
        if tr is None:
            raise ValueError('Repetition time cannot be None')
        if not isinstance(images, (list, tuple, np.ndarray)):
            images = [images]
        self._runs = []
        self.affine_class = affine_class
        # Note that, the affine of each run may be different. This is
        # the case, for instance, if the subject exits the scanner
        # inbetween sessions.
        for im in images:
            # NOTE(review): ``im.get_data`` is passed *uncalled* while
            # ``im.get_affine()`` is called — presumably Image4d accepts a
            # data accessor callable for lazy loading (cf. the free_data()
            # calls elsewhere in this module); confirm before "fixing".
            self._runs.append(Image4d(im.get_data,
                                      im.get_affine(),
                                      tr,
                                      slice_times=slice_times,
                                      slice_info=slice_info))
        # Per-run transform lists, filled in by estimate().
        self._transforms = [None for run in self._runs]
        self._within_run_transforms = [None for run in self._runs]
        self._mean_transforms = [None for run in self._runs]
        self._time_interp = time_interp

    def estimate(self,
                 loops=5,
                 between_loops=None,
                 align_runs=True,
                 speedup=5,
                 refscan=0,
                 borders=(1, 1, 1),
                 optimizer='ncg',
                 xtol=XTOL,
                 ftol=FTOL,
                 gtol=GTOL,
                 stepsize=STEPSIZE,
                 maxiter=MAXITER,
                 maxfun=MAXFUN):
        """Estimate motion parameters.

        Parameters
        ----------
        loops : int or sequence of ints
            Determines the number of iterations performed to realign
            scans within each run for each pass defined by the
            ``speedup`` argument. For instance, setting ``speedup`` ==
            (5,2) and ``loops`` == (5,1) means that 5 iterations are
            performed in a first pass where scans are subsampled by an
            isotropic factor 5, followed by one iteration where scans
            are subsampled by a factor 2.
        between_loops : None, int or sequence of ints
            Similar to ``loops`` for between-run motion
            estimation. Determines the number of iterations used to
            realign scans across runs, a procedure similar to
            within-run realignment that uses the mean images from each
            run. If None, assumed to be the same as ``loops``.
            The setting used in the experiments described in Roche,
            IEEE TMI 2011, was: ``speedup`` = (5, 2), ``loops`` = (5,
            1) and ``between_loops`` = (5, 1).
        align_runs : bool
            Determines whether between-run motion is estimated or
            not. If False, the ``between_loops`` argument is ignored.
        speedup: int or sequence of ints
            Determines an isotropic sub-sampling factor, or a sequence
            of such factors, applied to the scans to perform motion
            estimation. If a sequence, several estimation passes are
            applied.
        refscan : None or int
            Defines the number of the scan used as the reference
            coordinate system for each run. If None, a reference
            coordinate system is defined internally that does not
            correspond to any particular scan. Note that the
            coordinate system associated with the first run is always
            used as the overall reference frame.
        borders : sequence of ints
            Should be of length 3. Determines the field of view for
            motion estimation in terms of the number of slices at each
            extremity of the reference grid that are ignored for
            motion parameter estimation. For instance,
            ``borders``==(1,1,1) means that the realignment cost
            function will not take into account voxels located in the
            first and last axial/sagittal/coronal slices in the
            reference grid. Please note that this choice only affects
            parameter estimation but does not affect image resampling
            in any way, see ``resample`` method.
        optimizer : str
            Defines the optimization method. One of 'simplex',
            'powell', 'cg', 'ncg', 'bfgs' and 'steepest'.
        xtol : float
            Tolerance on variations of transformation parameters to
            test numerical convergence.
        ftol : float
            Tolerance on variations of the intensity comparison metric
            to test numerical convergence.
        gtol : float
            Tolerance on the gradient of the intensity comparison
            metric to test numerical convergence. Applicable to
            optimizers 'cg', 'ncg', 'bfgs' and 'steepest'.
        stepsize : float
            Step size to approximate the gradient and Hessian of the
            intensity comparison metric w.r.t. transformation
            parameters. Applicable to optimizers 'cg', 'ncg', 'bfgs'
            and 'steepest'.
        maxiter : int
            Maximum number of iterations in optimization.
        maxfun : int
            Maximum number of function evaluations in maxfun.
        """
        if between_loops is None:
            between_loops = loops
        # Delegate to the module-level driver; it returns a triple of
        # (composed, within-run, between-run) transform lists.
        t = realign4d(self._runs,
                      affine_class=self.affine_class,
                      time_interp=self._time_interp,
                      align_runs=align_runs,
                      loops=loops,
                      between_loops=between_loops,
                      speedup=speedup,
                      refscan=refscan,
                      borders=borders,
                      optimizer=optimizer,
                      xtol=xtol,
                      ftol=ftol,
                      gtol=gtol,
                      stepsize=stepsize,
                      maxiter=maxiter,
                      maxfun=maxfun)
        self._transforms, self._within_run_transforms,\
            self._mean_transforms = t

    def resample(self, r=None, align_runs=True):
        """
        Return the resampled run number r as a 4d nibabel-like
        image. Returns all runs as a list of images if r == None.
        """
        # With align_runs, use transforms composed with the between-run
        # correction; otherwise only the within-run estimates.
        if align_runs:
            transforms = self._transforms
        else:
            transforms = self._within_run_transforms
        runs = range(len(self._runs))
        if r is None:
            data = [resample4d(self._runs[r], transforms=transforms[r],
                               time_interp=self._time_interp) for r in runs]
            return [Nifti1Image(data[r], self._runs[r].affine)
                    for r in runs]
        else:
            data = resample4d(self._runs[r], transforms=transforms[r],
                              time_interp=self._time_interp)
            return Nifti1Image(data, self._runs[r].affine)
class SpaceTimeRealign(Realign4d):
    def __init__(self, images, tr, slice_times, slice_info,
                 affine_class=Rigid):
        """ Spatiotemporal realignment class for fMRI series.

        This class gives a high-level interface to :class:`Realign4d`

        Parameters
        ----------
        images : image or list of images
            Single or multiple input 4d images representing one or several fMRI
            runs.
        tr : float
            Inter-scan repetition time in seconds, i.e. the time elapsed between
            two consecutive scans.
        slice_times : str or callable or array-like
            If str, one of the function names in ``SLICETIME_FUNCTIONS``
            dictionary from :mod:`register.slicetiming.timefuncs`.  If
            callable, a function taking two parameters: ``n_slices`` and ``tr``
            (number of slices in the images, inter-scan repetition time in
            seconds). This function returns a vector of times of slice
            acquisition $t_i$ for each slice $i$ in the volumes. See
            :mod:`register.slicetiming.timefuncs` for a collection of
            functions for common slice acquisition schemes. If array-like, then
            should be a slice time vector as above.
        slice_info : int or length 2 sequence
            If int, the axis in `images` that is the slice axis.  In a 4D image,
            this will often be axis = 2.  If a 2 sequence, then elements are
            ``(slice_axis, slice_direction)``, where ``slice_axis`` is the slice
            axis in the image as above, and ``slice_direction`` is 1 if the
            slices were acquired slice 0 first, slice -1 last, or -1 if acquired
            slice -1 first, slice 0 last.  If `slice_info` is an int, assume
            ``slice_direction`` == 1.
        affine_class : ``Affine`` class, optional
            transformation class to use to calculate transformations between
            the volumes. Default is :class:``Rigid``
        """
        # BUG FIX: the original used ``slice_times == None`` / ``slice_info
        # == None``. For array-like slice_times, ``== None`` performs an
        # elementwise comparison (ambiguous truth value); ``is None`` is the
        # correct identity test.
        if slice_times is None:
            raise ValueError("slice_times must be set for space/time "
                             "registration; use SpaceRealign for space-only "
                             "registration")
        if slice_info is None:
            raise ValueError("slice_info cannot be None")
        try:
            len(slice_info)
        except TypeError:
            # Presumably an int
            slice_axis = slice_info
            slice_info = (slice_axis, 1)
        else:  # sequence
            slice_axis, slice_direction = slice_info
        # isinstance also admits list/tuple subclasses, unlike the original
        # exact type() test — a backward-compatible generalization.
        if isinstance(images, (list, tuple)):
            n_slices = images[0].shape[slice_axis]
        else:
            n_slices = images.shape[slice_axis]
        # Resolve a named or callable slice-timing scheme to a time vector.
        if isinstance(slice_times, string_types):
            slice_times = timefuncs.SLICETIME_FUNCTIONS[slice_times]
        if callable(slice_times):
            slice_times = slice_times(n_slices, tr)
        self._init(images, tr, slice_times, slice_info, affine_class)
class SpaceRealign(Realign4d):
    def __init__(self, images, affine_class=Rigid):
        """ Spatial registration of time series with no time interpolation

        Parameters
        ----------
        images : image or list of images
            Single or multiple input 4d images representing one or several fMRI
            runs.
        affine_class : ``Affine`` class, optional
            transformation class to use to calculate transformations between
            the volumes. Default is :class:``Rigid``
        """
        # tr=1 is a dummy value; slice_times=None disables slice timing
        # correction (and hence time interpolation) in Realign4d._init.
        self._init(images, 1., None, None, affine_class)
| {
"content_hash": "3ae0d05e126664fdc3e14b5bb489be34",
"timestamp": "",
"source": "github",
"line_count": 956,
"max_line_length": 86,
"avg_line_length": 38.29916317991632,
"alnum_prop": 0.5403124487900803,
"repo_name": "nipy/nireg",
"id": "95ace5fc17d931196c24c0a54488d583cf412559",
"size": "36728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nireg/groupwise_registration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "38030"
},
{
"name": "Python",
"bytes": "467407"
}
],
"symlink_target": ""
} |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Fonctions très utiles par Yves Stadler. Optimisations par Michaël Coquard.
Remerciements spéciaux à D. Kratsch pour les cours d'algo pour le tri
fusion :)."""
factCache = [1]


def fact(i):
    """Return i! with module-level memoization.

    The cache grows on demand, so repeated calls are amortized O(1).
    """
    n = len(factCache)
    if i >= n:
        while n <= i:
            factCache.append(factCache[n - 1] * n)
            n += 1
    return factCache[i]


def unrank(x, l):
    """Return the x-th permutation of sorted list l, in lexicographic order.

    ``x == 0`` yields l itself; x is taken modulo len(l)!.  The input list
    is consumed (emptied) by the call, matching the historical behavior.

    BUG FIX: the original used true division (``f /= n``), which makes f a
    float under Python 3 and causes ``l.pop(q)`` to raise TypeError; floor
    division keeps the factoradic arithmetic integral on Python 2 and 3.
    """
    m = []
    n = len(l)
    f = fact(n)
    x = x % f
    while l:
        f //= n
        q, r = divmod(x, f)
        m.append(l.pop(q))
        n -= 1
        x = r
    return m
def rank(l):
    """Inverse of ``unrank``: lexicographic index of the permutation l.

    Elements are compared with ``<``, so any totally ordered objects work.
    Quadratic in len(l) because of the sorted-insertion bookkeeping; the
    input list is consumed (emptied) from its tail, as in the original.
    """
    if len(l) <= 1:
        return 0  # zero or one element: only one permutation exists
    sorted_seen = [l.pop()]
    index = 0
    count = 1   # how many elements processed so far
    cost = 1    # weight of a displacement at the current position (count!)
    while l:
        e = l.pop()
        # Insertion point that keeps sorted_seen sorted; it also equals the
        # number of already-seen elements smaller than e.
        pos = 0
        while pos < len(sorted_seen) and sorted_seen[pos] < e:
            pos += 1
        index += cost * pos
        sorted_seen.insert(pos, e)
        count += 1
        cost *= count
    return index
def intToBinaryList(n, m):
    """Return the m low-order bits of integer n as a list of 0/1 ints.

    The most significant of the m bits comes first; the least significant
    bit is last.

    BUG FIX: the original computed ``1 << (m - 1)`` which raises ValueError
    for ``m == 0`` (negative shift); m <= 0 now yields an empty list.
    """
    if m <= 0:
        return []
    return [(n >> shift) & 1 for shift in range(m - 1, -1, -1)]
def XMLTagExtract(s):
    """Parse an XML tag and return a triple containing, in order:
    - the list of the tag's attributes, as strings
    - the tag's opening mark (e.g. '<html')
    - the tag's closing mark.

    NOTE: the input is assumed to be a valid XML tag; no error recovery
    is attempted.  (Docstring translated from the original French.)
    """
    # Character-by-character state machine. States (mutually exclusive,
    # tested in priority order below):
    #   inval1/inval2 - inside a single-/double-quoted attribute value
    #   intag         - inside an attribute (name, '=' and opening quote)
    #   insep         - past the tag name, between/before attributes
    SP = ['\n','\r','\t',' ']
    intag = False
    insep = False
    inval1 = False
    inval2 = False
    current = ""
    start = ""
    end = ""
    l = []
    for i in s:
        if inval1:
            # Accumulate until the closing single quote ends the attribute.
            current += i
            if i == "'":
                intag = False
                inval1 = False
                inval2 = False
                l.append(current)
                current = ""
        elif inval2:
            # Accumulate until the closing double quote ends the attribute.
            current += i
            if i == '"':
                intag = False
                inval1 = False
                inval2 = False
                l.append(current)
                current = ""
        elif intag:
            # Inside an attribute: an opening quote switches to value mode.
            current += i
            if i == "'":
                inval1 = True
            if i == '"':
                inval2 = True
        elif insep:
            # Whitespace is skipped; any other char starts a new attribute.
            if not (i in SP):
                current += i
                intag = True
        elif i in SP:
            # First whitespace: everything so far is the opening mark.
            insep = True
            start = current
            current = ""
        else:
            current += i
    # Whatever remains after the last attribute is the closing mark
    # (e.g. '>' or '/>').  NOTE(review): for a tag with no whitespace at
    # all, `start` stays "" and the whole input lands in `end` — presumably
    # acceptable for the caller; confirm.
    end = current
    return (l,start,end)
def intToHex(n, upper=False):
    """Return the hexadecimal representation of integer n (no '0x' prefix).

    upper selects upper-case digits.

    BUG FIXES: the original returned "" for n == 0, and looped forever for
    negative n (``n >>= 4`` never reaches 0 for a negative Python int).
    Zero now yields "0" and negatives a '-'-prefixed representation.
    """
    if upper:
        num = "0123456789ABCDEF"
    else:
        num = "0123456789abcdef"
    if n == 0:
        return "0"
    sign = ""
    if n < 0:
        sign = "-"
        n = -n
    result = ""
    while n != 0:
        result = num[n & 0xf] + result
        n >>= 4
    return sign + result
if __name__ == '__main__':
    # Portability fix: a single-argument print() call is valid on both
    # Python 2 (parenthesized expression) and Python 3 (function call);
    # the original Python-2-only print statement is a SyntaxError on 3.
    print("Tools module by Yves Stadler and Michael Coquard")
"content_hash": "d486586555f728cdc9915747418b2f88",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 80,
"avg_line_length": 30.098039215686274,
"alnum_prop": 0.5061889250814332,
"repo_name": "mvy/Demaratus-Framework",
"id": "728bc0372d93beede5fb00a3c7259fe5f21692a9",
"size": "4647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "legacy/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90747"
}
],
"symlink_target": ""
} |
'''
A brute force solution to the annoying Star Trek logic problem:
http://io9.com/can-you-solve-this-extremely-difficult-star-trek-puzzle-1667991339
by John Bohannon
14 December 2014
'''
from itertools import permutations
class Person():
    """A crew member: game rankings plus hero/fear relationship links."""

    def __init__(self, name):
        """Start unranked; each person is initially their own hero and fear."""
        self.name = name
        self.fizz = 0
        self.tri = 0
        self.hero = self
        self.fear = self
# Initials of the six crew members: Geordi, Picard, Troi, Worf, Data, Riker.
crew_names = ['G', 'P', 'T', 'W', 'D', 'R']
# Mutable lookup table; solve() overwrites rankings/hero/fear in place while
# testing candidate assignments.
crew = {i:Person(name=i) for i in crew_names}
def solve():
    """Brute-force the puzzle; return True (with `crew` populated) if a
    consistent assignment of rankings, heroes and fears exists.

    Mutates the module-level `crew` dict in place as it searches.
    """
    progress = 0
    # Portability/idiom fix: count lazily instead of materializing a list.
    problem_size = sum(1 for _ in permutations(crew_names))
    # start with Fizzbin rankings
    for fizz_list in permutations(crew_names):
        # keep track of progress
        progress += 1
        if (problem_size - progress) % 10 == 0:
            # Portability fix: print() works on Python 2 and 3; the
            # original print statement is a SyntaxError on Python 3.
            print(problem_size - progress)
        # for each Fizzbin ranking permutation, check all Tri-Chess ranking permutations
        for tri_list in permutations(crew_names):
            # let's start by testing the game-only conditions
            if not all([
                # 1. Geordi ranks 2 at Tri-D Chess
                tri_list.index('G') == 1,
                # 2. Picard ranks two positions behind Troi at Fizzbin
                fizz_list.index('T') - fizz_list.index('P') == 2,
                # 8. The person who is worst at Fizzbin is better than Troi at Tri-D Chess
                tri_list.index(fizz_list[0]) > tri_list.index('T'),
                # 9. The person ranked number 3 at Tri-D Chess is ranked 4 positions higher than Data at Fizzbin
                fizz_list.index(tri_list[2]) - fizz_list.index('D') == 4,
                # 11. Riker is ranked 2 lower at Tri-D Chess than the crew member ranked 2 at Fizzbin
                tri_list.index(fizz_list[1]) - tri_list.index('R') == 2
            ]):
                continue
            # those conditions are satisfied, so load the rankings into each crew person
            for n, fizz_player in enumerate(fizz_list):
                crew[fizz_player].fizz = n + 1
            for n, tri_player in enumerate(tri_list):
                crew[tri_player].tri = n + 1
            # build a valid hero graph
            for hero_list in permutations(crew_names):
                # make sure nobody is their own hero
                if any([crew_names[n] == i for n, i in enumerate(hero_list)]):
                    continue
                # load the hero into each crew person
                for n, i in enumerate(hero_list):
                    crew[crew_names[n]].hero = crew[i]
                # now that we have heroes, let's do fears
                for fear_list in permutations(crew_names):
                    # make sure they don't fear themselves
                    if any([crew_names[n] == i for n, i in enumerate(fear_list)]):
                        continue
                    # and make sure no one fears their hero
                    if any([hero_list[n] == i for n, i in enumerate(fear_list)]):
                        continue
                    # load fear into each crew person
                    for n, i in enumerate(fear_list):
                        crew[crew_names[n]].fear = crew[i]
                    # finally, let's test the relationship conditions
                    if all([
                        # 3. Troi is feared by the person Geordi fears
                        crew['T'] == crew['G'].fear.fear,
                        # 4. Worf's hero ranks 3 times lower at Tri-D Chess than the crew member who is best at Fizzbin
                        3 * crew['W'].hero.tri == crew[fizz_list[-1]].tri,
                        # 5. Picard's hero fears Geordi
                        crew['P'].hero.fear == crew['G'],
                        # 6. Data's hero is not Geordi
                        crew['D'].hero != crew['G'],
                        # 7. Data is the hero of Riker's hero
                        crew['R'].hero.hero == crew['D'],
                        # 10. Riker is feared by the person Picard fears and is the hero of Worf's hero
                        crew['R'] == crew['P'].fear.fear == crew['W'].hero.hero
                    ]):
                        return True
    return False
# Run the search and report the solution (or lack thereof).
# Portability fixes: Python 3 print() calls, and idiomatic truth test
# instead of the original ``solved == True``.
solved = solve()
if solved:
    print('person: fizz, tri, hero, fears')
    for c in crew_names:
        print(c + ':', crew[c].fizz, crew[c].tri, crew[c].hero.name, crew[c].fear.name)
else:
    print("Highly illogical, Captain. There is no solution. Not in this generation, nor the next.")
| {
"content_hash": "4adeaad505a68b85a1965d3e1dd6ea53",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 102,
"avg_line_length": 36.43,
"alnum_prop": 0.6426022508921219,
"repo_name": "gonzolabs/star_trek",
"id": "fd6d65981e09b15d6d43bae8b0f180018f88f633",
"size": "3666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "star_trek.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3666"
}
],
"symlink_target": ""
} |
"""Module to help with parsing and generating configuration files."""
from collections import OrderedDict
import logging
import os
from pathlib import Path
import re
import shutil
from types import ModuleType
from typing import Any, Callable, Dict, Optional, Sequence, Set, Tuple, Union
from awesomeversion import AwesomeVersion
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant import auth
from homeassistant.auth import (
mfa_modules as auth_mfa_modules,
providers as auth_providers,
)
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_FRIENDLY_NAME,
ATTR_HIDDEN,
CONF_ALLOWLIST_EXTERNAL_DIRS,
CONF_ALLOWLIST_EXTERNAL_URLS,
CONF_AUTH_MFA_MODULES,
CONF_AUTH_PROVIDERS,
CONF_CUSTOMIZE,
CONF_CUSTOMIZE_DOMAIN,
CONF_CUSTOMIZE_GLOB,
CONF_ELEVATION,
CONF_EXTERNAL_URL,
CONF_ID,
CONF_INTERNAL_URL,
CONF_LATITUDE,
CONF_LEGACY_TEMPLATES,
CONF_LONGITUDE,
CONF_MEDIA_DIRS,
CONF_NAME,
CONF_PACKAGES,
CONF_TEMPERATURE_UNIT,
CONF_TIME_ZONE,
CONF_TYPE,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
LEGACY_CONF_WHITELIST_EXTERNAL_DIRS,
TEMP_CELSIUS,
__version__,
)
from homeassistant.core import DOMAIN as CONF_CORE, SOURCE_YAML, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, extract_domain_configs
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import Integration, IntegrationNotFound
from homeassistant.requirements import (
RequirementsNotFound,
async_get_integration_with_requirements,
)
from homeassistant.util.package import is_docker_env
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from homeassistant.util.yaml import SECRET_YAML, Secrets, load_yaml
_LOGGER = logging.getLogger(__name__)
DATA_PERSISTENT_ERRORS = "bootstrap_persistent_errors"
RE_YAML_ERROR = re.compile(r"homeassistant\.util\.yaml")
RE_ASCII = re.compile(r"\033\[[^m]*m")
YAML_CONFIG_FILE = "configuration.yaml"
VERSION_FILE = ".HA_VERSION"
CONFIG_DIR_NAME = ".homeassistant"
DATA_CUSTOMIZE = "hass_customize"
GROUP_CONFIG_PATH = "groups.yaml"
AUTOMATION_CONFIG_PATH = "automations.yaml"
SCRIPT_CONFIG_PATH = "scripts.yaml"
SCENE_CONFIG_PATH = "scenes.yaml"
LOAD_EXCEPTIONS = (ImportError, FileNotFoundError)
INTEGRATION_LOAD_EXCEPTIONS = (
IntegrationNotFound,
RequirementsNotFound,
*LOAD_EXCEPTIONS,
)
DEFAULT_CONFIG = f"""
# Configure a default setup of Home Assistant (frontend, api, etc)
default_config:
# Text to speech
tts:
- platform: google_translate
group: !include {GROUP_CONFIG_PATH}
automation: !include {AUTOMATION_CONFIG_PATH}
script: !include {SCRIPT_CONFIG_PATH}
scene: !include {SCENE_CONFIG_PATH}
"""
DEFAULT_SECRETS = """
# Use this file to store secrets like usernames and passwords.
# Learn more at https://www.home-assistant.io/docs/configuration/secrets/
some_password: welcome
"""
TTS_PRE_92 = """
tts:
- platform: google
"""
TTS_92 = """
tts:
- platform: google_translate
service_name: google_say
"""
def _no_duplicate_auth_provider(
    configs: Sequence[Dict[str, Any]]
) -> Sequence[Dict[str, Any]]:
    """Validate that no auth provider configuration is duplicated.

    A provider type may appear once without an id; using the same type
    several times requires distinct ids.
    """
    seen: Set[Tuple[str, Optional[str]]] = set()
    for provider_conf in configs:
        fingerprint = (provider_conf[CONF_TYPE], provider_conf.get(CONF_ID))
        if fingerprint in seen:
            raise vol.Invalid(
                f"Duplicate auth provider {provider_conf[CONF_TYPE]} found. "
                "Please add unique IDs "
                "if you want to have the same auth provider twice"
            )
        seen.add(fingerprint)
    return configs
def _no_duplicate_auth_mfa_module(
    configs: Sequence[Dict[str, Any]]
) -> Sequence[Dict[str, Any]]:
    """Validate that no MFA module configuration is duplicated.

    A module type may appear once without an id; reusing a type requires a
    globally unique id.  Note: the dedup key differs from auth providers —
    here the id alone identifies the entry when present.
    """
    seen: Set[str] = set()
    for module_conf in configs:
        fingerprint = module_conf.get(CONF_ID, module_conf[CONF_TYPE])
        if fingerprint in seen:
            raise vol.Invalid(
                f"Duplicate mfa module {module_conf[CONF_TYPE]} found. "
                "Please add unique IDs "
                "if you want to have the same mfa module twice"
            )
        seen.add(fingerprint)
    return configs
# Packages: slug -> {component name -> raw (unvalidated) component config}.
PACKAGES_CONFIG_SCHEMA = cv.schema_with_slug_keys(  # Package names are slugs
    vol.Schema({cv.string: vol.Any(dict, list, None)})  # Component config
)
# Per-entity customize attributes; extra keys are allowed on purpose.
CUSTOMIZE_DICT_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
        vol.Optional(ATTR_HIDDEN): cv.boolean,
        vol.Optional(ATTR_ASSUMED_STATE): cv.boolean,
    },
    extra=vol.ALLOW_EXTRA,
)
# customize (by entity id), customize_domain and customize_glob sections.
CUSTOMIZE_CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_CUSTOMIZE, default={}): vol.Schema(
            {cv.entity_id: CUSTOMIZE_DICT_SCHEMA}
        ),
        vol.Optional(CONF_CUSTOMIZE_DOMAIN, default={}): vol.Schema(
            {cv.string: CUSTOMIZE_DICT_SCHEMA}
        ),
        vol.Optional(CONF_CUSTOMIZE_GLOB, default={}): vol.Schema(
            {cv.string: CUSTOMIZE_DICT_SCHEMA}
        ),
    }
)
# Schema for the [homeassistant] core section of configuration.yaml.
CORE_CONFIG_SCHEMA = CUSTOMIZE_CONFIG_SCHEMA.extend(
    {
        CONF_NAME: vol.Coerce(str),
        CONF_LATITUDE: cv.latitude,
        CONF_LONGITUDE: cv.longitude,
        CONF_ELEVATION: vol.Coerce(int),
        vol.Optional(CONF_TEMPERATURE_UNIT): cv.temperature_unit,
        CONF_UNIT_SYSTEM: cv.unit_system,
        CONF_TIME_ZONE: cv.time_zone,
        vol.Optional(CONF_INTERNAL_URL): cv.url,
        vol.Optional(CONF_EXTERNAL_URL): cv.url,
        vol.Optional(CONF_ALLOWLIST_EXTERNAL_DIRS): vol.All(
            cv.ensure_list, [vol.IsDir()]  # pylint: disable=no-value-for-parameter
        ),
        # Deprecated spelling, still accepted for backward compatibility.
        vol.Optional(LEGACY_CONF_WHITELIST_EXTERNAL_DIRS): vol.All(
            cv.ensure_list, [vol.IsDir()]  # pylint: disable=no-value-for-parameter
        ),
        vol.Optional(CONF_ALLOWLIST_EXTERNAL_URLS): vol.All(cv.ensure_list, [cv.url]),
        vol.Optional(CONF_PACKAGES, default={}): PACKAGES_CONFIG_SCHEMA,
        # Auth providers: the insecure_example provider is rejected outright,
        # and duplicates are rejected by _no_duplicate_auth_provider.
        vol.Optional(CONF_AUTH_PROVIDERS): vol.All(
            cv.ensure_list,
            [
                auth_providers.AUTH_PROVIDER_SCHEMA.extend(
                    {
                        CONF_TYPE: vol.NotIn(
                            ["insecure_example"],
                            "The insecure_example auth provider"
                            " is for testing only.",
                        )
                    }
                )
            ],
            _no_duplicate_auth_provider,
        ),
        # MFA modules: analogous restrictions to auth providers.
        vol.Optional(CONF_AUTH_MFA_MODULES): vol.All(
            cv.ensure_list,
            [
                auth_mfa_modules.MULTI_FACTOR_AUTH_MODULE_SCHEMA.extend(
                    {
                        CONF_TYPE: vol.NotIn(
                            ["insecure_example"],
                            "The insecure_example mfa module is for testing only.",
                        )
                    }
                )
            ],
            _no_duplicate_auth_mfa_module,
        ),
        # pylint: disable=no-value-for-parameter
        vol.Optional(CONF_MEDIA_DIRS): cv.schema_with_slug_keys(vol.IsDir()),
        vol.Optional(CONF_LEGACY_TEMPLATES): cv.boolean,
    }
)
def get_default_config_dir() -> str:
    """Put together the default configuration directory based on the OS."""
    if os.name == "nt":
        base_dir = os.getenv("APPDATA")
    else:
        base_dir = os.path.expanduser("~")
    return os.path.join(base_dir, CONFIG_DIR_NAME)  # type: ignore
async def async_ensure_config_exists(hass: HomeAssistant) -> bool:
    """Ensure a configuration file exists in given configuration directory.

    Creating a default one if needed.

    Return boolean if configuration dir is ready to go.
    """
    config_path = hass.config.path(YAML_CONFIG_FILE)

    if not os.path.isfile(config_path):
        print(
            "Unable to find configuration. Creating default one in",
            hass.config.config_dir,
        )
        return await async_create_default_config(hass)

    return True
async def async_create_default_config(hass: HomeAssistant) -> bool:
    """Create a default configuration file in given configuration directory.

    Return if creation was successful.
    """
    # File I/O is blocking; run it in the executor to keep the loop free.
    return await hass.async_add_executor_job(
        _write_default_config, hass.config.config_dir
    )
def _write_default_config(config_dir: str) -> bool:
    """Write the default config skeleton into ``config_dir``.

    Creates configuration.yaml, secrets.yaml, the version stamp and the
    (mostly empty) group/automation/script/scene include files.  Returns
    True on success, False (after printing a message) when any file cannot
    be written.  Runs in an executor.
    """
    config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
    secret_path = os.path.join(config_dir, SECRET_YAML)
    version_path = os.path.join(config_dir, VERSION_FILE)
    group_yaml_path = os.path.join(config_dir, GROUP_CONFIG_PATH)
    automation_yaml_path = os.path.join(config_dir, AUTOMATION_CONFIG_PATH)
    script_yaml_path = os.path.join(config_dir, SCRIPT_CONFIG_PATH)
    scene_yaml_path = os.path.join(config_dir, SCENE_CONFIG_PATH)
    # Writing files with YAML does not create the most human readable results
    # So we're hard coding a YAML template.
    try:
        # Consistency fix: open text files with an explicit UTF-8 encoding,
        # as done elsewhere in this module, instead of relying on the
        # platform/locale default (which differs on Windows).
        with open(config_path, "wt", encoding="utf-8") as config_file:
            config_file.write(DEFAULT_CONFIG)
        with open(secret_path, "wt", encoding="utf-8") as secret_file:
            secret_file.write(DEFAULT_SECRETS)
        with open(version_path, "wt", encoding="utf-8") as version_file:
            version_file.write(__version__)
        # Empty include files so the !include lines in DEFAULT_CONFIG resolve.
        with open(group_yaml_path, "wt"):
            pass
        with open(automation_yaml_path, "wt", encoding="utf-8") as fil:
            fil.write("[]")
        with open(script_yaml_path, "wt"):
            pass
        with open(scene_yaml_path, "wt"):
            pass
        return True
    except OSError:
        print("Unable to create default configuration file", config_path)
        return False
async def async_hass_config_yaml(hass: HomeAssistant) -> Dict:
    """Load YAML from a Home Assistant configuration file.

    This function allow a component inside the asyncio loop to reload its
    configuration by itself. Include package merge.
    """
    # Without a config dir there is nowhere to resolve !secret from.
    if hass.config.config_dir is None:
        secrets = None
    else:
        secrets = Secrets(Path(hass.config.config_dir))

    # Not using async_add_executor_job because this is an internal method.
    config = await hass.loop.run_in_executor(
        None,
        load_yaml_config_file,
        hass.config.path(YAML_CONFIG_FILE),
        secrets,
    )
    # Merge any "packages:" defined under the core section into the config.
    core_config = config.get(CONF_CORE, {})
    await merge_packages_config(hass, config, core_config.get(CONF_PACKAGES, {}))
    return config
def load_yaml_config_file(
    config_path: str, secrets: Optional[Secrets] = None
) -> Dict[Any, Any]:
    """Parse a YAML configuration file into a dict.

    Raises FileNotFoundError or HomeAssistantError.

    This method needs to run in an executor.
    """
    parsed = load_yaml(config_path, secrets)

    if not isinstance(parsed, dict):
        msg = (
            f"The configuration file {os.path.basename(config_path)} "
            "does not contain a dictionary"
        )
        _LOGGER.error(msg)
        raise HomeAssistantError(msg)

    # Normalize falsy section bodies (e.g. a bare "section:" parsed as
    # None) to empty dicts so consumers can treat every value as a mapping.
    return {section: value or {} for section, value in parsed.items()}
def process_ha_config_upgrade(hass: HomeAssistant) -> None:
    """Upgrade configuration if necessary.

    This method needs to run in an executor.
    """
    version_path = hass.config.path(VERSION_FILE)
    try:
        with open(version_path) as inp:
            conf_version = inp.readline().strip()
    except FileNotFoundError:
        # Last version to not have this file
        conf_version = "0.7.7"
    # Fast path: config dir already matches the running version.
    if conf_version == __version__:
        return
    _LOGGER.info(
        "Upgrading configuration directory from %s to %s", conf_version, __version__
    )
    version_obj = AwesomeVersion(conf_version)
    if version_obj < AwesomeVersion("0.50"):
        # 0.50 introduced persistent deps dir.
        lib_path = hass.config.path("deps")
        if os.path.isdir(lib_path):
            shutil.rmtree(lib_path)
    if version_obj < AwesomeVersion("0.92"):
        # 0.92 moved google/tts.py to google_translate/tts.py
        config_path = hass.config.path(YAML_CONFIG_FILE)
        with open(config_path, encoding="utf-8") as config_file:
            config_raw = config_file.read()
        # Textual migration of the user's configuration.yaml in place.
        if TTS_PRE_92 in config_raw:
            _LOGGER.info("Migrating google tts to google_translate tts")
            config_raw = config_raw.replace(TTS_PRE_92, TTS_92)
            try:
                with open(config_path, "wt", encoding="utf-8") as config_file:
                    config_file.write(config_raw)
            except OSError:
                # Best effort: log and continue with the rest of the upgrade.
                _LOGGER.exception("Migrating to google_translate tts failed")
    if version_obj < AwesomeVersion("0.94") and is_docker_env():
        # In 0.94 we no longer install packages inside the deps folder when
        # running inside a Docker container.
        lib_path = hass.config.path("deps")
        if os.path.isdir(lib_path):
            shutil.rmtree(lib_path)
    # Stamp the directory with the current version so the next start skips
    # the migrations above.
    with open(version_path, "wt") as outp:
        outp.write(__version__)
@callback
def async_log_exception(
    ex: Exception,
    domain: str,
    config: Dict,
    hass: HomeAssistant,
    link: Optional[str] = None,
) -> None:
    """Log a configuration validation error.

    Also records a persistent setup-error notification when a hass
    instance is supplied.  This method must be run in the event loop.
    """
    if hass is not None:
        async_notify_setup_error(hass, domain, link)

    message, is_friendly = _format_config_error(ex, domain, config, link)
    # Attach the traceback only for non-friendly (unexpected) errors.
    _LOGGER.error(message, exc_info=not is_friendly and ex)
@callback
def _format_config_error(
    ex: Exception, domain: str, config: Dict, link: Optional[str] = None
) -> Tuple[str, bool]:
    """Build the log message for a configuration validation error.

    Returns the message plus a flag telling whether it is a "friendly"
    (voluptuous-humanized) message.  This method must be run in the event
    loop.
    """
    parts = [f"Invalid config for [{domain}]: "]

    if isinstance(ex, vol.Invalid):
        is_friendly = True
        if "extra keys not allowed" in ex.error_message:
            option_path = "->".join(str(m) for m in ex.path)
            parts.append(
                f"[{ex.path[-1]}] is an invalid option for [{domain}]. "
                f"Check: {domain}->{option_path}."
            )
        else:
            parts.append(f"{humanize_error(config, ex)}.")
    else:
        is_friendly = False
        parts.append(str(ex) or repr(ex))

    # config may be a non-dict (e.g. a list) in which case use it directly.
    try:
        domain_config = config.get(domain, config)
    except AttributeError:
        domain_config = config

    # Point at the YAML source location when the loader annotated it.
    parts.append(
        f" (See {getattr(domain_config, '__config_file__', '?')}, "
        f"line {getattr(domain_config, '__line__', '?')}). "
    )
    if domain != CONF_CORE and link:
        parts.append(f"Please check the docs at {link}")

    return "".join(parts), is_friendly
async def async_process_ha_core_config(hass: HomeAssistant, config: Dict) -> None:
    """Process the [homeassistant] section from the configuration.

    Validates the core section, initializes auth on first run, applies
    location/unit/url settings, media dirs, allowlists and customize data.
    This method is a coroutine.
    """
    config = CORE_CONFIG_SCHEMA(config)
    # Only load auth during startup.
    if not hasattr(hass, "auth"):
        auth_conf = config.get(CONF_AUTH_PROVIDERS)
        if auth_conf is None:
            # Default auth provider when none configured.
            auth_conf = [{"type": "homeassistant"}]
        mfa_conf = config.get(
            CONF_AUTH_MFA_MODULES,
            [{"type": "totp", "id": "totp", "name": "Authenticator app"}],
        )
        setattr(
            hass, "auth", await auth.auth_manager_from_config(hass, auth_conf, mfa_conf)
        )
    await hass.config.async_load()
    hac = hass.config
    # If any core setting comes from YAML, mark YAML as the config source.
    if any(
        k in config
        for k in [
            CONF_LATITUDE,
            CONF_LONGITUDE,
            CONF_NAME,
            CONF_ELEVATION,
            CONF_TIME_ZONE,
            CONF_UNIT_SYSTEM,
            CONF_EXTERNAL_URL,
            CONF_INTERNAL_URL,
        ]
    ):
        hac.config_source = SOURCE_YAML
    # Copy simple key -> attribute settings straight onto the config object.
    for key, attr in (
        (CONF_LATITUDE, "latitude"),
        (CONF_LONGITUDE, "longitude"),
        (CONF_NAME, "location_name"),
        (CONF_ELEVATION, "elevation"),
        (CONF_INTERNAL_URL, "internal_url"),
        (CONF_EXTERNAL_URL, "external_url"),
        (CONF_MEDIA_DIRS, "media_dirs"),
        (CONF_LEGACY_TEMPLATES, "legacy_templates"),
    ):
        if key in config:
            setattr(hac, attr, config[key])
    if CONF_TIME_ZONE in config:
        hac.set_time_zone(config[CONF_TIME_ZONE])
    if CONF_MEDIA_DIRS not in config:
        # Default media dir differs inside Docker.
        if is_docker_env():
            hac.media_dirs = {"local": "/media"}
        else:
            hac.media_dirs = {"local": hass.config.path("media")}
    # Init whitelist external dir: www dir plus every configured media dir.
    hac.allowlist_external_dirs = {hass.config.path("www"), *hac.media_dirs.values()}
    if CONF_ALLOWLIST_EXTERNAL_DIRS in config:
        hac.allowlist_external_dirs.update(set(config[CONF_ALLOWLIST_EXTERNAL_DIRS]))
    elif LEGACY_CONF_WHITELIST_EXTERNAL_DIRS in config:
        # Deprecated key still honored, but warn the user to migrate.
        _LOGGER.warning(
            "Key %s has been replaced with %s. Please update your config",
            LEGACY_CONF_WHITELIST_EXTERNAL_DIRS,
            CONF_ALLOWLIST_EXTERNAL_DIRS,
        )
        hac.allowlist_external_dirs.update(
            set(config[LEGACY_CONF_WHITELIST_EXTERNAL_DIRS])
        )
    # Init whitelist external URL list – make sure to add / to every URL that doesn't
    # already have it so that we can properly test "path ownership"
    if CONF_ALLOWLIST_EXTERNAL_URLS in config:
        hac.allowlist_external_urls.update(
            url if url.endswith("/") else f"{url}/"
            for url in config[CONF_ALLOWLIST_EXTERNAL_URLS]
        )
    # Customize: merge top-level customize data with per-package customize.
    cust_exact = dict(config[CONF_CUSTOMIZE])
    cust_domain = dict(config[CONF_CUSTOMIZE_DOMAIN])
    cust_glob = OrderedDict(config[CONF_CUSTOMIZE_GLOB])
    for name, pkg in config[CONF_PACKAGES].items():
        pkg_cust = pkg.get(CONF_CORE)
        if pkg_cust is None:
            continue
        try:
            pkg_cust = CUSTOMIZE_CONFIG_SCHEMA(pkg_cust)
        except vol.Invalid:
            # Invalid package customize is skipped, not fatal.
            _LOGGER.warning("Package %s contains invalid customize", name)
            continue
        cust_exact.update(pkg_cust[CONF_CUSTOMIZE])
        cust_domain.update(pkg_cust[CONF_CUSTOMIZE_DOMAIN])
        cust_glob.update(pkg_cust[CONF_CUSTOMIZE_GLOB])
    hass.data[DATA_CUSTOMIZE] = EntityValues(cust_exact, cust_domain, cust_glob)
    if CONF_UNIT_SYSTEM in config:
        if config[CONF_UNIT_SYSTEM] == CONF_UNIT_SYSTEM_IMPERIAL:
            hac.units = IMPERIAL_SYSTEM
        else:
            hac.units = METRIC_SYSTEM
    elif CONF_TEMPERATURE_UNIT in config:
        # Deprecated: derive the unit system from the temperature unit.
        unit = config[CONF_TEMPERATURE_UNIT]
        hac.units = METRIC_SYSTEM if unit == TEMP_CELSIUS else IMPERIAL_SYSTEM
        _LOGGER.warning(
            "Found deprecated temperature unit in core "
            "configuration expected unit system. Replace '%s: %s' "
            "with '%s: %s'",
            CONF_TEMPERATURE_UNIT,
            unit,
            CONF_UNIT_SYSTEM,
            hac.units.name,
        )
def _log_pkg_error(package: str, component: str, config: Dict, message: str) -> None:
    """Log an error while merging packages."""
    # Prefer the package's own config node for file/line annotations;
    # fall back to the whole config when the package is not present.
    pack_config = config[CONF_CORE][CONF_PACKAGES].get(package, config)
    config_file = getattr(pack_config, '__config_file__', '?')
    line = getattr(pack_config, '__line__', '?')
    full_message = (
        f"Package {package} setup failed. Integration {component} {message}"
        f" (See {config_file}:{line}). "
    )
    _LOGGER.error(full_message)
def _identify_config_schema(module: ModuleType) -> Optional[str]:
    """Extract the schema and identify list or dict based.

    Returns "dict", "list", or None when the shape cannot be determined
    from the module's CONFIG_SCHEMA.
    """
    if not isinstance(module.CONFIG_SCHEMA, vol.Schema):  # type: ignore
        return None
    schema = module.CONFIG_SCHEMA.schema  # type: ignore
    if isinstance(schema, vol.All):
        # Unwrap vol.All and use its first plain-dict validator.
        for subschema in schema.validators:
            if isinstance(subschema, dict):
                schema = subschema
                break
        else:
            return None
    try:
        # Find the schema key matching the integration's domain.
        key = next(k for k in schema if k == module.DOMAIN)  # type: ignore
    except (TypeError, AttributeError, StopIteration):
        return None
    except Exception:  # pylint: disable=broad-except
        _LOGGER.exception("Unexpected error identifying config schema")
        return None
    if hasattr(key, "default") and not isinstance(
        key.default, vol.schema_builder.Undefined
    ):
        # Key has a concrete default: run it through the schema and
        # classify by the type of the resulting default value.
        default_value = module.CONFIG_SCHEMA({module.DOMAIN: key.default()})[  # type: ignore
            module.DOMAIN  # type: ignore
        ]
        if isinstance(default_value, dict):
            return "dict"
        if isinstance(default_value, list):
            return "list"
        return None
    # No usable default: classify by the string form of the sub-schema.
    domain_schema = schema[key]
    t_schema = str(domain_schema)
    if t_schema.startswith("{") or "schema_with_slug_keys" in t_schema:
        return "dict"
    if t_schema.startswith(("[", "All(<function ensure_list")):
        return "list"
    return None
def _recursive_merge(conf: Dict[str, Any], package: Dict[str, Any]) -> Union[bool, str]:
"""Merge package into conf, recursively."""
error: Union[bool, str] = False
for key, pack_conf in package.items():
if isinstance(pack_conf, dict):
if not pack_conf:
continue
conf[key] = conf.get(key, OrderedDict())
error = _recursive_merge(conf=conf[key], package=pack_conf)
elif isinstance(pack_conf, list):
conf[key] = cv.remove_falsy(
cv.ensure_list(conf.get(key)) + cv.ensure_list(pack_conf)
)
else:
if conf.get(key) is not None:
return key
conf[key] = pack_conf
return error
async def merge_packages_config(
    hass: HomeAssistant,
    config: Dict,
    packages: Dict[str, Any],
    _log_pkg_error: Callable = _log_pkg_error,
) -> Dict:
    """Merge packages into the top-level configuration. Mutate config.

    Each package maps component names to component configuration; the
    configuration is merged into the matching top-level keys. Errors are
    logged via _log_pkg_error (injectable for testing) and do not abort
    the merge of the remaining packages.
    """
    PACKAGES_CONFIG_SCHEMA(packages)
    for pack_name, pack_conf in packages.items():
        for comp_name, comp_conf in pack_conf.items():
            if comp_name == CONF_CORE:
                # Core customize data is merged elsewhere.
                continue
            # If component name is given with a trailing description, remove it
            # when looking for component
            domain = comp_name.split(" ")[0]
            try:
                integration = await async_get_integration_with_requirements(
                    hass, domain
                )
                component = integration.get_component()
            except INTEGRATION_LOAD_EXCEPTIONS as ex:
                _log_pkg_error(pack_name, comp_name, config, str(ex))
                continue
            # List-style configs (platform-based or list schemas) are
            # concatenated rather than dict-merged.
            merge_list = hasattr(component, "PLATFORM_SCHEMA")
            if not merge_list and hasattr(component, "CONFIG_SCHEMA"):
                merge_list = _identify_config_schema(component) == "list"
            if merge_list:
                config[comp_name] = cv.remove_falsy(
                    cv.ensure_list(config.get(comp_name)) + cv.ensure_list(comp_conf)
                )
                continue
            if comp_conf is None:
                comp_conf = OrderedDict()
            if not isinstance(comp_conf, dict):
                _log_pkg_error(
                    pack_name, comp_name, config, "cannot be merged. Expected a dict."
                )
                continue
            if comp_name not in config or config[comp_name] is None:
                config[comp_name] = OrderedDict()
            if not isinstance(config[comp_name], dict):
                _log_pkg_error(
                    pack_name,
                    comp_name,
                    config,
                    "cannot be merged. Dict expected in main config.",
                )
                continue
            # Dict-style configs merge recursively; duplicate scalar keys
            # are reported as errors.
            error = _recursive_merge(conf=config[comp_name], package=comp_conf)
            if error:
                _log_pkg_error(
                    pack_name, comp_name, config, f"has duplicate key '{error}'"
                )
    return config
async def async_process_component_config(
    hass: HomeAssistant, config: ConfigType, integration: Integration
) -> Optional[ConfigType]:
    """Check component configuration and return processed configuration.

    Validation order: the integration's custom config validator (if any),
    then its CONFIG_SCHEMA, then per-platform schema validation.
    Returns None on error.
    This method must be run in the event loop.
    """
    domain = integration.domain
    try:
        component = integration.get_component()
    except LOAD_EXCEPTIONS as ex:
        _LOGGER.error("Unable to import %s: %s", domain, ex)
        return None
    # Check if the integration has a custom config validator
    config_validator = None
    try:
        config_validator = integration.get_platform("config")
    except ImportError as err:
        # Filter out import error of the config platform.
        # If the config platform contains bad imports, make sure
        # that still fails.
        if err.name != f"{integration.pkg_path}.config":
            _LOGGER.error("Error importing config platform %s: %s", domain, err)
            return None
    if config_validator is not None and hasattr(
        config_validator, "async_validate_config"
    ):
        try:
            return await config_validator.async_validate_config(  # type: ignore
                hass, config
            )
        except (vol.Invalid, HomeAssistantError) as ex:
            async_log_exception(ex, domain, config, hass, integration.documentation)
            return None
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unknown error calling %s config validator", domain)
            return None
    # No custom config validator, proceed with schema validation
    if hasattr(component, "CONFIG_SCHEMA"):
        try:
            return component.CONFIG_SCHEMA(config)  # type: ignore
        except vol.Invalid as ex:
            async_log_exception(ex, domain, config, hass, integration.documentation)
            return None
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unknown error calling %s CONFIG_SCHEMA", domain)
            return None
    component_platform_schema = getattr(
        component, "PLATFORM_SCHEMA_BASE", getattr(component, "PLATFORM_SCHEMA", None)
    )
    if component_platform_schema is None:
        # No platform schema either: return the config unvalidated.
        return config
    platforms = []
    for p_name, p_config in config_per_platform(config, domain):
        # Validate component specific platform schema
        try:
            p_validated = component_platform_schema(p_config)
        except vol.Invalid as ex:
            async_log_exception(ex, domain, p_config, hass, integration.documentation)
            continue
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Unknown error validating %s platform config with %s component platform schema",
                p_name,
                domain,
            )
            continue
        # Not all platform components follow same pattern for platforms
        # So if p_name is None we are not going to validate platform
        # (the automation component is one of them)
        if p_name is None:
            platforms.append(p_validated)
            continue
        try:
            p_integration = await async_get_integration_with_requirements(hass, p_name)
        except (RequirementsNotFound, IntegrationNotFound) as ex:
            _LOGGER.error("Platform error: %s - %s", domain, ex)
            continue
        try:
            platform = p_integration.get_platform(domain)
        except LOAD_EXCEPTIONS:
            _LOGGER.exception("Platform error: %s", domain)
            continue
        # Validate platform specific schema
        if hasattr(platform, "PLATFORM_SCHEMA"):
            try:
                p_validated = platform.PLATFORM_SCHEMA(p_config)  # type: ignore
            except vol.Invalid as ex:
                async_log_exception(
                    ex,
                    f"{domain}.{p_name}",
                    p_config,
                    hass,
                    p_integration.documentation,
                )
                continue
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception(
                    "Unknown error validating config for %s platform for %s component with PLATFORM_SCHEMA",
                    p_name,
                    domain,
                )
                continue
        platforms.append(p_validated)
    # Create a copy of the configuration with all config for current
    # component removed and add validated config back in.
    config = config_without_domain(config, domain)
    config[domain] = platforms
    return config
@callback
def config_without_domain(config: Dict, domain: str) -> Dict:
    """Return a config with all configuration for a domain removed."""
    keys_to_drop = set(extract_domain_configs(config, domain))
    return {k: v for k, v in config.items() if k not in keys_to_drop}
async def async_check_ha_config_file(hass: HomeAssistant) -> Optional[str]:
    """Check if Home Assistant configuration file is valid.

    Returns None when the configuration is valid, otherwise a string
    describing the errors. This method is a coroutine.
    """
    # pylint: disable=import-outside-toplevel
    # Imported lazily to avoid a circular import at module load time.
    import homeassistant.helpers.check_config as check_config
    res = await check_config.async_check_ha_config_file(hass)
    if not res.errors:
        return None
    return res.error_str
@callback
def async_notify_setup_error(
    hass: HomeAssistant, component: str, display_link: Optional[str] = None
) -> None:
    """Print a persistent notification listing all failed setups so far.

    Accumulates failed components in hass.data and re-creates one
    combined notification. This method must be run in the event loop.
    """
    # pylint: disable=import-outside-toplevel
    from homeassistant.components import persistent_notification
    errors = hass.data.get(DATA_PERSISTENT_ERRORS)
    if errors is None:
        errors = hass.data[DATA_PERSISTENT_ERRORS] = {}
    # Keep an existing link for a component if one was already recorded.
    errors[component] = errors.get(component) or display_link
    message = "The following integrations and platforms could not be set up:\n\n"
    for name, link in errors.items():
        # Render as a markdown link when documentation is available.
        part = f"[{name}]({link})" if link else name
        message += f" - {part}\n"
    message += "\nPlease check your config and [logs](/config/logs)."
    persistent_notification.async_create(
        hass, message, "Invalid config", "invalid_config"
    )
| {
"content_hash": "db54c5e9fa809ca3aedc9dab5014525b",
"timestamp": "",
"source": "github",
"line_count": 931,
"max_line_length": 108,
"avg_line_length": 33.13641245972073,
"alnum_prop": 0.6137439222042139,
"repo_name": "partofthething/home-assistant",
"id": "cfc1390a37ba2c5efe05c626de094115ec8b786c",
"size": "30852",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.customer_facing_user_object import CustomerFacingUserObject # noqa: E501
from wavefront_api_client.rest import ApiException
class TestCustomerFacingUserObject(unittest.TestCase):
    """CustomerFacingUserObject unit test stubs.

    Generated by swagger-codegen; the test body is a placeholder until
    the model can be constructed with example values.
    """
    def setUp(self):
        # No fixtures required yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testCustomerFacingUserObject(self):
        """Test CustomerFacingUserObject"""
        # FIXME: construct object with mandatory attributes with example values
        # model = wavefront_api_client.models.customer_facing_user_object.CustomerFacingUserObject()  # noqa: E501
        pass
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "a719e73fc0bbeaa1940b14139da320c9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 409,
"avg_line_length": 35.5,
"alnum_prop": 0.7338769458858414,
"repo_name": "wavefrontHQ/python-client",
"id": "11651afe1746a2ffd1234bdff0b7b3809c84c57c",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_customer_facing_user_object.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4642252"
},
{
"name": "Shell",
"bytes": "3458"
}
],
"symlink_target": ""
} |
"""Tests frame.inspect() """
import unittest
from sparktkregtests.lib import sparktk_test
class FrameInspectTest(sparktk_test.SparkTKTestCase):
    """Regression tests for frame.get_inspect()/take() row counts and
    argument validation (offset, n, columns)."""
    def setUp(self):
        """Build test frame"""
        super(FrameInspectTest, self).setUp()
        dataset = self.get_file("movie_user_5ratings.csv")
        schema = [("src", int),
                  ("vertex_type", str),
                  ("dest", int),
                  ("weight", int),
                  ("edge_type", str)]
        self.frame = self.context.frame.import_csv(
            dataset, schema=schema)
    def test_frame_inspect_0_offset(self):
        """Test offset of 0 does nothing"""
        inspect = self.frame.get_inspect(n=5, offset=0)
        self.assertEqual(len(inspect.rows), 5)
    def test_frame_inspect_offset_large(self):
        """Test offset of a large value"""
        inspect = self.frame.get_inspect(n=5, offset=1000)
        self.assertEqual(len(inspect.rows), 5)
    def test_frame_inspect_offset_overflow(self):
        """Test inspecting more lines than in frrame from offset truncates"""
        # Only 3 rows remain after the offset, so only 3 are returned.
        inspect = self.frame.get_inspect(n=10, offset=self.frame.count()-3)
        self.assertEqual(len(inspect.rows), 3)
    def test_frame_inspect_0_count(self):
        """Test inspecting 0 rows returns nothing"""
        inspect = self.frame.get_inspect(n=0)
        self.assertEqual(len(inspect.rows), 0)
    def test_frame_inspect_n(self):
        """Test requesting n rows returns n rows"""
        inspect = self.frame.get_inspect(n=1)
        self.assertEqual(len(inspect.rows), 1)
    def test_frame_inspect_default(self):
        """Test the default number of rows is 10"""
        inspect = self.frame.get_inspect()
        self.assertEqual(len(inspect.rows), 10)
    def test_frame_inspect_all(self):
        """Test inspecting entire frame returns entire frame"""
        inspect = self.frame.get_inspect(n=self.frame.count())
        self.assertEqual(len(inspect.rows), self.frame.count())
    def test_frame_inspect_count_overflow(self):
        """Test inspecting more than entire frame returns the entire frame"""
        row_count = self.frame.count()
        inspect = self.frame.get_inspect(n=row_count*10)
        self.assertEqual(len(inspect.rows), row_count)
        # compare 'inspect' with the actual entire frame RowInspection object
        self.assertEqual(str(inspect),
                         str(self.frame.get_inspect(n=row_count)))
    def test_negative_offset(self):
        """Test a negative offset errors"""
        with self.assertRaisesRegexp(
                ValueError, "Expected non-negative integer"):
            self.frame.get_inspect(n=5, offset=-1)
    def test_negative_count(self):
        """Test taking a negative number of rows errors"""
        with self.assertRaises(ValueError):
            self.frame.get_inspect(n=-1)
    def test_float_count(self):
        """Test float for count errors"""
        with self.assertRaisesRegexp(TypeError, "Expected type <type 'int'>"):
            self.frame.get_inspect(n=1.5)
    def test_float_offset(self):
        """Test float for offset errors"""
        with self.assertRaises(TypeError):
            self.frame.get_inspect(n=1, offset=1.5)
    def test_take_no_columns(self):
        """Test taking an empty list of columns gets an empty list"""
        self.assertEqual([], self.frame.take(n=10, columns=[]))
    def test_take_invalid_column(self):
        """Test taking a column that doesn't exist errors"""
        with self.assertRaisesRegexp(
                ValueError, "Invalid column name .* provided"):
            self.frame.take(n=10, columns=["no_such_col", "weight"])
# Allow running this regression suite directly as a script.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "968a336a3c481027bfce6b2585787841",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 37,
"alnum_prop": 0.6162697350816163,
"repo_name": "grehx/spark-tk",
"id": "b07ea73c45b41ce39d9a79348da8dde158ace239",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regression-tests/sparktkregtests/testcases/frames/frame_inspect_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "31130"
},
{
"name": "Python",
"bytes": "1767772"
},
{
"name": "R",
"bytes": "2242"
},
{
"name": "Scala",
"bytes": "2250722"
},
{
"name": "Shell",
"bytes": "29752"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from django.conf import settings
from django.contrib import admin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from plugins.models import Slide, Slider
from plugins.serializers import SliderWithInlinesPluginSerializer
class SlideInlineAdmin(admin.StackedInline):
    # Inline admin used by the slider plugins so Slide objects can be
    # edited directly on the Slider plugin change form.
    model = Slide
class SliderWithInlinesPlugin(CMSPluginBase):
    """
    Sample Slider plugin, to test inlines processing with API.
    No styles and scripts, since it is not important for the API.
    No serializer_class is set, unlike SliderPlugin below.
    """
    model = Slider
    # NOTE(review): SliderPlugin below uses the same display name
    # 'Slider' — confirm the duplicate name is intentional.
    name = 'Slider'
    render_template = 'plugins/slider.html'
    inlines = (SlideInlineAdmin,)
    def render(self, context, instance, placeholder):
        # Expose the instance's slides and MEDIA_URL to the template.
        slides = instance.slides.all()
        context.update({
            'slides': slides,
            'instance': instance,
            'MEDIA_URL': settings.MEDIA_URL
        })
        return context
plugin_pool.register_plugin(SliderWithInlinesPlugin)
class SliderPlugin(CMSPluginBase):
    """
    Plugin with predefined serializer class for API.
    Identical to SliderWithInlinesPlugin except for serializer_class.
    """
    model = Slider
    name = 'Slider'
    render_template = 'plugins/slider.html'
    inlines = (SlideInlineAdmin,)
    # Serializer used when this plugin is exposed through the REST API.
    serializer_class = SliderWithInlinesPluginSerializer
    def render(self, context, instance, placeholder):
        # Expose the instance's slides and MEDIA_URL to the template.
        slides = instance.slides.all()
        context.update({
            'slides': slides,
            'instance': instance,
            'MEDIA_URL': settings.MEDIA_URL
        })
        return context
plugin_pool.register_plugin(SliderPlugin)
| {
"content_hash": "223a2abbd84eeb637b7e275b2eb17a14",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 28,
"alnum_prop": 0.6842105263157895,
"repo_name": "divio/djangocms-rest-api",
"id": "007200e2f6ec1b0c8a1bb9e5653842daed21a457",
"size": "1620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/plugins/cms_plugins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74"
},
{
"name": "HTML",
"bytes": "4624"
},
{
"name": "JavaScript",
"bytes": "212"
},
{
"name": "Python",
"bytes": "47186"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import sys
import os
import inspect
from distutils.version import LooseVersion
from ..ext.six import string_types
from ..util import use_log_level
###############################################################################
# Adapted from Python's unittest2
# http://docs.python.org/2/license.html
# Prefer the stdlib SkipTest; fall back to the unittest2 backport, and
# finally define a minimal stand-in so raising SkipTest always works.
try:
    from unittest.case import SkipTest
except ImportError:
    try:
        from unittest2.case import SkipTest
    except ImportError:
        class SkipTest(Exception):
            pass
def _safe_rep(obj, short=False):
"""Helper for assert_* ports"""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < 80:
return result
return result[:80] + ' [truncated]...'
def _safe_str(obj):
"""Helper for assert_* ports"""
try:
return str(obj)
except Exception:
return object.__str__(obj)
def _format_msg(msg, std_msg):
"""Helper for assert_* ports"""
if msg is None:
msg = std_msg
else:
try:
msg = '%s : %s' % (std_msg, msg)
except UnicodeDecodeError:
msg = '%s : %s' % (_safe_str(std_msg), _safe_str(msg))
return msg
def nottest(func):
    """Decorator to mark a function or method as *not* a test, so test
    collectors skip it."""
    setattr(func, '__test__', False)
    return func
def assert_raises(exp, func, *args, **kwargs):
    """Assert that calling ``func(*args, **kwargs)`` raises *exp*."""
    try:
        func(*args, **kwargs)
    except exp:
        # Expected exception observed: success.
        return
    raise AssertionError(
        _format_msg(None, '%s not raised' % (_safe_rep(exp))))
def assert_in(member, container, msg=None):
    """Assert that *member* occurs in *container*."""
    if member not in container:
        std_msg = '%s not found in %s' % (_safe_rep(member),
                                          _safe_rep(container))
        raise AssertionError(_format_msg(msg, std_msg))
def assert_true(x, msg=None):
    """Assert that *x* is truthy."""
    if not x:
        std_msg = '%s is not True' % (_safe_rep(x),)
        raise AssertionError(_format_msg(msg, std_msg))
def assert_equal(x, y, msg=None):
    """Assert that x == y."""
    if not (x == y):
        std_msg = '%s not equal to %s' % (_safe_rep(x), _safe_rep(y))
        raise AssertionError(_format_msg(msg, std_msg))
def assert_not_equal(x, y, msg=None):
    """Assert that x != y."""
    if not (x != y):
        std_msg = '%s equal to %s' % (_safe_rep(x), _safe_rep(y))
        raise AssertionError(_format_msg(msg, std_msg))
def assert_not_in(member, container, msg=None):
    """Assert that *member* does not occur in *container*."""
    if member in container:
        std_msg = '%s found in %s' % (_safe_rep(member),
                                      _safe_rep(container))
        raise AssertionError(_format_msg(msg, std_msg))
def assert_is(expr1, expr2, msg=None):
    """Assert that *expr1* and *expr2* are the same object."""
    if expr1 is expr2:
        return
    std_msg = '%s is not %s' % (_safe_rep(expr1), _safe_rep(expr2))
    raise AssertionError(_format_msg(msg, std_msg))
class raises(object):
    """Context manager asserting that its body raises the given
    exception type; any other outcome raises AssertionError."""
    def __init__(self, exc):
        self.exc = exc
    def __enter__(self):
        return self
    def __exit__(self, exc_typ, exc, tb):
        # Swallow the expected exception by returning True.
        if isinstance(exc, self.exc):
            return True
        if exc is None:
            raise AssertionError("Expected %s (no exception raised)" %
                                 self.exc.__name__)
        raise AssertionError("Expected %s, got %s instead" %
                             (self.exc.__name__, type(exc).__name__))
###############################################################################
# GL stuff
def has_pyopengl():
    """Return True when PyOpenGL's GL module can be imported."""
    try:
        from OpenGL import GL  # noqa, analysis:ignore
    except Exception:
        # Any import failure means PyOpenGL is unusable here.
        return False
    return True
def requires_pyopengl():
    # Skip-decorator for tests that need PyOpenGL installed.
    # NOTE(review): relies on np.testing.dec, which newer NumPy releases
    # removed — confirm against the NumPy versions this project supports.
    return np.testing.dec.skipif(not has_pyopengl(), 'Requires PyOpenGL')
###############################################################################
# App stuff
def has_backend(backend, has=(), capable=(), out=()):
    """Check whether an app *backend* is testable, optionally requiring
    has_* attributes (*has*) and capability flags (*capable*).

    Returns a bool, or a tuple (bool, *extras) when *out* names extra
    backend-module attributes to fetch.
    """
    from ..app.backends import BACKENDMAP
    # Honor the backend pinned by the test-runner environment variable.
    using = os.getenv('_VISPY_TESTING_APP', None)
    if using is not None and using != backend:
        # e.g., we are on a 'pyglet' run but the test requires PyQt4
        ret = (False,) if len(out) > 0 else False
        for o in out:
            ret += (None,)
        return ret
    # let's follow the standard code path
    module_name = BACKENDMAP[backend.lower()][1]
    with use_log_level('warning', print_msg=False):
        mod = __import__('app.backends.%s' % module_name, globals(), level=2)
        mod = getattr(mod.backends, module_name)
    # Start from the module's own testability flag, then AND in the
    # requested has_* attributes and capability entries.
    good = mod.testable
    for h in has:
        good = (good and getattr(mod, 'has_%s' % h))
    for cap in capable:
        good = (good and mod.capability[cap])
    ret = (good,) if len(out) > 0 else good
    for o in out:
        ret += (getattr(mod, o),)
    return ret
def has_application(backend=None, has=(), capable=()):
    """Determine if a suitable app backend exists.

    Returns (good, msg): *good* is True when a usable backend was found;
    *msg* is the backend name or an explanation of why none qualified.
    """
    from ..app.backends import BACKEND_NAMES
    # avoid importing other backends if we don't need to
    if backend is None:
        # Probe each known backend until one qualifies.
        for backend in BACKEND_NAMES:
            if has_backend(backend, has=has, capable=capable):
                good = True
                msg = backend
                break
        else:
            good = False
            msg = 'Requires application backend'
    else:
        # Specific backend requested: also fetch its "why_not" reason.
        good, why = has_backend(backend, has=has, capable=capable,
                                out=['why_not'])
        if not good:
            msg = 'Requires %s: %s' % (backend, why)
        else:
            msg = backend
    return good, msg
def composed(*decs):
    """Merge several decorators into one.

    Decorators are applied right-to-left, matching stacked @-syntax.
    """
    def apply_all(func):
        for decorator in decs[::-1]:
            func = decorator(func)
        return func
    return apply_all
def requires_application(backend=None, has=(), capable=()):
    """Return a decorator for tests that require an application backend.

    The returned decorator skips the test when no suitable backend is
    available and, when pytest is importable, also tags the test with
    the ``vispy_app_test`` marker.
    """
    good, msg = has_application(backend, has, capable)
    dec_backend = np.testing.dec.skipif(not good, "Skipping test: %s" % msg)
    try:
        import pytest
    except Exception:
        # Without pytest only the skip decorator applies.
        return dec_backend
    dec_app = pytest.mark.vispy_app_test
    return composed(dec_app, dec_backend)
def requires_img_lib():
    """Decorator for tests that require an image library."""
    from ..io import _check_img_lib
    if sys.platform.startswith('win'):
        has_img_lib = False  # PIL breaks tests on windows (!)
    else:
        # _check_img_lib reports available image backends; any non-None
        # entry means at least one library is usable.
        has_img_lib = not all(c is None for c in _check_img_lib())
    return np.testing.dec.skipif(not has_img_lib, 'imageio or PIL required')
def has_ipython(version='3.0'):
    """Check that IPython at least *version* is importable.

    Returns (present, message).
    """
    # typecast version to a string, in case an integer is given
    version = str(version)
    try:
        import IPython  # noqa
    except Exception:
        return False, "IPython library not found"
    else:
        if LooseVersion(IPython.__version__) >= LooseVersion(version):
            return True, "IPython present"
        else:
            message = (
                "current IPython version: (%s) is "
                "older than expected version: (%s)") % \
                (IPython.__version__, version)
            return False, message
def requires_ipython(version='3.0'):
    # Skip-decorator for tests needing IPython >= *version*.
    ipython_present, message = has_ipython(version)
    return np.testing.dec.skipif(not ipython_present, message)
def has_matplotlib(version='1.2'):
    """Determine if matplotlib at least *version* is importable."""
    try:
        import matplotlib
    except Exception:
        has_mpl = False
    else:
        # Importable: usable only when it meets the minimum version.
        if LooseVersion(matplotlib.__version__) >= LooseVersion(version):
            has_mpl = True
        else:
            has_mpl = False
    return has_mpl
###############################################################################
# Visuals stuff
def _has_scipy(min_version):
    """Return True if SciPy >= *min_version* (a string) is importable."""
    try:
        # min_version must be a string for the LooseVersion comparison.
        assert isinstance(min_version, string_types)
        import scipy  # noqa, analysis:ignore
        from distutils.version import LooseVersion
        this_version = LooseVersion(scipy.__version__)
        if this_version < min_version:
            return False
    except Exception:
        # Import/version failures of any kind count as "not available".
        return False
    else:
        return True
def requires_scipy(min_version='0.13'):
    # Skip-decorator for tests needing SciPy >= *min_version*.
    return np.testing.dec.skipif(not _has_scipy(min_version),
                                 'Requires Scipy version >= %s' % min_version)
@nottest
def TestingCanvas(bgcolor='black', size=(100, 100), dpi=None, decorate=False,
                  **kwargs):
    """Factory returning a SceneCanvas tailored for tests.

    Class wrapper to avoid importing scene until necessary.
    """
    # On Windows decorations can force windows to be an incorrect size
    # (e.g., instead of 100x100 they will be 100x248), having no
    # decorations works around this
    from ..scene import SceneCanvas
    class TestingCanvas(SceneCanvas):
        def __init__(self, bgcolor, size, dpi, decorate, **kwargs):
            # Drawing is a no-op until __enter__ completes.
            self._entered = False
            SceneCanvas.__init__(self, bgcolor=bgcolor, size=size,
                                 dpi=dpi, decorate=decorate,
                                 **kwargs)
        def __enter__(self):
            SceneCanvas.__enter__(self)
            # sometimes our window can be larger than our requested draw
            # area (e.g. on Windows), and this messes up our tests that
            # typically use very small windows. Here we "fix" it.
            scale = np.array(self.physical_size) / np.array(self.size, float)
            scale = int(np.round(np.mean(scale)))
            self._wanted_vp = 0, 0, size[0] * scale, size[1] * scale
            self.context.set_state(clear_color=self._bgcolor)
            self.context.set_viewport(*self._wanted_vp)
            self._entered = True
            return self
        def draw_visual(self, visual, event=None, viewport=None, clear=True):
            if not self._entered:
                return
            if clear:
                self.context.clear()
            SceneCanvas.draw_visual(self, visual, event, viewport)
            # must set this because draw_visual sets it back to the
            # canvas size when it's done
            self.context.set_viewport(*self._wanted_vp)
            self.context.finish()
    return TestingCanvas(bgcolor, size, dpi, decorate, **kwargs)
@nottest
def save_testing_image(image, location):
    """Save *image* (an array, or the string "screenshot" to grab the
    current framebuffer) as a PNG at *location* + '.png'."""
    from ..gloo.util import _screenshot
    from ..util import make_png
    if image == "screenshot":
        image = _screenshot(alpha=False)
    with open(location+'.png', 'wb') as fid:
        fid.write(make_png(image))
@nottest
def run_tests_if_main():
    """Run tests in a given file if it is run as a script.

    Inspects the CALLER's frame to detect __main__; prefers pytest and
    falls back to the built-in run_tests_in_object runner.
    """
    local_vars = inspect.currentframe().f_back.f_locals
    if not local_vars.get('__name__', '') == '__main__':
        return
    # we are in a "__main__"
    fname = local_vars['__file__']
    # Run ourselves. post-mortem debugging!
    try:
        import faulthandler
        faulthandler.enable()
    except Exception:
        # faulthandler is optional; ignore if unavailable.
        pass
    import __main__
    try:
        import pytest
        pytest.main(['-s', '--tb=short', fname])
    except ImportError:
        print('==== Running tests in script\n==== %s' % fname)
        run_tests_in_object(__main__)
        print('==== Tests pass')
def run_tests_in_object(ob):
    """Minimal test runner: call setup*, then test functions/classes in
    *ob* (case-insensitive sorted order), then teardown*."""
    # Setup
    for name in dir(ob):
        if name.lower().startswith('setup'):
            print('Calling %s' % name)
            getattr(ob, name)()
    # Exec
    for name in sorted(dir(ob), key=lambda x: x.lower()):  # consistent order
        val = getattr(ob, name)
        if name.startswith('_'):
            continue
        elif callable(val) and (name[:4] == 'test' or name[-4:] == 'test'):
            print('Running test-func %s ... ' % name, end='')
            try:
                val()
                print('ok')
            except Exception as err:
                # Treat any SkipTest-like exception as a skip by name.
                if 'skiptest' in err.__class__.__name__.lower():
                    print('skip')
                else:
                    raise
        elif isinstance(val, type) and 'Test' in name:
            # Recurse into test classes by instantiating them.
            print('== Running test-class %s' % name)
            run_tests_in_object(val())
            print('== Done with test-class %s' % name)
    # Teardown
    for name in dir(ob):
        if name.lower().startswith('teardown'):
            print('Calling %s' % name)
            getattr(ob, name)()
| {
"content_hash": "bd4bb7c0a07ff62816ebcfb081f707a7",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 79,
"avg_line_length": 30.149144254278728,
"alnum_prop": 0.5589976482037142,
"repo_name": "hronoses/vispy",
"id": "fb4a9797c2393200127434b0de22d5b1b9759a8e",
"size": "12657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispy/testing/_testing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "171513"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "PowerShell",
"bytes": "4151"
},
{
"name": "Python",
"bytes": "2858273"
}
],
"symlink_target": ""
} |
import os
import re
import subprocess
import time
import urllib.request
from urllib.error import HTTPError
# Crate paths (relative to the repo root) to upload, presumably ordered so
# dependencies land on crates.io before their dependents; '.' is the
# top-level cargo crate itself and therefore goes last -- TODO confirm.
TO_PUBLISH = [
    'crates/cargo-platform',
    'crates/cargo-util',
    'crates/crates-io',
    '.',
]
def already_published(name, version):
    """Return True when *name* at *version* already exists on crates.io.

    Probes the download endpoint for the exact version: a 404 means the
    version is absent (returns False), success means it is published, and
    any other HTTP error is unexpected and re-raised.
    """
    url = 'https://crates.io/api/v1/crates/%s/%s/download' % (name, version)
    try:
        # Use the response as a context manager so the connection is
        # closed promptly (the original leaked the open socket).
        with urllib.request.urlopen(url):
            pass
    except HTTPError as e:
        if e.code == 404:
            return False
        raise
    return True
def maybe_publish(path):
    """Publish the crate at *path* unless its version is already on crates.io.

    Reads name/version out of the crate's Cargo.toml, skips (returning
    False) if that version is already published, otherwise runs
    ``cargo publish`` and returns True.
    """
    # Close the manifest deterministically; the original leaked the handle.
    with open(os.path.join(path, 'Cargo.toml')) as manifest:
        content = manifest.read()
    name = re.search('^name = "([^"]+)"', content, re.M).group(1)
    version = re.search('^version = "([^"]+)"', content, re.M).group(1)
    if already_published(name, version):
        print('%s %s is already published, skipping' % (name, version))
        return False
    subprocess.check_call(['cargo', 'publish', '--no-verify'], cwd=path)
    return True
def main():
    """Publish every crate in TO_PUBLISH in order, pausing between uploads."""
    print('Starting publish...')
    last_index = len(TO_PUBLISH) - 1
    for index, crate_path in enumerate(TO_PUBLISH):
        uploaded = maybe_publish(crate_path)
        if uploaded and index != last_index:
            # Sleep to allow the index to update. This should probably
            # check that the index is updated, or use a retry loop
            # instead.
            time.sleep(5)
    print('Publish complete!')
# Script entry point: allows importing this module without publishing.
if __name__ == '__main__':
    main()
| {
"content_hash": "0a2c1937da65387208a1e0e9b4c2fc4a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 98,
"avg_line_length": 26.470588235294116,
"alnum_prop": 0.5881481481481482,
"repo_name": "eminence/cargo",
"id": "5ace18f7282ea545b26a1b045eb1a527f1068da0",
"size": "1428",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "publish.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "13977"
},
{
"name": "Python",
"bytes": "1428"
},
{
"name": "Roff",
"bytes": "320856"
},
{
"name": "Rust",
"bytes": "6057209"
},
{
"name": "Shell",
"bytes": "13125"
}
],
"symlink_target": ""
} |
import webapp2
import os
from google.appengine.ext.webapp import template
import user_filter
class Require(webapp2.RequestHandler):
    """Handler for /require: renders the require page for the current user."""

    def get(self):
        # Resolve the current user and display name via the shared filter.
        user, nickname = user_filter.do_filter()
        context = {
            'user': user,
            'nickname': nickname,
        }
        rendered = template.render('html/require.html', context)
        self.response.out.write(rendered)
# Enable debug mode only on the local dev server (dev_appserver sets
# SERVER_SOFTWARE to a value starting with 'Dev') -- TODO confirm runtime.
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
app = webapp2.WSGIApplication([
('/require', Require),
], debug=debug) | {
"content_hash": "0206584986d15386cd8143b82f57fd49",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 32.59090909090909,
"alnum_prop": 0.4435146443514644,
"repo_name": "thinkAmi/9784798123028_GAE",
"id": "2d4e0dfaebcc5cfe773c701efd5452c6e5eeb35b",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chap10/option/require.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "13215"
},
{
"name": "Python",
"bytes": "525558"
}
],
"symlink_target": ""
} |
""" (Compute) Unit tests
"""
import os
import sys
import radical.pilot as rp
import unittest
import uuid
from copy import deepcopy
from radical.pilot.db import Session
from pymongo import MongoClient
# DBURL defines the MongoDB server URL and has the format mongodb://host:port.
# For the installation of a MongoDB server, refer to the MongoDB website:
# http://docs.mongodb.org/manual/installation/
# Fail fast when the MongoDB connection settings are missing from the
# environment: DBURL points at the server, DBNAME names the scratch
# database the tests below create and drop.
DBURL = os.getenv("RADICAL_PILOT_DBURL")
if DBURL is None:
    print "ERROR: RADICAL_PILOT_DBURL (MongoDB server URL) is not defined."
    sys.exit(1)
DBNAME = os.getenv("RADICAL_PILOT_TEST_DBNAME")
if DBNAME is None:
    print "ERROR: RADICAL_PILOT_TEST_DBNAME (MongoDB database name) is not defined."
    sys.exit(1)
#-----------------------------------------------------------------------------
#
class TestIssue114(unittest.TestCase):
    """Regression tests for radical.pilot issue #114.

    These are integration tests: they need a reachable MongoDB (DBURL) and
    execute real pilots/units on localhost with multi-minute timeouts.
    """
    # silence deprecation warnings under py3
    def setUp(self):
        # clean up fragments from previous tests
        client = MongoClient(DBURL)
        client.drop_database(DBNAME)
    def tearDown(self):
        # clean up after ourselves
        client = MongoClient(DBURL)
        client.drop_database(DBNAME)
    # failUnless/failIf presumably restore the old TestCase aliases for
    # assertTrue/assertFalse removed from newer unittest versions.
    def failUnless(self, expr):
        # St00pid speling.
        return self.assertTrue(expr)
    def failIf(self, expr):
        # St00pid speling.
        return self.assertFalse(expr)
    #-------------------------------------------------------------------------
    #
    def test__issue_114_part_1(self):
        """ https://github.com/radical-cybertools/radical.pilot/issues/114
        """
        session = rp.Session(database_url=DBURL, database_name=DBNAME)
        pm = rp.PilotManager(session=session)
        # Configure a small single-core pilot running locally for 5 minutes.
        cpd = rp.ComputePilotDescription()
        cpd.resource = "local.localhost"
        cpd.cores = 1
        cpd.runtime = 5
        cpd.sandbox = "/tmp/radical.pilot.sandbox.unittests"
        cpd.cleanup = True
        pilot = pm.submit_pilots(pilot_descriptions=cpd)
        state = pm.wait_pilots(state=[rp.ACTIVE,
                                      rp.DONE,
                                      rp.FAILED],
                               timeout=5*60)
        assert (pilot.state == rp.ACTIVE), "pilot state: %s" % pilot.state
        um = rp.UnitManager(
            session=session,
            scheduler=rp.SCHED_DIRECT_SUBMISSION
        )
        um.add_pilots(pilot)
        # Two one-core sleep tasks on a one-core pilot: they must be
        # scheduled one after the other, exercising the issue-114 path.
        all_tasks = []
        for i in range(0,2):
            cudesc = rp.ComputeUnitDescription()
            cudesc.cores = 1
            cudesc.executable = "/bin/sleep"
            cudesc.arguments = ['60']
            all_tasks.append(cudesc)
        units = um.submit_units(all_tasks)
        states = um.wait_units (state=[rp.SCHEDULING, rp.EXECUTING],
                                timeout=2*60)
        assert rp.SCHEDULING in states, "states: %s" % states
        states = um.wait_units (state=[rp.EXECUTING, rp.DONE],
                                timeout=1*60)
        assert rp.EXECUTING in states, "states: %s" % states
        session.close()
    #-------------------------------------------------------------------------
    #
    def test__issue_114_part_2(self):
        """ https://github.com/radical-cybertools/radical.pilot/issues/114
        """
        session = rp.Session(database_url=DBURL, database_name=DBNAME)
        pm = rp.PilotManager(session=session)
        cpd = rp.ComputePilotDescription()
        cpd.resource = "local.localhost"
        cpd.cores = 1
        cpd.runtime = 5
        cpd.sandbox = "/tmp/radical.pilot.sandbox.unittests"
        cpd.cleanup = True
        pilot = pm.submit_pilots(pilot_descriptions=cpd)
        um = rp.UnitManager(
            session=session,
            scheduler=rp.SCHED_DIRECT_SUBMISSION
        )
        um.add_pilots(pilot)
        state = pm.wait_pilots(state=[rp.ACTIVE,
                                      rp.DONE,
                                      rp.FAILED],
                               timeout=5*60)
        assert (pilot.state == rp.ACTIVE), "pilot state: %s" % pilot.state
        # Single sleep task: watch it pass through EXECUTING and DONE.
        cudesc = rp.ComputeUnitDescription()
        cudesc.cores = 1
        cudesc.executable = "/bin/sleep"
        cudesc.arguments = ['60']
        cu = um.submit_units(cudesc)
        state = um.wait_units(state=[rp.EXECUTING], timeout=60)
        assert state == [rp.EXECUTING], 'state   : %s' % state
        assert cu.state == rp.EXECUTING , 'cu state: %s' % cu.state
        state = um.wait_units(timeout=2*60)
        assert state == [rp.DONE], 'state   : %s' % state
        assert cu.state == rp.DONE , 'cu state: %s' % cu.state
        session.close()
    #-------------------------------------------------------------------------
    #
    def test__issue_114_part_3(self):
        """ https://github.com/radical-cybertools/radical.pilot/issues/114
        """
        session = rp.Session(database_url=DBURL, database_name=DBNAME)
        pm = rp.PilotManager(session=session)
        # One-minute pilot: it should go ACTIVE and then expire to DONE.
        cpd = rp.ComputePilotDescription()
        cpd.resource = "local.localhost"
        cpd.cores = 1
        cpd.runtime = 1
        cpd.sandbox = "/tmp/radical.pilot.sandbox.unittests"
        cpd.cleanup = True
        pilot = pm.submit_pilots(pilot_descriptions=cpd)
        um = rp.UnitManager(
            session   = session,
            scheduler = rp.SCHED_DIRECT_SUBMISSION
        )
        um.add_pilots(pilot)
        state = pm.wait_pilots(state=[rp.ACTIVE,
                                      rp.DONE,
                                      rp.FAILED],
                               timeout=10*60)
        assert state == [rp.ACTIVE], 'state      : %s' % state
        assert pilot.state == rp.ACTIVE , 'pilot state: %s' % pilot.state
        state = pm.wait_pilots(timeout=3*60)
        assert state == [rp.DONE], 'state      : %s' % state
        assert pilot.state == rp.DONE , 'pilot state: %s' % pilot.state
        session.close()
| {
"content_hash": "3a7aa84044037056f8c314949bc03350",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 84,
"avg_line_length": 31.978835978835978,
"alnum_prop": 0.5286234281932495,
"repo_name": "JensTimmerman/radical.pilot",
"id": "b734009f663184dd9ebee44eb718ba4180165fd1",
"size": "6044",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "src/radical/pilot/tests/issues/issue_114.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "11424"
},
{
"name": "HTML",
"bytes": "58"
},
{
"name": "JavaScript",
"bytes": "379"
},
{
"name": "Makefile",
"bytes": "261"
},
{
"name": "Python",
"bytes": "803009"
},
{
"name": "Shell",
"bytes": "74376"
}
],
"symlink_target": ""
} |
import mwapi
import mwapi.errors
# Identify this client per MediaWiki API etiquette.
my_agent = 'mwapi demo script <ahalfaker@wikimedia.org>'
# 10.11.12.13 is a private-range address, presumably unreachable from the
# demo machine, so the request below should stall until the 0.5s timeout.
session = mwapi.Session('https://10.11.12.13', user_agent=my_agent,
                        timeout=0.5)
print("Making a request that should hang for 0.5 seconds and then timeout.")
try:
    session.get(action="fake")
except mwapi.errors.TimeoutError as e:
    # Demonstrate that the timeout surfaces as mwapi's own TimeoutError.
    print(e.__class__.__name__, str(e))
| {
"content_hash": "af31ce646fcb6f1df855ca78f7a389c6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 32.5,
"alnum_prop": 0.6692307692307692,
"repo_name": "wikimedia/operations-debs-python-mwapi",
"id": "93004312d24cef9c880ac00eb4f8d0525dde02f3",
"size": "390",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "demo_timeout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13954"
}
],
"symlink_target": ""
} |
from .boolalg import (to_cnf, to_dnf, And, Or, Not, Xor, Nand, Nor, Implies,
Equivalent, ITE, POSform, SOPform, simplify_logic,
bool_equal, bool_map, true, false)
from .inference import satisfiable
| {
"content_hash": "dd0fbdb353d57581be16e2002ae81bed",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 51.5,
"alnum_prop": 0.7135922330097088,
"repo_name": "hrashk/sympy",
"id": "f849c03b8a54b7c000de16a3aef0021ad434e89f",
"size": "206",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sympy/logic/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13971941"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "1300"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.build_environment import get_buildroot
from pants_test.backend.python.pants_requirement_integration_test_base import \
PantsRequirementIntegrationTestBase
class PantsRequirementIntegrationTest(PantsRequirementIntegrationTestBase):
  """A pants plugin should be able to depend on a pants_requirement() alone to
  declare its dependencies on pants modules. This plugin, when added to the
  pythonpath and backend_packages, should be able to declare new BUILD file
  objects."""
  def run_with_testproject_backend_pkgs(self, cmd):
    # Prefix *cmd* with the options that expose the in-repo test plugin:
    # its source dir on the pythonpath, its backend package, and the
    # pants_requirement / test-infra target addresses it consumes.
    testproject_backend_src_dir = os.path.join(
      get_buildroot(), 'testprojects/pants-plugins/src/python')
    testproject_backend_pkg_name = 'test_pants_plugin'
    pants_req_addr = 'testprojects/pants-plugins/3rdparty/python/pants'
    pants_test_infra_addr = 'tests/python/pants_test:test_infra'
    pre_cmd_args = [
      "--pythonpath=+['{}']".format(testproject_backend_src_dir),
      "--backend-packages=+['{}']".format(testproject_backend_pkg_name),
      "--pants-test-infra-pants-requirement-target={}".format(pants_req_addr),
      "--pants-test-infra-pants-test-infra-target={}".format(pants_test_infra_addr),
    ]
    command = pre_cmd_args + cmd
    return self.run_pants(command=command)
  def test_pants_requirement(self):
    # End-to-end: build an unstable pants distribution, point the python
    # repos at it, and run the plugin's own tests against that dist.
    self.maxDiff = None
    with self.create_unstable_pants_distribution() as repo:
      tests_dir = 'testprojects/pants-plugins/tests/python/test_pants_plugin'
      with self.file_renamed(os.path.join(get_buildroot(), tests_dir), 'TEST_BUILD', 'BUILD'):
        test_pants_requirement_cmd = ['--python-repos-repos={}'.format(repo),
                                      'test',
                                      tests_dir]
        pants_run = self.run_with_testproject_backend_pkgs(test_pants_requirement_cmd)
        self.assert_success(pants_run)
| {
"content_hash": "46f36a7a492206202bb2317320079b52",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 94,
"avg_line_length": 48.023809523809526,
"alnum_prop": 0.6906296479920674,
"repo_name": "foursquare/pants",
"id": "c7845f56a394fbffe04aea136ae955d18c64e4c7",
"size": "2164",
"binary": false,
"copies": "3",
"ref": "refs/heads/1.7.0+fsX",
"path": "tests/python/pants_test/backend/python/test_pants_requirement_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "3034"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1922"
},
{
"name": "HTML",
"bytes": "49126"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5461553"
},
{
"name": "Rust",
"bytes": "443987"
},
{
"name": "Scala",
"bytes": "76065"
},
{
"name": "Shell",
"bytes": "77142"
},
{
"name": "Starlark",
"bytes": "357125"
},
{
"name": "Thrift",
"bytes": "3365"
}
],
"symlink_target": ""
} |
from office365.runtime.client_result import ClientResult
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.queries.create_entity import CreateEntityQuery
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.runtime.paths.service_operation import ServiceOperationPath
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.principal.group import Group
from office365.sharepoint.utilities.principal_info import PrincipalInfo
class GroupCollection(BaseEntityCollection):
    """Represents a collection of Group resources."""
    def __init__(self, context, resource_path=None):
        super(GroupCollection, self).__init__(context, Group, resource_path)
    def expand_to_principals(self, max_count):
        """
        Expands groups to a collection of principals.
        :param int max_count: Specifies the maximum number of principals to be returned.
        """
        return_type = ClientResult(self.context, ClientValueCollection(PrincipalInfo))
        # NOTE(review): each iteration *overwrites* return_type, so only the
        # last group's principals are returned (and the empty initial result
        # when the collection is empty). Looks unintended -- confirm whether
        # the per-group results should be merged instead.
        for cur_grp in self:  # type: Group
            return_type = cur_grp.expand_to_principals(max_count)
        return return_type
    def add(self, group_creation_information):
        """Creates a Group resource
        :type group_creation_information: any
        """
        group = Group(self.context)
        self.add_child(group)
        qry = CreateEntityQuery(self, group_creation_information, group)
        self.context.add_query(qry)
        return group
    def get_by_id(self, group_id):
        """Returns the list item with the specified list item identifier.
        :param str group_id: Specifies the member identifier.
        """
        return Group(self.context, ServiceOperationPath("GetById", [group_id], self.resource_path))
    def get_by_name(self, group_name):
        """Returns a cross-site group from the collection based on the name of the group.
        :param str group_name: A string that contains the name of the group.
        """
        return Group(self.context,
                     ServiceOperationPath("GetByName", [group_name], self.resource_path))
    def remove_by_id(self, group_id):
        """Removes the group with the specified member ID from the collection.
        :param str group_id: Specifies the member identifier.
        """
        qry = ServiceOperationQuery(self, "RemoveById", [group_id])
        self.context.add_query(qry)
        return self
    def remove_by_login_name(self, group_name):
        """Removes the cross-site group with the specified name from the collection.
        :param str group_name: A string that contains the name of the group.
        """
        qry = ServiceOperationQuery(self, "RemoveByLoginName", [group_name])
        self.context.add_query(qry)
        return self
| {
"content_hash": "9e99377cb320341685ec0dc3fcfd8dfe",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 99,
"avg_line_length": 41.08571428571429,
"alnum_prop": 0.6933240611961057,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "076251ab3ccb4220e67f8d7a6518ffffbc1368b7",
"size": "2876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/sharepoint/principal/group_collection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
"""
Created on Thu Jan 14 16:44:09 2021
@author: Mateo
This short script demonstrates how to use the module for computing
[1] Sabelhaus & Song (2010) age profiles of income volatility.
It does so by replicating the results from the original paper (Figure 6 in [1])
[1] Sabelhaus, J., & Song, J. (2010). The great moderation in micro labor
earnings. Journal of Monetary Economics, 57(4), 391-403.
"""
import matplotlib.pyplot as plt
from HARK.Calibration.Income.IncomeTools import sabelhaus_song_var_profile
import numpy as np
# Set up ages and cohorts at which we will get the variances.
# A cohort of None requests the aggregate (all-cohort) profile.
age_min = 27
age_max = 54
cohorts = [1940, 1965, None]
# Find volatility profiles using the module
variances = [
    sabelhaus_song_var_profile(age_min=age_min, age_max=age_max, cohort=c)
    for c in cohorts
]
# %% Plots
# Plot transitory shock variances.
# The profiles report standard deviations ("TranShkStd"/"PermShkStd"),
# so each series is squared to plot variances as in Figure 6 of the paper.
plt.figure()
for i in range(len(cohorts)):
    coh_label = "aggregate" if cohorts[i] is None else cohorts[i]
    plt.plot(
        variances[i]["Age"],
        np.power(variances[i]["TranShkStd"],2),
        label="Tran. {} cohort".format(coh_label),
    )
plt.legend()
# Plot permanent shock variances
plt.figure()
for i in range(len(cohorts)):
    coh_label = "aggregate" if cohorts[i] is None else cohorts[i]
    plt.plot(
        variances[i]["Age"],
        np.power(variances[i]["PermShkStd"],2),
        label="Perm. {} cohort".format(coh_label),
    )
plt.legend()
| {
"content_hash": "065e271e8111312746680998321d1673",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 25.607142857142858,
"alnum_prop": 0.6820083682008368,
"repo_name": "econ-ark/HARK",
"id": "0017be6c8a30f8a499fbdbeba9912d85b34473ac",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/Calibration/Sabelhaus_Song_var_profiles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "111"
},
{
"name": "Python",
"bytes": "1397750"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
import datetime
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
BODY_MAX_LENGTH = getattr(settings, 'SIMPLE_COMMENTS_BODY_MAX_LENGTH', 3000)
class BaseComment(models.Model):
    """Abstract base class used to create comment models.
    All subclasses **must** implement a ``models.ForeignKey`` specifying the
    model of the instance comments should reference. This field **must** have the
    name ``target``.
    The class works well with authenticated users as well as manual input of
    ``author_name`` and ``author_email``. If a ``User`` instance is supplied
    these fields will be retrieved from that instance. Data retrieved from a
    supplied user will always take precedence.
    """
    user = models.ForeignKey(User, null=True, blank=True)
    # Store the username here as a simple act of denormalization. Using
    # convenience methods (such as get_absolute_url) on nullable foreign keys
    # can be expensive. We're betting on that the id or username will be
    # sufficient to reconstruct URLs in most cases.
    user_username = models.CharField(max_length=30, blank=True)
    author_name = models.CharField(max_length=61)
    author_email = models.EmailField()
    author_website = models.URLField(blank=True)
    body = models.TextField(max_length=BODY_MAX_LENGTH)
    # NOTE(review): datetime.datetime.now yields naive local time; a USE_TZ
    # deployment would want django.utils.timezone.now -- confirm settings.
    pub_date = models.DateTimeField(default=datetime.datetime.now)
    # NOTE(review): IPAddressField was removed in Django 1.9
    # (GenericIPAddressField replaces it) -- confirm target Django version.
    ip_address = models.IPAddressField(blank=True, null=True)
    def __init__(self, *args, **kwargs):
        """Override to make sure that a ``ForeignKeyField`` named ``target``
        exists on subclasses.
        """
        super(BaseComment, self).__init__(*args, **kwargs)
        try:
            target_field = self._meta.get_field('target')
            if not issubclass(target_field.__class__, models.ForeignKey):
                raise TypeError
        except (models.FieldDoesNotExist, TypeError):
            raise TypeError(u"Subclasses of BaseComment must add a foreign "
                            u"key field named target")
    def denormalize_user_instance(self):
        """Set the author name, email and username on the model if a ``User``
        instance has been supplied.
        """
        user = self.user
        if user is not None:
            if user.first_name or user.last_name:
                self.author_name = user.get_full_name()
            else:
                # Fall back on the user's username if neither first- nor last
                # names were set on the user instance.
                self.author_name = user.username
            self.author_email = user.email
            self.user_username = user.username
    def save(self, *args, **kwargs):
        # Always refresh the denormalized author fields before persisting.
        self.denormalize_user_instance()
        super(BaseComment, self).save(*args, **kwargs)
    @classmethod
    def get_target_model(cls):
        """Return the model that we're referencing in the required
        ``ForeignKey`` field of subclasses.
        """
        return cls._meta.get_field('target').rel.to
    class Meta:
        abstract = True
        get_latest_by = 'pub_date'
| {
"content_hash": "cca8ad887001e3c1f73f38d89f3eae2d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 38.34939759036145,
"alnum_prop": 0.6352497643732328,
"repo_name": "strange/django-simple-comments",
"id": "78b1ec80df202c41d2123ec8e49e4e5de9dc7f35",
"size": "3183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_comments/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33874"
}
],
"symlink_target": ""
} |
import brian_no_units
import cPickle
from brian import *
from time import time
import os
#Regular stimulation at cerebellum doesn't mean that thalamocortical synapses are firing regularly
#Perhaps irregular stimulation two synapses away leads to regular stimulation
from optparse import OptionParser
from numpy import random
# Command-line interface: stimulation rate (Hz), rhythm type, and duration.
op = OptionParser()
op.add_option('--rate', dest='rate', type='int',
              help='Average firing rate of input stimuli')
op.add_option('--rhythm',dest='rhythm',type='str',
              help='REGULAR for tonic firing, IRREGULAR for phasic firing')
op.add_option('--duration',dest='duration',type='int',
              help='duration of recording in seconds')
op.print_help()
opts,args = op.parse_args()
if len(args) > 0:
    # NOTE(review): op.error() already prints and exits, so the sys.exit(1)
    # below is unreachable -- and `sys` is never imported here; confirm.
    op.error('This script only takes arguments preceded by command line options.')
    sys.exit(1)
defaultclock.dt = 1*ms
N = 1000
taum = 10 * ms
tau_pre = 20 * ms
tau_post = tau_pre
Ee = 0 * mV
vt = -54 * mV
vr = -60 * mV
El = -74 * mV
taue = 5 * ms
F = 15 * Hz
gmax = .05
dA_pre = .01
dA_post = -dA_pre * tau_pre / tau_post * 1.05
dA_post *= gmax
dA_pre *= gmax
eqs_neurons = '''
dv/dt=(ge*(Ee-vr)+El-v)/taum : volt # the synaptic current is linearized
dge/dt=-ge/taue : 1
'''
# macOS text-to-speech progress notification. NOTE(review): "hurts" in the
# spoken message is presumably a typo for "hertz" (runtime string, left as-is).
os.system('say "starting simulation with stimulation at %d hurts and %s"'%(opts.rate,opts.rhythm))
'''
Accurate stimulation pattern
Across all frequencies
'''
def lognormal_spiketime(start, stop, avg_rate):
    """Return jittered ("irregular") spike times on [start, stop).

    Builds the regular grid of candidate spike times -- one every
    ``avg_rate`` seconds (despite the name, callers pass the inter-spike
    interval ``1./rate``) -- and shifts each by an independent
    lognormal(mean=0.2, sigma=0.2) increment.

    Fix: the original sized the random draw as ``(stop-start)*opts.rate``,
    which (a) silently depends on the module-global ``opts`` and (b) is a
    float that can disagree with ``len(arange(...))`` by one element under
    floating-point rounding, crashing the final addition. Sizing the draw
    from the grid itself guarantees matching shapes and makes the function
    self-contained.
    """
    grid = np.arange(start, stop, avg_rate)
    increments = random.lognormal(0.2, 0.2, grid.size)
    return grid + increments
# Build the (neuron index, spike time) pairs for all N inputs: a strict
# periodic train for REGULAR, lognormally jittered times for IRREGULAR.
if opts.rhythm != 'IRREGULAR': #Default action if rhythm type not recognized is to make regular rhythm
    spiketimes = [(i,t) for i in xrange(N) for t in np.arange(0,opts.duration,1./opts.rate)]
else:
    spiketimes = [(i,t) for i in xrange(N) for t in lognormal_spiketime(0,opts.duration,1./opts.rate)]
input = SpikeGeneratorGroup(N,spiketimes)
neurons = NeuronGroup(1, model=eqs_neurons, threshold=vt, reset=vr)
# All-to-one plastic synapses with exponential-trace STDP; weights are
# clipped to [0, gmax] on every update.
S = Synapses(input, neurons,
             model='''w:1
             A_pre:1
             A_post:1''',
             pre='''ge+=w
             A_pre=A_pre*exp((lastupdate-t)/tau_pre)+dA_pre
             A_post=A_post*exp((lastupdate-t)/tau_post)
             w=clip(w+A_post,0,gmax)''',
             post='''
             A_pre=A_pre*exp((lastupdate-t)/tau_pre)
             A_post=A_post*exp((lastupdate-t)/tau_post)+dA_post
             w=clip(w+A_pre,0,gmax)''')
neurons.v = vr
S[:,:]=True
S.w='rand()*gmax'
rate = PopulationRateMonitor(neurons)
# record=True samples the weight of *every* synapse every 100 ms.
M = StateMonitor(S,'w',record=True,clock=Clock(dt=100*ms)) # monitors synapses number 0 and 1
start_time = time()
run(opts.duration * second, report='text')
# Persist weights and postsynaptic rates. NOTE(review): the output file
# handle from open(...) is never closed explicitly -- confirm acceptable.
cPickle.dump({'synapses':M.values,'postsynaptic_rates':rate.rate,'postsynaptic_rate_smoothed':rate.smooth_rate(100*ms),
              'postsynaptic_times':rate.times/second},open('../results/output-%s-%s.pkl'%(str(opts.rate),opts.rhythm),'wb'))
os.system('say "weightmonitor is finished" ') | {
"content_hash": "f72eda311cb5eea5f6239c53bba920fd",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 120,
"avg_line_length": 34.02272727272727,
"alnum_prop": 0.6653306613226453,
"repo_name": "mac389/deep-brain-stimulation",
"id": "c608930e36371620c61a60d3f0dc3b1612674af5",
"size": "2994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/weightmonitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47298"
},
{
"name": "R",
"bytes": "240"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
} |
import datetime
import pytz
from dateutil import relativedelta
# Shared pytz timezone singletons, built once at import time.
UTC_TZ = pytz.timezone('UTC')
EASTERN_TZ = pytz.timezone('US/Eastern')
# Timezones are way harder than one would imagine.
# Typical usage: from betterself.utils import date_utils
def get_datetime_in_eastern_timezone(year, month, day, hour, minute, second=0):
    """Build a timezone-aware datetime in US/Eastern (handy for debugging).

    Bug fix: passing a pytz zone via ``tzinfo=`` in the datetime
    constructor attaches the zone's *baseline* LMT offset (-04:56 for
    US/Eastern) instead of the correct EST/EDT offset for the given date.
    pytz requires ``localize()`` to resolve the proper offset, including
    DST transitions.
    """
    naive = datetime.datetime(year, month, day, hour, minute, second)
    return EASTERN_TZ.localize(naive)
def get_current_utc_time_and_tz():
    """Return the current moment as a timezone-aware UTC datetime.

    (Yes, the tzinfo *is* attached -- no need to wonder next time.)
    """
    current_moment = datetime.datetime.now(tz=pytz.UTC)
    return current_moment
def get_current_usertime(user):
    """Return the current wall-clock time localized to *user*'s timezone."""
    user_tz = user.pytz_timezone
    return datetime.datetime.now(tz=user_tz)
def get_current_userdate(user):
    """Return the current calendar date in *user*'s local timezone."""
    user_now = get_current_usertime(user)
    return user_now.date()
def days_ago_from_current_day(days):
    """Return the timezone-aware (UTC) datetime exactly *days* days ago.

    Improvements over the original: ``datetime.now(UTC_TZ)`` is aware from
    the start, avoiding the naive-``utcnow()``-then-``localize`` two-step
    (and its "missing tzinfo" warnings), and a plain day offset needs only
    the stdlib ``timedelta`` rather than dateutil's ``relativedelta``.
    """
    now = datetime.datetime.now(UTC_TZ)
    return now - datetime.timedelta(days=days)
def get_current_date_months_ago(months):
    """Return today's date shifted back by *months* calendar months."""
    current_date = datetime.date.today()
    offset = relativedelta.relativedelta(months=months)
    return current_date - offset
def get_current_date_years_ago(years):
    """Return today's date shifted back by *years* calendar years."""
    current_date = datetime.date.today()
    offset = relativedelta.relativedelta(years=years)
    return current_date - offset
def get_current_date_days_ago(days_ago):
    """Return today's date shifted back by *days_ago* whole days.

    Uses the stdlib ``datetime.timedelta`` instead of dateutil's
    ``relativedelta``: for a plain day offset they are equivalent, and this
    removes a third-party dependency from the hot path.
    """
    today = datetime.date.today()
    return today - datetime.timedelta(days=days_ago)
def get_midnight_datetime_from_date_parameter(user, date):
    """Convert a plain date into the aware datetime at midnight in the
    user's timezone."""
    midnight = datetime.datetime.min.time()
    naive_midnight = datetime.datetime.combine(date, midnight)
    return user.pytz_timezone.localize(naive_midnight)
| {
"content_hash": "a8901fb7a03501dfda654e65e55ab962",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 99,
"avg_line_length": 30.21311475409836,
"alnum_prop": 0.7303309820944113,
"repo_name": "jeffshek/betterself",
"id": "635abf6dec14b023f54fbef717249aa83ae3fb3d",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "betterself/utils/date_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "430527"
},
{
"name": "HTML",
"bytes": "26382"
},
{
"name": "JavaScript",
"bytes": "232349"
},
{
"name": "Python",
"bytes": "525014"
},
{
"name": "Shell",
"bytes": "6298"
}
],
"symlink_target": ""
} |
import copy
from django.core.exceptions import ValidationError
from django.forms.utils import ErrorDict, ErrorList, flatatt
from django.test import SimpleTestCase
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy
class FormsUtilsTestCase(SimpleTestCase):
    # Tests for forms/utils.py module: flatatt() rendering plus
    # ErrorList/ErrorDict construction, escaping, copying and __html__.
    def test_flatatt(self):
        ###########
        # flatatt #
        ###########
        self.assertEqual(flatatt({'id': "header"}), ' id="header"')
        self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), ' class="news" title="Read this"')
        self.assertEqual(
            flatatt({'class': "news", 'title': "Read this", 'required': "required"}),
            ' class="news" required="required" title="Read this"'
        )
        # Boolean values: True renders as a bare attribute, False drops it.
        self.assertEqual(
            flatatt({'class': "news", 'title': "Read this", 'required': True}),
            ' class="news" title="Read this" required'
        )
        self.assertEqual(
            flatatt({'class': "news", 'title': "Read this", 'required': False}),
            ' class="news" title="Read this"'
        )
        self.assertEqual(flatatt({'class': None}), '')
        self.assertEqual(flatatt({}), '')
    def test_flatatt_no_side_effects(self):
        """
        flatatt() does not modify the dict passed in.
        """
        attrs = {'foo': 'bar', 'true': True, 'false': False}
        attrs_copy = copy.copy(attrs)
        self.assertEqual(attrs, attrs_copy)
        first_run = flatatt(attrs)
        self.assertEqual(attrs, attrs_copy)
        self.assertEqual(first_run, ' foo="bar" true')
        # A second run must be idempotent as well.
        second_run = flatatt(attrs)
        self.assertEqual(attrs, attrs_copy)
        self.assertEqual(first_run, second_run)
    def test_validation_error(self):
        ###################
        # ValidationError #
        ###################
        # Can take a string.
        self.assertHTMLEqual(
            str(ErrorList(ValidationError("There was an error.").messages)),
            '<ul class="errorlist"><li>There was an error.</li></ul>'
        )
        # Can take a unicode string.
        self.assertHTMLEqual(
            str(ErrorList(ValidationError("Not \u03C0.").messages)),
            '<ul class="errorlist"><li>Not π.</li></ul>'
        )
        # Can take a lazy string.
        self.assertHTMLEqual(
            str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)),
            '<ul class="errorlist"><li>Error.</li></ul>'
        )
        # Can take a list.
        self.assertHTMLEqual(
            str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)),
            '<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>'
        )
        # Can take a dict.
        self.assertHTMLEqual(
            str(ErrorList(sorted(ValidationError({'error_1': "1. Error one.", 'error_2': "2. Error two."}).messages))),
            '<ul class="errorlist"><li>1. Error one.</li><li>2. Error two.</li></ul>'
        )
        # Can take a mixture in a list.
        self.assertHTMLEqual(
            str(ErrorList(sorted(ValidationError([
                "1. First error.",
                "2. Not \u03C0.",
                ugettext_lazy("3. Error."),
                {
                    'error_1': "4. First dict error.",
                    'error_2': "5. Second dict error.",
                },
            ]).messages))),
            '<ul class="errorlist">'
            '<li>1. First error.</li>'
            '<li>2. Not π.</li>'
            '<li>3. Error.</li>'
            '<li>4. First dict error.</li>'
            '<li>5. Second dict error.</li>'
            '</ul>'
        )
        class VeryBadError:
            def __str__(self):
                return "A very bad error."
        # Can take a non-string.
        self.assertHTMLEqual(
            str(ErrorList(ValidationError(VeryBadError()).messages)),
            '<ul class="errorlist"><li>A very bad error.</li></ul>'
        )
        # Escapes non-safe input but not input marked safe.
        example = 'Example of link: <a href="http://www.example.com/">example</a>'
        self.assertHTMLEqual(
            str(ErrorList([example])),
            '<ul class="errorlist"><li>Example of link: '
            '&lt;a href=&quot;http://www.example.com/&quot;&gt;example&lt;/a&gt;</li></ul>'
        )
        self.assertHTMLEqual(
            str(ErrorList([mark_safe(example)])),
            '<ul class="errorlist"><li>Example of link: '
            '<a href="http://www.example.com/">example</a></li></ul>'
        )
        self.assertHTMLEqual(
            str(ErrorDict({'name': example})),
            '<ul class="errorlist"><li>nameExample of link: '
            '&lt;a href=&quot;http://www.example.com/&quot;&gt;example&lt;/a&gt;</li></ul>'
        )
        self.assertHTMLEqual(
            str(ErrorDict({'name': mark_safe(example)})),
            '<ul class="errorlist"><li>nameExample of link: '
            '<a href="http://www.example.com/">example</a></li></ul>'
        )
    def test_error_dict_copy(self):
        # Copies must preserve both the rendering and the underlying
        # ValidationError data (as_data()).
        e = ErrorDict()
        e['__all__'] = ErrorList([
            ValidationError(
                message='message %(i)s',
                params={'i': 1},
            ),
            ValidationError(
                message='message %(i)s',
                params={'i': 2},
            ),
        ])
        e_copy = copy.copy(e)
        self.assertEqual(e, e_copy)
        self.assertEqual(e.as_data(), e_copy.as_data())
        e_deepcopy = copy.deepcopy(e)
        self.assertEqual(e, e_deepcopy)
        # NOTE(review): compares e_copy again; probably meant
        # e_deepcopy.as_data() -- confirm intent.
        self.assertEqual(e.as_data(), e_copy.as_data())
    def test_error_dict_html_safe(self):
        # ErrorDict implements the __html__ protocol and it matches str().
        e = ErrorDict()
        e['username'] = 'Invalid username.'
        self.assertTrue(hasattr(ErrorDict, '__html__'))
        self.assertEqual(force_text(e), e.__html__())
    def test_error_list_html_safe(self):
        # ErrorList implements the __html__ protocol and it matches str().
        e = ErrorList(['Invalid username.'])
        self.assertTrue(hasattr(ErrorList, '__html__'))
        self.assertEqual(force_text(e), e.__html__())
| {
"content_hash": "07a3af86349b7652983d9e56fd0633f8",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 119,
"avg_line_length": 36.92168674698795,
"alnum_prop": 0.5278185674661445,
"repo_name": "twz915/django",
"id": "f52c1956377e9831bae7ac421fad02165b42516c",
"size": "6131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/forms_tests/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55929"
},
{
"name": "HTML",
"bytes": "182880"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11852079"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import unittest.mock as mock
from unittest import TestCase
from minio import Minio
from minio.api import _DEFAULT_USER_AGENT
from .minio_mocks import MockConnection, MockResponse
class StatObject(TestCase):
def test_object_is_string(self):
client = Minio('localhost:9000')
self.assertRaises(TypeError, client.stat_object, 'hello', 1234)
def test_object_is_not_empty_string(self):
client = Minio('localhost:9000')
self.assertRaises(ValueError, client.stat_object, 'hello', ' \t \n ')
def test_stat_object_invalid_name(self):
client = Minio('localhost:9000')
self.assertRaises(ValueError, client.stat_object, 'AB#CD', 'world')
@mock.patch('urllib3.PoolManager')
def test_stat_object_works(self, mock_connection):
mock_headers = {
'content-type': 'application/octet-stream',
'last-modified': 'Fri, 26 Jun 2015 19:05:37 GMT',
'content-length': 11,
'etag': '5eb63bbbe01eeed093cb22bb8f5acdc3'
}
mock_server = MockConnection()
mock_connection.return_value = mock_server
mock_server.mock_add_request(
MockResponse('HEAD',
'https://localhost:9000/hello/world',
{'User-Agent': _DEFAULT_USER_AGENT}, 200,
response_headers=mock_headers)
)
client = Minio('localhost:9000')
client.stat_object('hello', 'world')
| {
"content_hash": "03bf8e5d0dca9334df7b360d5934fc21",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 36.825,
"alnum_prop": 0.6184657162253904,
"repo_name": "minio/minio-py",
"id": "ebf32adeed101c6df16c419efc7cf895a41cb832",
"size": "2130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/stat_object_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "603"
},
{
"name": "Python",
"bytes": "452355"
},
{
"name": "Shell",
"bytes": "1978"
}
],
"symlink_target": ""
} |
# Demo/verification script: compares ProxImaL's Halide-compiled perspective
# warp (A_warp) and its adjoint (At_warp) against OpenCV reference results.
import sys
sys.path.append('../../')
from scipy import ndimage
from proximal.utils.utils import *
from proximal.halide.halide import *
from proximal.lin_ops import *
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import cv2
############################################################
# Load image
np_img = get_test_image(512)
print('Type ', np_img.dtype, 'Shape', np_img.shape)
plt.figure()
plt.subplot(231)
imgplot = plt.imshow(np_img,
                     interpolation="nearest",
                     clim=(0.0, 255.0),
                     cmap='gray')
plt.title('Numpy')
# Generate transform: 5-degree rotation with a horizontal shift of -128 px,
# expressed as a 3x3 homography in Fortran order (what Halide expects).
theta_rad = 5.0 * np.pi / 180.0
H = np.array([[np.cos(theta_rad), -np.sin(theta_rad), -128.],
              [np.sin(theta_rad), np.cos(theta_rad), 0.], [0., 0., 1.]],
             dtype=np.float32,
             order='F')
# Inverse homography, used below by the adjoint (correlation) operator.
Hinv = np.asfortranarray(np.linalg.pinv(H))
tic()
# Reference warp via OpenCV (WARP_INVERSE_MAP to match the forward operator).
output_ref = cv2.warpPerspective(np_img,
                                 H,
                                 np_img.shape[1::-1],
                                 flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP,
                                 borderMode=cv2.BORDER_CONSTANT,
                                 borderValue=0.)
print('Running cv2.warpPerspective took: {0:.1f}ms'.format(toc()))
plt.subplot(232)
imgplot = plt.imshow(output_ref,
                     interpolation="nearest",
                     clim=(0.0, 255.0),
                     cmap='gray')
plt.title('Output from CV2')
# Test halide interface: forward warp A_warp writes into a preallocated
# Fortran-ordered float32 buffer.
output = np.empty(np_img.shape, order='F', dtype=np.float32)
hl = Halide('A_warp', recompile=True)  # Force recompile
tic()
hl.A_warp(np_img, H, output)  # Call
print('Running halide took: {0:.1f}ms'.format(toc()))
plt.subplot(233)
imgplot = plt.imshow(output,
                     interpolation="nearest",
                     clim=(0.0, 255.0),
                     cmap='gray')
plt.title('Output from halide')
# Error: relative infinity-norm difference between Halide and OpenCV warps.
delta = np.linalg.norm(output_ref.ravel() - output.ravel(), np.Inf)
norm = np.amax((output_ref.max(), output.max()))
print('Relative error {0}'.format(delta / norm))
############################################################################
# Check correlation
############################################################################
# Adjoint operator At_warp: warps back with the inverse homography.
output_trans = np.zeros_like(np_img)
hl = Halide('At_warp', recompile=True)  # Force recompile
tic()
hl.At_warp(output, Hinv, output_trans)  # Call
print('Running correlation took: {0:.1f}ms'.format(toc()))
plt.subplot(236)
imgplot = plt.imshow(output_trans,
                     interpolation="nearest",
                     clim=(0.0, 255.0),
                     cmap='gray')
plt.title('Output trans from halide')
# Compute reference adjoint with OpenCV (forward map of H, i.e. no
# WARP_INVERSE_MAP flag this time).
tic()
output_ref_trans = cv2.warpPerspective(output_ref,
                                       H,
                                       np_img.shape[1::-1],
                                       flags=cv2.INTER_LINEAR,
                                       borderMode=cv2.BORDER_CONSTANT,
                                       borderValue=0.)
print('Running cv2.warpPerspective took: {0:.1f}ms'.format(toc()))
plt.subplot(235)
plt.imshow(output_ref_trans,
           interpolation="nearest",
           clim=(0.0, 255.0),
           cmap='gray')
plt.title('Output trans from CV2')
# Error for the adjoint pass, same relative infinity-norm metric as above.
delta = np.linalg.norm(output_ref_trans.ravel() - output_trans.ravel(), np.Inf)
norm = np.amax((output_ref_trans.max(), output_trans.max()))
print('Relative error trans {0}'.format(delta / norm))
plt.show()
| {
"content_hash": "c17a76d9450f6c92fdfa38334eb946b6",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 30.586206896551722,
"alnum_prop": 0.5245208568207441,
"repo_name": "comp-imaging/ProxImaL",
"id": "587b8a8628c3a4451e595535c324b76e216682d8",
"size": "3559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proximal/examples/test_warp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5500"
},
{
"name": "C++",
"bytes": "135520"
},
{
"name": "Jinja",
"bytes": "3865"
},
{
"name": "Meson",
"bytes": "7090"
},
{
"name": "Python",
"bytes": "397574"
},
{
"name": "Shell",
"bytes": "689"
}
],
"symlink_target": ""
} |
from ietf.utils.test_utils import SimpleUrlTestCase
class MailingListsUrlTestCase(SimpleUrlTestCase):
    # Smoke-test the mailinglists app's URLs using the test-URL file
    # that SimpleUrlTestCase resolves relative to this module's path.
    def testUrls(self):
        self.doTestUrls(__file__)
| {
"content_hash": "acc0046cd4a9f85b9ba85eab3cd10f8c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 51,
"avg_line_length": 27,
"alnum_prop": 0.7592592592592593,
"repo_name": "mcr/ietfdb",
"id": "043584ea079d4999bf5fa9bdb5c487a0045b2692",
"size": "1830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ietf/mailinglists/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "239198"
},
{
"name": "JavaScript",
"bytes": "450755"
},
{
"name": "Perl",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "10286676"
},
{
"name": "Ruby",
"bytes": "3468"
},
{
"name": "Shell",
"bytes": "39950"
},
{
"name": "TeX",
"bytes": "23944"
}
],
"symlink_target": ""
} |
from distutils.command.build_py import build_py
from distutils.command.build_scripts import build_scripts
from distutils.command.install_scripts import install_scripts
from distutils.command.sdist import sdist
import glob
import os.path
import sys
from setuptools import find_packages, setup
from euca2ools import __version__
# Runtime dependencies declared to setuptools below.
REQUIREMENTS = ['lxml',
                'PyYAML',
                'requestbuilder>=0.3.4',
                'requests',
                'six>=1.4']
# argparse only joined the standard library in Python 2.7, so pull it
# from PyPI on older interpreters.
if sys.version_info < (2, 7):
    REQUIREMENTS.append('argparse')
# Cheap hack: install symlinks separately from regular files.
# cmd.copy_tree accepts a preserve_symlinks option, but when we call
# ``setup.py install'' more than once the method fails when it encounters
# symlinks that are already there.
class build_scripts_except_symlinks(build_scripts):
    '''Like build_scripts, but ignoring symlinks'''

    def copy_scripts(self):
        # Build only the real files; symlinks are replicated separately by
        # install_scripts_and_symlinks.  Restore the full list afterwards
        # so later commands still see every script.
        saved_scripts = self.scripts
        self.scripts = [s for s in saved_scripts if not os.path.islink(s)]
        build_scripts.copy_scripts(self)
        self.scripts = saved_scripts
class install_scripts_and_symlinks(install_scripts):
    '''Like install_scripts, but also replicating nonexistent symlinks'''

    def run(self):
        install_scripts.run(self)
        # Recreate each symlinked script in the install dir, pointing at the
        # same target, unless something already exists at that path.
        for script in self.distribution.scripts:
            if not os.path.islink(script):
                continue
            link_target = os.readlink(script)
            destination = os.path.join(self.install_dir,
                                       os.path.basename(script))
            if not os.path.exists(destination):
                os.symlink(link_target, destination)
class build_py_with_git_version(build_py):
    '''Like build_py, but also hardcoding the version in __init__.__version__
    so it's consistent even outside of the source tree'''

    def build_module(self, module, module_file, package):
        # Copy the module as usual, then rewrite the top-level package's
        # __init__ in the build dir so __version__ is pinned to the version
        # this tree was built from.
        # Fix: removed a leftover debug ``print module, module_file, package``
        # statement that echoed every module during the build (and was
        # Python 2-only syntax).
        build_py.build_module(self, module, module_file, package)
        if module == '__init__' and '.' not in package:
            version_line = "__version__ = '{0}'\n".format(__version__)
            old_init_name = self.get_module_outfile(self.build_lib, (package,),
                                                    module)
            new_init_name = old_init_name + '.new'
            # Write the rewritten file next to the original, then rename it
            # into place so a failure mid-write can't truncate __init__.py.
            with open(new_init_name, 'w') as new_init:
                with open(old_init_name) as old_init:
                    for line in old_init:
                        if line.startswith('__version__ ='):
                            new_init.write(version_line)
                        else:
                            new_init.write(line)
                new_init.flush()
            os.rename(new_init_name, old_init_name)
class sdist_with_git_version(sdist):
    '''Like sdist, but also hardcoding the version in __init__.__version__ so
    it's consistent even outside of the source tree'''

    def make_release_tree(self, base_dir, files):
        sdist.make_release_tree(self, base_dir, files)
        # Rewrite euca2ools/__init__.py inside the release tree so the
        # tarball carries a hardcoded version string.  Write to a temp
        # file first, then rename it over the original.
        pinned_version = "__version__ = '{0}'\n".format(__version__)
        init_path = os.path.join(base_dir, 'euca2ools/__init__.py')
        tmp_path = init_path + '.new'
        with open(tmp_path, 'w') as tmp_file:
            with open(init_path) as init_file:
                for line in init_file:
                    if line.startswith('__version__ ='):
                        tmp_file.write(pinned_version)
                    else:
                        tmp_file.write(line)
            tmp_file.flush()
        os.rename(tmp_path, init_path)
# Package definition.  Scripts are gathered from bin/ by prefix; man pages
# are installed by section; custom cmdclasses above pin __version__ and
# handle symlinked scripts.
setup(name="euca2ools",
      version=__version__,
      description="Eucalyptus Command Line Tools",
      long_description="Eucalyptus Command Line Tools",
      author="Eucalyptus Systems, Inc.",
      author_email="support@eucalyptus.com",
      url="http://www.eucalyptus.com",
      scripts=sum((glob.glob('bin/euare-*'),
                   glob.glob('bin/euca-*'),
                   glob.glob('bin/euform-*'),
                   glob.glob('bin/euimage-*'),
                   glob.glob('bin/eulb-*'),
                   glob.glob('bin/euscale-*'),
                   glob.glob('bin/euwatch-*')),
                  []),
      data_files=[('share/man/man1', glob.glob('man/*.1')),
                  ('share/man/man5', glob.glob('man/*.5')),
                  ('share/man/man7', glob.glob('man/*.7'))],
      packages=find_packages(),
      install_requires=REQUIREMENTS,
      license='BSD (Simplified)',
      platforms='Posix; MacOS X',
      classifiers=['Development Status :: 5 - Production/Stable',
                   'Intended Audience :: Developers',
                   'Intended Audience :: System Administrators',
                   'License :: OSI Approved :: Simplified BSD License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Programming Language :: Python :: 2',
                   'Programming Language :: Python :: 2.6',
                   'Programming Language :: Python :: 2.7',
                   'Topic :: Internet'],
      cmdclass={'build_py': build_py_with_git_version,
                'build_scripts': build_scripts_except_symlinks,
                'install_scripts': install_scripts_and_symlinks,
                'sdist': sdist_with_git_version})
| {
"content_hash": "e2d4365b421852ecbfd1ad605ad12005",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 42.00769230769231,
"alnum_prop": 0.561435634499176,
"repo_name": "jhajek/euca2ools",
"id": "f4af8b5c8b6acd7db904591c3cd74df3d645f358",
"size": "6808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1230266"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
} |
class Student(object):
    """A student with an encapsulated name and score.

    Scores are integers in [0, 100]; grades map to 'A' (>= 90),
    'B' (>= 60) and 'C' (below 60).  The first parameter of every
    method is ``self``, the instance being operated on.
    """

    def __init__(self, name, score):
        super(Student, self).__init__()
        self.__name = name
        self.__score = score

    def print_score(self):
        """Print the student's name and score on one line."""
        print('%s: %s' % (self.__name, self.__score))

    def get_grade(self):
        """Return the letter grade for the current score."""
        if self.__score < 60:
            return 'C'
        if self.__score < 90:
            return 'B'
        return 'A'

    def set_name(self, name):
        """Replace the student's name."""
        self.__name = name

    def set_score(self, score):
        """Set the score; raise ValueError if it is outside [0, 100]."""
        if not 0 <= score <= 100:
            raise ValueError('error score')
        self.__score = score

    def get_name(self):
        """Return the student's name."""
        return self.__name

    def get_score(self):
        """Return the student's score."""
        return self.__score
# Demo: exercise the Student getters/setters from the command line.
emily = Student('Emily', 98)
emily.print_score()  # -> Emily: 98
print(emily.get_grade())  # -> A
emily.set_name('Henry')
emily.set_score(99)
print(emily.get_name(), emily.get_score())  # -> Henry 99
"content_hash": "e64abc5a9e98648f2b8c6961791952cc",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 59,
"avg_line_length": 19.73170731707317,
"alnum_prop": 0.6291718170580964,
"repo_name": "henryneu/Python",
"id": "46c26be0ae5a18a68c3f2da2581d9502b21b8399",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/clas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "115"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the seniority 'level' choice field
    # to payment.Invoice, defaulting to 'standard'.
    dependencies = [
        ('payment', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='invoice',
            name='level',
            field=models.CharField(choices=[('intern', 'Intern'), ('standard', 'Standard'), ('senior', 'Senior'), ('experienced', 'Experienced'), ('exceptional', 'Exceptional')], default='standard', max_length=15),
        ),
    ]
| {
"content_hash": "a12d1d33a27c0b43b3850d365da6af6f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 214,
"avg_line_length": 30.5,
"alnum_prop": 0.5881147540983607,
"repo_name": "groundupnews/gu",
"id": "e964a707eefd674c9243809345c9a70c11259e93",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payment/migrations/0002_invoice_level.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222991"
},
{
"name": "HTML",
"bytes": "563742"
},
{
"name": "JavaScript",
"bytes": "790912"
},
{
"name": "PHP",
"bytes": "2275"
},
{
"name": "Python",
"bytes": "598998"
},
{
"name": "Roff",
"bytes": "888"
},
{
"name": "Shell",
"bytes": "803"
},
{
"name": "XSLT",
"bytes": "870"
}
],
"symlink_target": ""
} |
from django.test import SimpleTestCase
from cfp import search
class SearchTest(SimpleTestCase):
    """Unit tests for cfp.search query tokenization and filter parsing.

    Fix: replaced the long-deprecated ``assertEquals`` alias with
    ``assertEqual`` (the alias emits DeprecationWarning and was removed
    in Python 3.12).
    """

    def test_single_quotes(self):
        self.assertEqual(['foo', 'foo bar', 'baz'],
                         search.tokenize_query("'foo' 'foo bar' baz"))

    def test_double_quotes(self):
        self.assertEqual(['foo bar', 'baz'],
                         search.tokenize_query('"foo bar" baz'))

    def test_whitespace(self):
        # Surrounding whitespace is stripped, not tokenized.
        self.assertEqual(['hello'], search.tokenize_query("   hello "))

    def test_colons(self):
        # Colon-qualified terms survive tokenization intact.
        self.assertEqual(['foo', 'loc:us', 'topic:python'],
                         search.tokenize_query("foo loc:us topic:python"))

    def test_filters(self):
        self.assertEqual([['location', 'us'], ['topic', 'python']],
                         search.filters("foo location:us topic:python"))
| {
"content_hash": "538e14c2bab78d904607ba7fd7e51822",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.5754716981132075,
"repo_name": "kyleconroy/speakers",
"id": "3c34ccce76be35e17edfb4337586e0c2624f1c7d",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cfp/tests/test_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "66276"
},
{
"name": "HTML",
"bytes": "85548"
},
{
"name": "JavaScript",
"bytes": "4576"
},
{
"name": "Makefile",
"bytes": "262"
},
{
"name": "Python",
"bytes": "137640"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse # noqa
from django.template import defaultfilters as filters
from django.utils.http import urlencode # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from openstack_dashboard import api
class DeleteFlavor(tables.DeleteAction):
    # Row/table action: delete the selected flavor(s) via the Nova API.
    data_type_singular = _("Flavor")
    data_type_plural = _("Flavors")

    def delete(self, request, obj_id):
        api.nova.flavor_delete(request, obj_id)
class CreateFlavor(tables.LinkAction):
    # Table action: open the flavor-creation form in a modal.
    name = "create"
    verbose_name = _("Create Flavor")
    url = "horizon:admin:flavors:create"
    classes = ("ajax-modal", "btn-create")
class UpdateFlavor(tables.LinkAction):
    # Row action: open the flavor-edit workflow in a modal.
    name = "update"
    verbose_name = _("Edit Flavor")
    url = "horizon:admin:flavors:update"
    classes = ("ajax-modal", "btn-edit")
class ViewFlavorExtras(tables.LinkAction):
    # Row action: navigate to the flavor's extra-specs page.
    name = "extras"
    verbose_name = _("View Extra Specs")
    url = "horizon:admin:flavors:extras:index"
    classes = ("btn-edit",)
class ModifyAccess(tables.LinkAction):
    """Row action: open the flavor-edit workflow on its access step."""
    name = "projects"
    verbose_name = _("Modify Access")
    url = "horizon:admin:flavors:update"
    classes = ("ajax-modal", "btn-edit")

    def get_link_url(self, flavor):
        # Append ?step=... so the workflow opens directly on the
        # flavor-access step instead of the first step.
        workflow_url = reverse(self.url, args=[flavor.id])
        query_string = urlencode({"step": 'update_flavor_access'})
        return "?".join([workflow_url, query_string])
class FlavorFilterAction(tables.FilterAction):
    def filter(self, table, flavors, filter_string):
        """ Really naive case-insensitive search. """
        # Lower-case the query once; a flavor matches if its name
        # contains the query as a substring.
        q = filter_string.lower()

        def comp(flavor):
            return q in flavor.name.lower()
        # NOTE(review): builtin filter() returns a list on Python 2 but a
        # lazy iterator on Python 3 -- callers appear to expect a sequence.
        return filter(comp, flavors)
def get_size(flavor):
    # Render the flavor's RAM as a translatable "<n>MB" column value.
    return _("%sMB") % flavor.ram
def get_swap_size(flavor):
    # Render swap as "<n>MB"; flavors with no swap report '' so fall back to 0.
    return _("%sMB") % (flavor.swap or 0)
class FlavorsTable(tables.DataTable):
    # Admin flavors table: columns plus the actions defined above.
    name = tables.Column('name', verbose_name=_('Flavor Name'))
    vcpus = tables.Column('vcpus', verbose_name=_('VCPUs'))
    ram = tables.Column(get_size,
                        verbose_name=_('RAM'),
                        attrs={'data-type': 'size'})
    disk = tables.Column('disk', verbose_name=_('Root Disk'))
    ephemeral = tables.Column('OS-FLV-EXT-DATA:ephemeral',
                              verbose_name=_('Ephemeral Disk'))
    swap = tables.Column(get_swap_size,
                         verbose_name=_('Swap Disk'),
                         attrs={'data-type': 'size'})
    flavor_id = tables.Column('id', verbose_name=_('ID'))
    # Render the boolean is_public flag as "Yes"/"No".
    public = tables.Column("is_public",
                           verbose_name=_("Public"),
                           empty_value=False,
                           filters=(filters.yesno, filters.capfirst))

    class Meta:
        name = "flavors"
        verbose_name = _("Flavors")
        table_actions = (FlavorFilterAction, CreateFlavor, DeleteFlavor)
        row_actions = (UpdateFlavor,
                       ModifyAccess,
                       ViewFlavorExtras,
                       DeleteFlavor)
| {
"content_hash": "6da10386e83d1610eb9117ab6af129bc",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 72,
"avg_line_length": 31.52577319587629,
"alnum_prop": 0.6007194244604317,
"repo_name": "Havate/havate-openstack",
"id": "557ad57919732065e1fea5715d67eddb9bb13d67",
"size": "3867",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/admin/flavors/tables.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "407618"
},
{
"name": "HTML",
"bytes": "507406"
},
{
"name": "JavaScript",
"bytes": "25322"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "21665856"
},
{
"name": "Shell",
"bytes": "62617"
}
],
"symlink_target": ""
} |
import pygtk
pygtk.require("2.0")
import gtk, gobject
from utils.Form import FormBuilder
from utils.Busqueda import BusquedaWindow
from utils.Database import DataModel
from ImprimirTicket import Ticket
from decimal import *
import datetime
import re
class VentasFactory(gtk.Frame):
	"""Point-of-sale frame (PyGTK): builds the sales UI from a Glade file,
	manages the line-item grid, totals/IVA calculation, persistence of
	sales, and ticket printing.

	Fixes in this revision:
	- on_eliminar_butto_clicked called the non-existent
	  ``TreeSelection.get_selection_selected()``; the PyGTK API is
	  ``get_selected()``, so row deletion always raised AttributeError.
	- normalizar_existencia matched only one digit (``r"\\d"``) and
	  returned it as a *string*; callers subtract from the result, so it
	  now matches the full leading number and returns an int.
	"""
	# Id of the sale currently displayed; 0 means "new, unsaved sale".
	__id_venta = 0

	def __init__(self, main):
		super(VentasFactory, self).__init__()
		self.main = main
		self.builder = gtk.Builder()
		self.builder.add_from_file("ventas_frame.glade")
		self.builder.connect_signals(self)
		content = self.builder.get_object("vbox_content")
		content.reparent(self)
		content.show()
		self.form_builder = FormBuilder(self.builder, 'Producto')
		self.producto_model = self.form_builder.get_model()
		self.venta_model = DataModel('Venta')
		self.venta_detalle_model = DataModel('VentaDetalle')
		self.ventas_grid = self.builder.get_object('ventas_grid')
		self._load_ventas_grid()
		self.form_builder.load_widget_value('fecha_hora_label',
			datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
		# Refresh the date/time label once per second.
		self.idevent = gobject.timeout_add(1000, self._update_fecha_hora)

	def on_buscar_producto_button_clicked(self, widget):
		"""Open the product-search dialog (only for a new, unsaved sale)."""
		if self.__id_venta != 0:
			self._show_error_message('Esta visualizando el detalle de una venta realizada, de click en el boton nuevo para realizar una nueva venta')
			return
		busqueda = BusquedaWindow('Producto', self._load_producto,
			search_fields={'id': 'match', 'nombre': 'like'},
			display_fields=['id', 'nombre', 'precio_venta', 'existencia'])

	def on_buscar_button_clicked(self, widget):
		"""Open the sale-search dialog to load a past sale into the view."""
		busqueda = BusquedaWindow('Venta', self._load_venta,
			search_fields={'id': 'match', 'fecha_sistema': 'match'},
			display_fields=['id', 'fecha_sistema', 'sub_total', 'impuesto', 'total'])

	def on_agregar_button_clicked(self, widget):
		"""Validate the product-entry fields and add/merge a grid row."""
		if self.__id_venta != 0:
			self._show_error_message('Esta visualizando el detalle de una venta realizada, de click en el boton nuevo para realizar una nueva venta')
			return
		id_producto = self.form_builder.get_widget_value('id_producto')
		if id_producto == '' or id_producto == '0':
			self._show_error_message('Escriba un código de producto valido')
			return
		nombre_producto = self.form_builder.get_widget_value('nombre_producto')
		if nombre_producto.strip() == '':
			self._show_error_message('Escriba una descripción del producto')
			return
		precio_producto = self.form_builder.get_widget_value('precio_producto')
		try:
			precio_producto = Decimal(precio_producto)
			if precio_producto < 0:
				raise NameError('Precio no valido')
		except:
			self._show_error_message('Escriba un precio valido')
			return
		cantidad_producto = self.form_builder.get_widget_value('cantidad_producto')
		try:
			cantidad_producto = int(cantidad_producto)
			if cantidad_producto == 0:
				raise NameError('Cantidad no valida')
		except:
			self._show_error_message('Escriba una cantidad valida')
			return
		append = True
		producto = self.producto_model.get_record(id_producto)
		if producto:
			if producto['existencia'] < cantidad_producto:
				self._show_error_message('No existe la cantidad necesaria del producto en inventario')
				return
			# Merge with an existing row for the same product, if any.
			for row in self.ventas_grid_model:
				if row[0] == id_producto:
					row[2] = int(row[2]) + cantidad_producto
					row[4] = str(round(int(row[2]) * Decimal(row[3]), 2))
					append = False
					break
			if append:
				self.ventas_grid_model.append([str(id_producto), nombre_producto,
					str(cantidad_producto), str(round(precio_producto, 2)),
					str(round(cantidad_producto * precio_producto, 2))])
			self._clear_producto(True)
			self._ensure_ventas_grid()
			self._calcular_totales()
			# Freeze the timestamp once the sale has at least one item.
			gobject.source_remove(self.idevent)
		else:
			self._show_error_message('El codigo de producto no existe')

	def on_eliminar_butto_clicked(self, widget):
		"""Remove the selected row from the line-item grid."""
		if self.__id_venta != 0:
			self._show_error_message('Esta visualizando el detalle de una venta realizada, de click en el boton nuevo para realizar una nueva venta')
			return
		selection = self.ventas_grid.get_selection()
		# Bug fix: TreeSelection has get_selected(), not get_selection_selected().
		model, treeiter = selection.get_selected()
		if treeiter:
			del model[treeiter]
			self._ensure_ventas_grid()
			self._calcular_totales()

	def on_iva_check_toggled(self, widget):
		# Recompute totals whenever the IVA checkbox changes.
		self._calcular_totales()

	def _load_producto(self, value):
		# Callback for the product-search dialog: fill in the product id.
		self.form_builder.load_widget_value('id_producto', value)

	def id_producto_key_release_event_cb(self, widget, ev, data=None):
		# On Enter (keyval 65293) look up the typed product id.
		if ev.keyval == 65293:
			producto_id = widget.get_text()
			producto = self.producto_model.get_record(producto_id)
			self._clear_producto()
			if producto:
				self._load_producto_information(producto)

	def on_nuevo_button_clicked(self, widget):
		"""Reset the whole form to start a new sale."""
		self.__id_venta = 0
		self._clear_producto(True)
		self._clear_venta()
		self.ventas_grid_model.clear()
		self._ensure_ventas_grid()
		self._update_fecha_hora()

	def on_guardar_button_clicked(self, widget, upsert=False):
		"""Validate payment, persist the sale and its detail rows, and
		decrement product stock.  Returns the new sale id when ``upsert``
		is True (used by ticket printing)."""
		total = self.form_builder.get_widget_value('total_label').replace(',', '').strip()
		cambio = 0
		pago_recibido = self.form_builder.get_widget_value('pago_recibido').replace(',', '').strip()
		try:
			pago_recibido = float(pago_recibido)
			total_numero = float(total)
			if pago_recibido <= 0 or pago_recibido < total_numero:
				raise NameError('Cantidad no valida')
			else:
				cambio = pago_recibido - total_numero
		except:
			self._show_error_message('La cantidad recibida de pago no es valida')
			return
		if self.__id_venta == 0:
			# -9 is gtk.RESPONSE_NO; proceed on any other response.
			if self._show_save_continue() != -9:
				fecha_hora = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
				fecha_sistema = datetime.datetime.now().strftime("%Y/%m/%d")
				sub_total = self.form_builder.get_widget_value('subtotal_label').replace(',', '')
				impuesto = self.form_builder.get_widget_value('iva_label').replace(',', '')
				id_venta = self.venta_model.create_record({'fecha_hora': fecha_hora, 'sub_total': sub_total,
					'impuesto': impuesto, 'total': total, 'fecha_sistema': fecha_sistema,
					'pago_recibido': str(pago_recibido).strip(),
					'cambio': str(cambio).strip()})
				if id_venta:
					for row in self.ventas_grid_model:
						producto = self.producto_model.get_record(row[0])
						if producto:
							self.venta_detalle_model.create_record({'venta_id': str(id_venta), 'producto_id': row[0].strip(),
								'producto_precio': row[3].strip(), 'producto_cantidad': row[2].strip(),
								'subtotal': row[4].strip(), 'nombre': row[1].strip()})
							# Decrement stock by the quantity sold.
							self.producto_model.update_record({
								'existencia': str(self.normalizar_existencia(producto['existencia']) - int(row[2].strip()))},
								row[0].strip())
					self._show_error_message('El cambio es de %s' % '{:20,.2f}'.format(float(cambio)))
					if not upsert:
						self._clear_producto(True)
						self._clear_venta()
						self.ventas_grid_model.clear()
						self._ensure_ventas_grid()
						self.idevent = gobject.timeout_add(1000, self._update_fecha_hora)
					else:
						return id_venta
			else:
				return 0
		else:
			self._show_error_message('Esta visualizando el detalle de una venta realizada, de click en el boton nuevo para realizar una nueva venta')

	def on_cancelar_button_clicked(self, widget):
		"""Close this tab and drop it from the main window's page list."""
		page = self.parent.get_current_page()
		self.parent.remove_page(page)
		del self.main.pages[page]

	def on_imprimir_ticket_button_clicked(self, widget):
		"""Print a ticket, saving the sale first if it is still unsaved."""
		if self.__id_venta == 0:
			self.__id_venta = self.on_guardar_button_clicked(widget, True)
		ticket = Ticket(self.__id_venta)
		if not ticket.imprimir():
			self._show_error_message('No se pudo imprimir ticket, asegurese de que la venta exista')

	def _calcular_totales(self):
		"""Recompute subtotal, IVA (16% when enabled) and total labels."""
		subtotal = 0.00
		impuesto = 0.00
		total = 0.00
		for row in self.ventas_grid_model:
			precio = Decimal(row[3]) * int(row[2])
			subtotal += round(precio, 2)
		if self.form_builder.get_widget_value('iva_check'):
			impuesto = round(subtotal * 0.16, 2)
			self.form_builder.load_widget_value('iva_label', '{:20,.2f}'.format(impuesto))
		else:
			self.form_builder.load_widget_value('iva_label', '0.00')
		total = round(subtotal + impuesto, 2)
		self.form_builder.load_widget_value('subtotal_label', '{:20,.2f}'.format(subtotal))
		self.form_builder.load_widget_value('total_label', '{:20,.2f}'.format(total))

	def _load_ventas_grid(self):
		"""Create the grid's ListStore and its five text columns."""
		self.ventas_grid_model = gtk.ListStore(str, str, str, str, str)
		count = 0
		columns = ['Id Producto', 'Producto', 'Cantidad', 'Precio', 'Subtotal']
		sizes = [50, 200, 60, 100, 100]
		for column in columns:
			render = gtk.CellRendererText()
			column_instance = gtk.TreeViewColumn(column, render, text=count)
			column_instance.set_min_width(sizes[count])
			self.ventas_grid.append_column(column_instance)
			count += 1
		self._ensure_ventas_grid()

	def _ensure_ventas_grid(self):
		# (Re)attach the model and make sure the grid is visible.
		self.ventas_grid.set_model(self.ventas_grid_model)
		self.ventas_grid.show()

	def _load_producto_information(self, row):
		# Fill the entry widgets from a product record and focus quantity.
		self.form_builder.load_widget_value('nombre_producto', row['nombre'])
		self.form_builder.load_widget_value('precio_producto', row['precio_venta'])
		self.form_builder.load_widget_value('cantidad_producto', '0')
		self.builder.get_object('cantidad_producto').grab_focus()

	def _clear_producto(self, clear_id=False):
		# Reset the product-entry widgets; optionally also the id field.
		if clear_id:
			self.form_builder.load_widget_value('id_producto', '')
		self.form_builder.load_widget_value('nombre_producto', '')
		self.form_builder.load_widget_value('precio_producto', '0.00')
		self.form_builder.load_widget_value('cantidad_producto', '0')

	def _clear_venta(self):
		# Reset all totals/payment labels for a fresh sale.
		self.form_builder.load_widget_value('subtotal_label', '0.00')
		self.form_builder.load_widget_value('total_label', '0.00')
		self.form_builder.load_widget_value('iva_label', '0.00')
		self.form_builder.load_widget_value('pago_recibido', '0.00')

	def _show_error_message(self, message):
		# Modal error dialog anchored on the top-level window.
		dialog = gtk.MessageDialog(self.parent.parent.parent, gtk.DIALOG_DESTROY_WITH_PARENT,
			gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, message)
		dialog.run()
		dialog.destroy()

	def _show_save_continue(self):
		# Yes/No confirmation before saving; returns the GTK response code.
		dialog = gtk.MessageDialog(self.parent.parent.parent, gtk.DIALOG_DESTROY_WITH_PARENT,
			gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, '¿Desea guardad la venta actual?')
		response = dialog.run()
		dialog.destroy()
		return response

	def _update_fecha_hora(self):
		# Timer callback: refresh the clock label and re-arm the timeout.
		self.form_builder.load_widget_value('fecha_hora_label',
			datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
		self.idevent = gobject.timeout_add(1000, self._update_fecha_hora)

	def _load_venta(self, value):
		"""Load an existing sale (header + detail rows) into the view."""
		venta = self.venta_model.get_record(int(value))
		if venta:
			self._clear_producto(True)
			self._clear_venta()
			self.ventas_grid_model.clear()
			self._ensure_ventas_grid()
			self.__id_venta = venta['id']
			self.form_builder.load_widget_value('subtotal_label', '{:20,.2f}'.format(float(venta['sub_total'])))
			self.form_builder.load_widget_value('iva_label', '{:20,.2f}'.format(float(venta['impuesto'])))
			self.form_builder.load_widget_value('total_label', '{:20,.2f}'.format(float(venta['total'])))
			self.form_builder.load_widget_value('pago_recibido', '{:20,.2f}'.format(float(venta['pago_recibido'])))
			# Stop the clock: show the sale's stored timestamp instead.
			gobject.source_remove(self.idevent)
			self.form_builder.load_widget_value('fecha_hora_label', self._parse_fecha(str(venta['fecha_hora'])))
			items = self.venta_detalle_model.get_records(venta_id=venta['id'])
			if len(items) > 0:
				for item in items:
					self.ventas_grid_model.append([str(item['id']), item['nombre'].strip(),
						str(item['producto_cantidad']).strip(), str(round(item['producto_precio'], 2)).strip(),
						str(round(item['producto_cantidad'] * item['producto_precio'], 2)).strip()])
		else:
			self._show_error_message('La venta seleccionada no existe')

	def _parse_fecha(self, fecha):
		"""Convert a YYYYMMDDHHMMSS string into 'YYYY/MM/DD HH:MM:SS'."""
		year = fecha[:4]
		month = fecha[4:6]
		day = fecha[6:8]
		hour = fecha[8:10]
		minutes = fecha[10:12]
		seconds = fecha[12:14]
		return "%s/%s/%s %s:%s:%s" % (year, month, day, hour, minutes, seconds)

	def normalizar_existencia(self, existencia):
		"""Coerce a stock value from the DB into an int.

		Accepts ints, numeric strings, or strings with a numeric prefix
		(e.g. '12 pzas'); anything else normalizes to 0.
		"""
		try:
			existencia = int(existencia)
		except (TypeError, ValueError):
			if isinstance(existencia, str):
				# Bug fix: match the whole leading number (not one digit)
				# and return an int so callers can do arithmetic on it.
				number = re.match(r"\d+", existencia)
				if number:
					existencia = int(number.group(0))
				else:
					existencia = 0
			else:
				existencia = 0
		return existencia
"content_hash": "82093e41a0e40189ab181801f84eb6fd",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 149,
"avg_line_length": 46.66233766233766,
"alnum_prop": 0.5816866128583357,
"repo_name": "hey-mx/ventas_pygtk",
"id": "688bb3733889ed8d4c3f478e013c5b65d61918ec",
"size": "14399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ventas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55092"
}
],
"symlink_target": ""
} |
__author__ = 'Orthanc Minas'
| {
"content_hash": "de3e26284365d6057a3935c8a472ac00",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.6206896551724138,
"repo_name": "orthancminas/Company-Symulator",
"id": "91935bcf5b0ed9ea5cf4be77b6998ba860a7640b",
"size": "29",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/com/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1055"
}
],
"symlink_target": ""
} |
from . import fake
FakeObject = fake.Fake
| {
"content_hash": "ba7d6d6a7e320eff68b9a394a775430f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 22,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.7441860465116279,
"repo_name": "haarcuba/testix",
"id": "8a9730aa7dc527f806e026d98410dc4620866f21",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testix/fakeobject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46054"
},
{
"name": "Ruby",
"bytes": "2247"
},
{
"name": "Shell",
"bytes": "450"
},
{
"name": "Vim Script",
"bytes": "76189"
}
],
"symlink_target": ""
} |
import sys
import re
import socket
from amonagent.modules.core import (
get_uptime,
get_memory_info,
get_cpu_utilization,
get_load_average,
get_network_traffic,
get_ip_address,
get_cpu_info,
disk_check
)
class TestSystemCheck(object):
    """Sanity checks for the core system-metric collectors.

    Each test calls one collector from amonagent.modules.core and asserts
    the shape/format of its result (keys present, value types, numeric
    string formats) rather than exact machine-dependent values.
    """
    def test_uptime(self):
        uptime = get_uptime()
        assert isinstance(uptime, str)
    def test_ip_address(self):
        ip_address = get_ip_address()
        valid_ip = False
        try:
            socket.inet_pton(socket.AF_INET, ip_address)
            valid_ip = True
        except AttributeError:  # no inet_pton here, sorry
            try:
                socket.inet_aton(ip_address)
                valid_ip = True
            except socket.error:
                pass
        except socket.error:  # not a valid address
            pass
        assert valid_ip
    def test_memory(self):
        memory_dict = get_memory_info()
        assert 'free_mb' in memory_dict
        assert 'total_mb' in memory_dict
        assert 'used_mb' in memory_dict
        assert 'used_percent' in memory_dict
        assert 'swap_free_mb' in memory_dict
        assert 'swap_used_mb' in memory_dict
        assert 'swap_used_percent' in memory_dict
        assert 'swap_total_mb' in memory_dict
        # Bug fix: the original asserted `'used_percent' > 0`, comparing the
        # string literal to 0 instead of the metric value.
        assert memory_dict['used_percent'] > 0
        for v in memory_dict.values():
            assert isinstance(v, int)
    def test_cpu_info(self):
        cpu_info = get_cpu_info()
        assert isinstance(cpu_info, dict)
        assert len(cpu_info.keys()) > 0
    def test_disk(self):
        disk = disk_check.check()
        for k in disk:
            _dict = disk[k]
            assert 'used' in _dict
            assert 'percent' in _dict
            assert 'free' in _dict
            assert 'volume' in _dict
            assert 'total' in _dict
    def test_cpu(self):
        cpu = get_cpu_utilization()
        assert 'idle' in cpu
        assert 'user' in cpu
        assert 'system' in cpu
        for v in cpu.values():
            # Could be 1.10 - 4, 10.10 - 5, 100.00 - 6
            assert len(v) == 4 or len(v) == 5 or len(v) == 6
            value_regex = re.compile(r'\d+[\.]\d+')
            assert re.match(value_regex, v)
    def test_loadavg(self):
        loadavg = get_load_average()
        assert 'minute' in loadavg
        assert 'five_minutes' in loadavg
        assert 'fifteen_minutes' in loadavg
        assert 'cores' in loadavg
        assert isinstance(loadavg['cores'], int)
        assert isinstance(loadavg['minute'], str)
        assert isinstance(loadavg['five_minutes'], str)
        assert isinstance(loadavg['fifteen_minutes'], str)
        value_regex = re.compile(r'\d+[\.]\d+')
        assert re.match(value_regex, loadavg['minute'])
        assert re.match(value_regex, loadavg['five_minutes'])
        assert re.match(value_regex, loadavg['fifteen_minutes'])
    def test_network(self):
        network_data = get_network_traffic()
        value_regex = re.compile(r'\d+[\.]\d+')
        assert isinstance(network_data, dict)
        # Bug fix: `.iteritems()` is Python 2-only; `.items()` works on both.
        for key, value in network_data.items():
            assert key not in ['lo', 'IFACE']
            for k in value.keys():
                assert k in ['inbound', 'outbound']
            for k, v in value.items():
                assert re.match(value_regex, v)
| {
"content_hash": "d7c6a3b2a7788e8bde15ae5211949cc2",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 58,
"avg_line_length": 21.519083969465647,
"alnum_prop": 0.6644200070947145,
"repo_name": "amonapp/amonagent-legacy",
"id": "6caebb7c9bc5800c6c84b35ddc4cfe50c3b814b2",
"size": "2819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/modules_core_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "4892"
},
{
"name": "Puppet",
"bytes": "330"
},
{
"name": "Python",
"bytes": "62122"
},
{
"name": "SaltStack",
"bytes": "1193"
},
{
"name": "Shell",
"bytes": "10114"
}
],
"symlink_target": ""
} |
import os
import tempfile
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
import pandas as pd
import pytest
import yaml
from pandas.util import testing as pdt
from collections import OrderedDict
from .. import yamlio
@pytest.fixture
def test_cfg():
    """Sample configuration dict shared by the serialization tests."""
    cfg = dict(name='test', ytransform='xyz', unordered='abc')
    return cfg
@pytest.fixture
def expected_yaml():
    """YAML text that `test_cfg` is expected to serialize to."""
    lines = ['name: test', '', 'ytransform: xyz', '', 'unordered: abc']
    return '\n'.join(lines) + '\n'
@pytest.fixture
def test_file(request):
    """Yield a path for a temporary .yaml file, removed after the test."""
    path = tempfile.NamedTemporaryFile(suffix='.yaml').name

    def _remove():
        if os.path.exists(path):
            os.remove(path)

    request.addfinalizer(_remove)
    return path
def test_ordered_yaml(test_cfg, expected_yaml):
    """ordered_yaml() preserves the insertion order of the config keys."""
    assert yamlio.ordered_yaml(test_cfg) == expected_yaml
def test_convert_to_yaml_str(test_cfg, expected_yaml):
    """With str_or_buffer=None, convert_to_yaml returns the YAML string."""
    result = yamlio.convert_to_yaml(test_cfg, str_or_buffer=None)
    assert result == expected_yaml
def test_convert_to_yaml_file(test_cfg, expected_yaml, test_file):
    """Given a path, convert_to_yaml writes the YAML text to that file."""
    yamlio.convert_to_yaml(test_cfg, test_file)
    with open(test_file) as handle:
        contents = handle.read()
    assert contents == expected_yaml
def test_convert_to_yaml_buffer(test_cfg, expected_yaml):
    """Given a file-like object, convert_to_yaml writes the YAML into it."""
    buff = StringIO()
    yamlio.convert_to_yaml(test_cfg, buff)
    assert buff.getvalue() == expected_yaml
class Test_yaml_to_dict(object):
    """yaml_to_dict accepts a YAML string, a file path, or a buffer."""
    @classmethod
    def setup_class(cls):
        # NOTE: the leading whitespace inside this literal is significant to
        # the YAML parser; do not re-indent it.
        cls.yaml_str = """
        a:
            x: 1
            y: 2
            z: 3
        b:
            x: 3
            y: 4
            z: 5
        """
        # Expected parse result for yaml_str, used by every test below.
        cls.expect_dict = {
            'a': {'x': 1, 'y': 2, 'z': 3},
            'b': {'x': 3, 'y': 4, 'z': 5}}
    def test_str(self):
        # Parse directly from a YAML string.
        assert yamlio.yaml_to_dict(yaml_str=self.yaml_str) == self.expect_dict
    def test_file(self, test_file):
        # Round-trip through a real file on disk.
        with open(test_file, 'w') as f:
            f.write(self.yaml_str)
        assert yamlio.yaml_to_dict(str_or_buffer=test_file) == self.expect_dict
    def test_buffer(self):
        # Parse from any file-like object.
        buff = StringIO(self.yaml_str)
        buff.seek(0)
        assert yamlio.yaml_to_dict(str_or_buffer=buff) == self.expect_dict
    def test_raises(self):
        # Calling with neither a string nor a buffer is an error.
        with pytest.raises(ValueError):
            yamlio.yaml_to_dict()
def assert_series_equal(a, b):
    """Assert that two Series carry the same index labels and values."""
    index_match = a.index.values == b.index.values
    assert index_match.all()
    value_match = a.values == b.values
    assert value_match.all()
def assert_dfs_equal(a, b):
    """Assert that two DataFrames share columns, index labels, and values."""
    columns_match = a.columns == b.columns
    assert columns_match.all()
    index_match = a.index.values == b.index.values
    assert index_match.all()
    values_match = a.values == b.values
    assert values_match.all()
def test_series_to_yaml_safe_int_index():
    """An int-indexed Series round-trips through series_to_yaml_safe + YAML."""
    s = pd.Series(np.arange(100, 103), index=np.arange(3))
    d = yamlio.series_to_yaml_safe(s)
    assert d == {0: 100, 1: 101, 2: 102}
    y = yaml.dump(d, default_flow_style=False)
    # Fix: yaml.load without an explicit Loader is unsafe and rejected by
    # PyYAML >= 5.1/6; safe_load parses this plain-scalar document the same.
    assert_series_equal(pd.Series(yaml.safe_load(y)), s)
def test_series_to_yaml_safe_str_index():
    """A str-indexed Series round-trips through series_to_yaml_safe + YAML."""
    s = pd.Series(
        np.array(['a', 'b', 'c']), index=np.array(['x', 'y', 'z']))
    d = yamlio.series_to_yaml_safe(s)
    assert d == {'x': 'a', 'y': 'b', 'z': 'c'}
    y = yaml.dump(d, default_flow_style=False)
    # Fix: yaml.load without an explicit Loader is unsafe and rejected by
    # PyYAML >= 5.1/6; safe_load parses this plain-scalar document the same.
    assert_series_equal(pd.Series(yaml.safe_load(y)), s)
def test_frame_to_yaml_safe():
    """A mixed-dtype DataFrame round-trips through frame_to_yaml_safe + YAML."""
    df = pd.DataFrame(
        {'col1': np.array([100, 200, 300]),
         'col2': np.array(['a', 'b', 'c'])},
        index=np.arange(3))
    d = yamlio.frame_to_yaml_safe(df)
    assert d == {'col1': {0: 100, 1: 200, 2: 300},
                 'col2': {0: 'a', 1: 'b', 2: 'c'}}
    y = yaml.dump(d, default_flow_style=False)
    # Fix: yaml.load without an explicit Loader is unsafe and rejected by
    # PyYAML >= 5.1/6; safe_load parses this plain-scalar document the same.
    assert_dfs_equal(pd.DataFrame(yaml.safe_load(y)), df)
def test_ordered_dict():
    """A nested OrderedDict round-trips through YAML with order preserved."""
    inner_dict = OrderedDict([('z', 'had'), ('a', 'a'), ('f', 'little')])
    outer_dict = OrderedDict([(10, 'marry'), ('inner', inner_dict),
                              ('a', 'lamb')])
    yaml_text = yamlio.convert_to_yaml(outer_dict, None)
    restored = yamlio.yaml_to_dict(yaml_text, ordered=True)
    assert outer_dict == restored
def test_ordered_series_to_yaml_safe():
    """A Series survives an ordered YAML round trip with index order intact."""
    series = pd.Series(np.arange(3), index=list('zxy'))
    safe_dict = yamlio.series_to_yaml_safe(series, True)
    yaml_text = yamlio.convert_to_yaml(safe_dict, None)
    restored = pd.Series(yamlio.yaml_to_dict(yaml_text, ordered=True))
    assert_series_equal(series, restored)
def test_ordered_frame_to_yaml_safe():
    """A DataFrame survives an ordered YAML round trip (columns, index, values)."""
    # Columns deliberately out of alphabetical order, descending index.
    frame = pd.DataFrame(
        OrderedDict([
            ('z', np.arange(0, 5)),
            ('y', np.arange(5, 10)),
            ('x', list('abcde'))
        ]),
        index=pd.Index(np.arange(20, 15, -1))
    )
    # Serialize to YAML and back.
    safe_dict = yamlio.frame_to_yaml_safe(frame, True)
    yaml_text = yamlio.convert_to_yaml(safe_dict, None)
    restored_dict = yamlio.yaml_to_dict(yaml_text, ordered=True)
    # Rebuild the frame, restoring the original column order.
    restored = pd.DataFrame.from_dict(
        restored_dict, orient='index').reindex(restored_dict.keys()).T
    assert_dfs_equal(frame, restored)
| {
"content_hash": "265a5a6294c6522b36442cd24fd59264",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 84,
"avg_line_length": 24.748743718592966,
"alnum_prop": 0.5955329949238579,
"repo_name": "apdjustino/urbansim",
"id": "d5804bed5248820eecf47f0645cda3d77003fcc7",
"size": "4925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urbansim/utils/tests/test_yamlio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "27290"
},
{
"name": "Python",
"bytes": "365979"
},
{
"name": "R",
"bytes": "1763"
},
{
"name": "Shell",
"bytes": "2072"
}
],
"symlink_target": ""
} |
import re
__all__ = ['NormalizedVersion', 'suggest_normalized_version',
'VersionPredicate', 'is_valid_version', 'is_valid_versions',
'is_valid_predicate']
# A marker used in the second and third parts of the `parts` tuple, for
# versions that don't have those segments, to sort properly. An example
# of versions in sort order ('highest' last):
# 1.0b1 ((1,0), ('b',1), ('f',))
# 1.0.dev345 ((1,0), ('f',), ('dev', 345))
# 1.0 ((1,0), ('f',), ('f',))
# 1.0.post256.dev345 ((1,0), ('f',), ('f', 'post', 256, 'dev', 345))
# 1.0.post345 ((1,0), ('f',), ('f', 'post', 345, 'f'))
# ^ ^ ^
# 'b' < 'f' ---------------------/ | |
# | |
# 'dev' < 'f' < 'post' -------------------/ |
# |
# 'dev' < 'f' ----------------------------------------------/
# Other letters would do, but 'f' for 'final' is kind of nice.
_FINAL_MARKER = ('f',)
_VERSION_RE = re.compile(r'''
^
(?P<version>\d+\.\d+) # minimum 'N.N'
(?P<extraversion>(?:\.\d+)*) # any number of extra '.N' segments
(?:
(?P<prerel>[abc]|rc) # 'a'=alpha, 'b'=beta, 'c'=release candidate
# 'rc'= alias for release candidate
(?P<prerelversion>\d+(?:\.\d+)*)
)?
(?P<postdev>(\.post(?P<post>\d+))?(\.dev(?P<dev>\d+))?)?
$''', re.VERBOSE)
class IrrationalVersionError(Exception):
    """Raised when a version string cannot be parsed as a rational version."""
class HugeMajorVersionNumError(IrrationalVersionError):
    """Raised for versions whose major number is implausibly large.

    Typically the author used a year or full date (e.g. "2009.01.03") as
    the major version. See the `error_on_huge_major_num` option in
    `NormalizedVersion` for details; that option disables this guard when
    set to False.
    """
class NormalizedVersion(object):
    """A rational version.

    Good:
        1.2         # equivalent to "1.2.0"
        1.2.0
        1.2a1
        1.2.3a2
        1.2.3b1
        1.2.3c1
        1.2.3.4

    Bad:
        1           # minimum two numbers
        1.2a        # release level must have a release serial
        1.2.3b
    """
    def __init__(self, s, error_on_huge_major_num=True):
        """Create a NormalizedVersion instance from a version string.

        @param s {str} The version string.
        @param error_on_huge_major_num {bool} Whether to consider an
            apparent use of a year or full date as the major version number
            an error. Default True. One of the observed patterns on PyPI
            before the introduction of `NormalizedVersion` was version
            numbers like this:
                2009.01.03
                20040603
                2005.01
            This guard is here to strongly encourage the package author to
            use an alternate version, because a release deployed into PyPI
            and, e.g. downstream Linux package managers, will forever remove
            the possibility of using a version number like "1.0" (i.e.
            where the major number is less than that huge major number).

        Raises IrrationalVersionError (or its subclass
        HugeMajorVersionNumError) when `s` cannot be parsed.
        """
        self.is_final = True  # by default, consider a version as final.
        self._parse(s, error_on_huge_major_num)
    @classmethod
    def from_parts(cls, version, prerelease=_FINAL_MARKER,
                   devpost=_FINAL_MARKER):
        # Alternate constructor from a pre-split (version, prerelease,
        # devpost) triple, shaped like `self.parts`.
        return cls(cls.parts_to_str((version, prerelease, devpost)))
    def _parse(self, s, error_on_huge_major_num=True):
        """Parses a string version into parts."""
        match = _VERSION_RE.search(s)
        if not match:
            raise IrrationalVersionError(s)
        groups = match.groupdict()
        parts = []
        # main version: keep at least two segments, append any extra '.N's
        block = self._parse_numdots(groups['version'], s, False, 2)
        extraversion = groups.get('extraversion')
        if extraversion not in ('', None):
            block += self._parse_numdots(extraversion[1:], s)
        parts.append(tuple(block))
        # prerelease: ('a'|'b'|'c'|'rc', serial...) or the final marker
        prerel = groups.get('prerel')
        if prerel is not None:
            block = [prerel]
            block += self._parse_numdots(groups.get('prerelversion'), s,
                                         pad_zeros_length=1)
            parts.append(tuple(block))
            self.is_final = False
        else:
            parts.append(_FINAL_MARKER)
        # postdev: optional '.postN' and/or '.devN' suffixes
        if groups.get('postdev'):
            post = groups.get('post')
            dev = groups.get('dev')
            postdev = []
            if post is not None:
                # leading 'f' makes '.postN' sort above the bare release
                # (see the module-level sort-order notes).
                postdev.extend([_FINAL_MARKER[0], 'post', int(post)])
                if dev is None:
                    postdev.append(_FINAL_MARKER[0])
            if dev is not None:
                postdev.extend(['dev', int(dev)])
                self.is_final = False
            parts.append(tuple(postdev))
        else:
            parts.append(_FINAL_MARKER)
        self.parts = tuple(parts)
        if error_on_huge_major_num and self.parts[0][0] > 1980:
            raise HugeMajorVersionNumError("huge major version number, %r, "
                "which might cause future problems: %r" % (self.parts[0][0], s))
    def _parse_numdots(self, s, full_ver_str, drop_trailing_zeros=True,
                       pad_zeros_length=0):
        """Parse 'N.N.N' sequences, return a list of ints.

        @param s {str} 'N.N.N...' sequence to be parsed
        @param full_ver_str {str} The full version string from which this
            comes. Used for error strings.
        @param drop_trailing_zeros {bool} Whether to drop trailing zeros
            from the returned list. Default True.
        @param pad_zeros_length {int} The length to which to pad the
            returned list with zeros, if necessary. Default 0.

        Raises IrrationalVersionError for segments with a leading zero.
        """
        nums = []
        for n in s.split("."):
            if len(n) > 1 and n[0] == '0':
                raise IrrationalVersionError("cannot have leading zero in "
                    "version number segment: '%s' in %r" % (n, full_ver_str))
            nums.append(int(n))
        if drop_trailing_zeros:
            while nums and nums[-1] == 0:
                nums.pop()
        while len(nums) < pad_zeros_length:
            nums.append(0)
        return nums
    def __str__(self):
        return self.parts_to_str(self.parts)
    @classmethod
    def parts_to_str(cls, parts):
        """Transforms a version expressed in tuple into its string
        representation."""
        # XXX This doesn't check for invalid tuples
        main, prerel, postdev = parts
        s = '.'.join(str(v) for v in main)
        if prerel is not _FINAL_MARKER:
            s += prerel[0]
            s += '.'.join(str(v) for v in prerel[1:])
        if postdev and postdev is not _FINAL_MARKER:
            if postdev[0] == 'f':
                # drop the internal sort marker before printing
                postdev = postdev[1:]
            i = 0
            while i < len(postdev):
                # even slots are tags ('post'/'dev'), odd slots are serials;
                # each tag gets a leading '.'
                if i % 2 == 0:
                    s += '.'
                s += str(postdev[i])
                i += 1
        return s
    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self)
    def _cannot_compare(self, other):
        # Comparisons are only defined between NormalizedVersion instances.
        raise TypeError("cannot compare %s and %s"
                        % (type(self).__name__, type(other).__name__))
    def __eq__(self, other):
        if not isinstance(other, NormalizedVersion):
            self._cannot_compare(other)
        return self.parts == other.parts
    def __lt__(self, other):
        if not isinstance(other, NormalizedVersion):
            self._cannot_compare(other)
        # Tuple ordering on `parts` yields the documented version ordering.
        return self.parts < other.parts
    def __ne__(self, other):
        return not self.__eq__(other)
    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))
    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)
    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self.parts)
def suggest_normalized_version(s):
    """Suggest a normalized version close to the given version string.

    If you have a version string that isn't rational (i.e. NormalizedVersion
    doesn't like it) then you might be able to get an equivalent (or close)
    rational version from this function.

    This does a number of simple normalizations to the given string, based
    on observation of versions currently in use on PyPI. Given a dump of
    those version during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
      with the automatic suggestion
    - 3474 (81.04%) match when using this suggestion method

    @param s {str} An irrational version string.
    @returns A rational version string, or None, if couldn't determine one.
    """
    try:
        NormalizedVersion(s)
    except IrrationalVersionError:
        pass
    else:
        return s   # already rational

    candidate = s.lower()
    # Spell out textual markers as their single-letter equivalents and
    # normalize separators (part of this could use maketrans).
    substitutions = (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
                     ('beta', 'b'), ('rc', 'c'), ('-final', ''),
                     ('-pre', 'c'),
                     ('-release', ''), ('.release', ''), ('-stable', ''),
                     ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
                     ('final', ''))
    for needle, replacement in substitutions:
        candidate = candidate.replace(needle, replacement)
    # if something ends with dev or pre, we add a 0
    candidate = re.sub(r"pre$", r"pre0", candidate)
    candidate = re.sub(r"dev$", r"dev0", candidate)
    # if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
    # let's remove the dash or dot
    candidate = re.sub(r"([abc|rc])[\-\.](\d+)$", r"\1\2", candidate)
    # 1.0-dev-r371 -> 1.0.dev371
    # 0.1-dev-r79 -> 0.1.dev79
    candidate = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", candidate)
    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
    candidate = re.sub(r"[.~]?([abc])\.?", r"\1", candidate)
    # Clean: v0.3, v1.0
    if candidate.startswith('v'):
        candidate = candidate[1:]
    # Clean leading '0's on numbers.
    # TODO: unintended side-effect on, e.g., "2003.05.09"
    # PyPI stats: 77 (~2%) better
    candidate = re.sub(r"\b0+(\d+)(?!\d)", r"\1", candidate)
    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
    # zero.
    # PyPI stats: 245 (7.56%) better
    candidate = re.sub(r"(\d+[abc])$", r"\g<1>0", candidate)
    # the 'dev-rNNN' tag is a dev tag
    candidate = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", candidate)
    # clean the - when used as a pre delimiter
    candidate = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", candidate)
    # a terminal "dev" or "devel" can be changed into ".dev0"
    candidate = re.sub(r"[\.\-](dev|devel)$", r".dev0", candidate)
    # a terminal "dev" can be changed into ".dev0"
    candidate = re.sub(r"(?![\.\-])dev$", r".dev0", candidate)
    # a terminal "final" or "stable" can be removed
    candidate = re.sub(r"(final|stable)$", "", candidate)
    # The 'r' and the '-' tags are post release tags
    #   0.4a1.r10     -> 0.4a1.post10
    #   0.9.33-17222  -> 0.9.3.post17222
    #   0.9.33-r17222 -> 0.9.3.post17222
    candidate = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", candidate)
    # Clean 'r' instead of 'dev' usage:
    #   0.9.33+r17222 -> 0.9.3.dev17222
    #   1.0dev123     -> 1.0.dev123
    #   1.0.git123    -> 1.0.dev123
    #   1.0.bzr123    -> 1.0.dev123
    #   0.1a0dev.123  -> 0.1a0.dev123
    # PyPI stats: ~150 (~4%) better
    candidate = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", candidate)
    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
    #   0.2.pre1      -> 0.2c1
    #   0.2-c1        -> 0.2c1
    #   1.0preview123 -> 1.0c123
    # PyPI stats: ~21 (0.62%) better
    candidate = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", candidate)
    # Tcl/Tk uses "px" for their post release markers
    candidate = re.sub(r"p(\d+)$", r".post\1", candidate)

    try:
        NormalizedVersion(candidate)
    except IrrationalVersionError:
        return None
    return candidate
# A predicate is: "ProjectName (VERSION1, VERSION2, ..)
_PREDICATE = re.compile(r"(?i)^\s*(\w[\s\w-]*(?:\.\w*)*)(.*)")
# Version lists may be parenthesized ("(>1.0, <2.0)") or bare ("1.0").
_VERSIONS = re.compile(r"^\s*\((?P<versions>.*)\)\s*$|^\s*(?P<versions2>.*)\s*$")
# A bare version list, stripped of surrounding whitespace.
_PLAIN_VERSIONS = re.compile(r"^\s*(.*)\s*$")
# Splits an "<op> <version>" clause; op is one of <=, >=, <, >, !=, ==.
_SPLIT_CMP = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
def _split_predicate(predicate):
    """Split an "op version" clause into (op, NormalizedVersion).

    A clause without a comparison operator defaults to '=='.
    """
    parsed = _SPLIT_CMP.match(predicate)
    if parsed is None:
        # probably no op, we'll use "=="
        return '==', NormalizedVersion(predicate)
    op, version_str = parsed.groups()
    return op, NormalizedVersion(version_str)
class VersionPredicate(object):
    """Defines a predicate: ProjectName (>ver1,ver2, ..)"""

    # '==' and '!=' use prefix matching so that e.g. "1.2.3" satisfies
    # "== 1.2"; '<='/'>=' accept either a prefix match or a strict compare.
    _operators = {"<": lambda x, y: x < y,
                  ">": lambda x, y: x > y,
                  "<=": lambda x, y: str(x).startswith(str(y)) or x < y,
                  ">=": lambda x, y: str(x).startswith(str(y)) or x > y,
                  "==": lambda x, y: str(x).startswith(str(y)),
                  "!=": lambda x, y: not str(x).startswith(str(y)),
                  }

    def __init__(self, predicate):
        self._string = predicate
        stripped = predicate.strip()
        head = _PREDICATE.match(stripped)
        if head is None:
            raise ValueError('Bad predicate "%s"' % stripped)
        name, rest = head.groups()
        self.name = name.strip()
        self.predicates = []
        if rest is None:
            return
        versions_match = _VERSIONS.match(rest.strip())
        if versions_match is None:
            return
        groups = versions_match.groupdict()
        versions = groups['versions']
        if versions is None:
            versions = groups.get('versions2')
        if versions is None:
            return
        for clause in versions.split(','):
            if clause.strip() == '':
                continue
            self.predicates.append(_split_predicate(clause))

    def match(self, version):
        """Check if the provided version matches the predicates."""
        if isinstance(version, str):
            version = NormalizedVersion(version)
        return all(self._operators[op](version, target)
                   for op, target in self.predicates)

    def __repr__(self):
        return self._string
class _Versions(VersionPredicate):
    """A bare comma-separated version list with no project name."""

    def __init__(self, predicate):
        stripped = predicate.strip()
        clauses = _PLAIN_VERSIONS.match(stripped).groups()[0]
        self.name = None
        self.predicates = [_split_predicate(clause.strip())
                           for clause in clauses.split(',')]
class _Version(VersionPredicate):
    """A single bare version clause with no project name."""

    def __init__(self, predicate):
        stripped = predicate.strip()
        clause = _PLAIN_VERSIONS.match(stripped).groups()[0]
        self.name = None
        self.predicates = _split_predicate(clause)
def is_valid_predicate(predicate):
    """Return True if `predicate` parses as a full version predicate."""
    try:
        VersionPredicate(predicate)
    except (ValueError, IrrationalVersionError):
        return False
    return True
def is_valid_versions(predicate):
    """Return True if `predicate` parses as a bare version list."""
    try:
        _Versions(predicate)
    except (ValueError, IrrationalVersionError):
        return False
    return True
def is_valid_version(predicate):
    """Return True if `predicate` parses as a single version clause."""
    try:
        _Version(predicate)
    except (ValueError, IrrationalVersionError):
        return False
    return True
def get_version_predicate(requirements):
    """Return a VersionPredicate object, from a string or an already
    existing object.
    """
    if isinstance(requirements, str):
        return VersionPredicate(requirements)
    return requirements
| {
"content_hash": "6bf11213d0627bc6cd28816f9165b74c",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 81,
"avg_line_length": 35.02407002188184,
"alnum_prop": 0.5300512307884543,
"repo_name": "cournape/Bento",
"id": "36903cc2b6a7576422d2841257079cbc4000646b",
"size": "16006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bento/private/version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7553"
},
{
"name": "C++",
"bytes": "165"
},
{
"name": "CSS",
"bytes": "5358"
},
{
"name": "FORTRAN",
"bytes": "97"
},
{
"name": "Python",
"bytes": "1325666"
},
{
"name": "Shell",
"bytes": "6042"
}
],
"symlink_target": ""
} |
import subprocess
import shutil
from time import sleep
from unittest import TestCase
from dusty.commands.upgrade import _test_dusty_binary
from dusty import constants
from ..testcases import DustyIntegrationTestCase
class TestUpgrade(DustyIntegrationTestCase):
    """Integration tests for the `dusty upgrade` command."""

    def tearDown(self):
        super(TestUpgrade, self).tearDown()
        self.stop_daemon()

    def run_daemon_binary(self, path='./dist/dusty'):
        """Start the daemon from a packaged binary; pause while it boots."""
        self.daemon_process = subprocess.Popen(
            args=[path, '-d', '--suppress-warnings'], stdout=subprocess.PIPE)
        sleep(1)

    def run_daemon_source(self):
        """Start the daemon from the installed source entry point."""
        self.daemon_process = subprocess.Popen(
            args=['dusty', '-d', '--suppress-warnings'], stdout=subprocess.PIPE)
        sleep(1)

    def stop_daemon(self):
        self.daemon_process.terminate()

    def recreate_dusty_binary(self):
        # Rebuild ./dist/dusty so later tests see the current version again.
        subprocess.check_call(['./setup/create_binaries.sh'],
                              stdout=subprocess.PIPE)

    def test_upgrade_2_1(self):
        """Upgrading a binary daemon swaps the daemon version only."""
        self.run_daemon_binary()
        target = '0.2.1'
        output = self.run_command('version')
        self.assertInSameLine(output, 'daemon', 'version')
        output = self.run_command('upgrade {}'.format(target))
        self.assertInSameLine(output, 'Downloading', target)
        self.assertInSameLine(output, 'Finished upgrade', target)
        sleep(2)
        output = self.run_command('version', raise_on_error=False)
        self.assertInSameLine(output, 'daemon', 'version', target)
        self.assertInSameLine(output, 'client', 'version', constants.VERSION)
        self.recreate_dusty_binary()

    def test_upgrade_source_fails(self):
        """Upgrade is refused when the daemon runs from source."""
        self.run_daemon_source()
        output = self.run_command('upgrade')
        self.assertTrue('It looks like you\'re running Dusty from source' in output)
        self.assertBinaryVersionUnchanged()

    def test_upgrade_bad_name_fails(self):
        """Upgrade is refused when the binary has an unexpected name."""
        shutil.copy('dist/dusty', 'dist/python')
        self.run_daemon_binary(path='./dist/python')
        with self.assertRaises(self.CommandError):
            self.run_command('upgrade 0.2.2')
        self.assertBinaryVersionUnchanged()

    def assertBinaryVersionUnchanged(self):
        output = self.run_command('version')
        self.assertInSameLine(output, 'daemon', 'version', constants.VERSION)
| {
"content_hash": "025c41e86ece485a57cb41f59ced4cc0",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 115,
"avg_line_length": 37.733333333333334,
"alnum_prop": 0.6731448763250883,
"repo_name": "gamechanger/dusty",
"id": "d91c0daa17f6235ab85f033a740b95f44edc6094",
"size": "2264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/upgrade_integration/upgrade_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "845"
},
{
"name": "JavaScript",
"bytes": "1675"
},
{
"name": "Python",
"bytes": "493669"
},
{
"name": "Ruby",
"bytes": "769"
},
{
"name": "Shell",
"bytes": "3875"
}
],
"symlink_target": ""
} |
class action3component(object):
    """Component that lazily registers its animations and sounds.

    Callback setters wire in the host's play/register functions; the
    asset registrations run once, on the first play call.
    """

    def __init__(self):
        self._initialized = False

    def set_animplayfunc(self, animplayfunc):
        self._animplay = animplayfunc

    def set_soundplayfunc(self, soundplayfunc):
        self._soundplay = soundplayfunc

    def set_addanimfunc(self, addanimfunc):
        self.add_animation = addanimfunc

    def set_addsoundfunc(self, addsoundfunc):
        self.add_sound = addsoundfunc

    def _init(self):
        # Register each action's animation and sound exactly once.
        if self._initialized:
            return
        for action, animation, sound in (("swim", "splash-animation", "splash.wav"),
                                         ("crouch", "crouching", "crouch.wav")):
            self.add_animation(action, animation)
            self.add_sound(action, sound)
        self._initialized = True

    def animplay(self, anim):
        self._init()
        self._animplay(anim)

    def soundplay(self, sound):
        self._init()
        self._soundplay(sound)
| {
"content_hash": "4a5113d6018865412067d5effe29bc00",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 54,
"avg_line_length": 27.96875,
"alnum_prop": 0.6212290502793296,
"repo_name": "agoose77/hivesystem",
"id": "ece81847edd3881d9c50a64a8dc2bd25ea777bae",
"size": "895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tutorial/layers/layer20/action3.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
import proto # type: ignore
# proto-plus module descriptor: registers the message types listed in the
# manifest under the published protobuf package for this schema.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1.schema.predict.params",
    manifest={
        "ImageClassificationPredictionParams",
    },
)
class ImageClassificationPredictionParams(proto.Message):
    r"""Prediction model parameters for Image Classification.

    Attributes:
        confidence_threshold (float):
            The Model only returns predictions with at
            least this confidence score. Default value is
            0.0
        max_predictions (int):
            The Model only returns up to that many top,
            by confidence score, predictions per instance.
            If this number is very high, the Model may
            return fewer predictions. Default value is 10.
    """
    # Proto field 1: minimum confidence score for returned predictions.
    confidence_threshold = proto.Field(
        proto.FLOAT,
        number=1,
    )
    # Proto field 2: cap on the number of predictions returned per instance.
    max_predictions = proto.Field(
        proto.INT32,
        number=2,
    )
# Export exactly the names declared in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "6a08ed196f5550fb72da22fc82da5b1b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 26.72972972972973,
"alnum_prop": 0.6319514661274014,
"repo_name": "googleapis/python-aiplatform",
"id": "92826107207e4971db4c8dfec9092382dd02f8c4",
"size": "1589",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
import numpy as np
import math
from scipy import ndimage, misc
from scipy.interpolate import interp1d
import re
import matplotlib.pyplot as plt
from glob import glob
from define_parameters import paths, parameters
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class image_augmentation:
    def __init__(self):
        """Bind augmentation settings from the project-wide config."""
        self.data_path = paths.DATA_PATH      # where the source images live
        self.output_path = paths.OUTPUT_PATH  # where augmented copies are written
        self.translation = parameters.TRANSLATION  # shift fraction used by translate()
        self.rotation = parameters.ROTATION        # angle in degrees used by rotate()
        self.shearing = parameters.SHEARING        # shear factor used by shear()
        self.contrast = parameters.CONTRAST        # (gain, bias) pair for change_contrast()
        self.test = False
def _read_data(self, file):
image = misc.imread(file)
return image
def mirror_rotate(self, image, direction='horizontal'):
if(direction == 'horizontal'):
image_mirror = image[:,::-1,:]
elif(direction == 'vertical'):
image_mirror = image[::-1,:,:]
return image_mirror
def translate(self, image, shift):
image_trans_left = np.append(image[:,int((1-shift)*len(image[0])):,:], image[:,0:int((1-shift)*len(image[0])),:], axis = 1)
image_trans_right = np.append(image[:,int(shift*len(image[0])):,:], image[:,0:int(shift*len(image[0])),:], axis = 1)
image_trans_down = np.append(image[:int(shift*len(image)),:,:], image[:int((1-shift)*len(image)),:,:], axis = 0)
image_trans_up = np.append(image[int(shift*len(image)):,:,:], image[int((1-shift)*len(image)):,:,:], axis = 0)
return image_trans_left, image_trans_right, image_trans_up, image_trans_down
    def rotate(self, image, angle):
        """Rotate the image by `angle` degrees about its center.

        Pixels are mapped individually through a rounded rotation matrix;
        gaps left by rounding in the last channel pass are patched from the
        neighbouring pixel.
        """
        angle = angle*3.14159/180.0  # degrees -> radians (approximate pi)
        image_rotated = np.zeros([len(image),len(image[0]),len(image[0][0])])
        for chan in range(0,len(image[0][0])):
            for x in range(0,len(image)):
                for y in range(0,len(image[0])):
                    # Rotate pixels (coordinates relative to the image center)
                    xpix = x - len(image)/2
                    ypix = y - len(image[0])/2
                    newX = int(round(math.cos(angle)*(xpix) + math.sin(angle)*(ypix)))
                    newY = int(round(-math.sin(angle)*(xpix) + math.cos(angle)*(ypix)))
                    # Shift the rotated coordinate back into array space,
                    # with a separate offset per quadrant.
                    if(newX <= 0 and newY < 0):
                        newX2 = newX + int(len(image)/2)
                        newY2 = newY + int(len(image[0])/2)
                    if(newX < 0 and newY >= 0):
                        newX2 = newX + int(len(image)/2)
                        newY2 = newY - int(len(image[0])/2)
                    if(newX >= 0 and newY > 0):
                        newX2 = newX - int(len(image)/2)
                        newY2 = newY - int(len(image[0])/2)
                    if(newX > 0 and newY <= 0):
                        newX2 = newX - int(len(image)/2)
                        newY2 = newY + int(len(image[0])/2)
                    image_rotated[newX2,newY2,chan] = image[x,y,chan]
                    # Fix lost pixels by correcting with previous pixel
                    # (only on the final channel, when the pixel is still
                    # all-zero but its left neighbour is not).
                    if(chan == len(image[0][0])-1 and np.array_equal(image_rotated[x,y], np.array([0,0,0]))
                       and np.array_equal(image_rotated[x, y-1], np.array([0,0,0])) == False):
                        image_rotated[x,y] = image_rotated[x, y-1]
        return image_rotated
def shear(self, image, factor, direction='horizontal'):
image_shear = np.zeros([len(image),len(image[0]),len(image[0][0])])
for chan in range(0,len(image[0][0])):
if(direction == "vertical"):
for x in range(0,len(image)):
for y in range(0,len(image[0])):
newX = int(round(x + factor*y))
newY = y
if(newX >= len(image)-1): break
image_shear[newX,newY,chan] = image[x,y,chan]
elif(direction == "horizontal"):
for x in range(0,len(image)):
for y in range(0,len(image[0])):
newX = x
newY = int(round(y + factor*x))
if(newY >= len(image[0])-1): break
image_shear[newX,newY,chan] = image[x,y,chan]
return image_shear
def change_contrast(self, image, factor_gain, factor_bias):
image_contrast = np.zeros([len(image),len(image[0]),len(image[0][0])])
for chan in range(0,len(image[0][0])):
image_contrast[:,:,chan] = (chan+1)*factor_gain*image[:,:,chan]+factor_bias
return image_contrast
    def resize_image(self, image, npix):
        """Resize `image` to npix x npix with per-channel cubic interpolation.

        Interpolates along the x axis first, then the y axis; the final
        rot90 loop undoes the transposition introduced by stacking the
        interpolated rows.
        NOTE(review): cubic interp1d needs several sample points per axis —
        presumably images are always larger than 4x4; confirm with callers.
        """
        resized_image_l = []
        # resize x axis
        for c in range(0,len(image[0][0])):
            for y in range(0,len(image)):
                f = interp1d(np.arange(0,len(image[0])), image[y,:,c], kind='cubic')
                xnew = np.linspace(0, len(image[0])-1, num=npix)
                if(y==0):
                    resized_image_c = f(xnew)
                else:
                    resized_image_c = np.vstack((resized_image_c,f(xnew)))
            resized_image_l.append(resized_image_c)
        resized_image_a = np.dstack([resized_image_l[0],resized_image_l[1],resized_image_l[2]])
        # resize y axis
        resized_image_l = []
        for c in range(0,len(image[0][0])):
            for x in range(0,len(resized_image_a[0])):
                f = interp1d(np.arange(0,len(resized_image_a)), resized_image_a[:,x,c], kind='cubic')
                xnew = np.linspace(0, len(resized_image_a)-1, num=npix)
                if(x==0):
                    resized_image_c = f(xnew)
                else:
                    resized_image_c = np.vstack((resized_image_c,f(xnew)))
            resized_image_l.append(resized_image_c)
        resized_image = np.dstack([resized_image_l[0],resized_image_l[1],resized_image_l[2]])
        # Fix image orientation
        for i in range(0,3):
            resized_image=np.rot90(resized_image)
        return resized_image
    def save_image(self, image, ori_file, suffixe):
        """Save each array in `image` as a JPEG under the output path.

        The base name is extracted from `ori_file` with a regex built from
        its number of path separators, and each saved copy is tagged with
        the corresponding entry of `suffixe`.
        """
        # Use Regular Expression to get the name of the Data folder
        count_slash = ori_file.count('/')
        pattern=""
        for i in range(count_slash-1):
            pattern=pattern+".*/"
        pattern=pattern+"(.*?)."+ori_file[-3]
        # Save the image using the Data folder as name
        for i in range(0,len(image)):
            misc.imsave(self.output_path+re.search(pattern, ori_file).group(1)
                        +"_"+suffixe[i]+".jpg",image[i])
def plot_image(self, image):
    """Display *image* in a matplotlib window (blocks until the window closes)."""
    plt.imshow(image)
    plt.show()
def perform_augmentation(self,npix):
    """Run the full augmentation pipeline over every image in data_path.

    Each source image is resized to npix x npix and saved, then each of the
    mirrored, translated, rotated, sheared and contrast-adjusted variants is
    saved with a distinct one-letter suffix ("z" = resized original,
    "a".."o" = augmented variants).
    """
    # List all images within folder
    filelist = glob(self.data_path+'*[a-zA-Z0-9].*')
    logger.info(("All images to be augmented are: ", filelist))
    #--
    # Set size for all images
    logger.info(("Image size: ",npix))
    #--
    # Tranformation for each image
    for file in filelist:
        logger.info(("Performing transformation for image: ", file))
        # Read image and resize it
        image = self._read_data(file)
        image = self.resize_image(image, npix)
        self.save_image([image],file,["z"])
        # Perform mirror rotation and save
        image_mirror = self.mirror_rotate(image)
        self.save_image([image_mirror],file,["a"])
        # Perform translation on original image and save
        image_trans_left, image_trans_right, image_trans_up, image_trans_down = self.translate(image, self.translation)
        self.save_image([image_trans_left, image_trans_right, image_trans_up, image_trans_down],file,["b","c","d","e"])
        # Perform translation on mirror image and save
        image_trans_left, image_trans_right, image_trans_up, image_trans_down = self.translate(image_mirror, self.translation)
        self.save_image([image_trans_left, image_trans_right, image_trans_up, image_trans_down],file,["f","g","h","i"])
        # Perform rotation on original image and save
        image_rotated = self.rotate(image, self.rotation)
        self.save_image([image_rotated],file,["j"])
        # Perform rotation on mirror image and save
        image_rotated = self.rotate(image_mirror, self.rotation)
        self.save_image([image_rotated],file,["k"])
        # Perform shearing on original image and save
        image_shear = self.shear(image, self.shearing)
        self.save_image([image_shear],file,["l"])
        # Perform shearing on mirror image and save
        image_shear = self.shear(image_mirror, self.shearing)
        self.save_image([image_shear],file,["m"])
        # Perform change contrast on original image and save
        image_contrast = self.change_contrast(image, self.contrast[0], self.contrast[1])
        self.save_image([image_contrast],file,["n"])
        # Perform change contrast on mirror image and save
        image_contrast = self.change_contrast(image_mirror, self.contrast[0], self.contrast[1])
        self.save_image([image_contrast],file,["o"])
        # Limit to the first image for testing
        if(self.test):
            logger.info("Stopping after the first image.")
            # NOTE(review): exit() terminates the whole process, not just this
            # method; a plain `return` would be gentler if callers follow on.
            exit()
if __name__ == '__main__':
    # Entry point: run the full augmentation pipeline at the configured size.
    augmenter = image_augmentation()
    augmenter.perform_augmentation(npix=parameters.SIZE_IMAGE)
| {
"content_hash": "b9b8f1e178471559193182a01aaab90f",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 131,
"avg_line_length": 48.74226804123711,
"alnum_prop": 0.546002538071066,
"repo_name": "ethilliez/Image_augmentation",
"id": "65b9753499d4782f6b62b78298a9ffaa82859f93",
"size": "9456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image_augmentation_nolib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15875"
}
],
"symlink_target": ""
} |
__author__ = 'Xsank'
import os.path
import tornado.ioloop
import tornado.options
import tornado.httpserver
import tornado.web
from urls import handlers
from settings import PORT
from settings import settings
class Application(tornado.web.Application):
    """Tornado application wired with the project's URL handlers and settings."""

    def __init__(self):
        super(Application, self).__init__(handlers, **settings)
def main():
    """Parse command-line options, then serve the application on PORT forever."""
    tornado.options.parse_command_line()
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(PORT)
    tornado.ioloop.IOLoop.instance().start()
if __name__=="__main__":
main() | {
"content_hash": "d7d8a1e8ad682ac57d7d391469a1165f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 66,
"avg_line_length": 22.03846153846154,
"alnum_prop": 0.7242582897033158,
"repo_name": "xsank/Pyrumpetroll",
"id": "d75fc77f170d563c28dbff76784bb07b074e25c7",
"size": "573",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4278"
},
{
"name": "JavaScript",
"bytes": "34046"
},
{
"name": "Python",
"bytes": "2424"
}
],
"symlink_target": ""
} |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# check if we have any users on call to assign to
users_on_call = demisto.executeCommand("getUsers", {"onCall": "true"})[0]['Contents']
# if we don't have on shift users, return error, else reassign the provided incident id's to the on-call analysts
if not users_on_call:
    return_error("No users on shift")
else:
    # Run AssignAnalystToIncident inside each target incident's own context
    # via executeCommandAt; incident_id may be a single id or a list of ids.
    incident_id = demisto.args().get('incident_id')
    demisto.results(demisto.executeCommand("executeCommandAt", {
        "command": "AssignAnalystToIncident", "arguments": {"onCall": "true"}, "incidents": incident_id}))
| {
"content_hash": "9856db409d66fc48c1d2df236b972eb1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 118,
"avg_line_length": 49.61538461538461,
"alnum_prop": 0.7007751937984497,
"repo_name": "VirusTotal/content",
"id": "047308f089e3391f0a8d3b78430c2a65b55e6de1",
"size": "645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/ShiftManagement-AssignToNextShift/Scripts/AssignToNextShift/AssignToNextShift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
import os
# try/except added for compatibility with python < 3.8
try:
from unittest import mock
from unittest.mock import AsyncMock # pragma: NO COVER
except ImportError: # pragma: NO COVER
import mock
import math
from google.api_core import (
future,
gapic_v1,
grpc_helpers,
grpc_helpers_async,
operation,
operations_v1,
path_template,
)
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import operation_async # type: ignore
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.type import date_pb2 # type: ignore
import grpc
from grpc.experimental import aio
from proto.marshal.rules import wrappers
from proto.marshal.rules.dates import DurationRule, TimestampRule
import pytest
from google.cloud.retail_v2.services.completion_service import (
CompletionServiceAsyncClient,
CompletionServiceClient,
transports,
)
from google.cloud.retail_v2.types import completion_service, import_config
def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair for mTLS tests."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a sentinel endpoint when the client's default endpoint is localhost.

    Used by the mTLS tests so that a localhost default still yields a distinct
    mtls endpoint for endpoint-switching assertions.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mTLS variants."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert CompletionServiceClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (CompletionServiceClient, "grpc"),
        (CompletionServiceAsyncClient, "grpc_asyncio"),
    ],
)
def test_completion_service_client_from_service_account_info(
    client_class, transport_name
):
    """from_service_account_info builds a client whose transport holds the parsed credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("retail.googleapis.com:443")
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.CompletionServiceGrpcTransport, "grpc"),
        (transports.CompletionServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_completion_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Transports call with_always_use_jwt_access on credentials only when the flag is set."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (CompletionServiceClient, "grpc"),
        (CompletionServiceAsyncClient, "grpc_asyncio"),
    ],
)
def test_completion_service_client_from_service_account_file(
    client_class, transport_name
):
    """from_service_account_file and _json both build clients using the file's credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("retail.googleapis.com:443")
def test_completion_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and by name."""
    transport = CompletionServiceClient.get_transport_class()
    available_transports = [
        transports.CompletionServiceGrpcTransport,
    ]
    assert transport in available_transports
    transport = CompletionServiceClient.get_transport_class("grpc")
    assert transport == transports.CompletionServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (CompletionServiceClient, transports.CompletionServiceGrpcTransport, "grpc"),
        (
            CompletionServiceAsyncClient,
            transports.CompletionServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    CompletionServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CompletionServiceClient),
)
@mock.patch.object(
    CompletionServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CompletionServiceAsyncClient),
)
def test_completion_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client honors api_endpoint, GOOGLE_API_USE_MTLS_ENDPOINT, quota_project_id and api_audience options."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(CompletionServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(CompletionServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is provided
    options = client_options.ClientOptions(
        api_audience="https://language.googleapis.com"
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience="https://language.googleapis.com",
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            CompletionServiceClient,
            transports.CompletionServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            CompletionServiceAsyncClient,
            transports.CompletionServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            CompletionServiceClient,
            transports.CompletionServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            CompletionServiceAsyncClient,
            transports.CompletionServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    CompletionServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CompletionServiceClient),
)
@mock.patch.object(
    CompletionServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CompletionServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_completion_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With MTLS endpoint "auto", cert usage and endpoint follow GOOGLE_API_USE_CLIENT_CERTIFICATE."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                        api_audience=None,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                    api_audience=None,
                )
@pytest.mark.parametrize(
    "client_class", [CompletionServiceClient, CompletionServiceAsyncClient]
)
@mock.patch.object(
    CompletionServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CompletionServiceClient),
)
@mock.patch.object(
    CompletionServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(CompletionServiceAsyncClient),
)
def test_completion_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves endpoint and cert from options and env vars."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (CompletionServiceClient, transports.CompletionServiceGrpcTransport, "grpc"),
        (
            CompletionServiceAsyncClient,
            transports.CompletionServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_completion_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given via client options are forwarded to the transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            CompletionServiceClient,
            transports.CompletionServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            CompletionServiceAsyncClient,
            transports.CompletionServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_completion_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials file from client options is forwarded to the transport constructor."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
def test_completion_service_client_client_options_from_dict():
    """A plain dict of client options is accepted and applied like a ClientOptions object."""
    with mock.patch(
        "google.cloud.retail_v2.services.completion_service.transports.CompletionServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = CompletionServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            CompletionServiceClient,
            transports.CompletionServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            CompletionServiceAsyncClient,
            transports.CompletionServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_completion_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file are used when the gRPC channel is created."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "retail.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="retail.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "request_type",
    [
        completion_service.CompleteQueryRequest,
        dict,
    ],
)
def test_complete_query(request_type, transport: str = "grpc"):
    """complete_query sends a CompleteQueryRequest and returns the stubbed response."""
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.complete_query), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = completion_service.CompleteQueryResponse(
            attribution_token="attribution_token_value",
        )
        response = client.complete_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == completion_service.CompleteQueryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, completion_service.CompleteQueryResponse)
    assert response.attribution_token == "attribution_token_value"
def test_complete_query_empty_call():
    """Calling complete_query with no request still issues a default CompleteQueryRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.complete_query), "__call__") as call:
        client.complete_query()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == completion_service.CompleteQueryRequest()
@pytest.mark.asyncio
async def test_complete_query_async(
    transport: str = "grpc_asyncio",
    request_type=completion_service.CompleteQueryRequest,
):
    """Async complete_query awaits the stubbed unary call and returns its response."""
    client = CompletionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.complete_query), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            completion_service.CompleteQueryResponse(
                attribution_token="attribution_token_value",
            )
        )
        response = await client.complete_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == completion_service.CompleteQueryRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, completion_service.CompleteQueryResponse)
    assert response.attribution_token == "attribution_token_value"
@pytest.mark.asyncio
async def test_complete_query_async_from_dict():
    """Async complete_query also accepts a plain dict request."""
    await test_complete_query_async(request_type=dict)
def test_complete_query_field_headers():
    """complete_query attaches the catalog as an x-goog-request-params routing header."""
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = completion_service.CompleteQueryRequest()
    request.catalog = "catalog_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.complete_query), "__call__") as call:
        call.return_value = completion_service.CompleteQueryResponse()
        client.complete_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "catalog=catalog_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_complete_query_field_headers_async():
    """Async complete_query attaches the catalog as an x-goog-request-params routing header."""
    client = CompletionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = completion_service.CompleteQueryRequest()
    request.catalog = "catalog_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.complete_query), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            completion_service.CompleteQueryResponse()
        )
        await client.complete_query(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "catalog=catalog_value",
    ) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type",
    [
        import_config.ImportCompletionDataRequest,
        dict,
    ],
)
def test_import_completion_data(request_type, transport: str = "grpc"):
    """import_completion_data issues the request and wraps the LRO in a future."""
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.import_completion_data), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.import_completion_data(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == import_config.ImportCompletionDataRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_import_completion_data_empty_call():
    """Calling ImportCompletionData with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.import_completion_data), "__call__"
    ) as call:
        client.import_completion_data()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == import_config.ImportCompletionDataRequest()
@pytest.mark.asyncio
async def test_import_completion_data_async(
    transport: str = "grpc_asyncio",
    request_type=import_config.ImportCompletionDataRequest,
):
    """ImportCompletionData (async) sends the expected request and returns an LRO future."""
    client = CompletionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.import_completion_data), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.import_completion_data(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == import_config.ImportCompletionDataRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_completion_data_async_from_dict():
    """Re-run the async ImportCompletionData test with a dict request."""
    await test_import_completion_data_async(request_type=dict)
def test_import_completion_data_field_headers():
    """ImportCompletionData puts the request ``parent`` into x-goog-request-params."""
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = import_config.ImportCompletionDataRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.import_completion_data), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.import_completion_data(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_completion_data_field_headers_async():
    """ImportCompletionData (async) puts the request ``parent`` into x-goog-request-params."""
    client = CompletionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = import_config.ImportCompletionDataRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.import_completion_data), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.import_completion_data(request)
    # Establish that the underlying gRPC stub method was called.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
def test_credentials_transport_error():
    """Mutually exclusive client options (credentials/transport/api_key/scopes) raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.CompletionServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = CompletionServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.CompletionServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = CompletionServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.CompletionServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = CompletionServiceClient(
            client_options=options,
            transport=transport,
        )
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = CompletionServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.CompletionServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = CompletionServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A custom transport instance passed to the client is used as-is."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.CompletionServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = CompletionServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and asyncio transports expose a usable grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.CompletionServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.CompletionServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CompletionServiceGrpcTransport,
        transports.CompletionServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none are given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
    ],
)
def test_transport_kind(transport_name):
    """transport.kind reports the name it was looked up by."""
    transport = CompletionServiceClient.get_transport_class(transport_name)(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert transport.kind == transport_name
def test_transport_grpc_default():
    """The client defaults to the gRPC transport when none is specified."""
    # A client should use the gRPC transport by default.
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport,
        transports.CompletionServiceGrpcTransport,
    )
def test_completion_service_base_transport_error():
    """Supplying both credentials and credentials_file raises DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.CompletionServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_completion_service_base_transport():
    """Every abstract method/property on the base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.retail_v2.services.completion_service.transports.CompletionServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.CompletionServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "complete_query",
        "import_completion_data",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
    # Catch all for all remaining methods and properties
    remainder = [
        "kind",
    ]
    for r in remainder:
        with pytest.raises(NotImplementedError):
            getattr(transport, r)()
def test_completion_service_base_transport_with_credentials_file():
    """A credentials_file is loaded with the expected scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.retail_v2.services.completion_service.transports.CompletionServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CompletionServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_completion_service_base_transport_with_adc():
    """The base transport falls back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.retail_v2.services.completion_service.transports.CompletionServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CompletionServiceTransport()
        adc.assert_called_once()
def test_completion_service_auth_adc():
    """The client requests ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        CompletionServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CompletionServiceGrpcTransport,
        transports.CompletionServiceGrpcAsyncIOTransport,
    ],
)
def test_completion_service_transport_auth_adc(transport_class):
    """Transports forward explicit scopes and quota project to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CompletionServiceGrpcTransport,
        transports.CompletionServiceGrpcAsyncIOTransport,
    ],
)
def test_completion_service_transport_auth_gdch_credentials(transport_class):
    """GDCH credentials get their audience set from api_audience (or the host)."""
    host = "https://language.com"
    api_audience_tests = [None, "https://language2.com"]
    api_audience_expect = [host, "https://language2.com"]
    for t, e in zip(api_audience_tests, api_audience_expect):
        with mock.patch.object(google.auth, "default", autospec=True) as adc:
            gdch_mock = mock.MagicMock()
            type(gdch_mock).with_gdch_audience = mock.PropertyMock(
                return_value=gdch_mock
            )
            adc.return_value = (gdch_mock, None)
            transport_class(host=host, api_audience=t)
            gdch_mock.with_gdch_audience.assert_called_once_with(e)
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.CompletionServiceGrpcTransport, grpc_helpers),
        (transports.CompletionServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_completion_service_transport_create_channel(transport_class, grpc_helpers):
    """The transport creates its channel with the expected host, scopes and options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "retail.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="retail.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CompletionServiceGrpcTransport,
        transports.CompletionServiceGrpcAsyncIOTransport,
    ],
)
def test_completion_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS channel credentials come from ssl_channel_credentials if given,
    otherwise from the client_cert_source_for_mtls callback."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_completion_service_host_no_port(transport_name):
    """An api_endpoint without a port gets the default :443 appended."""
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="retail.googleapis.com"
        ),
        transport=transport_name,
    )
    assert client.transport._host == ("retail.googleapis.com:443")
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_completion_service_host_with_port(transport_name):
    """An api_endpoint with an explicit port is preserved verbatim."""
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="retail.googleapis.com:8000"
        ),
        transport=transport_name,
    )
    assert client.transport._host == ("retail.googleapis.com:8000")
def test_completion_service_grpc_transport_channel():
    """An explicitly supplied gRPC channel is adopted by the sync transport.

    Also verifies the host keeps its default port and that no SSL channel
    credentials are recorded when a ready-made channel is passed in.
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CompletionServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): compare against None with `is`, not `==`.
    assert transport._ssl_channel_credentials is None
def test_completion_service_grpc_asyncio_transport_channel():
    """An explicitly supplied asyncio gRPC channel is adopted by the asyncio transport.

    Also verifies the host keeps its default port and that no SSL channel
    credentials are recorded when a ready-made channel is passed in.
    """
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.CompletionServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): compare against None with `is`, not `==`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CompletionServiceGrpcTransport,
        transports.CompletionServiceGrpcAsyncIOTransport,
    ],
)
def test_completion_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source build an mTLS channel
    and emit a DeprecationWarning."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.CompletionServiceGrpcTransport,
        transports.CompletionServiceGrpcAsyncIOTransport,
    ],
)
def test_completion_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint with client_cert_source=None uses ADC-derived
    SSL credentials for the mTLS channel."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_completion_service_grpc_lro_client():
    """The sync transport exposes a cached api-core OperationsClient."""
    client = CompletionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsClient,
    )
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_completion_service_grpc_lro_async_client():
    """The asyncio transport exposes a cached api-core OperationsAsyncClient."""
    client = CompletionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsAsyncClient,
    )
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_catalog_path():
    """catalog_path formats the canonical catalog resource name."""
    project = "squid"
    location = "clam"
    catalog = "whelk"
    expected = "projects/{project}/locations/{location}/catalogs/{catalog}".format(
        project=project,
        location=location,
        catalog=catalog,
    )
    actual = CompletionServiceClient.catalog_path(project, location, catalog)
    assert expected == actual
def test_parse_catalog_path():
    """parse_catalog_path inverts catalog_path back to its components."""
    expected = {
        "project": "octopus",
        "location": "oyster",
        "catalog": "nudibranch",
    }
    path = CompletionServiceClient.catalog_path(**expected)
    # Check that the path construction is reversible.
    actual = CompletionServiceClient.parse_catalog_path(path)
    assert expected == actual
def test_common_billing_account_path():
    """common_billing_account_path formats the billingAccounts resource name."""
    billing_account = "cuttlefish"
    expected = "billingAccounts/{billing_account}".format(
        billing_account=billing_account,
    )
    actual = CompletionServiceClient.common_billing_account_path(billing_account)
    assert expected == actual
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    expected = {
        "billing_account": "mussel",
    }
    path = CompletionServiceClient.common_billing_account_path(**expected)
    # Check that the path construction is reversible.
    actual = CompletionServiceClient.parse_common_billing_account_path(path)
    assert expected == actual
def test_common_folder_path():
    """common_folder_path formats the folders resource name."""
    folder = "winkle"
    expected = "folders/{folder}".format(
        folder=folder,
    )
    actual = CompletionServiceClient.common_folder_path(folder)
    assert expected == actual
def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    expected = {
        "folder": "nautilus",
    }
    path = CompletionServiceClient.common_folder_path(**expected)
    # Check that the path construction is reversible.
    actual = CompletionServiceClient.parse_common_folder_path(path)
    assert expected == actual
def test_common_organization_path():
    """common_organization_path formats the organizations resource name."""
    organization = "scallop"
    expected = "organizations/{organization}".format(
        organization=organization,
    )
    actual = CompletionServiceClient.common_organization_path(organization)
    assert expected == actual
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    expected = {
        "organization": "abalone",
    }
    path = CompletionServiceClient.common_organization_path(**expected)
    # Check that the path construction is reversible.
    actual = CompletionServiceClient.parse_common_organization_path(path)
    assert expected == actual
def test_common_project_path():
    """common_project_path formats the projects resource name."""
    project = "squid"
    expected = "projects/{project}".format(
        project=project,
    )
    actual = CompletionServiceClient.common_project_path(project)
    assert expected == actual
def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    expected = {
        "project": "clam",
    }
    path = CompletionServiceClient.common_project_path(**expected)
    # Check that the path construction is reversible.
    actual = CompletionServiceClient.parse_common_project_path(path)
    assert expected == actual
def test_common_location_path():
    """common_location_path formats the projects/locations resource name."""
    project = "whelk"
    location = "octopus"
    expected = "projects/{project}/locations/{location}".format(
        project=project,
        location=location,
    )
    actual = CompletionServiceClient.common_location_path(project, location)
    assert expected == actual
def test_parse_common_location_path():
    """parse_common_location_path inverts common_location_path."""
    expected = {
        "project": "oyster",
        "location": "nudibranch",
    }
    path = CompletionServiceClient.common_location_path(**expected)
    # Check that the path construction is reversible.
    actual = CompletionServiceClient.parse_common_location_path(path)
    assert expected == actual
def test_client_with_default_client_info():
    """client_info is forwarded to the transport's _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.CompletionServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = CompletionServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.CompletionServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = CompletionServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context manager closes the grpc channel."""
    client = CompletionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context manager closes the underlying channel."""
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = CompletionServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = CompletionServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (CompletionServiceClient, transports.CompletionServiceGrpcTransport),
        (
            CompletionServiceAsyncClient,
            transports.CompletionServiceGrpcAsyncIOTransport,
        ),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is converted to credentials for the transport."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
| {
"content_hash": "f88fbbe1cebe62f3f0a2d1140b3f7a78",
"timestamp": "",
"source": "github",
"line_count": 1752,
"max_line_length": 121,
"avg_line_length": 36.0513698630137,
"alnum_prop": 0.6479053861499002,
"repo_name": "googleapis/python-retail",
"id": "f2db9d032de402290ac0d8ea4823ffdbda96e98b",
"size": "63762",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/gapic/retail_v2/test_completion_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
"""Workbook is the top-level container for all document information."""
from openpyxl.compat import deprecated
from openpyxl.compat import OrderedDict
from openpyxl.worksheet import Worksheet
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.utils.datetime import CALENDAR_WINDOWS_1900
from openpyxl.utils.exceptions import ReadOnlyWorkbookException
from openpyxl.writer.write_only import WriteOnlyWorksheet, save_dump
from openpyxl.writer.excel import save_workbook
from openpyxl.styles.cell_style import StyleArray
from openpyxl.styles.named_styles import NamedStyle
from openpyxl.chartsheet import Chartsheet
from .defined_name import DefinedName, DefinedNameList
from openpyxl.packaging.core import DocumentProperties
from openpyxl.packaging.relationship import RelationshipList
from .protection import DocumentSecurity
class Workbook(object):
"""Workbook is the container for all other parts of the document."""
_read_only = False
_data_only = False
def __init__(self,
write_only=False,
):
self._sheets = []
self._active_sheet_index = 0
self.defined_names = DefinedNameList()
self._external_links = []
self.properties = DocumentProperties()
self.security = DocumentSecurity()
self.__write_only = write_only
self.shared_strings = IndexedList()
self._setup_styles()
self.loaded_theme = None
self.vba_archive = None
self.is_template = False
self._differential_styles = []
self._drawings = []
self._charts = []
self._images = []
self.code_name = None
self.excel_base_date = CALENDAR_WINDOWS_1900
self.encoding = "utf-8"
if not self.write_only:
self._sheets.append(Worksheet(self))
self.rels = RelationshipList()
def _setup_styles(self):
"""Bootstrap styles"""
from openpyxl.styles.alignment import Alignment
from openpyxl.styles.borders import DEFAULT_BORDER
from openpyxl.styles.fills import DEFAULT_EMPTY_FILL, DEFAULT_GRAY_FILL
from openpyxl.styles.fonts import DEFAULT_FONT
from openpyxl.styles.protection import Protection
from openpyxl.styles.colors import COLOR_INDEX
from openpyxl.styles.named_styles import NamedStyles
self._fonts = IndexedList()
self._fonts.add(DEFAULT_FONT)
self._alignments = IndexedList([Alignment()])
self._borders = IndexedList()
self._borders.add(DEFAULT_BORDER)
self._fills = IndexedList()
self._fills.add(DEFAULT_EMPTY_FILL)
self._fills.add(DEFAULT_GRAY_FILL)
self._number_formats = IndexedList()
self._protections = IndexedList([Protection()])
self._colors = COLOR_INDEX
self._cell_styles = IndexedList([StyleArray()])
self._named_styles = NamedStyles([NamedStyle(font=DEFAULT_FONT, builtinId=0)])
@property
def read_only(self):
return self._read_only
@property
def data_only(self):
return self._data_only
@property
def write_only(self):
return self.__write_only
@deprecated("Use the .active property")
def get_active_sheet(self):
"""Returns the current active sheet."""
return self.active
@property
def active(self):
"""Get the currently active sheet"""
return self._sheets[self._active_sheet_index]
@active.setter
def active(self, value):
"""Set the active sheet"""
self._active_sheet_index = value
def create_sheet(self, title=None, index=None):
"""Create a worksheet (at an optional index).
:param title: optional title of the sheet
:type tile: unicode
:param index: optional position at which the sheet will be inserted
:type index: int
"""
if self.read_only:
raise ReadOnlyWorkbookException('Cannot create new sheet in a read-only workbook')
if self.write_only :
new_ws = WriteOnlyWorksheet(parent_workbook=self, title=title)
else:
new_ws = Worksheet(parent=self, title=title)
self._add_sheet(sheet=new_ws, index=index)
return new_ws
def _add_sheet(self, sheet, index=None):
"""Add an worksheet (at an optional index)."""
if not isinstance(sheet, (Worksheet, Chartsheet)):
raise TypeError("Cannot be added to a workbook")
if sheet.parent != self:
raise ValueError("You cannot add worksheets from another workbook.")
if index is None:
self._sheets.append(sheet)
else:
self._sheets.insert(index, sheet)
def remove_sheet(self, worksheet):
"""Remove a worksheet from this workbook."""
self._sheets.remove(worksheet)
def create_chartsheet(self, title=None, index=None):
if self.read_only:
raise ReadOnlyWorkbookException("Cannot create new sheet in a read-only workbook")
cs = Chartsheet(parent=self, title=title)
self._add_sheet(cs, index)
return cs
@deprecated("Use wb[sheetname]")
def get_sheet_by_name(self, name):
"""Returns a worksheet by its name.
:param name: the name of the worksheet to look for
:type name: string
"""
return self[name]
def __contains__(self, key):
return key in set(self.sheetnames)
def get_index(self, worksheet):
"""Return the index of the worksheet."""
return self.worksheets.index(worksheet)
def __getitem__(self, key):
"""Returns a worksheet by its name.
:param name: the name of the worksheet to look for
:type name: string
"""
for sheet in self.worksheets:
if sheet.title == key:
return sheet
raise KeyError("Worksheet {0} does not exist.".format(key))
def __delitem__(self, key):
sheet = self[key]
self.remove_sheet(sheet)
def __iter__(self):
return iter(self.worksheets)
@deprecated("Use wb.sheetnames")
def get_sheet_names(self):
return self.sheetnames
@property
def worksheets(self):
return [s for s in self._sheets if isinstance(s, Worksheet)]
@property
def chartsheets(self):
return [s for s in self._sheets if isinstance(s, Chartsheet)]
@property
def sheetnames(self):
"""Returns the list of the names of worksheets in the workbook.
Names are returned in the worksheets order.
:rtype: list of strings
"""
return [s.title for s in self._sheets]
def create_named_range(self, name, worksheet=None, value=None, scope=None):
"""Create a new named_range on a worksheet"""
defn = DefinedName(name=name, localSheetId=scope)
if worksheet is not None:
defn.value = "{0}!{1}".format(worksheet.title, value)
else:
defn.value = value
self.defined_names.append(defn)
@deprecated("Use workbook.defined_names.definedName")
def get_named_ranges(self):
"""Return all named ranges"""
return self.defined_names.definedName
@deprecated("Use workbook.defined_names.append")
def add_named_range(self, named_range):
"""Add an existing named_range to the list of named_ranges."""
self.defined_names.append(named_range)
    @deprecated("Use workbook.defined_names[name]")
    def get_named_range(self, name):
        """Return the range specified by name (deprecated accessor)."""
        return self.defined_names[name]
    @deprecated("Use del workbook.defined_names[name]")
    def remove_named_range(self, named_range):
        """Remove a named_range from this workbook (deprecated mutator)."""
        del self.defined_names[named_range]
def save(self, filename):
"""Save the current workbook under the given `filename`.
Use this function instead of using an `ExcelWriter`.
.. warning::
When creating your workbook using `write_only` set to True,
you will only be able to call this function once. Subsequents attempts to
modify or save the file will raise an :class:`openpyxl.shared.exc.WorkbookAlreadySaved` exception.
"""
if self.read_only:
raise TypeError("""Workbook is read-only""")
if self.write_only:
save_dump(self, filename)
else:
save_workbook(self, filename)
@property
def style_names(self):
"""
List of named styles
"""
return [s.name for s in self._named_styles]
| {
"content_hash": "cc07be117fd0067baa33b633e6962724",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 110,
"avg_line_length": 30.273356401384085,
"alnum_prop": 0.63481540747514,
"repo_name": "aragos/tichu-tournament",
"id": "a21d8f3ed8deb3a052de4faa09e18ce72bf43dec",
"size": "8749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/openpyxl/workbook/workbook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "8008"
},
{
"name": "CSS",
"bytes": "1695"
},
{
"name": "HTML",
"bytes": "63890"
},
{
"name": "JavaScript",
"bytes": "320642"
},
{
"name": "Python",
"bytes": "3432940"
}
],
"symlink_target": ""
} |
API_UTIL_NAMESPACE = 'json_schema_compiler::util'


class UtilCCHelper(object):
  """A util class that generates code that uses
  tools/json_schema_compiler/util.cc.
  """
  def __init__(self, type_manager):
    self._type_manager = type_manager

  def PopulateArrayFromDictionary(self, array_prop, src, name, dst):
    """Generates code to get an array from a src.name into dst.

    src: DictionaryValue*
    dst: std::vector or scoped_ptr<std::vector>
    """
    prop = array_prop.item_type
    sub = {
      'namespace': API_UTIL_NAMESPACE,
      'name': name,
      'src': src,
      'dst': dst,
      # Bug fix: a trailing comma outside the dict literal previously made
      # this value a 1-tuple ("sub['type'] = ...GetCppType(prop),").
      # The key is unused by the templates below but is kept for
      # consistency with the other helpers.
      'type': self._type_manager.GetCppType(prop),
    }
    if array_prop.optional:
      val = ('%(namespace)s::PopulateOptionalArrayFromDictionary'
             '(*%(src)s, "%(name)s", &%(dst)s)')
    else:
      val = ('%(namespace)s::PopulateArrayFromDictionary'
             '(*%(src)s, "%(name)s", &%(dst)s)')
    return val % sub

  def PopulateArrayFromList(self, array_prop, src, dst, optional):
    """Generates code to get an array from src into dst.

    src: ListValue*
    dst: std::vector or scoped_ptr<std::vector>
    """
    prop = array_prop.item_type
    sub = {
      'namespace': API_UTIL_NAMESPACE,
      'src': src,
      'dst': dst,
      'type': self._type_manager.GetCppType(prop),
    }
    if optional:
      val = '%(namespace)s::PopulateOptionalArrayFromList(*%(src)s, &%(dst)s)'
    else:
      val = '%(namespace)s::PopulateArrayFromList(*%(src)s, &%(dst)s)'
    return val % sub

  def CreateValueFromArray(self, array_prop, src, optional):
    """Generates code to create a scoped_ptr<Value> from the array at src.

    src: std::vector or scoped_ptr<std::vector>
    """
    prop = array_prop.item_type
    sub = {
      'namespace': API_UTIL_NAMESPACE,
      'src': src,
      'type': self._type_manager.GetCppType(prop),
    }
    if optional:
      val = '%(namespace)s::CreateValueFromOptionalArray(%(src)s)'
    else:
      val = '%(namespace)s::CreateValueFromArray(%(src)s)'
    return val % sub

  def GetIncludePath(self):
    """Returns the #include line for the util helper header."""
    return '#include "tools/json_schema_compiler/util.h"'
| {
"content_hash": "49f23aaa620b6e72a1f2b9220f6321ea",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 28.706666666666667,
"alnum_prop": 0.6079888527635857,
"repo_name": "plxaye/chromium",
"id": "8d490ba5ba345877f1aea32a1765a5b3e9475985",
"size": "2320",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "src/tools/json_schema_compiler/util_cc_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1176633"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "75195981"
},
{
"name": "C#",
"bytes": "36335"
},
{
"name": "C++",
"bytes": "172360762"
},
{
"name": "CSS",
"bytes": "740648"
},
{
"name": "Dart",
"bytes": "12620"
},
{
"name": "Emacs Lisp",
"bytes": "12454"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3671513"
},
{
"name": "JavaScript",
"bytes": "16204541"
},
{
"name": "Max",
"bytes": "39069"
},
{
"name": "Mercury",
"bytes": "10299"
},
{
"name": "Objective-C",
"bytes": "1133728"
},
{
"name": "Objective-C++",
"bytes": "5771619"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "166372"
},
{
"name": "Python",
"bytes": "11650532"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3641"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Ruby",
"bytes": "14575"
},
{
"name": "Shell",
"bytes": "1426780"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "TeX",
"bytes": "43554"
},
{
"name": "VimL",
"bytes": "4953"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
} |
"""
Avalam agent.
Copyright (C) 2015, <<<<<<<<<<< YOUR NAMES HERE >>>>>>>>>>>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import avalam
import minimax
################
# My Variables #
################
# Board coordinates are (row, column); each entry is a (d_row, d_col) offset.
# Order: TOPLEFT UP TOPRIGHT LEFT RIGHT DOWNLEFT DOWN DOWNRIGHT
directions = [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]
##############
# My Methods #
##############
def inBounds(board, pos):
    """True if *pos* = (row, column) lies inside the board matrix.

    Uses Python's chained comparisons instead of the original pair of
    ``and``-joined range tests.
    """
    row, col = pos
    return 0 <= row < len(board.m) and 0 <= col < len(board.m[0])
def getIntegerSign(value):
    """Return the sign of *value*: 1 if positive, -1 if negative, 0 if zero.

    The parameter was renamed from ``int``, which shadowed the builtin type;
    all call sites in this module pass the argument positionally.
    """
    if value > 0:
        return 1
    elif value < 0:
        return -1
    return 0
def calculate_maxMinMaxDepth(steps, time_left):
    """Pick the minimax search depth from the step counter and remaining time.

    Returns a shallow depth of 2 when the remaining time is unknown or
    running low (< 300), and otherwise deepens the search by one level
    for every 20 steps played.

    The original tested ``time_left == None`` and ``time_left < 300`` in
    two separate branches that both returned 2; they are merged here and
    the identity test uses ``is None``.
    """
    if time_left is None or time_left < 300:
        return 2
    # steps is a non-negative move counter, so floor division matches the
    # original int(2 + steps / 20) truncation.
    return 2 + steps // 20
# Precondition: the two towers are adjacent on the board.
def couldTowerXJumpOverTowerY(X, Y):
    """True when two adjacent towers could legally be merged.

    A merge is impossible when either position is empty (height 0) or the
    combined height would exceed 5.
    """
    return X != 0 and Y != 0 and abs(X) + abs(Y) <= 5
def isTowerAIsolatedFromEnnemyThroughTowerB(board, tower, posX, posY):
    """Check whether the tower at (posX, posY) shields *tower* from the enemy.

    Returns False as soon as some differently-signed neighbour of
    (posX, posY) could still take part in a merge involving all three
    heights, i.e. their combined absolute height stays within
    ``board.max_height``; otherwise returns True.
    """
    blocker = board.m[posX][posY]
    blocker_sign = getIntegerSign(blocker)
    for offset in directions:
        checkX = posX + offset[0]
        checkY = posY + offset[1]
        if not inBounds(board, (checkX, checkY)):
            continue
        neighbor = board.m[checkX][checkY]
        if getIntegerSign(neighbor) == blocker_sign:
            continue
        if abs(neighbor) + abs(blocker) + abs(tower) <= board.max_height:
            return False
    return True
def isTowerIsolated(board, posX, posY):
    """Return True when the tower at (posX, posY) is effectively out of play.

    The tower counts as isolated when every adjacent tower that could merge
    with it is friendly AND that friendly neighbour is itself unreachable
    by the enemy (see isTowerAIsolatedFromEnnemyThroughTowerB).
    """
    # Height/colour of the tower under inspection.
    tower = board.m[posX][posY]
    #See if the tower is isolated
    for dir in directions:
        testX = posX + dir[0]
        testY = posY + dir[1]
        # Only on-board neighbours small enough to merge with us matter.
        if inBounds(board, (testX, testY)) and couldTowerXJumpOverTowerY(board.m[testX][testY], tower):
            # A mergeable neighbour breaks isolation unless it is friendly
            # and itself walled off from the enemy.
            if not(getIntegerSign(board.m[testX][testY]) == getIntegerSign(tower) and isTowerAIsolatedFromEnnemyThroughTowerB(board, tower, testX, testY)):
                return False
    return True
# (Translated from the original French note: "likes allies that complete it,
#  hates enemies that complete it" — kept as a hint about the heuristic intent.)
def calculateNeighborScore(board, posX, posY):
    """Sum the signs of all adjacent towers sharing this tower's colour.

    The result is positive around a player-1 tower with friendly support and
    negative around a supported player-2 tower.
    """
    color = getIntegerSign(board.m[posX][posY])
    total = 0
    for step in directions:
        neighborX = posX + step[0]
        neighborY = posY + step[1]
        if not inBounds(board, (neighborX, neighborY)):
            continue
        neighbor = board.m[neighborX][neighborY]
        if getIntegerSign(neighbor) == color:
            total += getIntegerSign(neighbor)
    return total
def calculate_accurate_score(board):
    """Heuristic evaluation of *board*.

    Positive values favour the player with positive towers, negative values
    the other player (sign convention follows getIntegerSign).
    """
    score = 0
    #Score for possession of undisputable tower
    for i in range(board.rows):
        for j in range(board.columns):
            #Empty position don't count in the score
            if board.m[i][j] == 0:
                pass
            #Score for possession of undisputable max level tower
            elif board.m[i][j] == -board.max_height or board.m[i][j] == board.max_height:
                score += getIntegerSign(board.m[i][j])*12 # 12 points per max-height tower (original comment said 15; the code awards 12)
                #Score for possesion of an undisputable isolated tower
                if(isTowerIsolated(board, i, j)):
                    score += getIntegerSign(board.m[i][j])*10 # 10 point for each isolated tower
                #Score for having an ennemy tower surrounded by only tower of yours that can jump on it
                #Basic score for having more tower than the opponnent
            else:
                score += calculateNeighborScore(board, i, j)
    return score
############
# My Agent #
############
class Agent:
    """Avalam agent wiring the module's evaluation heuristics into the
    shared minimax driver."""

    def __init__(self, name="Basic Agent"):
        self.name = name

    def successors(self, state):
        """Yield (action, successor_state) pairs for every legal action.

        A state is a triplet (board, player, step); each successor swaps the
        player to move and advances the step counter by one.
        """
        board, player, step = state
        for action in board.get_actions():
            successor = board.clone()
            successor.play_action(action)
            yield (action, (successor, -player, step + 1))

    def cutoff(self, state, depth):
        """Stop searching once the depth budget is spent or the game is over."""
        board, _player, _step = state
        return depth >= self.maxMinMaxDepth or board.is_finished()

    def evaluate(self, state):
        """Heuristic utility of *state*'s board position."""
        board, _player, _step = state
        return calculate_accurate_score(board)

    def play(self, board, player, step, time_left):
        """Return the action chosen by minimax for the given position."""
        self.time_left = time_left
        self.maxMinMaxDepth = calculate_maxMinMaxDepth(step, time_left)
        # Normalise the percepts so the agent always sees itself as player 1.
        perception = avalam.Board(board.get_percepts(player == avalam.PLAYER2))
        return minimax.search((perception, player, step), self)
# Entry point: hand an instance of this agent to the avalam framework's
# command-line runner.
if __name__ == "__main__":
    avalam.agent_main(Agent())
| {
"content_hash": "0da8b703c1fcdce7e37823d9f5d65881",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 156,
"avg_line_length": 36.30813953488372,
"alnum_prop": 0.6267413931144916,
"repo_name": "fthuin/artificial-intelligence",
"id": "3ebde2de94360c0572fd7c7a21fd8be8d6097f2a",
"size": "6268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignment3/Code/agent_simple_working_evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "1692"
},
{
"name": "JavaScript",
"bytes": "24008"
},
{
"name": "Makefile",
"bytes": "5668"
},
{
"name": "Python",
"bytes": "478895"
},
{
"name": "Shell",
"bytes": "6361"
},
{
"name": "TeX",
"bytes": "43377"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from itsdangerous import TimestampSigner
import config

# Module-level Flask application configured from the ``config`` module.
app = Flask(__name__)
app.config.from_object('config')
# Shared SQLAlchemy handle used by the model/controller modules below.
db = SQLAlchemy(app)
# Signer used to issue and verify timestamped auth tokens.
token_signer = TimestampSigner(config.SECRET_KEY)

# NOTE: these imports must come after ``app``/``db`` are created — the
# controller modules import them from this package (circular-import order).
from app.mod_auth.controller import mod_auth
from app.mod_bucketlists.controller import mod_bucketlists
app.register_blueprint(mod_bucketlists)
app.register_blueprint(mod_auth)
| {
"content_hash": "e9320a23b4bacf683b30016e278ee3b8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 23.77777777777778,
"alnum_prop": 0.8084112149532711,
"repo_name": "andela-brotich/CP2-bucket-list-api",
"id": "39de9d451638178b003b5431162fea5f238fd339",
"size": "428",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "35153"
}
],
"symlink_target": ""
} |
from flask import redirect, url_for
from flask_admin.base import AdminIndexView, expose
from flask_admin.contrib.sqla import ModelView
from flask_admin.form import SecureForm
from wtforms.validators import IPAddress, NumberRange
class GroupView(ModelView):
    """Flask-Admin view for host groups: searchable by name, CSRF-protected forms."""
    form_base_class = SecureForm
    column_searchable_list = ['name']
    column_labels = {
        'name': 'Group Name',
        'description': 'Description',
    }
class HostView(ModelView):
    """Flask-Admin view for monitored hosts.

    Validates IP addresses (v4/v6) and TCP port range on the edit form;
    hides the description column from the list view.
    """
    form_base_class = SecureForm
    can_view_details = True
    column_searchable_list = ['ip_address', 'port']
    column_filters = ['is_active']
    column_exclude_list = ('description',)
    column_labels = {
        'ip_address': 'IP Address(Support IPv4 and IPv6)',
        'ip_address_alias': 'IP Alternative Name',
        'port': 'Port(Support TCP only)',
        'port_alias': 'Port Alternative Name',
        'description': 'Description',
        'is_active': 'Is It Active?',
    }
    form_args = {
        'ip_address': {'validators': [IPAddress(ipv4=True, ipv6=True)]},
        'port': {'validators': [NumberRange(min=0, max=65535, message='Port must be in range 0-65535')]},
    }
class AdminView(AdminIndexView):
    # Custom admin index: skip the default dashboard and land on the host list.
    @expose('/')
    def index(self):
        """Redirect the admin root straight to the HostView list page."""
        return redirect(url_for('host.index_view'))
| {
"content_hash": "987129dd2dabd22d3a39de0743483d65",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 118,
"avg_line_length": 39.3235294117647,
"alnum_prop": 0.6305160807778609,
"repo_name": "luuquangvu/fastmonitor",
"id": "e85656ad5138c2ebfc58d92b9700a3cc5a0e1639",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "21570"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "7889"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import numpy as np
from . import BPoly, PPoly
from .polyint import _isscalar
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator"]
class PchipInterpolator(object):
    """PCHIP 1-d monotonic cubic interpolation

    x and y are arrays of values used to approximate some function f,
    with ``y = f(x)``. The interpolant uses monotonic cubic splines
    to find the value of new points. (PCHIP stands for Piecewise Cubic
    Hermite Interpolating Polynomial).

    Parameters
    ----------
    x : ndarray
        A 1-D array of monotonically increasing real values. `x` cannot
        include duplicate values (otherwise f is overspecified)
    y : ndarray
        A 1-D array of real values. `y`'s length along the interpolation
        axis must be equal to the length of `x`. If N-D array, use axis
        parameter to select correct axis.
    axis : int, optional
        Axis in the y array corresponding to the x-coordinate values.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs.

    Methods
    -------
    __call__
    derivative

    See Also
    --------
    Akima1DInterpolator

    Notes
    -----
    The first derivatives are guaranteed to be continuous, but the second
    derivatives may jump at x_k.
    Preserves monotonicity in the interpolation data and does not overshoot
    if the data is not smooth.
    Determines the derivatives at the points x_k, d_k, by using PCHIP algorithm:
    Let m_k be the slope of the kth segment (between k and k+1)
    If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
    else use weighted harmonic mean:
    w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
    1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
    where h_k is the spacing between x_k and x_{k+1}.
    """
    def __init__(self, x, y, axis=0, extrapolate=None):
        # Promote integer/bool inputs to float for exact spline arithmetic.
        x = np.asarray(x)
        if not np.issubdtype(x.dtype, np.inexact):
            x = x.astype(float)
        y = np.asarray(y)
        if not np.issubdtype(y.dtype, np.inexact):
            y = y.astype(float)
        axis = axis % y.ndim
        # Broadcast x against y's trailing dims and bring the interpolation
        # axis to the front for _find_derivatives.
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        yp = np.rollaxis(y, axis)
        dk = self._find_derivatives(xp, yp)
        data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
        # Evaluation is delegated to a Bernstein-basis piecewise polynomial
        # built from (value, derivative) pairs at each breakpoint.
        self._bpoly = BPoly.from_derivatives(x, data, orders=None,
                                             extrapolate=extrapolate)
        self.axis = axis
    def __call__(self, x, der=0, extrapolate=None):
        """
        Evaluate the PCHIP interpolant or its derivative.

        Parameters
        ----------
        x : array-like
            Points to evaluate the interpolant at.
        der : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        y : array-like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.
        """
        out = self._bpoly(x, der, extrapolate)
        return self._reshaper(x, out)
    def derivative(self, der=1):
        """
        Construct a piecewise polynomial representing the derivative.

        Parameters
        ----------
        der : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the antiderivative is returned.

        Returns
        -------
        Piecewise polynomial of order k2 = k - der representing the derivative
        of this polynomial.
        """
        # Build the derivative without re-running __init__ (no data to refit).
        t = object.__new__(self.__class__)
        t.axis = self.axis
        t._bpoly = self._bpoly.derivative(der)
        return t
    def roots(self):
        """
        Return the roots of the interpolated function.
        """
        # Root finding is implemented on the power basis, so convert first.
        return (PPoly.from_bernstein_basis(self._bpoly)).roots()
    def _reshaper(self, x, out):
        # Rotate the interpolation axis back to its original position so the
        # output shape matches y with the x-shape substituted in.
        x = np.asarray(x)
        l = x.ndim
        transp = (tuple(range(l, l+self.axis)) + tuple(range(l)) +
                  tuple(range(l+self.axis, out.ndim)))
        return out.transpose(transp)
    @staticmethod
    def _edge_case(m0, d1, out):
        # Endpoint rule: 1/d_0 = 1/m_0 + 1/d_1; d_0 stays 0 wherever either
        # term vanishes (those entries are simply not assigned).
        m0 = np.atleast_1d(m0)
        d1 = np.atleast_1d(d1)
        mask = (d1 != 0) & (m0 != 0)
        out[mask] = 1.0/(1.0/m0[mask]+1.0/d1[mask])
    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, by using
        # PCHIP algorithm is:
        # We choose the derivatives at the point x_k by
        # Let m_k be the slope of the kth segment (between k and k+1)
        # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
        # else use weighted harmonic mean:
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
        # where h_k is the spacing between x_k and x_{k+1}
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:,None]
            y = y[:,None]
        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk
        smk = np.sign(mk)
        condition = ((smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0))
        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]
        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        with np.errstate(divide='ignore'):
            whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])
        dk = np.zeros_like(y)
        dk[1:-1][condition] = 0.0
        dk[1:-1][~condition] = 1.0/whmean[~condition]
        # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless
        # one of d_1 or m_0 is 0, then choose d_0 = 0
        PchipInterpolator._edge_case(mk[0],dk[1], dk[0])
        PchipInterpolator._edge_case(mk[-1],dk[-2], dk[-1])
        return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience function for pchip interpolation.

    Builds a `PchipInterpolator` for the data ``yi = f(xi)`` and evaluates
    it (or its derivatives) at *x*. See `PchipInterpolator` for details.

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        A 1-D array of real values. `yi`'s length along the interpolation
        axis must be equal to the length of `xi`. If N-D array, use axis
        parameter to select correct axis.
    x : scalar or array_like
        Of length M.
    der : integer or list
        How many derivatives to extract; None for all potentially
        nonzero derivatives (that is a number equal to the number
        of points), or a list of derivatives to extract. This number
        includes the function value as 0th derivative.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.

    See Also
    --------
    PchipInterpolator

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R,
    """
    interpolator = PchipInterpolator(xi, yi, axis=axis)
    if der == 0:
        return interpolator(x)
    if _isscalar(der):
        return interpolator(x, der=der)
    return [interpolator(x, nu) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
    """
    Akima interpolator

    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
    method by Akima uses a continuously differentiable sub-spline built from
    piecewise cubic polynomials. The resultant curve passes through the given
    data points and will appear smooth and natural.

    Parameters
    ----------
    x : ndarray, shape (m, )
        1-D array of monotonically increasing real values.
    y : ndarray, shape (m, ...)
        N-D array of real values. The length of *y* along the first axis must
        be equal to the length of *x*. Interpolation is always performed
        along that first axis (this implementation takes no ``axis``
        argument).

    Methods
    -------
    __call__

    See Also
    --------
    PchipInterpolator

    Notes
    -----
    .. versionadded:: 0.14
    Use only for precise data, as the fitted curve passes through the given
    points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points for purposes of plotting.

    References
    ----------
    [1] A new method of interpolation and smooth curve fitting based
    on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
    589-602.
    """
    def __init__(self, x, y):
        # Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
        # http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
        # NOTE(review): equal consecutive x values pass this check despite the
        # "strictly ascending" message — confirm whether duplicates are intended.
        if np.any(np.diff(x) < 0.):
            raise ValueError("x must be strictly ascending")
        if x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if x.size != y.shape[0]:
            raise ValueError("x.shape must equal y.shape[0]")
        # determine slopes between breakpoints; two extra slots are reserved
        # at each end for the extrapolated slopes below
        m = np.empty((x.size + 3, ) + y.shape[1:])
        dx = np.diff(x)
        dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
        m[2:-2] = np.diff(y, axis=0) / dx
        # add two additional points on the left ...
        m[1] = 2. * m[2] - m[3]
        m[0] = 2. * m[1] - m[2]
        # ... and on the right
        m[-2] = 2. * m[-3] - m[-4]
        m[-1] = 2. * m[-2] - m[-3]
        # if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
        # This is the fill value:
        t = .5 * (m[3:] + m[:-3])
        # get the denominator of the slope t
        dm = np.abs(np.diff(m, axis=0))
        f1 = dm[2:]
        f2 = dm[:-2]
        f12 = f1 + f2
        # These are the indices where the slope at breakpoint is defined:
        id_ = np.nonzero(f12 > 1e-9 * np.max(f12))[0]
        # set the slope at breakpoint
        t[id_] = (f1[id_] * m[id_ + 1] + f2[id_] * m[id_ + 2]) / f12[id_]
        # calculate the higher order coefficients
        c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
        d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
        coeff = np.zeros((4, x.size - 1) + y.shape[1:])
        coeff[3] = y[:-1]
        coeff[2] = t[:-1]
        coeff[1] = c
        coeff[0] = d
        super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
    def extend(self):
        # Extending would invalidate the locally-fitted slopes.
        raise NotImplementedError("Extending a 1D Akima interpolator is not "
                                  "yet implemented")
    # These are inherited from PPoly, but they do not produce an Akima
    # interpolator. Hence stub them out.
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        raise NotImplementedError("This method does not make sense for "
                                  "an Akima interpolator.")
| {
"content_hash": "67d48f50bfee53a171b3651a77565c9f",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 85,
"avg_line_length": 33.12893982808023,
"alnum_prop": 0.5708354955889985,
"repo_name": "witcxc/scipy",
"id": "dbd3f15c413fc45ac5b14a1585b7a64ac5c82988",
"size": "11562",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scipy/interpolate/_monotone.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4158513"
},
{
"name": "C++",
"bytes": "3625507"
},
{
"name": "CSS",
"bytes": "2624"
},
{
"name": "FORTRAN",
"bytes": "5559392"
},
{
"name": "Makefile",
"bytes": "10154"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "8065099"
},
{
"name": "Shell",
"bytes": "1580"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from sqf.base_type import ParserType
from sqf.keywords import BINARY_OPERATORS, UNARY_OPERATORS, OP_COMPARISON, PREPROCESSORS_UNARY
class EndToken:
    """Sentinel marking the end of the token stream for the Pratt parser."""
    pass
def _normalize(item):
return str(item).lower()
def nud(token, parser):
    """Null denotation: parse *token* when it appears in prefix position.

    Unary operators and unary preprocessor keywords absorb one operand;
    ``#define`` absorbs name, argument list and body; upper-case
    identifiers followed by '(' are treated as macro calls. Anything else
    is returned unchanged as an atom.
    """
    lowered = _normalize(token)
    literal = str(token)
    if lowered in UNARY_OPERATORS or literal in PREPROCESSORS_UNARY:
        return parser.container([token, parser.expression(100)])
    if literal == '#define':
        # Evaluated left-to-right: macro name, argument list, body.
        parts = [token,
                 parser.expression(100),
                 parser.expression(100),
                 parser.expression(100)]
        return parser.container(parts)
    if literal.isupper() and str(parser.next)[0] == '(':
        # heuristic to catch global defines with arguments
        return parser.container([token, parser.expression(100)])
    return token
def get_lbp(token):
    """Left binding power of *token* for the Pratt parser below.

    Higher values bind tighter; 0 terminates the expression loop
    (EndToken). Unknown tokens get a small non-zero power (0.1) so they
    still participate in juxtaposition.
    """
    n_token = _normalize(token)
    if token == EndToken:
        return 0
    elif n_token == '=':
        return 0.8
    elif n_token == 'private':
        return 0.9
    elif n_token in ('||', 'or'):
        return 1
    elif n_token in {'&&', 'and'}:
        return 2
    elif n_token in set(x.value for x in OP_COMPARISON):
        return 3
    elif n_token in {'*', '/', '%', 'mod', 'atan2'}:
        return 7
    elif n_token in {'+', 'max', 'min', '-'}:
        return 6
    elif n_token == 'else':
        return 5
    elif n_token == '^':  # it is a binary, but it has higher precedence
        return 8
    elif n_token in BINARY_OPERATORS:
        return 4
    elif n_token == '#':
        return 9
    elif n_token in UNARY_OPERATORS:
        return 10
    else:
        return 0.1
class Parser:
    """Pratt (top-down operator-precedence) parser over a flat token list.

    ``ParserType`` tokens (layout/comment markers) are not parsed: the
    iterator side-channels them into ``self.cumulator`` so they can be
    re-attached to the surrounding container, preserving the original text.
    """
    def __init__(self, container):
        # ``container`` is the factory used to build every grouped node.
        self.next = None
        self.container = container
        self.tokens = []
        self.cumulator = []
        self.iterator = self._iterator()
    def _iterator(self):
        # Yield only "real" tokens; a trailing EndToken marks exhaustion.
        for token in self.tokens:
            if isinstance(token, ParserType):
                self.cumulator.append(token)
            else:
                yield token
        yield EndToken
    def expression(self, rbp=0):
        # Standard Pratt loop: parse a prefix via nud(), then greedily absorb
        # operators whose left binding power exceeds ``rbp``.
        current = self.next
        cum_prefix = self.cumulator
        self.cumulator = []
        try:
            self.next = next(self.iterator)
        except StopIteration:
            # Stream exhausted: return the buffered layout tokens directly.
            if len(cum_prefix + self.cumulator) == 1:
                return (cum_prefix + self.cumulator)[0]
            return self.container(cum_prefix + self.cumulator)
        left = nud(current, self)
        if cum_prefix + self.cumulator:
            # Re-attach layout tokens collected around the prefix expression.
            left = self.container(cum_prefix + [left] + self.cumulator)
            self.cumulator = []
        while rbp < get_lbp(self.next):
            current = self.next
            self.next = next(self.iterator)
            if self.next is EndToken:
                # Trailing operator with no right operand: keep it as-is.
                return self.container([left, current])
            right = self.expression(get_lbp(current))
            left = self.container([left, current, right])
        return left
    def parse(self, tokens):
        # A single token needs no tree.
        if len(tokens) == 1:
            return tokens[0]
        self.tokens = tokens
        self.iterator = self._iterator()
        self.next = next(self.iterator)
        return self.expression()
def parse_exp(tokens, container=list):
    """Parse *tokens* into a nested expression tree built from *container*."""
    parser = Parser(container)
    return parser.parse(tokens)
| {
"content_hash": "2207a684f38d37bd97cddfeb6fbee38a",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 94,
"avg_line_length": 28.30252100840336,
"alnum_prop": 0.5712589073634204,
"repo_name": "LordGolias/sqf",
"id": "a670256bfad27eac99243771fa81b768fc204eb2",
"size": "3368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqf/parser_exp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "450443"
},
{
"name": "SQF",
"bytes": "32"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add a uniqueness constraint to ApiKey.key."""
    def forwards(self, orm):
        # Adding unique constraint on 'ApiKey', fields ['key']
        db.create_unique(u'oscarapi_apikey', ['key'])
    def backwards(self, orm):
        # Removing unique constraint on 'ApiKey', fields ['key']
        db.delete_unique(u'oscarapi_apikey', ['key'])
    # Frozen ORM snapshot South uses to reconstruct the model at migration time.
    models = {
        u'oscarapi.apikey': {
            'Meta': {'object_name': 'ApiKey'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        }
    }
    complete_apps = ['oscarapi']
"content_hash": "495d6d1d44d75863d333cb4a3a8b28ea",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 101,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.6059479553903345,
"repo_name": "lijoantony/django-oscar-api",
"id": "176c04adb061e2666fc3af2b36490ef87fca6abc",
"size": "831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oscarapi/south_migrations/0002_auto__add_unique_apikey_key.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "145163"
}
],
"symlink_target": ""
} |
from .curver import Curver | {
"content_hash": "70cf58763c559c2d24f1261faf67e92e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 26,
"avg_line_length": 26,
"alnum_prop": 0.8461538461538461,
"repo_name": "kieranrcampbell/curver-python",
"id": "95cddacdb34c9ead3607d73248574e316a5d0bbf",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curver/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5194"
}
],
"symlink_target": ""
} |
"""
Polygons.py
"""
import gdal, ogr
from gdalconst import *
import argparse
from osgeo import osr
def getArgs():
	"""Parse command-line arguments for the raster-to-polygons conversion.

	Returns an argparse.Namespace with ``input``, ``band``, ``output``
	and ``verbose`` attributes.
	"""
	parser = argparse.ArgumentParser(
		description = "Convert raster to polygons"
	)
	parser.add_argument(
		"-i",
		"--input",
		type = str,
		required = True,
		help = "input raster file"
	)
	parser.add_argument(
		"-b",
		"--band",
		# Bug fix: band indexes must be integers — parsing as str made
		# raster.GetRasterBand(args.band) fail downstream in main().
		type = int,
		required = False,
		help = "Band (indexing starts at 1). Default is band 1."
	)
	parser.add_argument(
		"-o",
		"--output",
		type = str,
		required = True,
		help = "Output file name"
	)
	parser.add_argument(
		"-v",
		"--verbose",
		action = "store_true",
		help = "Print status updates while executing"
	)
	return parser.parse_args()
def main():
	"""Convert one band of the input raster into an ESRI Shapefile of polygons."""
	args = getArgs()
	if args.verbose:
		# Fixed Python-2-only statement ``print args`` (valid in both 2 and 3).
		print(args)
	raster = gdal.Open(args.input, GA_ReadOnly)
	if args.band:
		band = raster.GetRasterBand(args.band)
	else:
		band = raster.GetRasterBand(1)
	driver = ogr.GetDriverByName("ESRI Shapefile")
	vector = driver.CreateDataSource(args.output)
	# Carry the raster's projection over to the output layer.
	projection = osr.SpatialReference()
	projection.ImportFromWkt(raster.GetProjectionRef())
	layer = vector.CreateLayer("out", srs = projection)
	field = "class"
	field_def = ogr.FieldDefn(field, ogr.OFTInteger)
	layer.CreateField(field_def)
	field_index = 0  # index of the attribute field that receives pixel values
	maskband = None  # no mask: polygonize the whole band
	options = []
	gdal.Polygonize(band, maskband, layer, field_index, options)
	# Drop references so GDAL/OGR flush and close the datasets. The original
	# assigned ``shapefile = None`` (a fresh name), leaving the output
	# datasource open and potentially unflushed.
	band = None
	raster = None
	vector = None
# Script entry point.
if __name__ == "__main__":
	main()
"content_hash": "2c28d891911be0131204c2928156be4e",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 77,
"avg_line_length": 17.890243902439025,
"alnum_prop": 0.6734832992501704,
"repo_name": "jeffreywolf/threshold",
"id": "0f75a3c42fcade86573c7b09403801a2661d1db9",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polygons.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7545"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class WagtailMenusConfig(AppConfig):
    """Django AppConfig registering the wagtailmenus application."""
    name = 'wagtailmenus'
    verbose_name = 'WagtailMenus'
| {
"content_hash": "5e0749af7530764d10ec3d7ebb410e92",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 36,
"avg_line_length": 22.166666666666668,
"alnum_prop": 0.7518796992481203,
"repo_name": "ababic/wagtailmenus",
"id": "45ae7d85f93e1695ce08acdb85e15e1b51395141",
"size": "133",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtailmenus/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "906"
},
{
"name": "HTML",
"bytes": "24091"
},
{
"name": "JavaScript",
"bytes": "307"
},
{
"name": "Python",
"bytes": "364188"
}
],
"symlink_target": ""
} |
import sys, inspect
import numpy as np
from ..data.mfstructure import DatumType
from ..data import mfdata
from collections import OrderedDict
from ..mfbase import ExtFileAction, MFDataException
from ...datbase import DataType
from .mfdatautil import convert_data, to_string
from .mffileaccess import MFFileAccessScalar
from .mfdatastorage import DataStorage, DataStructureType, DataStorageType
class MFScalar(mfdata.MFData):
    """
    Provides an interface for the user to access and update MODFLOW
    scalar data.
    Parameters
    ----------
    sim_data : MFSimulationData
        data contained in the simulation
    structure : MFDataStructure
        describes the structure of the data
    data : list or ndarray
        actual data
    enable : bool
        enable/disable the array
    path : tuple
        path in the data dictionary to this MFArray
    dimensions : MFDataDimensions
        dimension information related to the model, package, and array
    Attributes
    ----------
    data_type : DataType
        type of data stored in the scalar
    plotable : bool
        if the scalar is plotable
    dtype : numpy.dtype
        the scalar's numpy data type
    data : variable
        calls get_data with default parameters
    Methods
    -------
    has_data : () : bool
        Returns whether this object has data associated with it.
    get_data : () : ndarray
        Returns the data associated with this object.
    set_data : (data : ndarray/list, multiplier : float)
        Sets the contents of the data to "data" with
        multiplier "multiplier".
    load : (first_line : string, file_handle : file descriptor,
            block_header : MFBlockHeader, pre_data_comments : MFComment) :
            tuple (bool, string)
        Loads data from first_line (the first line of data) and open file
        file_handle which is pointing to the second line of data.  Returns a
        tuple with the first item indicating whether all data was read
        and the second item being the last line of text read from the file.
    get_file_entry : () : string
        Returns a string containing the data.
    See Also
    --------
    Notes
    -----
    Examples
    --------
    """
    def __init__(
        self,
        sim_data,
        model_or_sim,
        structure,
        data=None,
        enable=True,
        path=None,
        dimensions=None,
    ):
        super(MFScalar, self).__init__(
            sim_data, model_or_sim, structure, enable, path, dimensions
        )
        # A scalar always has exactly one data item structure; its type
        # determines how values are converted and rendered.
        self._data_type = self.structure.data_item_structures[0].type
        self._data_storage = self._new_storage()
        if data is not None:
            self.set_data(data)
    @property
    def data_type(self):
        # Flopy-wide classification of this data object.
        return DataType.scalar
    @property
    def plotable(self):
        # A plain scalar has no spatial extent to plot.
        return False
    @property
    def dtype(self):
        # Map the MODFLOW datum type to the matching numpy dtype.  For
        # compound types (recarray/record/repeating_record) the first
        # numeric item found decides; None means "no numeric dtype".
        if self.structure.type == DatumType.double_precision:
            return np.float64
        elif self.structure.type == DatumType.integer:
            return np.int32
        elif (
            self.structure.type == DatumType.recarray
            or self.structure.type == DatumType.record
            or self.structure.type == DatumType.repeating_record
        ):
            for data_item_struct in self.structure.data_item_structures:
                if data_item_struct.type == DatumType.double_precision:
                    return np.float64
                elif data_item_struct.type == DatumType.integer:
                    return np.int32
        return None
    def has_data(self):
        """Return True if this scalar has stored data.

        Raises MFDataException (wrapping the original error) on storage
        failure.
        """
        try:
            return self._get_storage_obj().has_data()
        except Exception as ex:
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.structure.get_model(),
                self.structure.get_package(),
                self._path,
                "checking for data",
                self.structure.name,
                inspect.stack()[0][3],
                type_,
                value_,
                traceback_,
                None,
                self._simulation_data.debug,
                ex,
            )
    @property
    def data(self):
        # Convenience accessor; equivalent to get_data() with defaults.
        return self.get_data()
    def get_data(self, apply_mult=False, **kwargs):
        """Return the stored scalar value.

        apply_mult is forwarded to the storage layer; extra kwargs are
        accepted for interface compatibility and ignored here.
        """
        try:
            return self._get_storage_obj().get_data(apply_mult=apply_mult)
        except Exception as ex:
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.structure.get_model(),
                self.structure.get_package(),
                self._path,
                "getting data",
                self.structure.name,
                inspect.stack()[0][3],
                type_,
                value_,
                traceback_,
                None,
                self._simulation_data.debug,
                ex,
            )
    def set_data(self, data):
        """Convert `data` to this scalar's type and store it.

        Record-type scalars are wrapped in a list; other types are
        unwrapped down to the first element, with any trailing items
        preserved as data-line comments.
        """
        self._resync()
        if self.structure.type == DatumType.record:
            if data is not None:
                # Records are stored as a list; wrap anything that is
                # not already a plain list.
                if (
                    not isinstance(data, list)
                    or isinstance(data, np.ndarray)
                    or isinstance(data, tuple)
                ):
                    data = [data]
        else:
            # Unwrap nested sequences down to the scalar itself; extra
            # trailing entries at each level are kept as comments.
            while (
                isinstance(data, list)
                or isinstance(data, np.ndarray)
                or isinstance(data, tuple)
            ):
                data = data[0]
                if (isinstance(data, list) or isinstance(data, tuple)) and len(
                    data
                ) > 1:
                    self._add_data_line_comment(data[1:], 0)
        storage = self._get_storage_obj()
        data_struct = self.structure.data_item_structures[0]
        try:
            converted_data = convert_data(
                data, self._data_dimensions, self._data_type, data_struct
            )
        except Exception as ex:
            type_, value_, traceback_ = sys.exc_info()
            comment = 'Could not convert data "{}" to type ' '"{}".'.format(
                data, self._data_type
            )
            raise MFDataException(
                self.structure.get_model(),
                self.structure.get_package(),
                self._path,
                "converting data",
                self.structure.name,
                inspect.stack()[0][3],
                type_,
                value_,
                traceback_,
                comment,
                self._simulation_data.debug,
                ex,
            )
        try:
            storage.set_data(converted_data, key=self._current_key)
        except Exception as ex:
            type_, value_, traceback_ = sys.exc_info()
            comment = 'Could not set data "{}" to type ' '"{}".'.format(
                data, self._data_type
            )
            raise MFDataException(
                self.structure.get_model(),
                self.structure.get_package(),
                self._path,
                "setting data",
                self.structure.name,
                inspect.stack()[0][3],
                type_,
                value_,
                traceback_,
                comment,
                self._simulation_data.debug,
                ex,
            )
    def add_one(self):
        """Increment an integer scalar by one (initializing absent data
        to 1).  Raises MFDataException for non-integer scalars.
        """
        datum_type = self.structure.get_datum_type()
        if datum_type == int or datum_type == np.int32:
            if self._get_storage_obj().get_data() is None:
                # No current value: treat the increment as "set to 1".
                try:
                    self._get_storage_obj().set_data(1)
                except Exception as ex:
                    type_, value_, traceback_ = sys.exc_info()
                    comment = "Could not set data to 1"
                    raise MFDataException(
                        self.structure.get_model(),
                        self.structure.get_package(),
                        self._path,
                        "setting data",
                        self.structure.name,
                        inspect.stack()[0][3],
                        type_,
                        value_,
                        traceback_,
                        comment,
                        self._simulation_data.debug,
                        ex,
                    )
            else:
                try:
                    current_val = self._get_storage_obj().get_data()
                except Exception as ex:
                    type_, value_, traceback_ = sys.exc_info()
                    raise MFDataException(
                        self.structure.get_model(),
                        self.structure.get_package(),
                        self._path,
                        "getting data",
                        self.structure.name,
                        inspect.stack()[0][3],
                        type_,
                        value_,
                        traceback_,
                        None,
                        self._simulation_data.debug,
                        ex,
                    )
                try:
                    self._get_storage_obj().set_data(current_val + 1)
                except Exception as ex:
                    type_, value_, traceback_ = sys.exc_info()
                    comment = 'Could increment data "{}" by one' ".".format(
                        current_val
                    )
                    raise MFDataException(
                        self.structure.get_model(),
                        self.structure.get_package(),
                        self._path,
                        "setting data",
                        self.structure.name,
                        inspect.stack()[0][3],
                        type_,
                        value_,
                        traceback_,
                        comment,
                        self._simulation_data.debug,
                        ex,
                    )
        else:
            message = (
                "{} of type {} does not support add one "
                "operation.".format(
                    self._data_name, self.structure.get_datum_type()
                )
            )
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.structure.get_model(),
                self.structure.get_package(),
                self._path,
                "adding one to scalar",
                self.structure.name,
                inspect.stack()[0][3],
                type_,
                value_,
                traceback_,
                message,
                self._simulation_data.debug,
            )
    def get_file_entry(
        self,
        values_only=False,
        one_based=False,
        ext_file_action=ExtFileAction.copy_relative_paths,
    ):
        """Render this scalar as a MODFLOW input-file line.

        Keywords render alone, records render each item in order, and
        plain values render as "NAME value" (or just the value when
        values_only is True).  one_based adds 1 to integer output.
        """
        storage = self._get_storage_obj()
        try:
            # Nothing stored -> nothing to write.
            if storage is None or self._get_storage_obj().get_data() is None:
                return ""
        except Exception as ex:
            type_, value_, traceback_ = sys.exc_info()
            raise MFDataException(
                self.structure.get_model(),
                self.structure.get_package(),
                self._path,
                "getting data",
                self.structure.name,
                inspect.stack()[0][3],
                type_,
                value_,
                traceback_,
                None,
                self._simulation_data.debug,
                ex,
            )
        if (
            self.structure.type == DatumType.keyword
            or self.structure.type == DatumType.record
        ):
            try:
                data = storage.get_data()
            except Exception as ex:
                type_, value_, traceback_ = sys.exc_info()
                raise MFDataException(
                    self.structure.get_model(),
                    self.structure.get_package(),
                    self._path,
                    "getting data",
                    self.structure.name,
                    inspect.stack()[0][3],
                    type_,
                    value_,
                    traceback_,
                    None,
                    self._simulation_data.debug,
                    ex,
                )
            if self.structure.type == DatumType.keyword:
                if data is not None and data != False:
                    # keyword appears alone
                    return "{}{}\n".format(
                        self._simulation_data.indent_string,
                        self.structure.name.upper(),
                    )
                else:
                    return ""
            elif self.structure.type == DatumType.record:
                # Walk the record's item structures, emitting each
                # present item; `index` tracks position in `data`.
                text_line = []
                index = 0
                for data_item in self.structure.data_item_structures:
                    if (
                        data_item.type == DatumType.keyword
                        and data_item.optional == False
                    ):
                        if isinstance(data, list) or isinstance(data, tuple):
                            if len(data) > index and (
                                data[index] is not None and data[index] != False
                            ):
                                text_line.append(data_item.name.upper())
                            if (
                                isinstance(data[index], str)
                                and data_item.name.upper()
                                != data[index].upper()
                                and data[index] != ""
                            ):
                                # since the data does not match the keyword
                                # assume the keyword was excluded
                                index -= 1
                        else:
                            if data is not None and data != False:
                                text_line.append(data_item.name.upper())
                    else:
                        if data is not None and data != "":
                            if isinstance(data, list) or isinstance(data, tuple):
                                if len(data) > index:
                                    if (
                                        data[index] is not None
                                        and data[index] != False
                                    ):
                                        current_data = data[index]
                                    else:
                                        break
                                elif data_item.optional == True:
                                    break
                                else:
                                    message = (
                                        "Missing expected data. Data "
                                        "size is {}. Index {} not"
                                        "found.".format(len(data), index)
                                    )
                                    type_, value_, traceback_ = sys.exc_info()
                                    raise MFDataException(
                                        self.structure.get_model(),
                                        self.structure.get_package(),
                                        self._path,
                                        "getting data",
                                        self.structure.name,
                                        inspect.stack()[0][3],
                                        type_,
                                        value_,
                                        traceback_,
                                        message,
                                        self._simulation_data.debug,
                                    )
                            else:
                                current_data = data
                            if data_item.type == DatumType.keyword:
                                if (
                                    current_data is not None
                                    and current_data != False
                                ):
                                    if (
                                        isinstance(data[index], str)
                                        and data[index] == "#"
                                    ):
                                        # if data has been commented out,
                                        # keep the comment
                                        text_line.append(data[index])
                                    text_line.append(data_item.name.upper())
                            else:
                                try:
                                    text_line.append(
                                        to_string(
                                            current_data,
                                            self._data_type,
                                            self._simulation_data,
                                            self._data_dimensions,
                                            data_item=data_item,
                                        )
                                    )
                                except Exception as ex:
                                    message = (
                                        'Could not convert "{}" of type '
                                        '"{}" to a string'
                                        ".".format(current_data, self._data_type)
                                    )
                                    type_, value_, traceback_ = sys.exc_info()
                                    raise MFDataException(
                                        self.structure.get_model(),
                                        self.structure.get_package(),
                                        self._path,
                                        "converting data to string",
                                        self.structure.name,
                                        inspect.stack()[0][3],
                                        type_,
                                        value_,
                                        traceback_,
                                        message,
                                        self._simulation_data.debug,
                                    )
                    index += 1
                text = self._simulation_data.indent_string.join(text_line)
                return "{}{}\n".format(self._simulation_data.indent_string, text)
        else:
            # Plain (non-keyword, non-record) scalar value.
            data_item = self.structure.data_item_structures[0]
            try:
                if one_based:
                    # Only integers may be shifted to one-based output.
                    if self.structure.type != DatumType.integer:
                        message = (
                            'Data scalar "{}" can not be one_based '
                            "because it is not an integer"
                            ".".format(self.structure.name)
                        )
                        type_, value_, traceback_ = sys.exc_info()
                        raise MFDataException(
                            self.structure.get_model(),
                            self.structure.get_package(),
                            self._path,
                            "storing one based integer",
                            self.structure.name,
                            inspect.stack()[0][3],
                            type_,
                            value_,
                            traceback_,
                            message,
                            self._simulation_data.debug,
                        )
                    data = self._get_storage_obj().get_data() + 1
                else:
                    data = self._get_storage_obj().get_data()
            except Exception as ex:
                type_, value_, traceback_ = sys.exc_info()
                raise MFDataException(
                    self.structure.get_model(),
                    self.structure.get_package(),
                    self._path,
                    "getting data",
                    self.structure.name,
                    inspect.stack()[0][3],
                    type_,
                    value_,
                    traceback_,
                    None,
                    self._simulation_data.debug,
                )
            try:
                # data
                values = to_string(
                    data,
                    self._data_type,
                    self._simulation_data,
                    self._data_dimensions,
                    data_item=data_item,
                )
            except Exception as ex:
                message = (
                    'Could not convert "{}" of type "{}" '
                    "to a string.".format(data, self._data_type)
                )
                type_, value_, traceback_ = sys.exc_info()
                raise MFDataException(
                    self.structure.get_model(),
                    self.structure.get_package(),
                    self._path,
                    "converting data to string",
                    self.structure.name,
                    inspect.stack()[0][3],
                    type_,
                    value_,
                    traceback_,
                    message,
                    self._simulation_data.debug,
                )
            if values_only:
                return "{}{}".format(
                    self._simulation_data.indent_string, values
                )
            else:
                # keyword + data
                return "{}{}{}{}\n".format(
                    self._simulation_data.indent_string,
                    self.structure.name.upper(),
                    self._simulation_data.indent_string,
                    values,
                )
    def load(
        self,
        first_line,
        file_handle,
        block_header,
        pre_data_comments=None,
        external_file_info=None,
    ):
        """Load this scalar from a package file via MFFileAccessScalar.

        Returns the (more_data_expected, last_line) tuple produced by
        the file access layer.
        """
        # NOTE(review): pre_data_comments and external_file_info are
        # deliberately passed to the base class as None rather than
        # forwarding the caller's values -- looks suspicious; confirm
        # against mfdata.MFData.load before changing.
        super(MFScalar, self).load(
            first_line,
            file_handle,
            block_header,
            pre_data_comments=None,
            external_file_info=None,
        )
        self._resync()
        file_access = MFFileAccessScalar(
            self.structure,
            self._data_dimensions,
            self._simulation_data,
            self._path,
            self._current_key,
        )
        return file_access.load_from_package(
            first_line,
            file_handle,
            self._get_storage_obj(),
            self._data_type,
            self._keyword,
            pre_data_comments,
        )
    def _new_storage(self, stress_period=0):
        # Create the single internal-array storage object backing this
        # scalar.
        return DataStorage(
            self._simulation_data,
            self._model_or_sim,
            self._data_dimensions,
            self.get_file_entry,
            DataStorageType.internal_array,
            DataStructureType.scalar,
            stress_period=stress_period,
            data_path=self._path,
        )
    def _get_storage_obj(self):
        # Non-transient scalars have exactly one storage object.
        return self._data_storage
    def plot(self, filename_base=None, file_extension=None, **kwargs):
        """
        Helper method to plot scalar objects
        Parameters:
            scalar : flopy.mf6.data.mfscalar object
            filename_base : str
                Base file name that will be used to automatically generate file
                names for output image files. Plots will be exported as image
                files if file_name_base is not None. (default is None)
            file_extension : str
                Valid matplotlib.pyplot file extension for savefig(). Only used
                if filename_base is not None. (default is 'png')
        Returns:
             axes: list matplotlib.axes object
        """
        from flopy.plot.plotutil import PlotUtilities
        # plotable is always False for MFScalar, so this raises unless a
        # subclass overrides it.
        if not self.plotable:
            raise TypeError("Scalar values are not plotable")
        axes = PlotUtilities._plot_scalar_helper(
            self,
            filename_base=filename_base,
            file_extension=file_extension,
            **kwargs
        )
        return axes
class MFScalarTransient(MFScalar, mfdata.MFTransient):
    """
    Provides an interface for the user to access and update MODFLOW transient
    scalar data.
    Parameters
    ----------
    sim_data : MFSimulationData
        data contained in the simulation
    structure : MFDataStructure
        describes the structure of the data
    data : list or ndarray
        actual data
    enable : bool
        enable/disable the array
    path : tuple
        path in the data dictionary to this MFArray
    dimensions : MFDataDimensions
        dimension information related to the model, package, and array
    Methods
    -------
    add_transient_key : (transient_key : int)
        Adds a new transient time allowing data for that time to be stored and
        retrieved using the key "transient_key"
    add_one :(transient_key : int)
        Adds one to the data stored at key "transient_key"
    get_data : (key : int) : ndarray
        Returns the data associated with "key".
    set_data : (data : ndarray/list, multiplier : float, key : int)
        Sets the contents of the data at time "key" to
        "data" with multiplier "multiplier".
    load : (first_line : string, file_handle : file descriptor,
            block_header : MFBlockHeader, pre_data_comments : MFComment) :
            tuple (bool, string)
        Loads data from first_line (the first line of data) and open file
        file_handle which is pointing to the second line of data.  Returns a
        tuple with the first item indicating whether all data was read
        and the second item being the last line of text read from the file.
    get_file_entry : (key : int) : string
        Returns a string containing the data at time "key".
    See Also
    --------
    Notes
    -----
    Examples
    --------
    """
    def __init__(
        self,
        sim_data,
        model_or_sim,
        structure,
        enable=True,
        path=None,
        dimensions=None,
    ):
        super(MFScalarTransient, self).__init__(
            sim_data=sim_data,
            model_or_sim=model_or_sim,
            structure=structure,
            enable=enable,
            path=path,
            dimensions=dimensions,
        )
        self._transient_setup(self._data_storage)
        self.repeating = True
    @property
    def data_type(self):
        return DataType.transientscalar
    @property
    def plotable(self):
        # Simulation-level (model-less) data cannot be plotted.
        if self.model is None:
            return False
        else:
            return True
    def add_transient_key(self, key):
        """Register transient time "key" and create its storage."""
        super(MFScalarTransient, self).add_transient_key(key)
        if isinstance(key, int):
            stress_period = key
        else:
            # Non-integer keys (e.g. tuples) default to stress period 1.
            stress_period = 1
        self._data_storage[key] = super(MFScalarTransient, self)._new_storage(
            stress_period
        )
    def add_one(self, key=0):
        """Add one to the integer scalar stored at time "key"."""
        self._update_record_prep(key)
        super(MFScalarTransient, self).add_one()
    def has_data(self, key=None):
        """Return True if data exists at time "key" (any time if key is
        None)."""
        if key is None:
            # Search every stored stress period until data is found.
            data_found = False
            for sto_key in self._data_storage.keys():
                self.get_data_prep(sto_key)
                data_found = (
                    data_found or super(MFScalarTransient, self).has_data()
                )
                if data_found:
                    break
        else:
            self.get_data_prep(key)
            data_found = super(MFScalarTransient, self).has_data()
        return data_found
    def get_data(self, key=0, **kwargs):
        """Return the data stored at time "key"."""
        self.get_data_prep(key)
        return super(MFScalarTransient, self).get_data()
    def set_data(self, data, key=None):
        """Store "data" at time "key"; a dict maps stress periods to
        their values."""
        if isinstance(data, dict):  # OrderedDict is a dict subclass
            # each item in the dictionary is a list for one stress period
            # the dictionary key is the stress period the list is for
            for key, list_item in data.items():
                self._set_data_prep(list_item, key)
                super(MFScalarTransient, self).set_data(list_item)
        else:
            self._set_data_prep(data, key)
            super(MFScalarTransient, self).set_data(data)
    def get_file_entry(
        self, key=None, ext_file_action=ExtFileAction.copy_relative_paths
    ):
        """Render the file entry for time "key", or for all times joined
        by blank lines when key is None."""
        if key is None:
            # Build one entry per stress period that actually has data.
            file_entry = []
            for sto_key in self._data_storage.keys():
                if self.has_data(sto_key):
                    self._get_file_entry_prep(sto_key)
                    text_entry = super(MFScalarTransient, self).get_file_entry(
                        ext_file_action=ext_file_action
                    )
                    file_entry.append(text_entry)
            # BUG FIX: the original compared the list itself to an int
            # ("if file_entry > 1"), which raises TypeError on Python 3
            # and never performed the intended length check.
            if len(file_entry) > 1:
                return "\n\n".join(file_entry)
            elif len(file_entry) == 1:
                return file_entry[0]
            else:
                return ""
        else:
            self._get_file_entry_prep(key)
            return super(MFScalarTransient, self).get_file_entry(
                ext_file_action=ext_file_action
            )
    def load(
        self,
        first_line,
        file_handle,
        block_header,
        pre_data_comments=None,
        external_file_info=None,
    ):
        """Load transient scalar data for the block described by
        block_header."""
        self._load_prep(block_header)
        # BUG FIX: block_header must be forwarded to MFScalar.load; the
        # original omitted it, shifting pre_data_comments and
        # external_file_info into the wrong positional parameter slots.
        return super(MFScalarTransient, self).load(
            first_line,
            file_handle,
            block_header,
            pre_data_comments,
            external_file_info,
        )
    def _new_storage(self, stress_period=0):
        # One DataStorage per transient key, created lazily by
        # add_transient_key; maps transient key -> storage object.
        return OrderedDict()
    def _get_storage_obj(self):
        # Resolve the storage for the currently active transient key.
        if (
            self._current_key is None
            or self._current_key not in self._data_storage
        ):
            return None
        return self._data_storage[self._current_key]
    def plot(
        self,
        filename_base=None,
        file_extension=None,
        kper=0,
        fignum=None,
        **kwargs
    ):
        """
        Plot transient scalar model data
        Parameters
        ----------
        transientscalar : flopy.mf6.data.mfdatascalar.MFScalarTransient object
        filename_base : str
            Base file name that will be used to automatically generate file
            names for output image files. Plots will be exported as image
            files if file_name_base is not None. (default is None)
        file_extension : str
            Valid matplotlib.pyplot file extension for savefig(). Only used
            if filename_base is not None. (default is 'png')
        **kwargs : dict
            axes : list of matplotlib.pyplot.axis
                List of matplotlib.pyplot.axis that will be used to plot
                data for each layer. If axes=None axes will be generated.
                (default is None)
            pcolor : bool
                Boolean used to determine if matplotlib.pyplot.pcolormesh
                plot will be plotted. (default is True)
            colorbar : bool
                Boolean used to determine if a color bar will be added to
                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
                (default is False)
            inactive : bool
                Boolean used to determine if a black overlay in inactive
                cells in a layer will be displayed. (default is True)
            contour : bool
                Boolean used to determine if matplotlib.pyplot.contour
                plot will be plotted. (default is False)
            clabel : bool
                Boolean used to determine if matplotlib.pyplot.clabel
                will be plotted. Only used if contour=True. (default is False)
            grid : bool
                Boolean used to determine if the model grid will be plotted
                on the figure. (default is False)
            masked_values : list
                List of unique values to be excluded from the plot.
            kper : str
                MODFLOW zero-based stress period number to return. If
                kper='all' then data for all stress period will be
                extracted. (default is zero).
        Returns
        ----------
        axes : list
            Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.axis is returned.
        """
        from flopy.plot.plotutil import PlotUtilities
        if not self.plotable:
            raise TypeError("Simulation level packages are not plotable")
        axes = PlotUtilities._plot_transient2d_helper(
            self,
            filename_base=filename_base,
            file_extension=file_extension,
            kper=kper,
            fignum=fignum,
            **kwargs
        )
        return axes
| {
"content_hash": "64a397f47283da6aa8c327fb9b20c35d",
"timestamp": "",
"source": "github",
"line_count": 891,
"max_line_length": 79,
"avg_line_length": 36.36026936026936,
"alnum_prop": 0.4566163533660524,
"repo_name": "aleaf/flopy",
"id": "2c88480d008d5705662d7a768bfae998eab991c0",
"size": "32397",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flopy/mf6/data/mfdatascalar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "5469342"
},
{
"name": "Shell",
"bytes": "2562"
}
],
"symlink_target": ""
} |
'''
Created on Apr 4, 2017
@author: benelot
'''
import gym
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import pybullet as p
def state_fields_of_pose_of(body_id, link_id=-1):  # a method you will most probably need a lot to get pose and orientation
    """Return the pose of a body (link_id == -1) or one of its links as a
    flat 7-element array: position (x, y, z) followed by the orientation
    quaternion (a, b, c, d)."""
    if link_id == -1:
        position, orientation = p.getBasePositionAndOrientation(body_id)
    else:
        link_state = p.getLinkState(body_id, link_id)
        position, orientation = link_state[0], link_state[1]
    return np.array(list(position) + list(orientation))
class PybulletMujocoEnv(gym.Env):
    """Base gym environment backed by a PyBullet-loaded MJCF model."""
    def __init__(self, model_xml, robot_name, timestep, frame_skip, action_dim, obs_dim, repeats):
        # Continuous actions, one dimension per controllable joint.
        self.action_space = gym.spaces.Box(-1.0, 1.0, shape=(action_dim,))
        float_max = np.finfo(np.float32).max
        # obs space for problem is (R, obs_dim)
        # R = number of repeats
        # obs_dim d tuple
        self.state_shape = (repeats, obs_dim)
        self.observation_space = gym.spaces.Box(-float_max, float_max, shape=self.state_shape)
        # no state until reset.
        self.state = np.empty(self.state_shape, dtype=np.float32)
        self.frame_skip = frame_skip
        self.timestep = timestep
        self.model_xml = model_xml
        # Load the MJCF model into the physics server and index its
        # parts and joints by name.
        self.parts, self.joints, = self.getScene(p.loadMJCF(model_xml))
        self.robot_name = robot_name
        # Effective control period: physics step * frames per action.
        self.dt = timestep * frame_skip
        self.metadata = {
            'render.modes': ['human', 'rgb_array'],
            'video.frames_per_second': int(np.round(1.0 / timestep / frame_skip))
        }
        self._seed()
    def getScene(self, bodies):
        """Build {name: Joint} and {name: BodyPart} maps for all loaded
        bodies; motors are disabled so joints start passive."""
        parts = {}
        joints = {}
        for i in range(len(bodies)):
            for j in range(p.getNumJoints(bodies[i])):
                # NOTE(review): unpacking assumes p.getJointInfo returns a
                # 13-tuple with joint name at [1] and link name at [12] --
                # matches the PyBullet API of this era; confirm on upgrade.
                _,joint_name,_,_,_,_,_,_,_,_,_,_,part_name = p.getJointInfo(bodies[i],j)
                joints[joint_name] = Joint(bodies,i,j)
                joints[joint_name].disable_motor()
                parts[part_name] = BodyPart(bodies,i,j)
        return parts, joints
    def _seed(self, seed=None):
        # Standard gym seeding helper; stores the RNG on the instance.
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]
class BodyPart:
    """Convenience wrapper around one part (base or link) of a loaded
    multibody, exposing pose accessors and reset helpers."""
    def __init__(self, bodies, bodyIndex, bodyPartIndex):
        self.bodies = bodies
        self.bodyIndex = bodyIndex
        self.bodyPartIndex = bodyPartIndex
        # Remember the spawn pose so callers can restore it later.
        self.initialPosition = self.current_position()
        self.initialOrientation = self.current_orientation()
    def get_pose(self):
        """Return [x, y, z, a, b, c, d]: position plus quaternion."""
        return state_fields_of_pose_of(self.bodies[self.bodyIndex], self.bodyPartIndex)
    def current_position(self):
        return self.get_pose()[:3]
    def current_orientation(self):
        return self.get_pose()[3:]
    def reset_position(self, position):
        # BUG FIX: the original called self.get_orientation(), which does
        # not exist on this class; the accessor is current_orientation().
        p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position, self.current_orientation())
    def reset_orientation(self, orientation):
        # BUG FIX: the original called self.get_position(), which does not
        # exist on this class; the accessor is current_position().
        p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], self.current_position(), orientation)
    def reset_pose(self, position, orientation):
        p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], position, orientation)
class Joint:
    """Convenience wrapper around one joint of a loaded multibody."""
    def __init__(self, bodies, bodyIndex, jointIndex):
        self.bodies = bodies
        self.bodyIndex = bodyIndex
        self.jointIndex = jointIndex
        # NOTE(review): assumes p.getJointInfo returns a 13-tuple with the
        # joint limits at indices [8] and [9]; confirm on PyBullet upgrade.
        _,_,_,_,_,_,_,_,self.lowerLimit, self.upperLimit,_,_,_ = p.getJointInfo(self.bodies[self.bodyIndex], self.jointIndex)
    def set_state(self, x, vx):
        # Hard-set joint position and velocity (bypasses dynamics).
        p.resetJointState(self.bodies[self.bodyIndex], self.jointIndex, x, vx)
    def get_state(self):
        """Return (position, velocity) of the joint."""
        x, vx,_,_ = p.getJointState(self.bodies[self.bodyIndex],self.jointIndex)
        return x, vx
    def set_position(self, position):
        p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,p.POSITION_CONTROL, targetPosition=position)
    def set_velocity(self, velocity):
        p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,p.VELOCITY_CONTROL, targetVelocity=velocity)
    def set_torque(self, torque):
        p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,p.TORQUE_CONTROL, force=torque)
    def reset_position(self, position, velocity):
        # Drive toward the pose, then leave the joint passive.
        self.set_position(position)
        self.set_velocity(velocity)
        self.disable_motor()
    def disable_motor(self):
        # Zero-force velocity control effectively turns the motor off.
        p.setJointMotorControl2(self.bodies[self.bodyIndex],self.jointIndex,controlMode=p.VELOCITY_CONTROL, force=0)
"content_hash": "c2ee46bcda999c8ecf97e4307e53bbcd",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 125,
"avg_line_length": 38.741379310344826,
"alnum_prop": 0.6290609701824655,
"repo_name": "benelot/bullet-gym",
"id": "5149d02112ac8fa5b665d5a143ba1b5d808d676d",
"size": "4494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bullet-gym-primitive/envs/MJCFCommon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "718069"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
from errno import EACCES, EPERM
import os
from shutil import rmtree as shutil_rmtree
import stat
import sys
def rmtree(path):
    """Recursively delete *path*.

    On Windows, read-only entries make shutil.rmtree fail with
    EACCES/EPERM, so a retry callback that clears the read-only bit is
    installed; elsewhere the plain shutil behavior is used.
    """
    if sys.platform == 'win32':
        return shutil_rmtree(path, onerror=_onerror_windows)
    return shutil_rmtree(path)
def _onerror_windows(function, path, excinfo):
if isinstance(excinfo[1], OSError) and excinfo[1].errno in (EACCES, EPERM):
os.chmod(path, stat.S_IWRITE)
function(path)
| {
"content_hash": "9550e9a6b1be1d32b39b4b7015226d11",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.6792035398230089,
"repo_name": "dirk-thomas/vcstool",
"id": "c8869ae80044028680a46bb28450b7fbc4ae9b74",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vcstool/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "126487"
},
{
"name": "Shell",
"bytes": "814"
}
],
"symlink_target": ""
} |
from flask_security import RoleMixin
from app import config
db = config.db
class Role(db.Document, RoleMixin):
    """MongoDB-backed user role document for Flask-Security."""
    name = db.StringField(max_length=80, unique=True)  # role identifier
    description = db.StringField(max_length=255)  # optional human-readable text
| {
"content_hash": "9acf9b011856cc00972069c0ac63c905",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 53,
"avg_line_length": 24.11111111111111,
"alnum_prop": 0.7511520737327189,
"repo_name": "pony012/PruebaServicioCucea",
"id": "de4258553a596dc191bcf0aaf21d1c6ad709b1b6",
"size": "217",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "app/models/Role.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1567"
},
{
"name": "HTML",
"bytes": "3954"
},
{
"name": "Python",
"bytes": "10661"
},
{
"name": "Shell",
"bytes": "477"
}
],
"symlink_target": ""
} |
import sys, logging, os
from raygun4py import raygunprovider
# Hook the Raygun logging handler up:
# Create a dedicated logger and attach the Raygun handler so records
# sent through it are reported to the Raygun service.
logger = logging.getLogger("mylogger")
rgHandler = raygunprovider.RaygunHandler("paste_your_api_key_here")
logger.addHandler(rgHandler)
def log_exception(exc_type, exc_value, exc_traceback):
    # sys.excepthook-compatible handler: forwards the uncaught exception
    # (with full traceback) to the Raygun-backed logger, then echoes it.
    logger.error("A python error occurred", exc_info=(exc_type, exc_value, exc_traceback))
    print("Logging: %s" % exc_value)
# Install the handler for all uncaught exceptions in this process.
sys.excepthook = log_exception
## Example exception:
def buggyMethod():
    """Deliberately raise so the installed excepthook has something to report."""
    error = Exception("Test exception sent via built-in handler")
    raise error
# Trigger the sample exception; it propagates out and is handled by
# the sys.excepthook installed above.
buggyMethod()
"content_hash": "3042819f5512bf3ffb6621cf1ec4b5f1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7526132404181185,
"repo_name": "Osmose/raygun4py",
"id": "d19e52caf702715c8229b81a715ddeaea3dbb4c7",
"size": "574",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python3/samples/sampleWithLogging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60321"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import glob
import os.path
# We need to import this prior to importing cffi to fix prebuilding the
# extension modules
from nacl import _cffi_fix # noqa
from cffi import FFI
from cffi.verifier import Verifier
__all__ = ["ffi"]
# All C declarations live in *.h files next to this module.
HEADERS = glob.glob(
    os.path.join(os.path.abspath(os.path.dirname(__file__)), "*.h")
)
# Build our FFI instance
ffi = FFI()
# Add all of our header files, but sort first for consistency of the
# hash that CFFI generates and uses in the .so filename (the order of
# glob() results cannot be relied on)
for header in sorted(HEADERS):
    with open(header, "r") as hfile:
        ffi.cdef(hfile.read())
# TODO: Can we use the ABI of libsodium for this instead?
ffi.verifier = Verifier(
    ffi,
    "#include <sodium.h>",
    # We need to link to the sodium library
    libraries=["sodium"],
    # Our ext_package is nacl so look for it
    ext_package="nacl._lib",
)
class Library(object):
    """Lazy proxy for the compiled cffi extension.

    The shared library is loaded on the first attribute access; looked-up
    symbols are cached on the instance so later accesses never re-enter
    __getattr__.  Runtime compilation is explicitly forbidden because the
    extension must already have been built by setup.py.
    """
    def __init__(self, ffi):
        self.ffi = ffi
        self._initialized = False
        # Replace the verifier's compile_module so any attempt to build
        # the extension at runtime fails loudly instead of compiling.
        def _refuse_compile(*args, **kwargs):
            raise RuntimeError("Cannot compile module during runtime")
        self.ffi.verifier.compile_module = _refuse_compile
    def __getattr__(self, name):
        # First symbol lookup triggers loading the shared library.
        if not self._initialized:
            self._lib = self.ffi.verifier.load_library()
            self._initialized = True
        value = getattr(self._lib, name)
        # Cache the symbol on the instance; subsequent accesses hit the
        # instance dict directly and skip __getattr__ entirely.
        setattr(self, name, value)
        return value
# Single module-level proxy; libsodium symbols resolve lazily on first use.
lib = Library(ffi)
| {
"content_hash": "d2ac7532b54f362f8fe74a3a358ad029",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 74,
"avg_line_length": 24.905405405405407,
"alnum_prop": 0.6516549104720565,
"repo_name": "alex/pynacl",
"id": "d30f16c5fe61a570e56af24f288d6447bd767e29",
"size": "2444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nacl/_lib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "17958"
},
{
"name": "C",
"bytes": "558078"
},
{
"name": "C++",
"bytes": "15818"
},
{
"name": "Python",
"bytes": "76544"
},
{
"name": "Shell",
"bytes": "341585"
},
{
"name": "Visual Basic",
"bytes": "294"
}
],
"symlink_target": ""
} |
import sys
import unittest
from alphatwirl.selection import build_selection
from alphatwirl.selection.modules import All
from alphatwirl.selection.modules import Any
from alphatwirl.selection.modules import Not
from alphatwirl.selection.modules.LambdaStr import LambdaStr
##__________________________________________________________________||
class MockFactoryDispatcher(object):
    """Test double for FactoryDispatcher: echoes back the keyword
    arguments it is called with."""
    def __call__(self, **kwargs):
        return kwargs
##__________________________________________________________________||
class Test_buildSelection(unittest.TestCase):
    """Verify that build_selection forwards its keyword arguments to the
    FactoryDispatcher and injects the default selection classes."""
    def setUp(self):
        # Monkeypatch the dispatcher in the funcs module with the mock,
        # remembering the original so tearDown can restore it.
        self.module = sys.modules['alphatwirl.selection.funcs']
        self._org_FactoryDispatcher = self.module.FactoryDispatcher
        self.module.FactoryDispatcher = MockFactoryDispatcher()
    def tearDown(self):
        self.module.FactoryDispatcher = self._org_FactoryDispatcher
    def test_call_kargs(self):
        # The mock echoes kwargs, so after removing the injected keys the
        # result should equal (but not be) the original kwargs dict.
        kargs = dict(
            arg1 = 10,
            arg2 = 20,
            level = dict(factory = 'test_level1', arg2 = 2, arg3 = 3)
        )
        obj = build_selection(**kargs)
        self.assertIsNot(kargs, obj)
        obj.pop('AllClass')
        obj.pop('AnyClass')
        obj.pop('NotClass')
        obj.pop('LambdaStrClass')
        obj.pop('aliasDict')
        self.assertEqual(kargs, obj)
    def test_call_default_modules(self):
        # The injected keys must carry the default selection classes.
        obj = build_selection(
            arg1 = 10,
            arg2 = 20,
            level = dict(factory = 'test_level1', arg2 = 2, arg3 = 3)
        )
        self.assertIs(All, obj.pop('AllClass'))
        self.assertIs(Any, obj.pop('AnyClass'))
        self.assertIs(Not, obj.pop('NotClass'))
        self.assertIs(LambdaStr, obj.pop('LambdaStrClass'))
        obj.pop('aliasDict')
        expected = dict(
            arg1 = 10,
            arg2 = 20,
            level = dict(factory = 'test_level1', arg2 = 2, arg3 = 3)
        )
        self.assertEqual(expected, obj)
##__________________________________________________________________||
| {
"content_hash": "dff464a7340de3f1734b9fdcdbc18666",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 70,
"avg_line_length": 30.923076923076923,
"alnum_prop": 0.5477611940298508,
"repo_name": "TaiSakuma/AlphaTwirl",
"id": "76eb8de9a5a98a491c29a23406f00aad969822b4",
"size": "2010",
"binary": false,
"copies": "1",
"ref": "refs/heads/v0.9.x",
"path": "tests/unit/selection/test_build_selection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "562011"
},
{
"name": "R",
"bytes": "1222"
}
],
"symlink_target": ""
} |
"""Compatibility helpers bridging the Sublime Text 2 and 3 plugin APIs."""
import sublime
from php_coverage.debug import debug_message
# Sublime Text 3 reports API versions >= 3000.
sublime3 = int(sublime.version()) >= 3000
if sublime3:
    set_timeout_async = sublime.set_timeout_async
else:
    # Sublime Text 2 has no asynchronous timer, so fall back to the
    # synchronous set_timeout as a polyfill for the ST3 API.
    debug_message("Adding Sublime 3 polyfills")
    set_timeout_async = sublime.set_timeout
| {
"content_hash": "05563104f39d3efb82eb80f3a6761387",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 24.181818181818183,
"alnum_prop": 0.7443609022556391,
"repo_name": "bradfeehan/SublimePHPCoverage",
"id": "ed5a05552f360625744ab5f0263a347c49dd526a",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "php_coverage/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "0"
},
{
"name": "Python",
"bytes": "114863"
}
],
"symlink_target": ""
} |
import platform
import socket
import time
import rpm
import logging
class Info(object):
    """Collects host information and the list of installed RPM packages.

    Uses the ``rpm`` Python bindings, so it only works on RPM-based
    distributions (CentOS/RedHat/Fedora).
    """
    def __init__(self):
        pass
    def rpm_getinfo(self):
        """Return a sorted list of dicts describing every installed RPM.

        Each dict carries the package name/version/release, its install
        date, the time of this review, a ``deleted`` flag and the host IP.
        Re-raises whatever the ``rpm`` bindings raise if the RPM database
        cannot be opened.
        """
        rpm_collect = []
        rpm_struct = {}
        try:
            logging.debug('Getting information of installed RPM')
            db = rpm.TransactionSet()
            rpm_packages = db.dbMatch()
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are not swallowed; original error re-raised.
            logging.critical('Error getting information about rpms installed')
            raise
        for package in rpm_packages:
            rpm_struct['name'] = package['name']
            rpm_struct['version'] = package['version']
            rpm_struct['release'] = package['release']
            rpm_struct['date'] = package.sprintf("%{INSTALLTID:date}")
            rpm_struct['review_date'] = time.ctime(time.time())
            rpm_struct['deleted'] = 'false'
            rpm_struct['ip'] = socket.gethostbyname(socket.gethostname())
            if rpm_struct not in rpm_collect:
                # Copy: rpm_struct is reused (mutated) on every iteration.
                rpm_collect.append(rpm_struct.copy())
        # Fix: dicts are not orderable in Python 3, so ``sorted(rpm_collect)``
        # would raise TypeError for >1 packages; sort by package identity.
        return sorted(
            rpm_collect,
            key=lambda pkg: (pkg['name'], pkg['version'], pkg['release']),
        )
    def catcher(self):
        """Return ``(info_host, packages)``: host facts and the RPM list."""
        packages = []
        info_host = {}
        info_host['fqdn'] = socket.getfqdn()
        info_host['ip'] = socket.gethostbyname(socket.gethostname())
        info_host['date'] = time.ctime(time.time())
        info_host['system'] = platform.system()
        info_host['SO_release'] = platform.release()
        info_host['SO_version'] = platform.version()
        # NOTE(review): platform.dist() was removed in Python 3.8 — confirm
        # the target runtime before upgrading the interpreter.
        info_host['SO_distribution'] = platform.dist()
        packages = self.rpm_getinfo()
        return info_host, packages
| {
"content_hash": "67f078e30797dc3f2fe1ea7e53c1a062",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 98,
"avg_line_length": 31.941176470588236,
"alnum_prop": 0.6519337016574586,
"repo_name": "padajuan/rpmController",
"id": "85372c4b8799db11be144e0fc136605a15005579",
"size": "1629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rpmController/rpm_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Puppet",
"bytes": "4675"
},
{
"name": "Python",
"bytes": "14595"
},
{
"name": "Ruby",
"bytes": "400"
},
{
"name": "Shell",
"bytes": "3710"
}
],
"symlink_target": ""
} |
import numpy as np
import math
import onnx
from onnx import helper, TensorProto, mapping
import torch
import torchvision
import tvm.topi.testing
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
import scipy
import tvm.testing
def get_input_data_shape_dict(graph_def, input_data):
    """Map ONNX graph input names to the shapes of the provided data.

    Returns ``(input_names, shape_dict)``: when *input_data* is a list,
    *input_names* is a dict of positional index -> graph input name;
    otherwise it is the name of the first graph input.
    """
    if isinstance(input_data, list):
        input_names = {}
        shape_dict = {}
        for idx, data in enumerate(input_data):
            name = graph_def.graph.input[idx].name
            input_names[idx] = name
            shape_dict[name] = data.shape
    else:
        input_names = graph_def.graph.input[0].name
        shape_dict = {input_names: input_data.shape}
    return input_names, shape_dict
def get_tvm_output_with_vm(
    graph_def, input_data, target, ctx, opset=None, freeze_params=False, convert_to_static=False
):
    """Compile *graph_def* with the Relay VM executor and run it on *input_data*."""
    inputs = input_data if isinstance(input_data, list) else [input_data]
    _, shape_dict = get_input_data_shape_dict(graph_def, inputs)
    mod, params = relay.frontend.from_onnx(
        graph_def, shape_dict, opset=opset, freeze_params=freeze_params
    )
    if convert_to_static:
        # Optionally lower dynamic ops to static ones before execution.
        from tvm.relay import transform
        mod = transform.DynamicToStatic()(mod)
    executor = relay.create_executor("vm", mod=mod, ctx=ctx, target=target)
    result = executor.evaluate()(*inputs)
    # A single NDArray comes back as one numpy array, tuples as a list.
    if isinstance(result, tvm.runtime.NDArray):
        return result.asnumpy()
    return [item.asnumpy() for item in result]
def get_tvm_output(
    graph_def, input_data, target, ctx, output_shape=None, output_dtype="float32", opset=None
):
    """Compile *graph_def* with the graph runtime and run it on *input_data*.

    NOTE(review): the ``target`` and ``ctx`` arguments are accepted but then
    overwritten below (``target = "llvm"``, ``ctx = tvm.cpu(0)``), so this
    path always executes on CPU regardless of what the caller passes —
    confirm this is intentional before relying on GPU coverage here.
    """
    target = "llvm"
    input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)
    mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)
    with tvm.transform.PassContext(opt_level=1):
        graph, lib, params = relay.build(mod, target, params=params)
    ctx = tvm.cpu(0)
    m = graph_runtime.create(graph, lib, ctx)
    # set inputs
    if isinstance(input_data, list):
        for i, e in enumerate(input_names):
            # Its possible for some onnx inputs to not be needed in the tvm
            # module, confirm its present before setting.
            try:
                m.set_input(input_names[i], tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
            except:
                continue
    else:
        m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype)))
    m.set_input(**params)
    # execute
    m.run()
    # get outputs
    # Lists of output shapes/dtypes yield a list of arrays; otherwise only
    # the first output is returned.
    if isinstance(output_shape, list) and isinstance(output_dtype, list):
        tvm_output_list = []
        for i, _ in enumerate(output_shape):
            tvm_output = m.get_output(i)
            tvm_output_list.append(tvm_output.asnumpy())
        return tvm_output_list
    else:
        tvm_output = m.get_output(0)
        return tvm_output.asnumpy()
def get_onnxruntime_output(model, inputs, dtype="float32"):
    """Run *model* on CPU via onnxruntime and return the reference result.

    A multi-element input list yields the full output list; otherwise the
    single input is cast to *dtype* and only the first output is returned.
    """
    import onnxruntime.backend
    rep = onnxruntime.backend.prepare(model, "CPU")
    if isinstance(inputs, list) and len(inputs) > 1:
        return rep.run(inputs)
    single = inputs[0] if isinstance(inputs, list) else inputs
    return rep.run(single.astype(dtype))[0]
def verify_with_ort_with_inputs(
    model,
    inputs,
    out_shape=None,
    targets=None,
    use_vm=False,
    opset=None,
    freeze_params=False,
    convert_to_static=False,
    dtype="float32",
    rtol=1e-5,
    atol=1e-5,
):
    """Check that TVM matches onnxruntime on *model* for the given *inputs*."""
    def _flatten(value):
        # Unwrap single-element output lists and flatten ndarrays so the
        # comparison is insensitive to trivial shape differences.
        if isinstance(value, list) and len(value) == 1:
            value = value[0]
        return value.flatten() if isinstance(value, np.ndarray) else value
    ort_out = get_onnxruntime_output(model, inputs, dtype)
    if targets is None:
        targets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]
    for target in targets:
        ctx = tvm.context(target, 0)
        if use_vm:
            tvm_out = get_tvm_output_with_vm(
                model,
                inputs,
                target,
                ctx,
                opset=opset,
                freeze_params=freeze_params,
                convert_to_static=convert_to_static,
            )
        else:
            tvm_out = get_tvm_output(model, inputs, target, ctx, out_shape, dtype, opset=opset)
        tvm.testing.assert_allclose(_flatten(ort_out), _flatten(tvm_out), rtol=rtol, atol=atol)
def verify_with_ort(
    model,
    input_shapes,
    out_shape=None,
    targets=None,
    use_vm=False,
    opset=None,
    freeze_params=False,
    convert_to_static=False,
    dtype="float32",
    rtol=1e-5,
    atol=1e-5,
):
    """Like verify_with_ort_with_inputs, but generates random inputs of *input_shapes*."""
    random_inputs = [np.random.uniform(size=shape).astype(dtype) for shape in input_shapes]
    verify_with_ort_with_inputs(
        model,
        random_inputs,
        out_shape=out_shape,
        targets=targets,
        use_vm=use_vm,
        opset=opset,
        freeze_params=freeze_params,
        convert_to_static=convert_to_static,
        dtype=dtype,
        rtol=rtol,
        atol=atol,
    )
def make_constant_node(name, data_type, dims, vals):
    """Build an ONNX Constant node whose output tensor is named *name*."""
    tensor = helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals)
    return helper.make_node("Constant", inputs=[], outputs=[name], value=tensor)
@tvm.testing.uses_gpu
def test_reshape():
    """ONNX Reshape with a constant target shape yields that shape."""
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)
    ref_array = np.array(ref_shape)
    # Target shape is fed through a Constant node, not a graph input.
    ref_node = onnx.helper.make_node(
        "Constant",
        inputs=[],
        outputs=["ref_in"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=onnx.TensorProto.INT32,
            dims=ref_array.shape,
            vals=ref_array.flatten().astype(int),
        ),
    )
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
    graph = helper.make_graph(
        [ref_node, reshape_node],
        "reshape_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
    )
    model = helper.make_model(graph, producer_name="reshape_test")
    for target, ctx in tvm.testing.enabled_targets():
        x = np.random.uniform(size=in_shape).astype("int32")
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, "float32")
        # Only the output shape is checked; values pass through unchanged.
        tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_expand():
    """ONNX Expand broadcasts an input tensor to a constant target shape."""
    def _test_expand(name, data, shape, ref_data, dtype="int32"):
        # The target shape is baked in as a Constant node of the given dtype.
        shape_array = np.array(shape)
        if dtype == "int32":
            shape_node = onnx.helper.make_node(
                "Constant",
                inputs=[],
                outputs=["shape"],
                value=onnx.helper.make_tensor(
                    name="const_tensor",
                    data_type=onnx.TensorProto.INT32,
                    dims=shape_array.shape,
                    vals=shape_array.flatten().astype("int32"),
                ),
            )
        elif dtype == "int64":
            shape_node = onnx.helper.make_node(
                "Constant",
                inputs=[],
                outputs=["shape"],
                value=onnx.helper.make_tensor(
                    name="const_tensor",
                    data_type=onnx.TensorProto.INT64,
                    dims=shape_array.shape,
                    vals=shape_array.flatten().astype("int64"),
                ),
            )
        else:
            # Fix: ``raise "Invalid dtype"`` is itself a TypeError in
            # Python 3 (exceptions must derive from BaseException); raise a
            # real exception carrying the same message instead.
            raise ValueError("Invalid dtype")
        expand_node = helper.make_node("Expand", ["in", "shape"], ["out"])
        graph = helper.make_graph(
            [shape_node, expand_node],
            "expand_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(data.shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_data.shape))],
        )
        model = helper.make_model(graph, producer_name=name)
        for target, ctx in tvm.testing.enabled_targets():
            tvm_out = get_tvm_output_with_vm(model, data, target, ctx, freeze_params=True)
            tvm.testing.assert_allclose(ref_data, tvm_out)
    # Case 1: expansion keeps rank, tiles the size-1 dim.
    in_shape = (3, 1)
    shape = (3, 4)
    data = np.random.uniform(size=in_shape).astype(np.float32)
    ref_data = np.tile(data, 4)
    _test_expand("expand_with_dim_unchanged_test", data, shape, ref_data, "int32")
    _test_expand("expand_with_dim_unchanged_test", data, shape, ref_data, "int64")
    # Case 2: expansion adds a leading dim via broadcasting.
    in_shape = (3, 1)
    shape = (2, 1, 6)
    data = np.random.uniform(size=in_shape).astype(np.float32)
    ref_data = data * np.ones(shape, dtype=np.float32)
    _test_expand("expand_with_dim_changed_test", data, shape, ref_data, "int32")
    _test_expand("expand_with_dim_changed_test", data, shape, ref_data, "int64")
def verify_depth_to_space(inshape, outshape, mode, blockSize):
    """Compare TVM against onnxruntime for a DepthToSpace node.

    NOTE(review): *mode* is accepted but never forwarded to the ONNX node
    (the OpSet-1 checker has no ``mode`` attribute — see test_depth_to_space),
    so only the default behavior is exercised.
    """
    node = onnx.helper.make_node("DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blockSize)
    graph = helper.make_graph(
        [node],
        "depth_to_space_test",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
    )
    model = helper.make_model(graph, producer_name="depth_to_space_test")
    verify_with_ort(model, [inshape], outshape)
@tvm.testing.uses_gpu
def test_depth_to_space():
    """DepthToSpace rearranges channel data into spatial blocks."""
    # current onnx.checker use OpSet-1 version of DepthToSpace, which doesn't have a mode argument.
    # TO-DO, we can add a mode argument to test CRD mode and DCR mode
    # in the future when we update to a newer onnx version.
    verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode="CRD", blockSize=2)
def verify_space_to_depth(inshape, outshape, blockSize):
    """Compare TVM against onnxruntime for a SpaceToDepth node."""
    node = onnx.helper.make_node("SpaceToDepth", inputs=["x"], outputs=["y"], blocksize=blockSize)
    in_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))
    out_info = helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))
    graph = helper.make_graph([node], "space_to_depth_test", inputs=[in_info], outputs=[out_info])
    model = helper.make_model(graph, producer_name="space_to_depth_test")
    verify_with_ort(model, [inshape], outshape)
@tvm.testing.uses_gpu
def test_space_to_depth():
    """SpaceToDepth rearranges spatial blocks into channel data."""
    verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)
@tvm.testing.uses_gpu
def test_shape():
    """ONNX Shape on a reshaped tensor returns the reshaped dimensions."""
    in_shape = (4, 3, 3, 4)
    ref_shape = (6, 2, 4, 3)
    ref_array = np.array(ref_shape)
    # Reshape to a constant target shape, then read it back via Shape.
    ref_node = onnx.helper.make_node(
        "Constant",
        inputs=[],
        outputs=["ref_in"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=onnx.TensorProto.INT32,
            dims=ref_array.shape,
            vals=ref_array.flatten().astype(int),
        ),
    )
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
    shape_node = helper.make_node("Shape", ["out"], ["final_out"])
    graph = helper.make_graph(
        [ref_node, reshape_node, shape_node],
        "shape_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("final_out", TensorProto.FLOAT, list(ref_shape))],
    )
    model = helper.make_model(graph, producer_name="shape_test")
    for target, ctx in tvm.testing.enabled_targets():
        x = np.random.uniform(size=in_shape).astype("int32")
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, "int32")
        tvm.testing.assert_allclose(ref_shape, tvm_out)
def _test_power_iteration(x_shape, y_shape):
    """Check ONNX Pow against numpy for one pair of input shapes."""
    if isinstance(y_shape, int):
        y_shape = [y_shape]
    base = np.random.uniform(size=x_shape).astype(np.float32)
    exponent = np.random.uniform(size=y_shape).astype(np.float32)
    expected = np.power(base, exponent).astype(np.float32)
    pow_node = helper.make_node("Pow", ["x", "y"], ["out"])
    graph = helper.make_graph(
        [pow_node],
        "power_test",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
            helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape)),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="power_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [base, exponent], target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_power():
    """Pow with scalar, equal-shape, and broadcast exponents."""
    _test_power_iteration((1, 3), (1))
    _test_power_iteration((2, 3), (2, 3))
    _test_power_iteration((2, 3), (1, 3))
def verify_range(start, limit, delta, dtype):
    """Compare ONNX Range against onnxruntime for one (start, limit, delta)."""
    dtype_map = {
        "float32": TensorProto.FLOAT,
        "int32": TensorProto.INT32,
        "int64": TensorProto.INT64,
    }
    dtype_onnx = dtype_map[dtype]
    y = helper.make_node("Range", ["start", "limit", "delta"], ["output"])
    graph = helper.make_graph(
        [y],
        "range_test",
        inputs=[
            helper.make_tensor_value_info("start", dtype_onnx, []),
            helper.make_tensor_value_info("limit", dtype_onnx, []),
            helper.make_tensor_value_info("delta", dtype_onnx, []),
        ],
        outputs=[
            # np.arange supplies the expected output length.
            helper.make_tensor_value_info(
                "output", dtype_onnx, np.arange(start, limit, delta).shape
            )
        ],
    )
    model = helper.make_model(graph, producer_name="range_test")
    inputs = [np.array(x).astype(dtype) for x in [start, limit, delta]]
    verify_with_ort_with_inputs(model, inputs, use_vm=True)
@tvm.testing.uses_gpu
def test_range():
    """Range over several dtypes, including negative bounds and steps."""
    cases = [(0, 10, 1), (2, 8, 2), (-3, 6, 4), (-2, -7, -1)]
    for dtype in ["float32", "int32", "int64"]:
        for start, limit, delta in cases:
            verify_range(start, limit, delta, dtype)
@tvm.testing.uses_gpu
def test_squeeze():
    """Squeeze removes the listed size-1 axes; only shape is checked."""
    in_shape = (1, 3, 1, 3, 1, 1)
    out_shape = (3, 3)
    y = helper.make_node("Squeeze", ["in"], ["out"], axes=[0, 2, 4, 5])
    graph = helper.make_graph(
        [y],
        "squeeze_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="squeeze_test")
    for target, ctx in tvm.testing.enabled_targets():
        x = np.random.uniform(size=in_shape).astype("float32")
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, "float32")
        tvm.testing.assert_allclose(out_shape, tvm_out.shape)
@tvm.testing.uses_gpu
def test_flatten():
    """Flatten collapses dims after *axis* into one; only shape is checked."""
    in_shape = (1, 3, 4, 4)
    axis = 1
    ref_shape = (1, 48)
    flatten_node = helper.make_node("Flatten", ["in"], ["out"], axis=axis)
    graph = helper.make_graph(
        [flatten_node],
        "flatten_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
    )
    model = helper.make_model(graph, producer_name="flatten_test")
    for target, ctx in tvm.testing.enabled_targets():
        x = np.random.uniform(size=in_shape).astype("int32")
        tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, "float32")
        tvm.testing.assert_allclose(ref_shape, tvm_out.shape)
@tvm.testing.uses_gpu
def test_unsqueeze():
    """Unsqueeze inserts size-1 axes at the listed positions; only shape is checked."""
    in_shape = (3, 3)
    axis = (0, 3, 4)
    out_shape = (1, 3, 3, 1, 1)
    y = helper.make_node("Unsqueeze", ["in"], ["out"], axes=list(axis))
    graph = helper.make_graph(
        [y],
        "squeeze_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="squeeze_test")
    for target, ctx in tvm.testing.enabled_targets():
        x = np.random.uniform(size=in_shape).astype("float32")
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, "float32")
        tvm.testing.assert_allclose(out_shape, tvm_out.shape)
def verify_gather(in_shape, indices, axis, dtype):
    """Compare ONNX Gather against np.take for one configuration.

    NOTE(review): the graph always declares FLOAT inputs/outputs even when
    *dtype* is an int type — the data still round-trips, but confirm this
    is intentional.
    """
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = np.take(x, indices, axis=axis)
    y = helper.make_node("Gather", ["in", "indices"], ["out"], axis=axis)
    graph = helper.make_graph(
        [y],
        "gather_test",
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape)),
            helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_np.shape))],
    )
    model = helper.make_model(graph, producer_name="gather_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [x, indices], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out)
@tvm.testing.uses_gpu
def test_gather():
    """Gather over various ranks, axes (incl. negative) and index shapes."""
    cases = [
        ((4,), [1], 0, "int32"),
        ((1, 4), [0], 0, "int32"),
        ((4,), [[[1, 0], [0, 1]]], 0, "float32"),
        ((2, 2), [[[1, 0], [0, 1]]], 1, "int32"),
        ((3, 3, 3), [[[1, 0]]], -1, "int32"),
        ((4, 3, 5, 6), [[2, 1, 0, 0]], 0, "float32"),
    ]
    for in_shape, indices, axis, dtype in cases:
        verify_gather(in_shape, indices, axis, dtype)
def verify_gatherelements(in_shape, indices, axis):
    """Compare ONNX GatherElements against onnxruntime for one configuration."""
    x = np.random.uniform(size=in_shape).astype("float32")
    indices = np.array(indices, dtype="int32")
    y = helper.make_node("GatherElements", ["data", "indices"], ["output"], axis=axis)
    graph = helper.make_graph(
        [y],
        "gather_elements_test",
        inputs=[
            helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
            helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
        ],
        outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
    )
    model = helper.make_model(graph, producer_name="gather_elements_test")
    verify_with_ort_with_inputs(model, [x, indices])
@tvm.testing.uses_gpu
def test_gatherelements():
    """GatherElements over 1-D, 2-D (both axes) and 3-D inputs."""
    verify_gatherelements((4,), [3, 0, 2, 1], 0)
    verify_gatherelements((2, 2), [[1, 0], [0, 1]], 0)
    verify_gatherelements((2, 2), [[0, 0], [1, 0]], 1)
    verify_gatherelements((2, 2), [[1, 0], [0, 1]], 1)
    indices = [
        [[1, 0, 0], [1, 0, 1], [0, 1, 1]],
        [[1, 1, 1], [1, 2, 1], [1, 0, 1]],
        [[1, 2, 1], [1, 2, 1], [1, 2, 1]],
    ]
    verify_gatherelements((3, 3, 3), indices, 2)
def verify_scatter(in_shape, indices, axis):
    """Compare ONNX ScatterElements against onnxruntime for one configuration."""
    x = np.random.uniform(size=in_shape).astype("float32")
    indices = np.array(indices, dtype="int32")
    # Updates must have the same shape as the indices tensor.
    updates = np.random.uniform(size=indices.shape).astype("float32")
    y = helper.make_node("ScatterElements", ["data", "indices", "updates"], ["output"], axis=axis)
    graph = helper.make_graph(
        [y],
        "scatter_test",
        inputs=[
            helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
            helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
            helper.make_tensor_value_info("updates", TensorProto.FLOAT, list(indices.shape)),
        ],
        outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
    )
    model = helper.make_model(graph, producer_name="scatter_test")
    verify_with_ort_with_inputs(model, [x, indices, updates])
@tvm.testing.uses_gpu
def test_scatter():
    """ScatterElements over various ranks and axes, incl. negative indices/axis."""
    cases = [
        ((4,), [1], 0),
        ((1, 4), [[0]], 0),
        ((4,), [2, 3], 0),
        ((2, 2), [[1, 0], [0, 1]], 1),
        ((3, 3, 3), [[[-1, -3]]], -1),
        ((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0),
    ]
    for shape, idx, axis in cases:
        verify_scatter(shape, idx, axis)
def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):
    """Run an opset-1 (attribute-based) Slice and compare against *outdata*."""
    kwargs = {"starts": starts, "ends": ends}
    if axes:
        # In opset 1 the axes are an attribute, not an input tensor.
        kwargs = {"axes": axes, **kwargs}
    node = helper.make_node("Slice", ["in"], ["out"], **kwargs)
    graph = helper.make_graph(
        [node],
        "slice_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name="slice_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, "float32", opset=1)
        tvm.testing.assert_allclose(outdata, tvm_out)
def _test_slice_iteration_v10(indata, outdata, **attrs):
    """Run an opset-10 (input-tensor-based) Slice and compare against *outdata*.

    ``attrs`` supplies ``starts``/``ends`` (required), ``axes``/``steps``
    (optional), and optionally ``add_noop_to_input_attrs``: a list of attr
    names to route through a reshape/reshape no-op pair, exercising the
    importer on non-constant slice parameters.
    """
    starts = attrs["starts"]
    ends = attrs["ends"]
    axes = None if "axes" not in attrs else attrs["axes"]
    steps = None if "steps" not in attrs else attrs["steps"]
    starts = np.asarray(starts)
    ends = np.asarray(ends)
    inputs = [
        helper.make_tensor_value_info("data", TensorProto.FLOAT, list(indata.shape)),
        helper.make_tensor_value_info("starts", TensorProto.INT64, list(starts.shape)),
        helper.make_tensor_value_info("ends", TensorProto.INT64, list(ends.shape)),
    ]
    initializer = [
        helper.make_tensor("starts", TensorProto.INT64, list(starts.shape), starts),
        helper.make_tensor("ends", TensorProto.INT64, list(ends.shape), ends),
    ]
    nodes = []
    if "add_noop_to_input_attrs" in attrs:
        def add_noop_to_input_attr(attr_name, attr):
            # Build Reshape(attr, [1, *shape]) -> Reshape(back to shape),
            # a value-preserving detour that defeats constant folding.
            output_name = attr_name + "_output"
            ref_shape = list(np.array(attr).shape)
            ref_shape.insert(0, 1)
            ref_shape = tuple(ref_shape)
            ref_array = np.array(ref_shape)
            ref_node = onnx.helper.make_node(
                "Constant",
                inputs=[],
                outputs=["ref_in_" + attr_name],
                value=onnx.helper.make_tensor(
                    name="const_tensor__1_" + attr_name,
                    data_type=onnx.TensorProto.INT64,
                    dims=ref_array.shape,
                    vals=ref_array.flatten().astype(int),
                ),
            )
            in_shape = np.array(attr).shape
            in_array = np.array(in_shape)
            ref_node2 = onnx.helper.make_node(
                "Constant",
                inputs=[],
                outputs=["input_shape_" + attr_name],
                value=onnx.helper.make_tensor(
                    name="const_tensor__2_" + attr_name,
                    data_type=onnx.TensorProto.INT64,
                    dims=in_array.shape,
                    vals=in_array.flatten().astype(int),
                ),
            )
            reshape1_node = helper.make_node(
                "Reshape", [attr_name, "ref_in_" + attr_name], ["reshape_" + attr_name]
            )
            reshape2_node = helper.make_node(
                "Reshape", ["reshape_" + attr_name, "input_shape_" + attr_name], [output_name]
            )
            return [ref_node, ref_node2, reshape1_node, reshape2_node]
    slice_inputs = []
    # Slice inputs are positional in opset 10: data, starts, ends, axes, steps.
    for attr_name in ["starts", "ends", "axes", "steps"]:
        if attr_name not in attrs:
            continue
        if "add_noop_to_input_attrs" in attrs and attr_name in attrs["add_noop_to_input_attrs"]:
            nodes.extend(add_noop_to_input_attr(attr_name, attrs[attr_name]))
            slice_inputs.append(attr_name + "_output")
        else:
            slice_inputs.append(attr_name)
    if axes:
        axes = np.asarray(axes)
        inputs.append(helper.make_tensor_value_info("axes", TensorProto.INT32, list(axes.shape)))
        initializer.append(helper.make_tensor("axes", TensorProto.INT32, list(axes.shape), axes))
    if steps:
        assert axes is not None and len(axes) == len(steps)
        steps = np.asarray(steps)
        inputs.append(helper.make_tensor_value_info("steps", TensorProto.INT32, list(axes.shape)))
        initializer.append(helper.make_tensor("steps", TensorProto.INT32, list(steps.shape), steps))
    y = helper.make_node("Slice", ["data", *slice_inputs], ["out"])
    nodes.append(y)
    graph = helper.make_graph(
        nodes,
        "slice_test",
        inputs=inputs,
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
        initializer=initializer,
    )
    model = helper.make_model(graph, producer_name="slice_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output_with_vm(model, indata, target, ctx, opset=10, freeze_params=True)
        tvm.testing.assert_allclose(outdata, tvm_out)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_slice():
    """Slice via opset-1 attributes and opset-10 inputs, incl. no-op detours,
    INT64-max ends, and strided slicing."""
    x = np.random.randn(20, 10, 5).astype(np.float32)
    _test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
    _test_slice_iteration_v1(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
    _test_slice_iteration_v1(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
    _test_slice_iteration_v1(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
    _test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
    _test_slice_iteration_v10(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
    _test_slice_iteration_v10(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
    _test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
    # Route each combination of slice parameters through the no-op detour.
    _test_slice_iteration_v10(
        x,
        x[0:3, 0:10],
        starts=(0, 0),
        ends=(3, 10),
        axes=(0, 1),
        add_noop_to_input_attrs=["starts"],
    )
    _test_slice_iteration_v10(
        x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4), add_noop_to_input_attrs=["ends"]
    )
    _test_slice_iteration_v10(
        x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,), add_noop_to_input_attrs=["axes"]
    )
    _test_slice_iteration_v10(
        x,
        x[:, 0:-1],
        starts=(0,),
        ends=(-1,),
        axes=(1,),
        add_noop_to_input_attrs=["starts", "ends"],
    )
    _test_slice_iteration_v10(
        x,
        x[0:3, 0:10],
        starts=(0, 0),
        ends=(3, 10),
        axes=(0, 1),
        add_noop_to_input_attrs=["ends", "axes"],
    )
    _test_slice_iteration_v10(
        x,
        x[:, :, 3:4],
        starts=(0, 0, 3),
        ends=(20, 10, 4),
        add_noop_to_input_attrs=["starts", "axes"],
    )
    _test_slice_iteration_v10(
        x,
        x[:, 1:1000],
        starts=(1,),
        ends=(1000,),
        axes=(1,),
        add_noop_to_input_attrs=["starts", "ends", "axes"],
    )
    # INT64_MAX ends: equivalent to slicing to the end of each axis.
    x = np.random.randn(1, 1, 1, 128).astype(np.float32)
    _test_slice_iteration_v10(
        x, x, starts=(0, 0), ends=(9223372036854775807, 9223372036854775807), axes=(0, 3)
    )
    x = np.random.randn(4, 4).astype(np.float32)
    _test_slice_iteration_v10(
        x, x[:, 1::2], starts=(1,), ends=(9223372036854775807,), axes=(1,), steps=(2,)
    )
    _test_slice_iteration_v10(
        x,
        x[0::1, 1::2],
        starts=(0, 1),
        ends=(4, 4),
        axes=(0, 1),
        steps=(1, 2),
    )
def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs):
    """Compare a unary ONNX op *opname* against the numpy reference *outfunc*."""
    indata = np.random.uniform(-1, 1, size=inshape).astype(dtype)
    expected = outfunc(indata, **npargs)
    node = helper.make_node(opname, ["in"], ["out"], **kwargs)
    graph = helper.make_graph(
        [node],
        opname + "_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name=opname + "_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, indata, target, ctx, expected.shape, dtype)
        tvm.testing.assert_allclose(expected, tvm_out)
@tvm.testing.uses_gpu
def test_floor():
    """ONNX Floor matches numpy.floor."""
    _test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, "float32", "Floor", {})
@tvm.testing.uses_gpu
def test_ceil():
    """ONNX Ceil matches numpy.ceil."""
    _test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, "float32", "Ceil", {})
@tvm.testing.uses_gpu
def test_clip():
    """ONNX Clip (attribute min/max) matches numpy.clip."""
    _test_onnx_op_elementwise(
        (2, 4, 5, 6),
        np.clip,
        {"a_min": -1.0, "a_max": 1.0},
        "float32",
        "Clip",
        {"min": -1.0, "max": 1.0},
    )
@tvm.testing.uses_gpu
def test_clip_min_max_as_inputs():
    """Clip with min/max supplied as Constant inputs (opset >= 11 form)."""
    input_shape = (2, 4, 5, 6)
    nodes = [
        make_constant_node("min", onnx.TensorProto.FLOAT, (), [0.0]),
        make_constant_node("max", onnx.TensorProto.FLOAT, (), [6.0]),
    ]
    input_names = ["in", "min", "max"]
    nodes.append(helper.make_node("Clip", inputs=input_names, outputs=["out"]))
    graph = helper.make_graph(
        nodes,
        "clip_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(input_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_shape))],
    )
    model = helper.make_model(graph, producer_name="clip_test")
    verify_with_ort(model, [input_shape], input_shape)
@tvm.testing.uses_gpu
def test_round():
    """ONNX Round matches numpy.round."""
    _test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, "float32", "Round", {})
def _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs):
    """Compare a finiteness-test ONNX op against its numpy reference.

    Input is drawn from a pool including nan/inf so both branches of the
    predicate are hit; the graph output is declared BOOL.
    """
    indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype)
    outdata = outfunc(indata, **npargs)
    y = helper.make_node(opname, ["in"], ["out"], **kwargs)
    graph = helper.make_graph(
        [y],
        opname + "_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name=opname + "_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, dtype)
        tvm.testing.assert_allclose(outdata, tvm_out)
@tvm.testing.uses_gpu
def test_isinf():
    """ONNX IsInf matches numpy.isinf."""
    _test_finite_ops((2, 4, 5, 6), np.isinf, {}, "float32", "IsInf", {})
@tvm.testing.uses_gpu
def test_isnan():
    """ONNX IsNaN matches numpy.isnan."""
    _test_finite_ops((2, 4, 5, 6), np.isnan, {}, "float32", "IsNaN", {})
def verify_gather_nd(in_shape, indices, dtype):
    """Compare ONNX GatherND against the topi python reference."""
    x = np.random.uniform(size=in_shape).astype(dtype)
    indices = np.array(indices, dtype="int32")
    out_np = tvm.topi.testing.gather_nd_python(x, indices)
    y = helper.make_node("GatherND", ["in", "indices"], ["out"])
    graph = helper.make_graph(
        [y],
        "gather_test",
        inputs=[
            helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape)),
            helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_np.shape))],
    )
    model = helper.make_model(graph, producer_name="gather_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [x, indices], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out)
@tvm.testing.uses_gpu
def test_gather_nd():
    """GatherND over 2-D, 3-D and 4-D inputs."""
    verify_gather_nd((2, 2), [[0, 0], [1, 1]], "int32")
    verify_gather_nd((3, 3, 3), [[0, 1], [1, 0]], "float32")
    verify_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_onehot():
    """ONNX OneHot matches the numpy identity-matrix construction."""
    indices_shape = [10]
    indices_array = np.random.randint(low=0, high=9, size=indices_shape, dtype="int32")
    depth = 10
    values = np.asarray([0, 1]).astype("int32")
    # np.eye(depth)[i] is the one-hot row for index i.
    out_np = np.eye(depth)[indices_array.reshape(-1)]
    onehot_node = helper.make_node("OneHot", ["indices", "depth", "values"], ["out"])
    graph = helper.make_graph(
        [onehot_node],
        "onehot_test",
        inputs=[
            helper.make_tensor_value_info("indices", TensorProto.INT32, indices_shape),
            helper.make_tensor_value_info("depth", TensorProto.INT32, [1]),
            helper.make_tensor_value_info("values", TensorProto.INT32, values.shape),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)],
    )
    model = helper.make_model(graph, producer_name="onehot_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output_with_vm(
            model, [indices_array, np.array([depth]).astype("int32"), values], target, ctx
        )
        tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_matmul():
    """ONNX MatMul on 2-D operands matches numpy.matmul."""
    a_shape = (4, 3)
    b_shape = (3, 4)
    a_array = np.random.uniform(size=a_shape).astype("float32")
    b_array = np.random.uniform(size=b_shape).astype("float32")
    out_np = np.matmul(a_array, b_array)
    mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
    graph = helper.make_graph(
        [mul_node],
        "matmul_test",
        inputs=[
            helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
            helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_np.shape))],
    )
    model = helper.make_model(graph, producer_name="matmul_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [a_array, b_array], target, ctx, out_np.shape)
        tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
def verify_batch_matmul(a_shape, b_shape, out_shape, target, ctx):
    """Compare batched MatMul between TVM (VM executor) and onnxruntime."""
    a_array = np.random.uniform(size=a_shape).astype("float32")
    b_array = np.random.uniform(size=b_shape).astype("float32")
    mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
    graph = helper.make_graph(
        [mul_node],
        "matmul_test",
        inputs=[
            helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
            helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, out_shape)],
    )
    model = helper.make_model(graph, producer_name="matmul_test")
    onnx_out = get_onnxruntime_output(model, [a_array, b_array], "float32")[0]
    tvm_out = get_tvm_output_with_vm(model, [a_array, b_array], target, ctx)
    tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5)
# TODO(mbrookhart): enable cuda once VM supports heterogenous execution
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul(target, ctx):
    """Batched MatMul, including rank mixing and implicit broadcasting."""
    verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4), (2, 3, 4, 4), target, ctx)
    verify_batch_matmul((2, 4, 3), (3, 4), (2, 4, 4), target, ctx)
    verify_batch_matmul((2, 3, 4, 3), (3, 4), (2, 3, 4, 4), target, ctx)
    # Test implicit broadcasting.
    verify_batch_matmul((4, 3), (2, 3, 4), (2, 4, 4), target, ctx)
    verify_batch_matmul((2, 4, 3), (1, 3, 4), (2, 4, 4), target, ctx)
    verify_batch_matmul((1, 4, 3), (2, 3, 4), (2, 4, 4), target, ctx)
def verify_simple_dynamic_model(a_shape, b_shape, target, ctx):
    """Compile MatMul+Relu with fully dynamic shapes, then run several sizes."""

    def _check(executor, lhs_shape, rhs_shape):
        lhs = np.random.uniform(size=lhs_shape).astype("float32")
        rhs = np.random.uniform(size=rhs_shape).astype("float32")
        # matmul followed by relu
        expected = np.matmul(lhs, rhs)
        expected[expected < 0] = 0
        actual = executor.evaluate()(lhs, rhs).asnumpy()
        tvm.testing.assert_allclose(expected, actual, rtol=1e-5, atol=1e-5)

    mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
    relu_node = helper.make_node("Relu", ["out"], ["relu"])

    # A throwaway evaluation only to compute the output shape for value_info.
    probe_a = np.random.uniform(size=a_shape).astype("float32")
    probe_b = np.random.uniform(size=b_shape).astype("float32")
    probe_out = np.matmul(probe_a, probe_b)

    graph = helper.make_graph(
        [mul_node, relu_node],
        "matmul_test",
        inputs=[
            helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
            helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
        ],
        outputs=[helper.make_tensor_value_info("relu", TensorProto.FLOAT, list(probe_out.shape))],
    )
    model = helper.make_model(graph, producer_name="matmul_test")

    # Import with every dimension relay.Any() so the module is shape-polymorphic.
    mod, params = relay.frontend.from_onnx(
        model, {"a": [relay.Any()] * len(a_shape), "b": [relay.Any()] * len(b_shape)}
    )
    executor = relay.create_executor("vm", mod=mod, ctx=ctx, target=target)

    # The same compiled module must handle several concrete sizes.
    _check(executor, a_shape, b_shape)
    _check(executor, [dim * 2 for dim in a_shape], [dim * 2 for dim in b_shape])
    _check(executor, [dim * 3 for dim in a_shape], [dim * 3 for dim in b_shape])
# TODO(mbrookhart): enable cuda once VM supports heterogenous execution
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul_dynamic_model(target, ctx):
    """Dynamic-shape batched MatMul, including a 2-D second operand."""
    for a_shape, b_shape in [
        ((2, 3, 4, 3), (2, 3, 3, 4)),
        ((2, 4, 3), (3, 4)),
        ((2, 3, 4, 3), (3, 4)),
    ]:
        verify_simple_dynamic_model(a_shape, b_shape, target, ctx)
def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
    """Check ONNX LRN against a pure-python reference.

    If alpha/beta/bias are all omitted, the node is built without those
    attributes so the importer's handling of the ONNX defaults is exercised.
    """
    in_array = np.random.uniform(size=shape).astype(dtype)

    if alpha is None and beta is None and bias is None:
        # ONNX default attribute values for LRN.
        alpha = 0.0001
        beta = 0.75
        bias = 1.0
        node = onnx.helper.make_node("LRN", inputs=["in"], outputs=["out"], size=nsize)
    else:
        node = onnx.helper.make_node(
            "LRN", inputs=["in"], outputs=["out"], alpha=alpha, beta=beta, bias=bias, size=nsize
        )

    graph = helper.make_graph(
        [node],
        "lrn_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))],
    )
    model = helper.make_model(graph, producer_name="lrn_test")

    def _get_python_lrn():
        # Reference LRN: per element, sum of squares over a window of `nsize`
        # channels centered on the element's channel.
        square_sum = np.zeros(shape).astype(dtype)
        # BUG FIX: the upper channel bound was hard-coded to 5, which was only
        # correct for inputs with exactly 5 channels; use the actual count.
        num_channels = in_array.shape[1]
        for n, c, h, w in np.ndindex(in_array.shape):
            square_sum[n, c, h, w] = sum(
                in_array[
                    n,
                    max(0, c - int(math.floor((nsize - 1) / 2))) : min(
                        num_channels, c + int(math.ceil((nsize - 1) / 2)) + 1
                    ),
                    h,
                    w,
                ]
                ** 2
            )
        py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta)
        return py_out

    for target, ctx in tvm.testing.enabled_targets():
        input_name = model.graph.input[0].name
        py_out = _get_python_lrn()
        tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, "float32")
        tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_lrn():
    """LRN with default attributes and with explicit alpha/beta/bias."""
    verify_lrn((5, 5, 5, 5), 3, "float32")
    verify_lrn((5, 5, 5, 5), 3, "float32", alpha=0.0002, beta=0.5, bias=2.0)
def verify_instance_norm(shape, axis=1):
    """Check ONNX InstanceNormalization against a numpy reference.

    NOTE(review): the ``axis`` parameter is never used — normalization is
    always over the spatial axes (everything after dim 1).
    """

    def _reference(x, gamma, beta, epsilon=1e-5):
        spatial_axes = tuple(range(2, x.ndim))
        mean = np.mean(x, axis=spatial_axes, keepdims=True)
        var = np.var(x, axis=spatial_axes, keepdims=True)
        # Reshape scale/shift for broadcasting over the spatial dims.
        bcast = (-1,) + (1,) * (x.ndim - 2)
        return gamma.reshape(bcast) * (x - mean) / np.sqrt(var + epsilon) + beta.reshape(bcast)

    x = np.random.randn(*shape).astype(np.float32)
    gamma = np.random.randn(shape[1]).astype(np.float32)
    beta = np.random.randn(shape[1]).astype(np.float32)
    epsilon = 1e-5
    y = _reference(x, gamma, beta, epsilon).astype(np.float32)

    node = onnx.helper.make_node(
        "InstanceNormalization",
        inputs=["x", "gamma", "beta"],
        outputs=["y"],
        epsilon=epsilon,
    )
    graph = helper.make_graph(
        [node],
        "instance_norm_test",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)),
            helper.make_tensor_value_info("gamma", TensorProto.FLOAT, (shape[1],)),
            helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],)),
        ],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))],
    )
    model = helper.make_model(graph, producer_name="instance_norm_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, [x, gamma, beta], target, ctx, shape, "float32")
        tvm.testing.assert_allclose(y, result, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_instance_norm():
    """InstanceNormalization on 3-D, 4-D, and 5-D inputs."""
    for shape in [(2, 3, 4, 5), (32, 64, 80, 64), (8, 6, 5), (8, 7, 6, 5, 4)]:
        verify_instance_norm(shape)
def _test_upsample_nearest():
    """2-D nearest-neighbor Upsample with scales given as a node attribute."""
    factor = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3 * factor, 3 * factor)
    node = helper.make_node(
        "Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0]
    )
    data = np.random.uniform(size=in_shape).astype(np.float32)
    expected = tvm.topi.testing.upsampling_python(data, (factor, factor), "NCHW")
    graph = helper.make_graph(
        [node],
        "upsample_nearest_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_nearest_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, data, target, ctx, out_shape, "float32")
        tvm.testing.assert_allclose(expected, result)
def _test_upsample3d_nearest():
    """3-D nearest-neighbor Upsample with scales given as a node attribute."""
    factor = 2
    in_shape = (1, 1, 3, 3, 3)
    out_shape = (1, 1, 3 * factor, 3 * factor, 3 * factor)
    node = helper.make_node(
        "Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0, 2.0]
    )
    data = np.random.uniform(size=in_shape).astype(np.float32)
    expected = tvm.topi.testing.upsampling3d_python(data, (factor, factor, factor), "NCDHW")
    graph = helper.make_graph(
        [node],
        "upsample_nearest_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_nearest_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, data, target, ctx, out_shape, "float32")
        tvm.testing.assert_allclose(expected, result)
def _test_upsample_bilinear():
    """2-D bilinear Upsample with scales given as a node attribute."""
    factor = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3 * factor, 3 * factor)
    node = helper.make_node(
        "Upsample", ["in"], ["out"], mode="linear", scales=[1.0, 1.0, 2.0, 2.0]
    )
    data = np.random.uniform(size=in_shape).astype(np.float32)
    expected = tvm.topi.testing.bilinear_resize_python(data, (3 * factor, 3 * factor), "NCHW")
    graph = helper.make_graph(
        [node],
        "upsample_bilinear_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_bilinear_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, data, target, ctx, out_shape, "float32")
        tvm.testing.assert_allclose(expected, result, rtol=1e-5, atol=1e-5)
def _test_upsample_bilinear_opset9():
    """Opset-9 Upsample where `scales` arrives via a graph input.

    The scales input is produced by Shape(Constant) so the importer must be
    able to evaluate it at import time (freeze_params=True).
    """
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3 * scale, 3 * scale)
    y = helper.make_node("Upsample", ["in", "scales"], ["out"], mode="linear")
    scales = [1, 1, 2, 2]
    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    out_array = tvm.topi.testing.bilinear_resize_python(in_array, (3 * scale, 3 * scale), "NCHW")

    # A constant tensor whose *dims* equal the desired scales; Shape() of it
    # then yields the scales vector.
    ref_node = helper.make_node(
        "Constant",
        inputs=[],
        outputs=["const"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=TensorProto.FLOAT,
            dims=scales,
            vals=np.random.random(scales).flatten().astype(float),
        ),
    )
    shape_node = helper.make_node("Shape", ["const"], ["scales"])
    graph = helper.make_graph(
        [ref_node, shape_node, y],
        "upsample_bilinear_opset9_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_bilinear_opset9_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output_with_vm(
            model, [in_array], target, ctx, opset=9, freeze_params=True
        )
        # BUG FIX: the result was previously computed but never checked.
        tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)
def _test_upsample3d_trilinear():
    """3-D trilinear Upsample with `scales` supplied by a Constant node."""
    factor = 2
    in_shape = (1, 1, 3, 3, 3)
    out_shape = (1, 1, 3 * factor, 3 * factor, 3 * factor)
    upsample_node = helper.make_node("Upsample", ["in", "scales"], ["out"], mode="linear")
    scales = [1.0, 1.0, 2.0, 2.0, 2.0]
    data = np.random.uniform(size=in_shape).astype(np.float32)
    expected = tvm.topi.testing.trilinear_resize3d_python(
        data,
        (3 * factor, 3 * factor, 3 * factor),
        "NCDHW",
        coordinate_transformation_mode="half_pixel",
    )
    scales_arr = np.array(scales)
    const_node = helper.make_node(
        "Constant",
        inputs=[],
        outputs=["scales"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=TensorProto.FLOAT,
            dims=scales_arr.shape,
            vals=scales_arr.flatten().astype(float),
        ),
    )
    graph = helper.make_graph(
        [const_node, upsample_node],
        "upsample_trilinear_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_trilinear_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, data, target, ctx, out_shape, "float32")
        tvm.testing.assert_allclose(expected, result, rtol=1e-5, atol=1e-5)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_upsample():
    """All Upsample variants: nearest/bilinear, 2-D and 3-D, attr and input scales."""
    for subtest in (
        _test_upsample_nearest,
        _test_upsample_bilinear,
        _test_upsample_bilinear_opset9,
        _test_upsample3d_nearest,
        _test_upsample3d_trilinear,
    ):
        subtest()
def _test_softmax(inshape, axis):
    """Softmax against topi's python reference; axis=None omits the attribute."""
    opname = "Softmax"
    indata = np.random.uniform(size=inshape).astype(np.float32)
    outshape = inshape
    outdata = tvm.topi.testing.softmax_python(indata)
    if isinstance(axis, int):
        node = helper.make_node(opname, ["in"], ["out"], axis=axis)
    elif axis is None:
        # Omit the attribute entirely to exercise the importer's default.
        node = helper.make_node(opname, ["in"], ["out"])
    graph = helper.make_graph(
        [node],
        opname + "_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name=opname + "_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, indata, target, ctx, outshape, "float32")
        tvm.testing.assert_allclose(outdata, result, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_softmax():
    """Softmax with and without an explicit axis attribute."""
    for axis in (None, 1):
        _test_softmax((1, 10), axis)
def verify_min(input_dim):
    """Variadic Min over three inputs, checked against np.min."""
    dtype = "float32"
    operands = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.min(operands, axis=0)
    node = helper.make_node("Min", ["a_np1", "a_np2", "a_np3"], ["out"])
    graph = helper.make_graph(
        [node],
        "Min_test",
        inputs=[
            helper.make_tensor_value_info(name, TensorProto.FLOAT, list(input_dim))
            for name in ("a_np1", "a_np2", "a_np3")
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="Min_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, operands, target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, result, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_min():
    """Variadic Min over 4-D and 2-D inputs."""
    for dims in [(1, 3, 20, 20), (20, 20)]:
        verify_min(dims)
def verify_max(input_dim):
    """Variadic Max over three inputs, checked against np.max."""
    dtype = "float32"
    operands = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.max(operands, axis=0)
    node = helper.make_node("Max", ["a_np1", "a_np2", "a_np3"], ["out"])
    graph = helper.make_graph(
        [node],
        "Max_test",
        inputs=[
            helper.make_tensor_value_info(name, TensorProto.FLOAT, list(input_dim))
            for name in ("a_np1", "a_np2", "a_np3")
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="Max_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, operands, target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, result, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_max():
    """Variadic Max over 4-D and 2-D inputs."""
    for dims in [(1, 3, 20, 20), (20, 20)]:
        verify_max(dims)
def verify_mean(input_dim):
    """Variadic Mean over three inputs, checked against np.mean."""
    dtype = "float32"
    operands = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.mean(operands, axis=0)
    node = helper.make_node("Mean", ["a_np1", "a_np2", "a_np3"], ["out"])
    graph = helper.make_graph(
        [node],
        "Mean_test",
        inputs=[
            helper.make_tensor_value_info(name, TensorProto.FLOAT, list(input_dim))
            for name in ("a_np1", "a_np2", "a_np3")
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="Mean_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, operands, target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, result, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_mean():
    """Variadic Mean over 4-D and 2-D inputs."""
    for dims in [(1, 3, 20, 20), (20, 20)]:
        verify_mean(dims)
def verify_hardsigmoid(input_dim, alpha, beta):
    """HardSigmoid: clip(alpha * x + beta, 0, 1) against a numpy reference."""
    data = np.random.uniform(size=input_dim).astype("float32")
    expected = np.clip(data * alpha + beta, 0, 1)
    node = helper.make_node("HardSigmoid", ["a_np1"], ["out"], alpha=alpha, beta=beta)
    graph = helper.make_graph(
        [node],
        "HardSigmoid_test",
        inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="HardSigmoid_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, [data], target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, result, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_hardsigmoid():
    """HardSigmoid for two alpha/beta settings."""
    verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
    verify_hardsigmoid((20, 20), 0.3, 0.4)
def verify_argmin(input_dim, axis=None, keepdims=None):
    """ArgMin with every combination of present/absent axis and keepdims."""

    def _argmin_numpy(data, axis=0, keepdims=True):
        result = np.argmin(data, axis=axis)
        if keepdims == 1:
            result = np.expand_dims(result, axis)
        return result.astype(data.dtype)

    a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
    # Only pass attributes that were explicitly requested, so the ONNX
    # defaults are exercised when an attribute is omitted.
    ref_kwargs = {}
    node_attrs = {}
    if axis is not None:
        ref_kwargs["axis"] = axis
        node_attrs["axis"] = axis
    if keepdims is not None:
        ref_kwargs["keepdims"] = keepdims
        node_attrs["keepdims"] = keepdims
    b_np = _argmin_numpy(a_np1, **ref_kwargs)
    node = onnx.helper.make_node("ArgMin", inputs=["a_np1"], outputs=["out"], **node_attrs)
    graph = helper.make_graph(
        [node],
        "argmin_test",
        inputs=[helper.make_tensor_value_info("a_np1", TensorProto.INT32, list(a_np1.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, list(b_np.shape))],
    )
    model = helper.make_model(graph, producer_name="argmin_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
        tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
def verify_argmax(input_dim, axis=None, keepdims=None):
    """ArgMax with every combination of present/absent axis and keepdims."""

    def _argmax_numpy(data, axis=0, keepdims=True):
        result = np.argmax(data, axis=axis)
        if keepdims == 1:
            result = np.expand_dims(result, axis)
        return result.astype(data.dtype)

    a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)
    # Only pass attributes that were explicitly requested, so the ONNX
    # defaults are exercised when an attribute is omitted.
    ref_kwargs = {}
    node_attrs = {}
    if axis is not None:
        ref_kwargs["axis"] = axis
        node_attrs["axis"] = axis
    if keepdims is not None:
        ref_kwargs["keepdims"] = keepdims
        node_attrs["keepdims"] = keepdims
    b_np = _argmax_numpy(a_np1, **ref_kwargs)
    node = onnx.helper.make_node("ArgMax", inputs=["a_np1"], outputs=["out"], **node_attrs)
    graph = helper.make_graph(
        [node],
        "argmax_test",
        inputs=[helper.make_tensor_value_info("a_np1", TensorProto.INT32, list(a_np1.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, list(b_np.shape))],
    )
    model = helper.make_model(graph, producer_name="argmax_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype)
        tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_arg_min_max():
    """Verify argmin and argmax"""
    dims = [3, 4, 4]
    verify_argmin(dims)
    verify_argmax(dims)
    verify_argmin(dims, axis=1)
    verify_argmax(dims, axis=0)
    verify_argmin(dims, keepdims=0)
    verify_argmax(dims, keepdims=1)
    # Exhaustive sweep over both attributes, including the omitted cases.
    for axis in [None, 0, 1, 2]:
        for keepdims in [None, True, False]:
            verify_argmin(dims, axis, keepdims)
            verify_argmax(dims, axis, keepdims)
def verify_constantofshape(input_dim, value, dtype):
    """ConstantOfShape: an output of shape `input_dim` filled with `value`."""
    expected = np.full(input_dim, value, dtype=dtype)
    fill_node = helper.make_node(
        "ConstantOfShape",
        ["input"],
        ["output"],
        value=helper.make_tensor(
            "value", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], (1,), (value,)
        ),
    )
    graph_inputs = [helper.make_tensor_value_info("input", TensorProto.FLOAT, input_dim)]
    graph = helper.make_graph(
        [fill_node],
        "fill_test",
        graph_inputs,
        outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="fill_test")
    for target, ctx in tvm.testing.enabled_targets():
        # The runtime input is the desired output shape itself.
        shape_input = np.array(input_dim).astype("float32")
        result = get_tvm_output_with_vm(model, [shape_input], target, ctx)
        tvm.testing.assert_allclose(expected, result, rtol=1e-5, atol=1e-5)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_constantofshape():
    """ConstantOfShape with float and int fill values."""
    for shape, fill_value, dtype in [
        ((2, 3, 4, 5), 10, "float32"),
        ((3, 3), 0, "int32"),
        ((1, 2, 3), -1, "float32"),
    ]:
        verify_constantofshape(shape, fill_value, dtype)
def verify_pad(indata, pads, mode="constant", value=0.0):
    """Opset-2 style Pad where pads and value are node attributes."""
    indata = np.array(indata).astype(np.float32)
    # ONNX lays pads out as [begin_0..begin_n, end_0..end_n]; numpy wants
    # one (begin, end) pair per dimension.
    half = len(pads) // 2
    np_pads = [(pads[i], pads[i + half]) for i in range(half)]
    if mode in ["edge", "reflect"]:
        outdata = np.pad(indata, pad_width=np_pads, mode=mode)
        node = helper.make_node(
            "Pad",
            inputs=["input"],
            outputs=["output"],
            mode=mode,
            pads=pads,
        )
    else:
        outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
        node = helper.make_node(
            "Pad", inputs=["input"], outputs=["output"], mode="constant", pads=pads, value=value
        )
    graph = helper.make_graph(
        [node],
        "pad_test",
        inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name="pad_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, indata, target, ctx, outdata.shape, "float32", opset=2)
        tvm.testing.assert_allclose(outdata, result, rtol=1e-5, atol=1e-5)
def verify_pad_v11(indata, pads, mode="constant", value=0.0):
    """Opset-11 style Pad where pads (and the constant value) are graph inputs.

    The pads/constant_value inputs are also supplied as initializers so the
    model carries the values; freeze_params is left False below, so they stay
    runtime inputs to the VM.
    """
    indata = np.array(indata).astype(np.float32)
    # numpy expect result
    len_dim = len(pads) // 2
    # ONNX lays pads out as [begin_0..begin_n, end_0..end_n]; numpy wants
    # one (begin, end) pair per dimension.
    np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
    pads = np.array(pads)
    # onnx graph
    if mode in ["edge", "reflect"]:
        # Two-input form: data + pads.
        inputs = [indata, pads]
        outdata = np.pad(indata, pad_width=np_pads, mode=mode)
        node = helper.make_node("Pad", inputs=["input", "pads"], outputs=["output"], mode=mode)
        graph = helper.make_graph(
            [node],
            "pad_test",
            inputs=[
                helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
                helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
            ],
            initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads)],
            outputs=[
                helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
            ],
        )
    else:
        # Three-input form: data + pads + scalar constant_value.
        inputs = [indata, pads, np.array([value]).astype("float32")]
        outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
        node = helper.make_node(
            "Pad", inputs=["input", "pads", "constant_value"], outputs=["output"], mode="constant"
        )
        graph = helper.make_graph(
            [node],
            "pad_test",
            inputs=[
                helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
                helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
                helper.make_tensor_value_info("constant_value", TensorProto.FLOAT, (1,)),
            ],
            initializer=[
                helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads),
                helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value]),
            ],
            outputs=[
                helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
            ],
        )
    model = helper.make_model(graph, producer_name="pad_test")
    # tvm result
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output_with_vm(model, inputs, target, ctx, opset=11, freeze_params=False)
        tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_pad():
    """Pad in attribute form (opset 2) and graph-input form (opset 11)."""
    for pad_checker in (verify_pad, verify_pad_v11):
        pad_checker(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], "constant", 0.0)
        pad_checker(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], "constant", 0.0)
        pad_checker(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], "constant", 5.0)
        pad_checker(
            np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "edge"
        )
        pad_checker(
            np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "reflect"
        )
def verify_reduce_func(func, data, axis, keepdims):
    """Build a single reduction node and compare against onnxruntime."""
    inshape = data.shape
    # np.sum is used only to derive the expected *output shape*.
    outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape
    if axis:
        node = onnx.helper.make_node(
            func, inputs=["x"], outputs=["y"], axes=axis, keepdims=keepdims
        )
    else:
        # Omit `axes` so the op reduces over all dimensions.
        node = onnx.helper.make_node(func, inputs=["x"], outputs=["y"], keepdims=keepdims)
    graph = helper.make_graph(
        [node],
        "reduce_test",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
    )
    model = helper.make_model(graph, producer_name="reduce_test")
    verify_with_ort_with_inputs(model, [data], outshape)
@tvm.testing.uses_gpu
def test_all_reduce_funcs():
    """Every ONNX reduction op, with and without explicit axes/keepdims."""
    funcs = [
        "ReduceMax",
        "ReduceMean",
        "ReduceMin",
        "ReduceProd",
        "ReduceSum",
        "ReduceSumSquare",
        "ReduceLogSum",
        "ReduceLogSumExp",
        "ReduceL1",
        "ReduceL2",
    ]
    cases = [
        ((3, 2, 2), None),
        ((3, 2, 3), None),
        ((3, 3, 3), (1,)),
        ((3, 3, 3, 1), (1, 2)),
        ((3, 3, 3, 1), (1,)),
        ((1, 3, 4, 1), (1,)),
    ]
    for func in funcs:
        for keepdims in [True, False]:
            for shape, axis in cases:
                verify_reduce_func(
                    func,
                    np.random.randn(*shape).astype(np.float32),
                    axis=axis,
                    keepdims=keepdims,
                )
def verify_split(indata, outdatas, split, axis=0, pass_split=True):
    """Check ONNX Split against onnxruntime.

    ``split`` may be falsy to request an even split; ``pass_split`` controls
    whether the ``split`` attribute is serialized into the node at all.
    """
    indata = np.array(indata).astype(np.float32)
    outdatas = [np.array(o).astype(np.float32) for o in outdatas]
    if split:
        split_index = range(len(split))
    else:
        split_index = range(len(outdatas))
    if pass_split:
        node = helper.make_node(
            "Split",
            inputs=["input"],
            outputs=["output_{}".format(i) for i in range(len(split_index))],
            axis=axis,
            split=split,
        )
    else:
        node = helper.make_node(
            "Split",
            inputs=["input"],
            outputs=["output_{}".format(i) for i in range(len(split_index))],
            axis=axis,
        )
    graph = helper.make_graph(
        [node],
        "split_test",
        inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))],
        outputs=[
            helper.make_tensor_value_info(
                "output_{}".format(i), TensorProto.FLOAT, list(outdatas[i].shape)
            )
            for i in range(len(split_index))
        ],
    )
    model = helper.make_model(graph, producer_name="split_test")

    import onnxruntime.backend

    rep = onnxruntime.backend.prepare(model, "CPU")
    onnx_out = rep.run(indata)
    for target, ctx in tvm.testing.enabled_targets():
        output_shape = [o.shape for o in outdatas]
        # BUG FIX: the dtype list was hard-coded to exactly three entries,
        # which is wrong for splits with a different number of outputs.
        output_type = ["float32"] * len(outdatas)
        tvm_out = get_tvm_output(model, indata, target, ctx, output_shape, output_type)
        for o, t in zip(onnx_out, tvm_out):
            tvm.testing.assert_allclose(o, t)
@tvm.testing.uses_gpu
def test_split():
    """Split on 1-D and 2-D inputs, with explicit, implicit, and uneven splits."""
    # 1D, explicit even split
    verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0)
    # 1D, split sizes not serialized into the node
    verify_split(
        [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0, False
    )
    # 1D, uneven split
    verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]], [2, 1, 3], 0)
    # 2D, split along axis 1
    verify_split(
        [[1.0, 2.0, 3.0, 4.0], [7.0, 8.0, 9.0, 10.0]],
        [[[1.0, 2.0], [7.0, 8.0]], [[3.0, 4.0], [9.0, 10.0]]],
        [2, 2],
        1,
    )
    # Split evenly (unstack)
    verify_split([1, 2, 3], [[1], [2], [3]], False, 0, False)
@tvm.testing.uses_gpu
def test_binary_ops():
    """Elementwise binary ops, with and without numpy-style broadcasting."""
    in_shape = (1, 2, 3, 3)
    dtype = "float32"
    out_shape = in_shape

    def verify_binary_ops(op, x, y, out_np, x_name="in1", y_name="in2", broadcast=None):
        if broadcast is None:
            node = helper.make_node(op, [x_name, y_name], ["out"])
        else:
            node = helper.make_node(op, [x_name, y_name], ["out"], broadcast=1)
        # BUG FIX: declare each graph input with the shape of the tensor that
        # is actually fed in. The broadcast cases pass a (3,) second operand,
        # which was previously mis-declared as in_shape.
        graph = helper.make_graph(
            [node],
            "_test",
            inputs=[
                helper.make_tensor_value_info(x_name, TensorProto.FLOAT, list(x.shape)),
                helper.make_tensor_value_info(y_name, TensorProto.FLOAT, list(y.shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="_test")
        for target, ctx in tvm.testing.enabled_targets():
            tvm_out = get_tvm_output(model, [x, y], target, ctx)
            tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)

    x = np.random.uniform(size=in_shape).astype(dtype)
    y = np.random.uniform(size=in_shape).astype(dtype)
    z = np.random.uniform(size=(3,)).astype(dtype)  # broadcasts over the last axis
    verify_binary_ops("Add", x, y, x + y, broadcast=None)
    verify_binary_ops("Add", x, z, x + z, broadcast=True)
    verify_binary_ops("Sub", x, y, x - y, broadcast=None)
    verify_binary_ops("Sub", x, z, x - z, broadcast=True)
    verify_binary_ops("Mul", x, y, x * y, broadcast=None)
    verify_binary_ops("Mul", x, z, x * z, broadcast=True)
    verify_binary_ops("Mul", x, x, x * x, x_name="in1", y_name="in1", broadcast=None)
    verify_binary_ops("Div", x, y, x / y, broadcast=None)
    verify_binary_ops("Div", x, z, x / z, broadcast=True)
    verify_binary_ops("Sum", x, y, x + y, broadcast=None)
    verify_binary_ops("Greater", x, y, x > y, broadcast=True)
    verify_binary_ops("Less", x, y, x < y, broadcast=True)
    verify_binary_ops("Equal", x, y, x == y, broadcast=True)
@tvm.testing.uses_gpu
def test_single_ops():
    """Unary elementwise ops checked against their numpy equivalents."""
    in_shape = (1, 2, 3, 3)
    dtype = "float32"
    out_shape = in_shape

    def verify_single_ops(op, x, out_np, rtol=1e-5, atol=1e-5):
        z = helper.make_node(op, ["in1"], ["out"])
        graph = helper.make_graph(
            [z],
            "_test",
            inputs=[
                helper.make_tensor_value_info("in1", TensorProto.FLOAT, list(in_shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="_test")
        for target, ctx in tvm.testing.enabled_targets():
            tvm_out = get_tvm_output(model, [x], target, ctx)
            tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol)

    # Uniform (0, 1) keeps every op below inside its real-valued domain
    # (Log/Sqrt/Reciprocal need positives, arc-trig needs |x| <= 1).
    x = np.random.uniform(size=in_shape).astype(dtype)
    verify_single_ops("Neg", x, -x)
    verify_single_ops("Abs", x, np.abs(x))
    verify_single_ops("Reciprocal", x, 1 / x)
    verify_single_ops("Sqrt", x, np.sqrt(x))
    verify_single_ops("Relu", x, np.maximum(x, 0))
    verify_single_ops("Exp", x, np.exp(x))
    # BUG FIX: the "Log" check was accidentally duplicated; run it once.
    verify_single_ops("Log", x, np.log(x))
    verify_single_ops("ACos", x, np.arccos(x))
    verify_single_ops("ACosh", x, np.arccosh(x))
    verify_single_ops("ASin", x, np.arcsin(x))
    verify_single_ops("ASinh", x, np.arcsinh(x))
    verify_single_ops("ATan", x, np.arctan(x))
    verify_single_ops("ATanh", x, np.arctanh(x))
    verify_single_ops("Cos", x, np.cos(x))
    verify_single_ops("Cosh", x, np.cosh(x))
    verify_single_ops("Sin", x, np.sin(x))
    verify_single_ops("Sinh", x, np.sinh(x))
    verify_single_ops("Tan", x, np.tan(x))
    verify_single_ops("Tanh", x, np.tanh(x))
    verify_single_ops("Sigmoid", x, 1 / (1 + np.exp(-x)))
    verify_single_ops("Softsign", x, x / (1 + np.abs(x)))
    verify_single_ops("SoftPlus", x, np.log(1 + np.exp(x)))
@tvm.testing.uses_gpu
def test_leaky_relu():
    """LeakyRelu: x for x >= 0, alpha * x otherwise."""

    def _ref(x, alpha):
        return np.where(x >= 0, x, x * alpha)

    _test_onnx_op_elementwise(
        (2, 4, 5, 6), _ref, {"alpha": 0.25}, "float32", "LeakyRelu", {"alpha": 0.25}
    )
@tvm.testing.uses_gpu
def test_elu():
    """Elu: x for x > 0, alpha * (exp(x) - 1) otherwise."""

    def _ref(x, alpha):
        return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

    _test_onnx_op_elementwise(
        (2, 4, 5, 6), _ref, {"alpha": 0.25}, "float32", "Elu", {"alpha": 0.25}
    )
@tvm.testing.uses_gpu
def test_selu():
    """Selu: gamma-scaled Elu."""

    def _ref(x, alpha, gamma):
        return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

    _test_onnx_op_elementwise(
        (2, 4, 5, 6),
        _ref,
        {"alpha": 0.25, "gamma": 0.3},
        "float32",
        "Selu",
        {"alpha": 0.25, "gamma": 0.3},
    )
@tvm.testing.uses_gpu
def test_prelu():
    """PRelu with per-channel slopes and a fully broadcast scalar slope."""

    def verify_prelu(x_shape, a_shape):
        node = helper.make_node("PRelu", inputs=["X", "slope"], outputs=["Y"])
        graph = helper.make_graph(
            [node],
            "prelu_test",
            inputs=[
                helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("slope", TensorProto.FLOAT, list(a_shape)),
            ],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(x_shape))],
        )
        model = helper.make_model(graph, producer_name="prelu_test")
        verify_with_ort(model, [x_shape, a_shape], list(x_shape))

    for data_shape, slope_shape in [
        ([3, 4, 5, 6], [1, 4, 1, 1]),
        ([1, 8, 5, 6], [1, 8, 1, 1]),
        ([2, 12, 16, 16], [1, 12, 1, 1]),
        ([2, 12, 16, 16], [1]),  # Test alpha broadcasting.
    ]:
        verify_prelu(data_shape, slope_shape)
@tvm.testing.uses_gpu
def test_ThresholdedRelu():
    """ThresholdedRelu: keep values strictly above alpha, zero out the rest."""

    def ref_thresholded_relu(arr, alpha):
        # Clamp up to alpha, then zero exactly-alpha entries: x if x > alpha else 0.
        clipped = np.clip(arr, alpha, np.inf)
        clipped[clipped == alpha] = 0
        return clipped

    _test_onnx_op_elementwise(
        (2, 4, 5, 6),
        ref_thresholded_relu,
        {"alpha": 0.25},
        "float32",
        "ThresholdedRelu",
        {"alpha": 0.25},
    )
@tvm.testing.uses_gpu
def test_ScaledTanh():
    """ScaledTanh: alpha * tanh(beta * x)."""

    def ref_scaled_tanh(arr, alpha, beta):
        return np.tanh(beta * arr) * alpha

    _test_onnx_op_elementwise(
        (2, 4, 5, 6),
        ref_scaled_tanh,
        {"alpha": 0.25, "beta": 0.3},
        "float32",
        "ScaledTanh",
        {"alpha": 0.25, "beta": 0.3},
    )
@tvm.testing.uses_gpu
def test_ParametricSoftplus():
    """ParametricSoftplus: alpha * log(exp(beta * x) + 1)."""

    def ref_parametric_softplus(arr, alpha, beta):
        return np.log(np.exp(beta * arr) + 1) * alpha

    _test_onnx_op_elementwise(
        (2, 4, 5, 6),
        ref_parametric_softplus,
        {"alpha": 0.25, "beta": 0.3},
        "float32",
        "ParametricSoftplus",
        {"alpha": 0.25, "beta": 0.3},
    )
@tvm.testing.uses_gpu
def test_Scale():
    """Scale: elementwise multiplication by a scalar attribute."""

    def ref_scale(arr, scale):
        return arr * scale

    _test_onnx_op_elementwise(
        (2, 4, 5, 6), ref_scale, {"scale": 0.25}, "float32", "Scale", {"scale": 0.25}
    )
@tvm.testing.uses_gpu
def test_LogSoftmax():
    """LogSoftmax over axis 1, checked against topi's reference implementation."""
    reference = tvm.topi.testing.log_softmax_python
    _test_onnx_op_elementwise((1, 4), reference, {}, "float32", "LogSoftmax", {"axis": 1})
def check_torch_conversion(model, input_size):
    """Export a torch model to ONNX and compare TVM's import against onnxruntime.

    Parameters
    ----------
    model : callable
        Zero-argument constructor of a torch.nn.Module
        (e.g. torchvision.models.resnet18).
    input_size : tuple of int
        Shape of the single input tensor used for export and testing.
    """
    dummy_input = torch.randn(*input_size)
    file_name = "{}.onnx".format(model.__name__)
    # Set verbose=True for more output
    torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False)
    onnx_model = onnx.load(file_name)
    # The exported graph declares a float input (torch.randn above), so the
    # test data must be float32 — int32 would be rejected by onnxruntime.
    input_data = np.random.uniform(size=input_size).astype("float32")
    verify_with_ort_with_inputs(onnx_model, [input_data])
@tvm.testing.uses_gpu
def test_resnet():
    """ONNX round-trip of torchvision's resnet18."""
    batch_shape = (1, 3, 224, 224)
    check_torch_conversion(torchvision.models.resnet18, batch_shape)
    # check_torch_conversion(torchvision.models.resnet101, (1,3,224,224))
# def test_alexnet():
# Torch's ONNX export does not support the adaptive pooling used by AlexNet?
# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224))
# Torch's ONNX export does not support the adaptive pooling used by vgg16?
# def test_vgg16():
# check_torch_conversion(torchvision.models.vgg16, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_squeezenet():
# # Torch's ONNX export does not support the max pooling used by Squeezenet
# check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224))
@tvm.testing.uses_gpu
def test_densenet():
    """ONNX round-trip of torchvision's densenet161."""
    batch_shape = (1, 3, 224, 224)
    check_torch_conversion(torchvision.models.densenet161, batch_shape)
@tvm.testing.uses_gpu
def test_inception():
    """ONNX round-trip of torchvision's inception_v3."""
    batch_shape = (1, 3, 224, 224)
    check_torch_conversion(torchvision.models.inception_v3, batch_shape)
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_googlenet():
# check_torch_conversion(torchvision.models.googlenet, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_shufflenetv2():
# check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224))
@tvm.testing.uses_gpu
def test_sign():
    """Sign: elementwise -1 / 0 / +1; numpy's sign is the reference directly."""
    _test_onnx_op_elementwise((3, 4, 5, 6), np.sign, {}, "float32", "Sign", {})
def verify_not(indata, dtype):
    """Build a one-node Not graph and compare TVM's output with numpy's logical_not."""
    data = indata.astype(dtype)
    expected = np.logical_not(data)
    not_node = helper.make_node(
        "Not",
        inputs=["in"],
        outputs=["out"],
    )
    graph = helper.make_graph(
        [not_node],
        "not_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.BOOL, list(data.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="not_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [data], target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, tvm_out)
@tvm.testing.uses_gpu
def test_not():
    """Logical Not on 2d, 3d, and 4d boolean tensors."""
    for shape in [(3, 4), (3, 4, 5), (3, 4, 5, 6)]:
        verify_not(indata=(np.random.randn(*shape) > 0), dtype=bool)
def verify_and(indata, dtype):
    """Build a one-node And graph and compare TVM's output with numpy's logical_and."""
    lhs = indata[0].astype(dtype)
    rhs = indata[1].astype(dtype)
    expected = np.logical_and(lhs, rhs)
    and_node = helper.make_node(
        "And",
        inputs=["in1", "in2"],
        outputs=["out"],
    )
    graph = helper.make_graph(
        [and_node],
        "and_test",
        inputs=[
            helper.make_tensor_value_info("in1", TensorProto.BOOL, list(lhs.shape)),
            helper.make_tensor_value_info("in2", TensorProto.BOOL, list(rhs.shape)),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="and_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [lhs, rhs], target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, tvm_out)
@tvm.testing.uses_gpu
def test_and():
    """Logical And on equal-rank tensors and with the second operand broadcast."""
    shape_pairs = [
        ((3, 4), (3, 4)),  # 2d
        ((3, 4, 5), (3, 4, 5)),  # 3d
        ((3, 4, 5, 6), (3, 4, 5, 6)),  # 4d
        ((3, 4, 5), (5,)),  # 3d vs 1d
        ((3, 4, 5), (4, 5)),  # 3d vs 2d
    ]
    for lhs_shape, rhs_shape in shape_pairs:
        lhs = np.random.randn(*lhs_shape) > 0
        rhs = np.random.randn(*rhs_shape) > 0
        verify_and(indata=[lhs, rhs], dtype=bool)
def verify_tile_v1(indata, outdata, **kwargs):
    """Tile in its opset-1 form, where `repeats` is an attribute (via kwargs)."""
    tile_node = helper.make_node("Tile", inputs=["in"], outputs=["out"], **kwargs)
    graph = helper.make_graph(
        [tile_node],
        "tile_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name="tile_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, [indata], target, ctx, outdata.shape, opset=1)
        tvm.testing.assert_allclose(outdata, result)
def verify_tile_v6(indata, repeats, outdata):
    """Tile in its opset-6 form, where `repeats` is a second graph input."""
    tile_node = helper.make_node("Tile", inputs=["input", "repeats"], outputs=["out"])
    graph_inputs = [
        helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
        helper.make_tensor_value_info("repeats", TensorProto.INT64, list(repeats.shape)),
    ]
    graph = helper.make_graph(
        [tile_node],
        "tile_test",
        inputs=graph_inputs,
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name="tile_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output_with_vm(model, [indata, repeats], target, ctx, opset=6)
        tvm.testing.assert_allclose(outdata, result)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_tile():
    """Tile with random repeats, exercising both the opset-1 and opset-6 forms."""
    data = np.random.rand(2, 3, 4, 5).astype(np.float32)
    reps = np.random.randint(low=1, high=10, size=(np.ndim(data),)).astype(np.int64)
    expected = np.tile(data, reps)
    verify_tile_v1(data, expected, repeats=reps)
    verify_tile_v6(data, reps, expected)
def verify_erf(indata, outdata):
    """Build a one-node Erf graph and compare TVM's output with the expected values."""
    erf_node = helper.make_node("Erf", inputs=["in"], outputs=["out"])
    graph = helper.make_graph(
        [erf_node],
        "erf_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name="erf_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, [indata], target, ctx, outdata.shape)
        tvm.testing.assert_allclose(outdata, result)
@tvm.testing.uses_gpu
def test_erf():
    """Erf against scipy's reference implementation."""
    data = np.random.rand(2, 3, 4, 6).astype(np.float32)
    verify_erf(data, scipy.special.erf(data))
def verify_where(condition, x, y, dtype, outdata):
    """Build a one-node Where graph and compare TVM's output with the expected values."""
    where_node = helper.make_node("Where", inputs=["condition", "x", "y"], outputs=["out"])
    graph_inputs = [
        helper.make_tensor_value_info("condition", TensorProto.BOOL, list(condition.shape)),
        helper.make_tensor_value_info("x", dtype, list(x.shape)),
        helper.make_tensor_value_info("y", dtype, list(y.shape)),
    ]
    graph = helper.make_graph(
        [where_node],
        "where_test",
        inputs=graph_inputs,
        outputs=[helper.make_tensor_value_info("out", dtype, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name="where_test")
    for target, ctx in tvm.testing.enabled_targets():
        result = get_tvm_output(model, [condition, x, y], target, ctx, outdata.shape)
        tvm.testing.assert_allclose(outdata, result)
@tvm.testing.uses_gpu
def test_where():
    """Where with matching, broadcast, and scalar condition/operand shapes."""
    # Use the builtin `bool`: the `np.bool` alias was deprecated in NumPy 1.20
    # and removed in 1.24, so `dtype=np.bool` raises AttributeError there.
    condition = np.array([[1, 0], [1, 1]], dtype=bool)
    x = np.array([[1, 2], [3, 4]], dtype=np.int64)
    y = np.array([[9, 8], [7, 6]], dtype=np.int64)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.INT64, outdata)
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[9, 8], [7, 6]], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)
    # Scalar operand broadcast against a 1-element operand.
    x = np.array(1, dtype=np.float32)
    y = np.array([2], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)
    x = np.array([2], dtype=np.float32)
    y = np.array(1, dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)
    # Scalar condition broadcast against 2x2 operands.
    condition = np.array(1, dtype=bool)
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[5, 6], [7, 8]], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[1], [7]], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)
def verify_or(indata, dtype):
    """Build a one-node Or graph and compare TVM's output with numpy's logical_or."""
    lhs = indata[0].astype(dtype)
    rhs = indata[1].astype(dtype)
    expected = np.logical_or(lhs, rhs)
    or_node = helper.make_node(
        "Or",
        inputs=["in1", "in2"],
        outputs=["out"],
    )
    graph = helper.make_graph(
        [or_node],
        "or_test",
        inputs=[
            helper.make_tensor_value_info("in1", TensorProto.BOOL, list(lhs.shape)),
            helper.make_tensor_value_info("in2", TensorProto.BOOL, list(rhs.shape)),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name="or_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [lhs, rhs], target, ctx, expected.shape)
        tvm.testing.assert_allclose(expected, tvm_out)
@tvm.testing.uses_gpu
def test_or():
    """Logical Or on equal-rank tensors and with the second operand broadcast."""
    shape_pairs = [
        ((3, 4), (3, 4)),  # 2d
        ((3, 4, 5), (3, 4, 5)),  # 3d
        ((3, 4, 5, 6), (3, 4, 5, 6)),  # 4d
        ((3, 4, 5), (5,)),  # 3d vs 1d
        ((3, 4, 5), (4, 5)),  # 3d vs 2d
    ]
    for lhs_shape, rhs_shape in shape_pairs:
        lhs = np.random.randn(*lhs_shape) > 0
        rhs = np.random.randn(*rhs_shape) > 0
        verify_or(indata=[lhs, rhs], dtype=bool)
@tvm.testing.uses_gpu
def test_batch_norm():
    """BatchNormalization inference with per-channel scale/bias/mean/var."""

    def check_batch_norm(in_shape):
        channels = in_shape[1]
        bn_node = onnx.helper.make_node(
            "BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
        )
        graph = helper.make_graph(
            [bn_node],
            "batchnorm_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)),
                helper.make_tensor_value_info("scale", TensorProto.FLOAT, [channels]),
                helper.make_tensor_value_info("B", TensorProto.FLOAT, [channels]),
                helper.make_tensor_value_info("mean", TensorProto.FLOAT, [channels]),
                helper.make_tensor_value_info("var", TensorProto.FLOAT, [channels]),
            ],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(in_shape))],
        )
        model = helper.make_model(graph, producer_name="batchnorm_test")
        # Shapes for X, scale, b, mean, var, in graph-input order.
        verify_with_ort(model, [in_shape, channels, channels, channels, channels], in_shape)

    for shape in ([1, 3, 224, 224], [1, 3, 24, 24], [16, 3, 24, 24], [16, 16, 24, 24], [16, 16, 10, 10]):
        check_batch_norm(shape)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_batch_norm_dynamic_subgraph():
    """BatchNormalization whose runtime output shape feeds a dynamic Reshape."""

    def check_bn_dynamic(in_shape, o_shape):
        channels = in_shape[1]
        bn_node = onnx.helper.make_node(
            "BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
        )
        # Reshape the extra input "in" to the shape of the batchnorm output.
        shape_node = helper.make_node("Shape", ["Y"], ["shape"])
        reshape_node = helper.make_node("Reshape", ["in", "shape"], ["out"])
        graph = helper.make_graph(
            [bn_node, shape_node, reshape_node],
            "batchnorm_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)),
                helper.make_tensor_value_info("in", TensorProto.FLOAT, list(o_shape)),
                helper.make_tensor_value_info("scale", TensorProto.FLOAT, [channels]),
                helper.make_tensor_value_info("B", TensorProto.FLOAT, [channels]),
                helper.make_tensor_value_info("mean", TensorProto.FLOAT, [channels]),
                helper.make_tensor_value_info("var", TensorProto.FLOAT, [channels]),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(in_shape))],
        )
        model = helper.make_model(graph, producer_name="batchnorm_test")
        # Shapes for X, in, scale, b, mean, var, in graph-input order.
        inshapes = [in_shape, o_shape, channels, channels, channels, channels]
        verify_with_ort(model, inshapes, in_shape, use_vm=True)

    check_bn_dynamic([16, 16, 10, 10], [160, 160])
def verify_conv(
    x_shape,
    w_shape,
    y_shape,
    padding,
    kernel_shape,
    strides,
    dilations,
    auto_pad="NOTSET",
    unset_pad=False,
):
    """Single Conv node checked against onnxruntime.

    Padding is supplied in exactly one of three ways: left unset entirely
    (unset_pad=True), via the auto_pad attribute (padding is None), or via
    explicit pads. groups is left at its default of 1.
    """
    conv_attrs = dict(kernel_shape=kernel_shape, strides=strides, dilations=dilations)
    if unset_pad:
        pass  # rely on the operator's default zero padding
    elif padding is None:
        conv_attrs["auto_pad"] = auto_pad
    else:
        conv_attrs["pads"] = padding
    node = helper.make_node("Conv", inputs=["x", "W"], outputs=["y"], **conv_attrs)
    graph = helper.make_graph(
        [node],
        "conv_test",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
            helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
        ],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
    )
    model = helper.make_model(graph, producer_name="conv_test")
    verify_with_ort(model, [x_shape, w_shape], y_shape, use_vm=True, convert_to_static=True)
@tvm.testing.uses_gpu
def test_conv():
    """Conv in 1/2/3 dimensions: padding variants, auto_pad, strides, dilation."""

    def repeat(N, D):
        # (N,) * D, e.g. repeat(3, 2) -> (3, 3).
        return tuple([N for _ in range(D)])

    for D in [1, 2, 3]:
        # Convolution with padding
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(5, D),
            2 * repeat(1, D),
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
        )
        # Convolution with asymmetric padding
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(4, D),
            repeat(0, D) + repeat(1, D),
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
        )
        # Convolution without padding
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(3, D),
            2 * repeat(0, D),
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
        )
        # Convolution with autopadding
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(5, D),
            None,
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
            auto_pad="SAME_UPPER",
        )
        # Convolution with valid autopadding
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(3, D),
            None,
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
            auto_pad="VALID",
        )
        # Convolution with unset padding.
        # BUG FIX: a bare positional `True` here used to land on `auto_pad`
        # (the 8th parameter), so the unset-pad branch was never exercised;
        # pass `unset_pad=True` explicitly.
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(3, D),
            2 * repeat(0, D),
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
            unset_pad=True,
        )
        # Convolution with non uniform stride
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(3, D),
            None,
            repeat(3, D),
            repeat(2, D),
            repeat(1, D),
            auto_pad="SAME_UPPER",
        )
        # Convolution with dilation
        verify_conv(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(5, D),
            2 * repeat(2, D),
            repeat(3, D),
            repeat(1, D),
            repeat(2, D),
        )
def verify_convtranspose_with_padding(
    x_shape,
    w_shape,
    y_shape,
    padding,
    kernel_shape,
    strides,
    dilations,
    auto_pad="NOTSET",
    unset_pad=False,
):
    """Single ConvTranspose node checked against onnxruntime.

    Padding comes from exactly one of: the operator default (unset_pad=True),
    the auto_pad attribute (padding is None), or explicit pads.
    """
    ct_attrs = dict(kernel_shape=kernel_shape, strides=strides, dilations=dilations, group=1)
    if not unset_pad:
        if padding is None:
            ct_attrs["auto_pad"] = auto_pad
        else:
            ct_attrs["pads"] = padding
    node = helper.make_node("ConvTranspose", inputs=["x", "W"], outputs=["y"], **ct_attrs)
    graph = helper.make_graph(
        [node],
        "convtranspose_test",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
            helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
        ],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
    )
    model = helper.make_model(graph, producer_name="conv_test")
    verify_with_ort(model, [x_shape, w_shape], y_shape, use_vm=True, convert_to_static=True)
def verify_convtranspose(x_shape, w_shape, y_shape, p):
    """ConvTranspose with fixed 3x3 kernel, strides (3, 2), and explicit pads p."""
    ct_node = onnx.helper.make_node(
        "ConvTranspose",
        inputs=["x", "W"],
        outputs=["y"],
        strides=[3, 2],
        group=1,
        kernel_shape=[3, 3],
        pads=p,
    )
    graph_inputs = [
        helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
        helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
    ]
    graph = helper.make_graph(
        [ct_node],
        "verify_convtranspose_test",
        inputs=graph_inputs,
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
    )
    model = helper.make_model(graph, producer_name="convtranspose_trest")
    verify_with_ort(model, [x_shape, w_shape], y_shape)
@tvm.testing.uses_gpu
def test_convtranspose():
    """ConvTranspose: explicit pads, no pads, auto_pad variants, and strides."""
    # Convolution Transpose with padding
    # (1, 1, 3, 3) input tensor
    # (1, 2, 3, 3) tensor for convolution weights
    # (1, 2, 7, 3) output tensor
    # [1, 2, 1, 2] list for pads
    verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2])

    def repeat(N, D):
        # (N,) * D, e.g. repeat(3, 2) -> (3, 3).
        return tuple([N for _ in range(D)])

    # TODO(mbrookhart): onnxruntime in CI only supports 2D,
    # find something else to test 1D and 3D against
    for D in [2]:
        # Convolution with padding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(5, D),
            2 * repeat(1, D),
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
        )
        # Convolution without padding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(7, D),
            2 * repeat(0, D),
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
        )
        # Convolution with autopadding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(5, D),
            None,
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
            auto_pad="SAME_UPPER",
        )
        # Convolution with valid autopadding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(7, D),
            None,
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
            auto_pad="VALID",
        )
        # Convolution with unset padding.
        # BUG FIX: a bare positional `True` here used to land on `auto_pad`
        # (the 8th parameter), so the unset-pad branch was never exercised;
        # pass `unset_pad=True` explicitly.
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(7, D),
            2 * repeat(0, D),
            repeat(3, D),
            repeat(1, D),
            repeat(1, D),
            unset_pad=True,
        )
        # Convolution with non uniform stride
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, D),
            (1, 1) + repeat(3, D),
            (1, 1) + repeat(9, D),
            None,
            repeat(3, D),
            repeat(2, D),
            repeat(1, D),
            auto_pad="SAME_UPPER",
        )
        # Convolution with dilation
        # TODO(mbrookhart): Relay doesn't currently support convtranspose with dilation
        # verify_convtranspose_with_padding(
        #     (1, 1) + repeat(5, D),
        #     (1, 1) + repeat(3, D),
        #     (1, 1) + repeat(5, D),
        #     2 * repeat(2, D),
        #     repeat(3, D),
        #     repeat(1, D),
        #     repeat(2, D),
        # )
@tvm.testing.uses_gpu
def test_unsqueeze_constant():
    """Import an exported torch Flatten+Linear model whose view() produces constants."""
    import tempfile

    from torch.nn import Linear, Module, Sequential

    class Flatten(Module):
        def forward(self, input):
            return input.view(input.size(0), -1)

    input_size = (1, 16, 32, 32)
    dummy_input = torch.randn(*input_size)
    layer = Sequential(Flatten(), Linear(16 * 32 * 32, 64))
    with tempfile.NamedTemporaryFile() as fp:
        torch.onnx.export(layer, dummy_input, fp.name, export_params=True)
        onnx_model = onnx.load(fp.name)
        relay.frontend.from_onnx(onnx_model, {"0": input_size})
def verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_pad="NOTSET"):
    """Build a single MaxPool/AveragePool node and check TVM against onnxruntime.

    Exactly one of explicit `pads` or `auto_pad` (used when pads is None) is
    attached to the node. `mode` selects the operator; anything other than
    "max"/"average" raises ValueError. Input data is generated inside
    verify_with_ort, so no test data is created here.
    """
    print(x_shape, kernel_shape, strides, mode, pads, auto_pad)
    if mode == "max":
        node_type = "MaxPool"
    elif mode == "average":
        node_type = "AveragePool"
    else:
        raise ValueError("Pool method {} is not supported.".format(mode))
    pool_node = helper.make_node(
        node_type, inputs=["x"], outputs=["y"], kernel_shape=kernel_shape, strides=strides
    )
    if pads is None:
        pad_attr = helper.make_attribute("auto_pad", auto_pad)
    else:
        pad_attr = helper.make_attribute("pads", pads)
    pool_node.attribute.append(pad_attr)
    if mode == "max":
        # storage_order=0 (row major) is the ONNX default; set it explicitly for coverage.
        storage_attr = helper.make_attribute("storage_order", 0)
        pool_node.attribute.append(storage_attr)
    graph = helper.make_graph(
        [pool_node],
        "pooling_test",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="pooling_test")
    verify_with_ort(model, [x_shape], out_shape, use_vm=True, convert_to_static=True)
@tvm.testing.uses_gpu
def test_pooling():
    """MaxPool/AveragePool in 1D/2D/3D with explicit pads, strides, and auto_pad."""
    # (x_shape, kernel_shape, strides, pads, out_shape, auto_pad)
    cases = [
        # Pool1D
        ([1, 1, 32], [3], [1], [1, 1], [1, 1, 32], "NOTSET"),
        # Pool2D
        ([1, 1, 32, 32], [3, 3], [1, 1], [1, 1, 1, 1], [1, 1, 32, 32], "NOTSET"),
        # Pool1D with stride
        ([1, 1, 32], [3], [2], [1, 1], [1, 1, 16], "NOTSET"),
        # Pool2D with stride
        ([1, 1, 32, 32], [3, 3], [2, 2], [1, 1, 1, 1], [1, 1, 16, 16], "NOTSET"),
        # Pool1D with stride and autopadding
        ([1, 1, 32], [3], [2], None, [1, 1, 16], "SAME_UPPER"),
        # Pool2D with stride and autopadding
        ([1, 1, 32, 32], [3, 3], [2, 2], None, [1, 1, 16, 16], "SAME_UPPER"),
        # Pool3D with stride
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2], [1, 1, 1, 1, 1, 1], [1, 1, 16, 16, 16], "NOTSET"),
        # Pool3D with stride and autopadding
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2], None, [1, 1, 16, 16, 16], "SAME_UPPER"),
    ]
    for mode in ["max", "average"]:
        for x_shape, kernel_shape, strides, pads, out_shape, auto_pad in cases:
            verify_pooling(
                x_shape=x_shape,
                kernel_shape=kernel_shape,
                strides=strides,
                pads=pads,
                out_shape=out_shape,
                mode=mode,
                auto_pad=auto_pad,
            )
def verify_mod(x_shape, y_shape, fmod, out_shape, dtype="float32"):
    """Single Mod node (fmod selects C-fmod vs integer-mod semantics) vs onnxruntime."""
    dividend = np.random.uniform(-100.0, 100.0, x_shape).astype(dtype)
    divisor = np.random.uniform(-100.0, 100.0, y_shape).astype(dtype)
    # Replace zeros in the divisor to avoid division-by-zero.
    divisor = np.where(divisor == 0, 1, divisor)
    mod_node = helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=fmod)
    onnx_dtype = TensorProto.FLOAT if dtype == "float32" else TensorProto.INT32
    graph = helper.make_graph(
        [mod_node],
        "mod_test",
        inputs=[
            helper.make_tensor_value_info("x", onnx_dtype, list(x_shape)),
            helper.make_tensor_value_info("y", onnx_dtype, list(y_shape)),
        ],
        outputs=[helper.make_tensor_value_info("z", onnx_dtype, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="mod_test")
    verify_with_ort_with_inputs(model, [dividend, divisor], out_shape)
@tvm.testing.uses_gpu
def test_mod():
    """Mod with fmod=0 (integer semantics) and fmod=1 (C fmod), with broadcasting."""
    # (x_shape, y_shape, fmod, out_shape, dtype)
    cases = [
        ([1, 32, 32], [1, 1, 32], 0, (1, 32, 32), "int32"),
        ([1, 32, 32, 32], [1, 32, 32, 32], 0, (1, 32, 32, 32), "int32"),
        ([1, 32, 32], [1, 32, 32], 1, (1, 32, 32), "int32"),
        ([1, 1, 32, 32], [1, 32, 32, 32], 1, (1, 32, 32, 32), "float32"),
        ([1, 32, 32, 32], [1, 1, 32, 32], 1, (1, 32, 32, 32), "float32"),
        ([1, 32, 32, 32], [1, 32, 32, 32], 1, (1, 32, 32, 32), "int32"),
        ([1, 32, 32, 32], [1, 32, 32, 32], 1, (1, 32, 32, 32), "float32"),
    ]
    for x_shape, y_shape, fmod, out_shape, dtype in cases:
        verify_mod(x_shape=x_shape, y_shape=y_shape, fmod=fmod, out_shape=out_shape, dtype=dtype)
def verify_xor(x_shape, y_shape):
    """Single Xor node on random boolean inputs, compared with numpy's logical_xor."""
    lhs = np.random.choice(a=[False, True], size=x_shape).astype("bool")
    rhs = np.random.choice(a=[False, True], size=y_shape).astype("bool")
    expected = np.logical_xor(lhs, rhs)
    out_shape = expected.shape
    xor_node = helper.make_node("Xor", inputs=["x", "y"], outputs=["z"])
    onnx_dtype = TensorProto.BOOL
    graph = helper.make_graph(
        [xor_node],
        "xor_test",
        inputs=[
            helper.make_tensor_value_info("x", onnx_dtype, list(x_shape)),
            helper.make_tensor_value_info("y", onnx_dtype, list(y_shape)),
        ],
        outputs=[helper.make_tensor_value_info("z", onnx_dtype, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="xor_test")
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, [lhs, rhs], target, ctx, out_shape)
        tvm.testing.assert_allclose(expected, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_xor():
    """Logical Xor on equal shapes and with the second operand broadcast."""
    for lhs_shape, rhs_shape in [([1, 32, 32], [1, 32, 32]), ([1, 32, 32], [1, 1, 32])]:
        verify_xor(x_shape=lhs_shape, y_shape=rhs_shape)
def verify_max_roi_pool(x_shape, rois_shape, pooled_shape, spatial_scale, out_shape):
    """Single MaxRoiPool node; spatial_scale is omitted (None) to exercise its default."""
    roi_attrs = {"pooled_shape": pooled_shape}
    if spatial_scale is not None:
        roi_attrs["spatial_scale"] = spatial_scale
    pool_node = helper.make_node("MaxRoiPool", inputs=["x", "rois"], outputs=["y"], **roi_attrs)
    graph = helper.make_graph(
        [pool_node],
        "pool_test",
        inputs=[
            helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
            helper.make_tensor_value_info("rois", TensorProto.FLOAT, list(rois_shape)),
        ],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="pool_test")
    verify_with_ort(model, [x_shape, rois_shape], out_shape)
@tvm.testing.uses_gpu
def test_max_roi_pool():
    """MaxRoiPool with default and explicit spatial_scale."""
    # (x_shape, rois_shape, pooled_shape, spatial_scale, out_shape)
    cases = [
        ([1, 3, 6, 6], [3, 5], [1, 1], None, [3, 3, 1, 1]),
        ([1, 3, 10, 10], [4, 5], [2, 2], 2.0, [4, 3, 2, 2]),
    ]
    for x_shape, rois_shape, pooled_shape, spatial_scale, out_shape in cases:
        verify_max_roi_pool(
            x_shape=x_shape,
            rois_shape=rois_shape,
            pooled_shape=pooled_shape,
            spatial_scale=spatial_scale,
            out_shape=out_shape,
        )
def verify_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad="NOTSET"):
    """Single LpPool node; auto_pad is used when pads is None, explicit pads otherwise."""
    lp_attrs = dict(kernel_shape=kernel_shape, p=p, strides=strides)
    if pads is None:
        lp_attrs["auto_pad"] = auto_pad
    else:
        lp_attrs["pads"] = pads
    pool_node = helper.make_node("LpPool", inputs=["x"], outputs=["y"], **lp_attrs)
    graph = helper.make_graph(
        [pool_node],
        "lppool_test",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="lppool_test")
    verify_with_ort(model, [x_shape], out_shape, use_vm=True, convert_to_static=True)
@tvm.testing.uses_gpu
def test_lppool():
    """LpPool (p=2) in 1D/2D/3D with explicit pads, strides, and auto_pad."""
    # (x_shape, kernel_shape, strides, pads, out_shape, auto_pad)
    cases = [
        # Pool1D
        ([1, 1, 32], [3], [1], [1, 1], [1, 1, 32], "NOTSET"),
        # Pool2D
        ([1, 1, 32, 32], [3, 3], [1, 1], [1, 1, 1, 1], [1, 1, 32, 32], "NOTSET"),
        # Pool1D with stride
        ([1, 1, 32], [3], [2], [1, 1], [1, 1, 16], "NOTSET"),
        # Pool2D with stride
        ([1, 1, 32, 32], [3, 3], [2, 2], [1, 1, 1, 1], [1, 1, 16, 16], "NOTSET"),
        # Pool1D with stride and autopadding
        ([1, 1, 32], [3], [2], None, [1, 1, 16], "SAME_UPPER"),
        # Pool2D with stride and autopadding
        ([1, 1, 32, 32], [3, 3], [2, 2], None, [1, 1, 16, 16], "SAME_UPPER"),
        # Pool3D with stride
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2], [1, 1, 1, 1, 1, 1], [1, 1, 16, 16, 16], "NOTSET"),
        # Pool3D with stride and autopadding
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2], None, [1, 1, 16, 16, 16], "SAME_UPPER"),
    ]
    for x_shape, kernel_shape, strides, pads, out_shape, auto_pad in cases:
        verify_lppool(
            x_shape=x_shape,
            kernel_shape=kernel_shape,
            p=2,
            strides=strides,
            pads=pads,
            out_shape=out_shape,
            auto_pad=auto_pad,
        )
def verify_rnn(
    seq_length,
    batch_size,
    input_size,
    hidden_size,
    rnn_type="LSTM",
    use_bias=False,
    activations=None,
    alphas=None,
    betas=None,
    use_initial_state=False,
    use_peep=False,
    linear_before_reset=False,
):
    """Build a single-layer ONNX LSTM/GRU node and compare TVM against onnxruntime.

    Random inputs X, W, R (plus optionally B, sequence_lens, initial_h,
    initial_c, P) are fed to both backends; outputs Y, Y_h (and Y_c for LSTM)
    are compared with rtol/atol of 5e-3.

    Raises
    ------
    NotImplementedError
        If rnn_type is neither "LSTM" nor "GRU".
    """
    # Gate-count multiplier: LSTM stacks 4 gate matrices, GRU stacks 3.
    if rnn_type == "LSTM":
        multiplier = 4
    elif rnn_type == "GRU":
        multiplier = 3
    else:
        raise NotImplementedError("%s RNNs not yet supported." % rnn_type)
    # X: (seq, batch, input); W: (1, gates*hidden, input); R: (1, gates*hidden, hidden).
    x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype("float32")
    w_np = np.random.uniform(size=(1, multiplier * hidden_size, input_size)).astype("float32")
    r_np = np.random.uniform(size=(1, multiplier * hidden_size, hidden_size)).astype("float32")
    input_names = ["X", "W", "R"]
    input_tensors = [
        helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_np.shape)),
        helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_np.shape)),
        helper.make_tensor_value_info("R", TensorProto.FLOAT, list(r_np.shape)),
    ]
    input_values = [x_np, w_np, r_np]
    if use_bias:
        # B concatenates input and recurrence biases: (1, 2 * gates * hidden).
        b_np = np.random.uniform(size=(1, multiplier * 2 * hidden_size)).astype("float32")
        input_names.append("B")
        input_tensors.append(
            helper.make_tensor_value_info("B", TensorProto.FLOAT, [1, multiplier * 2 * hidden_size])
        )
        input_values.append(b_np)
    if use_initial_state:
        assert use_bias == True, "Initial states must have bias specified."
        # All sequences in the batch use the full length.
        sequence_np = np.repeat(seq_length, batch_size).astype("int32")
        input_names.append("sequence_lens")
        input_tensors.append(
            helper.make_tensor_value_info("sequence_lens", TensorProto.INT32, [batch_size])
        )
        input_values.append(sequence_np)
        initial_h_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype("float32")
        input_names.append("initial_h")
        input_tensors.append(
            helper.make_tensor_value_info(
                "initial_h", TensorProto.FLOAT, [1, batch_size, hidden_size]
            )
        )
        input_values.append(initial_h_np)
        # LSTM additionally carries a cell state.
        if rnn_type == "LSTM":
            initial_c_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype("float32")
            input_names.append("initial_c")
            input_tensors.append(
                helper.make_tensor_value_info(
                    "initial_c", TensorProto.FLOAT, [1, batch_size, hidden_size]
                )
            )
            input_values.append(initial_c_np)
    if use_peep and rnn_type == "LSTM":
        assert use_initial_state == True, "Peepholes require initial state to be specified."
        # Peephole weights P: (1, 3 * hidden).
        p_np = np.random.uniform(size=(1, 3 * hidden_size)).astype("float32")
        input_names.append("P")
        input_tensors.append(
            helper.make_tensor_value_info("P", TensorProto.FLOAT, [1, 3 * hidden_size])
        )
        input_values.append(p_np)
    # Outputs: Y is the full sequence of hidden states, Y_h the last one.
    Y_shape = [seq_length, 1, batch_size, hidden_size]
    Y_h_shape = [1, batch_size, hidden_size]
    outputs = ["Y", "Y_h"]
    graph_outputs = [
        helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(Y_shape)),
        helper.make_tensor_value_info("Y_h", TensorProto.FLOAT, list(Y_h_shape)),
    ]
    output_shapes = [Y_shape, Y_h_shape]
    if rnn_type == "LSTM":
        # LSTM also emits the final cell state Y_c.
        Y_c_shape = [1, batch_size, hidden_size]
        outputs.append("Y_c")
        graph_outputs.append(
            helper.make_tensor_value_info("Y_c", TensorProto.FLOAT, list(Y_c_shape))
        )
        output_shapes.append(Y_c_shape)
    rnn_node = helper.make_node(
        rnn_type, inputs=input_names, outputs=outputs, hidden_size=hidden_size
    )
    # Optional attributes are appended only when requested.
    if activations is not None:
        activations_attr = helper.make_attribute("activations", activations)
        rnn_node.attribute.append(activations_attr)
    if alphas is not None:
        alphas_attr = helper.make_attribute("activation_alpha", alphas)
        rnn_node.attribute.append(alphas_attr)
    if betas is not None:
        betas_attr = helper.make_attribute("activation_beta", betas)
        rnn_node.attribute.append(betas_attr)
    if linear_before_reset and rnn_type == "GRU":
        lbr_attr = helper.make_attribute("linear_before_reset", 1)
        rnn_node.attribute.append(lbr_attr)
    graph = helper.make_graph([rnn_node], "rnn_test", inputs=input_tensors, outputs=graph_outputs)
    model = helper.make_model(graph, producer_name="rnn_test")
    for target, ctx in tvm.testing.enabled_targets():
        onnx_out = get_onnxruntime_output(model, input_values, "float32")
        tvm_out = get_tvm_output(
            model,
            input_values,
            target,
            ctx,
            output_shapes,
            output_dtype=["float32"] * len(output_shapes),
        )
        for o_out, t_out in zip(onnx_out, tvm_out):
            tvm.testing.assert_allclose(o_out, t_out, rtol=5e-3, atol=5e-3)
@tvm.testing.uses_gpu
def test_lstm():
    """Run the ONNX LSTM importer over a grid of shape/activation/state configs."""
    # Each entry is a kwargs dict forwarded verbatim to verify_rnn.
    cases = [
        # No bias.
        dict(seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False),
        # large batch.
        dict(seq_length=4, batch_size=8, input_size=16, hidden_size=32, use_bias=True),
        # Non power of two.
        dict(seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True),
        # Long sequence.
        dict(seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True),
        # Large hidden.
        dict(seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True),
        # Large input.
        dict(seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True),
        # Different activation testing.
        # Default value hardsigmoid.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=False,
            activations=["HardSigmoid", "Tanh", "Tanh"],
        ),
        # Multiple parameterized activations.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=False,
            activations=["HardSigmoid", "LeakyRelu", "Tanh"],
            alphas=[2.0, 0.5],
            betas=[0.3],
        ),
        # All parameterized with new Affine activation.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=False,
            activations=["HardSigmoid", "LeakyRelu", "Affine"],
            alphas=[2.0, 0.5, 0.8],
            betas=[0.3, 0.1],
        ),
        # Testing with initial state and peepholes.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=True,
            use_initial_state=True,
        ),
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=True,
            use_initial_state=True,
            use_peep=True,
        ),
    ]
    for kwargs in cases:
        verify_rnn(rnn_type="LSTM", **kwargs)
@tvm.testing.uses_gpu
def test_gru():
    """Run the ONNX GRU importer over a grid of shape/activation/state configs."""
    # Each entry is a kwargs dict forwarded verbatim to verify_rnn.
    cases = [
        # No bias.
        dict(seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False),
        # large batch (also exercises linear_before_reset).
        dict(
            seq_length=4,
            batch_size=8,
            input_size=16,
            hidden_size=32,
            use_bias=True,
            linear_before_reset=True,
        ),
        # Non power of two.
        dict(seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True),
        # Long sequence.
        dict(seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True),
        # Large hidden.
        dict(seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True),
        # Large input.
        dict(seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True),
        # Different activation testing.
        # Default value hardsigmoid.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=False,
            activations=["HardSigmoid", "Softsign"],
        ),
        # Multiple parameterized activations.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=False,
            activations=["HardSigmoid", "LeakyRelu"],
            alphas=[2.0, 0.5],
            betas=[0.3],
        ),
        # All parameterized with new Affine activation.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=False,
            activations=["HardSigmoid", "Affine"],
            alphas=[2.0, 0.8],
            betas=[0.3, 0.1],
        ),
        # Testing with initial state.
        dict(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=True,
            use_initial_state=True,
        ),
    ]
    for kwargs in cases:
        verify_rnn(rnn_type="GRU", **kwargs)
# TODO(mbrookhart): enable once VM supports heterogenous execution
# @tvm.testing.uses_gpu
def test_resize():
    """Check the ONNX Resize importer for nearest/linear modes and coord transforms."""

    def check_resize(in_shape, out_shape, scale_list, interp_mode, transform):
        # Resize always takes roi and scales constants; the sizes input is
        # only present when an explicit output shape is requested.
        graph_nodes = [
            make_constant_node("roi", onnx.TensorProto.FLOAT, (0,), []),
            make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scale_list),), scale_list),
        ]
        resize_inputs = ["X", "roi", "scales"]
        if out_shape != []:
            graph_nodes.append(
                make_constant_node("sizes", onnx.TensorProto.INT64, (len(out_shape),), out_shape)
            )
            resize_inputs.append("sizes")
        graph_nodes.append(
            helper.make_node(
                "Resize",
                inputs=resize_inputs,
                outputs=["Y"],
                mode=interp_mode,
                coordinate_transformation_mode=transform,
            )
        )
        if out_shape == []:
            # Derive the output shape from the per-axis scales.
            out_shape = [round(dim * s) for dim, s in zip(in_shape, scale_list)]
        graph = helper.make_graph(
            graph_nodes,
            "resize_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, in_shape)],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, out_shape)],
        )
        model = helper.make_model(graph, producer_name="resize_test")
        verify_with_ort(model, [in_shape], out_shape, use_vm=True, opset=11, freeze_params=True)

    # upsampling
    check_resize([1, 16, 32, 32], [1, 16, 64, 64], [], "nearest", "asymmetric")
    check_resize([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "align_corners")
    check_resize([1, 16, 32, 32], [1, 16, 64, 64], [], "linear", "half_pixel")
    # downsampling
    check_resize([1, 16, 32, 32], [1, 16, 16, 16], [], "nearest", "asymmetric")
    check_resize([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "align_corners")
    check_resize([1, 16, 32, 32], [1, 16, 16, 16], [], "linear", "half_pixel")
    # scales are specified instead of sizes
    check_resize([1, 16, 32, 32], [], [1, 1, 2, 2], "nearest", "asymmetric")
    check_resize([1, 16, 32, 32], [], [1, 1, 0.5, 0.5], "linear", "half_pixel")
@tvm.testing.uses_gpu
def test_nonzero():
    """Validate the ONNX NonZero importer against numpy's nonzero."""

    def check_nonzero(in_array, expected, dtype):
        nonzero_node = helper.make_node(
            "NonZero",
            inputs=["X"],
            outputs=["Y"],
        )
        graph = helper.make_graph(
            [nonzero_node],
            "nonzero_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.INT64, list(in_array.shape))],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, list(expected.shape))],
        )
        model = helper.make_model(graph, producer_name="nonzero_test")
        # NonZero has a data-dependent output shape, so run through the VM.
        verify_with_ort_with_inputs(
            model, [in_array], targets=["llvm"], dtype="int64", use_vm=True, opset=9
        )

    for data in (
        np.array([[1, 0], [1, 1]], dtype=np.int64),
        np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64),
    ):
        # Reference output comes straight from numpy.
        check_nonzero(data, np.array(np.nonzero(data)), dtype=np.int64)
@tvm.testing.uses_gpu
def test_topk():
    """Check the ONNX TopK importer over several ranks, K values and axes."""

    def check_topk(in_dims, k, axis=-1):
        out_dims = list(in_dims)
        out_dims[axis] = k
        topk_node = helper.make_node(
            "TopK", inputs=["X", "K"], outputs=["Values", "Indicies"], axis=axis
        )
        graph = helper.make_graph(
            [topk_node],
            "topk_test",
            inputs=[
                helper.make_tensor_value_info("X", TensorProto.FLOAT, list(in_dims)),
                helper.make_tensor_value_info(
                    "K",
                    TensorProto.INT64,
                    [
                        1,
                    ],
                ),
            ],
            outputs=[
                helper.make_tensor_value_info("Values", TensorProto.FLOAT, out_dims),
                helper.make_tensor_value_info("Indicies", TensorProto.INT64, out_dims),
            ],
        )
        model = helper.make_model(graph, producer_name="topk_test")
        data = np.random.uniform(-10, 10, in_dims).astype(np.float32)
        onnx_out = get_onnxruntime_output(model, [data, np.array([k])])
        # K is a runtime input, so execution goes through the VM path.
        for target, ctx in [("llvm", tvm.cpu())]:
            tvm_out = get_tvm_output_with_vm(model, [data, np.array(k)], target, ctx)
            tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-05, atol=1e-05)

    for n in [12, 32]:
        for shape in [[n], [n, n], [n, n, n]]:
            for k in [1, 5, 10]:
                check_topk(shape, k)
        # Explicit axis selection on the rank-3 case.
        for axis in (0, 1, 2):
            check_topk([n, n, n], 5, axis)
@tvm.testing.uses_gpu
def test_roi_align():
    """Check the ONNX RoiAlign importer (avg mode) over assorted configurations."""

    def check_roi_align(
        in_dims, num_roi, out_height, out_width, sampling_ratio=0, spatial_scale=1.0
    ):
        out_dims = [num_roi, in_dims[1], out_height, out_width]
        roi_node = helper.make_node(
            "RoiAlign",
            inputs=["X", "rois", "batch_indicies"],
            outputs=["Y"],
            mode="avg",
            output_height=out_height,
            output_width=out_width,
            sampling_ratio=sampling_ratio,
            spatial_scale=spatial_scale,
        )
        graph = helper.make_graph(
            [roi_node],
            "roialign_test",
            inputs=[
                helper.make_tensor_value_info("X", TensorProto.FLOAT, list(in_dims)),
                helper.make_tensor_value_info("rois", TensorProto.FLOAT, [num_roi, 4]),
                helper.make_tensor_value_info(
                    "batch_indicies",
                    TensorProto.INT64,
                    [
                        num_roi,
                    ],
                ),
            ],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, out_dims)],
        )
        model = helper.make_model(graph, producer_name="roialign_test")
        np_data = np.random.uniform(size=in_dims).astype("float32")
        # Scale random rois up to the spatial extent of the feature map.
        np_rois = np.random.uniform(size=[num_roi, 4]).astype("float32") * in_dims[2]
        np_batch_indicies = np.random.randint(low=0, high=in_dims[0], size=num_roi)
        verify_with_ort_with_inputs(model, [np_data, np_rois, np_batch_indicies], out_dims)

    check_roi_align((1, 4, 16, 16), 32, 7, 7)
    check_roi_align((4, 4, 16, 32), 32, 7, 7)
    check_roi_align((1, 8, 16, 16), 32, 7, 7)
    check_roi_align((1, 4, 8, 8), 32, 7, 7)
    check_roi_align((1, 4, 16, 16), 16, 5, 7)
    check_roi_align((1, 4, 16, 12), 8, 7, 3)
    check_roi_align((1, 4, 16, 16), 32, 7, 7, spatial_scale=0.5)
    check_roi_align((3, 4, 12, 16), 32, 7, 7, spatial_scale=1.5)
    check_roi_align((5, 4, 16, 14), 32, 7, 7, sampling_ratio=1)
    check_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=2)
def verify_cond_loop():
    """Build and check an ONNX Loop that exits via its boolean condition.

    The loop body computes ``y += iteration_count`` and keeps the loop alive
    while ``y < 5``; the trip count is deliberately high (40) so termination
    comes from the condition rather than the count.
    """
    y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [1])
    y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [1])
    scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [1])
    cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
    cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
    iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
    y = np.array([-2]).astype(np.float32)
    # Scalar constant 5 used as the loop's termination threshold.
    five_const_node = helper.make_node(
        "Constant",
        inputs=[],
        outputs=["five"],
        value=helper.make_tensor(
            name="const_tensor_five", data_type=TensorProto.FLOAT, dims=(), vals=[5]
        ),
    )
    iter_cast_node = helper.make_node(
        "Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
    )
    y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
    less_node = helper.make_node("Less", inputs=["y_out", "five"], outputs=["cond_less"])
    squeeze_node = helper.make_node("Squeeze", inputs=["cond_less"], outputs=["cond_squeeze"])
    cond_cast_node = helper.make_node(
        "Cast", inputs=["cond_squeeze"], outputs=["cond_out"], to=onnx.TensorProto.BOOL
    )
    scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
    loop_body = helper.make_graph(
        [
            five_const_node,
            iter_cast_node,
            y_add_node,
            less_node,
            squeeze_node,
            cond_cast_node,
            scan_identity_node,
        ],
        "loop_body",
        [iter_count, cond_in, y_in],
        [cond_out, y_out, scan_out],
    )
    loop_node = helper.make_node(
        "Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
    )
    loop_graph = onnx.helper.make_graph(
        [loop_node],
        "loop_outer",
        inputs=[
            onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
            onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
            onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1]),
        ],
        outputs=[
            onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [1]),
            onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5, 1]),
        ],
    )
    loop_model = onnx.helper.make_model(loop_graph)
    # Set a high trip count so that the condition trips first.
    trip_count = np.array(40).astype(np.int64)
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the correct dtype argument here.
    cond = np.array(1).astype(bool)
    input_vals = [trip_count, cond, y]
    onnx_out = get_onnxruntime_output(loop_model, input_vals)
    for target, ctx in [("llvm", tvm.cpu())]:
        # Loop requires the VM; freeze_params folds subgraph constants.
        tvm_out = get_tvm_output_with_vm(loop_model, input_vals, target, ctx, freeze_params=True)
        for i in range(len(tvm_out)):
            tvm.testing.assert_allclose(onnx_out[i], tvm_out[i], rtol=1e-05, atol=1e-05)
def verify_count_loop():
    """Build and check an ONNX Loop that runs a fixed number of iterations.

    The body passes its condition through unchanged, so the loop exits only
    when the trip count (5) is exhausted; ``y`` accumulates the iteration
    index on every pass.
    """
    y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [1])
    y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [1])
    scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [1])
    cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
    cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
    iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
    y = np.array([-2]).astype(np.float32)
    iter_cast_node = helper.make_node(
        "Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
    )
    y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
    # The condition is passed through untouched: the loop never self-terminates.
    identity_node = helper.make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
    scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
    loop_body = helper.make_graph(
        [identity_node, iter_cast_node, y_add_node, scan_identity_node],
        "loop_body",
        [iter_count, cond_in, y_in],
        [cond_out, y_out, scan_out],
    )
    loop_node = helper.make_node(
        "Loop", inputs=["trip_count", "cond", "y"], outputs=["res_y", "res_scan"], body=loop_body
    )
    loop_graph = onnx.helper.make_graph(
        [loop_node],
        "loop_outer",
        inputs=[
            onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
            onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
            onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1]),
        ],
        outputs=[
            onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [1]),
            onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5, 1]),
        ],
    )
    loop_model = onnx.helper.make_model(loop_graph)
    trip_count = np.array(5).astype(np.int64)
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the correct dtype argument here.
    cond = np.array(1).astype(bool)
    input_vals = [trip_count, cond, y]
    onnx_out = get_onnxruntime_output(loop_model, input_vals)
    for target, ctx in [("llvm", tvm.cpu())]:
        # Loop requires the VM; freeze_params folds subgraph constants.
        tvm_out = get_tvm_output_with_vm(loop_model, input_vals, target, ctx, freeze_params=True)
        for i in range(len(tvm_out)):
            tvm.testing.assert_allclose(onnx_out[i], tvm_out[i], rtol=1e-05, atol=1e-05)
def test_loop():
    """Exercise both Loop termination modes: condition-driven and count-driven."""
    for check in (verify_cond_loop, verify_count_loop):
        check()
if __name__ == "__main__":
test_flatten()
test_reshape()
test_shape()
test_expand()
test_power()
test_squeeze()
test_unsqueeze()
test_slice()
test_floor()
test_ceil()
test_round()
test_isinf()
test_isnan()
test_clip()
test_clip_min_max_as_inputs()
test_onehot()
test_matmul()
test_gather()
test_gatherelements()
test_gather_nd()
test_scatter()
test_lrn()
test_instance_norm()
test_upsample()
test_forward_min()
test_forward_max()
test_forward_mean()
test_forward_hardsigmoid()
test_forward_arg_min_max()
test_softmax()
test_constantofshape()
test_all_reduce_funcs()
test_pad()
test_split()
test_binary_ops()
test_single_ops()
test_leaky_relu()
test_elu()
test_selu()
test_prelu()
test_ThresholdedRelu()
test_ScaledTanh()
test_ParametricSoftplus()
test_Scale()
test_LogSoftmax()
test_resnet()
test_inception()
test_densenet()
test_sign()
test_not()
test_and()
test_tile()
test_erf()
test_where()
test_or()
test_depth_to_space()
test_space_to_depth()
test_batch_norm()
test_batch_norm_dynamic_subgraph()
test_conv()
test_convtranspose()
test_unsqueeze_constant()
test_pooling()
test_lppool()
test_lstm()
test_gru()
test_resize()
test_nonzero()
test_topk()
test_mod()
test_xor()
test_max_roi_pool()
test_roi_align()
test_range()
test_loop()
| {
"content_hash": "0aba827c9d156e077a36ff364d99bcf8",
"timestamp": "",
"source": "github",
"line_count": 3873,
"max_line_length": 100,
"avg_line_length": 34.104053705138135,
"alnum_prop": 0.571253359579059,
"repo_name": "tqchen/tvm",
"id": "81b5186d0e261bd82677597ea9c19a67f6782749",
"size": "132870",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/python/frontend/onnx/test_forward.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4104"
},
{
"name": "C",
"bytes": "205781"
},
{
"name": "C++",
"bytes": "8124041"
},
{
"name": "CMake",
"bytes": "135007"
},
{
"name": "Cuda",
"bytes": "6677"
},
{
"name": "Go",
"bytes": "111558"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "200193"
},
{
"name": "JavaScript",
"bytes": "15075"
},
{
"name": "Makefile",
"bytes": "48206"
},
{
"name": "Objective-C",
"bytes": "18506"
},
{
"name": "Objective-C++",
"bytes": "56786"
},
{
"name": "Python",
"bytes": "10300435"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "327078"
},
{
"name": "Shell",
"bytes": "157176"
},
{
"name": "TypeScript",
"bytes": "94435"
}
],
"symlink_target": ""
} |
from django.db import models
from localflavor.au.models import (AUBusinessNumberField, AUCompanyNumberField, AUPhoneNumberField, AUPostCodeField,
AUStateField, AUTaxFileNumberField)
class AustralianPlace(models.Model):
    # Each AU-specific localflavor field appears in up to three flavors so the
    # test suite can cover optional, required and defaulted behavior.
    state = AUStateField(blank=True)
    state_required = AUStateField()
    state_default = AUStateField(default="NSW", blank=True)
    postcode = AUPostCodeField(blank=True)
    postcode_required = AUPostCodeField()
    postcode_default = AUPostCodeField(default="2500", blank=True)
    phone = AUPhoneNumberField(blank=True)
    name = models.CharField(max_length=20)
    # Identifier fields: business number (ABN), company number (ACN) and
    # tax file number (TFN) — all required.
    abn = AUBusinessNumberField()
    acn = AUCompanyNumberField()
    tfn = AUTaxFileNumberField()
| {
"content_hash": "345fa1d234f2af0bfc0cc4c9bb14f917",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 116,
"avg_line_length": 40.44444444444444,
"alnum_prop": 0.7266483516483516,
"repo_name": "jieter/django-localflavor",
"id": "a8323f9ca035ff28f6cb3df45dd9e7490e4770f6",
"size": "728",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_au/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "896597"
}
],
"symlink_target": ""
} |
import datetime
import itertools
import ddt
import glanceclient.exc
import mock
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.image import glance
from cinder import test
from cinder.tests.unit.glance import stubs as glance_stubs
CONF = cfg.CONF
class NullWriter(object):
    """Minimal file-like sink used to test ImageService.get: discards writes."""

    def write(self, *arg, **kwargs):
        """Accept any payload and drop it; nothing is stored or returned."""
        return None
class TestGlanceSerializer(test.TestCase):
    def test_serialize(self):
        """Round-trip image metadata through glance's string (de)serializers."""
        # 'mappings' and 'block_device_mapping' are the properties that get
        # flattened to JSON strings by _convert_to_string.
        original = {'name': 'image1',
                    'visibility': 'public',
                    'protected': True,
                    'foo': 'bar',
                    'properties': {
                        'prop1': 'propvalue1',
                        'mappings': [
                            {'device': 'bbb'},
                            {'device': 'yyy'}],
                        'block_device_mapping': [
                            {'device_name': '/dev/fake'},
                            {'device_name': '/dev/fake0'}]}}
        expected = {
            'name': 'image1',
            'visibility': 'public',
            'protected': True,
            'foo': 'bar',
            'properties': {
                'prop1': 'propvalue1',
                'mappings':
                    '[{"device": "bbb"}, '
                    '{"device": "yyy"}]',
                'block_device_mapping':
                    '[{"device_name": "/dev/fake"}, '
                    '{"device_name": "/dev/fake0"}]'}}
        serialized = glance._convert_to_string(original)
        self.assertEqual(expected, serialized)
        # Deserializing must reproduce the original structure exactly.
        self.assertEqual(original, glance._convert_from_string(serialized))
@ddt.ddt
class TestGlanceImageService(test.TestCase):
"""Tests the Glance image service.
At a high level, the translations involved are:
1. Glance -> ImageService - This is needed so we can support
multiple ImageServices (Glance, Local, etc)
2. ImageService -> API - This is needed so we can support multiple
APIs (OpenStack, EC2)
"""
NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
    def setUp(self):
        """Wire a stub glance client and a fake request context into the service."""
        super(TestGlanceImageService, self).setUp()
        client = glance_stubs.StubGlanceClient()
        service_catalog = [{u'type': u'image', u'name': u'glance',
                            u'endpoints': [{
                                u'publicURL': u'http://example.com:9292'}]}]
        self.service = self._create_image_service(client)
        self.context = context.RequestContext('fake', 'fake', auth_token=True)
        self.context.service_catalog = service_catalog
        # Skip real backoff sleeps so retry tests run instantly.
        self.mock_object(glance.time, 'sleep', return_value=None)
def _create_image_service(self, client):
def _fake_create_glance_client(context, netloc, use_ssl, version):
return client
self.mock_object(glance, '_create_glance_client',
_fake_create_glance_client)
client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
def _make_fixture(**kwargs):
fixture = {'name': None,
'properties': {},
'status': None,
'visibility': None,
'protected': None}
fixture.update(kwargs)
return fixture
def _make_datetime_fixture(self):
return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
updated_at=self.NOW_GLANCE_FORMAT,
deleted_at=self.NOW_GLANCE_FORMAT)
def test_get_api_servers(self):
result = glance.get_api_servers(self.context)
expected = (u'example.com:9292', False)
self.assertEqual(expected, next(result))
def test_get_api_servers_not_mounted_at_root_and_ssl(self):
service_catalog = [{u'type': u'image', u'name': u'glance',
u'endpoints': [{
u'publicURL': u'https://example.com/image'}]}]
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.context.service_catalog = service_catalog
result = glance.get_api_servers(self.context)
expected = (u'example.com/image', True)
self.assertEqual(expected, next(result))
    def test_create_with_instance_id(self):
        """Ensure instance_id is persisted as an image-property."""
        fixture = {'name': 'test image',
                   'is_public': False,
                   'protected': False,
                   'properties': {'instance_id': '42', 'user_id': 'fake'}}
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        # show() materializes every standard attribute; unset ones are None.
        expected = {
            'id': image_id,
            'name': 'test image',
            'is_public': False,
            'protected': False,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {'instance_id': '42', 'user_id': 'fake'},
            'owner': None,
        }
        self.assertDictEqual(expected, image_meta)
        # The same record must also appear in the detail() listing.
        image_metas = self.service.detail(self.context)
        self.assertDictEqual(expected, image_metas[0])
    def test_create_without_instance_id(self):
        """Test Creating images without instance_id.

        Ensure we can create an image without having to specify an
        instance_id. Public images are an example of an image not tied to an
        instance.
        """
        fixture = {'name': 'test image', 'is_public': False,
                   'protected': False}
        image_id = self.service.create(self.context, fixture)['id']
        # show() materializes every standard attribute; unset ones are None.
        expected = {
            'id': image_id,
            'name': 'test image',
            'is_public': False,
            'protected': False,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        actual = self.service.show(self.context, image_id)
        self.assertDictEqual(expected, actual)
def test_create(self):
fixture = self._make_fixture(name='test image')
num_images = len(self.service.detail(self.context))
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertEqual(num_images + 1,
len(self.service.detail(self.context)))
def test_create_and_show_non_existing_image(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
'bad image id')
    def test_detail_private_image(self):
        """A private image is listed for its owning project."""
        fixture = self._make_fixture(name='test image')
        fixture['visibility'] = 'private'
        fixture['protected'] = False
        properties = {'owner_id': 'proj1'}
        fixture['properties'] = properties
        self.service.create(self.context, fixture)
        # Temporarily switch the context to the owning project, then restore.
        proj = self.context.project_id
        self.context.project_id = 'proj1'
        image_metas = self.service.detail(self.context)
        self.context.project_id = proj
        self.assertEqual(1, len(image_metas))
        self.assertEqual('test image', image_metas[0]['name'])
        self.assertFalse(image_metas[0]['is_public'])
def test_detail_v1(self):
"""Confirm we send is_public = None as default when using Glance v1."""
self.override_config('glance_api_version', 1)
with mock.patch.object(self.service, '_client') as client_mock:
client_mock.return_value = []
result = self.service.detail(self.context)
self.assertListEqual([], result)
client_mock.call.assert_called_once_with(self.context, 'list',
filters={'is_public': 'none'})
def test_detail_v2(self):
"""Check we don't send is_public key by default with Glance v2."""
self.override_config('glance_api_version', 2)
with mock.patch.object(self.service, '_client') as client_mock:
client_mock.return_value = []
result = self.service.detail(self.context)
self.assertListEqual([], result)
client_mock.call.assert_called_once_with(self.context, 'list')
    def test_detail_marker(self):
        """detail(marker=ids[1]) returns only the images created after it."""
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, marker=ids[1])
        # Images 2..9 remain after the marker.
        self.assertEqual(8, len(image_metas))
        i = 2
        for meta in image_metas:
            expected = {
                'id': ids[i],
                'status': None,
                'is_public': None,
                'protected': None,
                'name': 'TestImage %d' % (i),
                'properties': {},
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'owner': None,
            }
            self.assertDictEqual(expected, meta)
            i = i + 1
def test_detail_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, limit=5)
self.assertEqual(5, len(image_metas))
def test_detail_default_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context)
for i, meta in enumerate(image_metas):
self.assertEqual(meta['name'], 'TestImage %d' % (i))
    def test_detail_marker_and_limit(self):
        """marker and limit combine: 5 images starting after ids[3]."""
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, marker=ids[3], limit=5)
        self.assertEqual(5, len(image_metas))
        # Images 4..8 are returned.
        i = 4
        for meta in image_metas:
            expected = {
                'id': ids[i],
                'status': None,
                'is_public': None,
                'protected': None,
                'name': 'TestImage %d' % (i),
                'properties': {},
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'owner': None,
            }
            self.assertDictEqual(expected, meta)
            i = i + 1
def test_detail_invalid_marker(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
self.assertRaises(exception.Invalid, self.service.detail,
self.context, marker='invalidmarker')
def test_update(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
image_id = image['id']
fixture['name'] = 'new image name'
self.service.update(self.context, image_id, fixture)
new_image_data = self.service.show(self.context, image_id)
self.assertEqual('new image name', new_image_data['name'])
    def test_update_v2(self):
        """Re-run test_update against the Glance v2 API."""
        self.flags(glance_api_version=2)
        self.test_update()
def test_update_with_data(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
image_id = image['id']
fixture['name'] = 'new image name'
data = '*' * 256
self.service.update(self.context, image_id, fixture, data=data)
new_image_data = self.service.show(self.context, image_id)
self.assertEqual(256, new_image_data['size'])
self.assertEqual('new image name', new_image_data['name'])
    def test_update_with_data_v2(self):
        """Re-run test_update_with_data against the Glance v2 API."""
        self.flags(glance_api_version=2)
        self.test_update_with_data()
    @mock.patch.object(glance.GlanceImageService, '_translate_from_glance')
    @mock.patch.object(glance.GlanceImageService, 'show')
    @ddt.data(1, 2)
    def test_update_purge_props(self, ver, show, translate_from_glance):
        """update() drops properties absent from the new metadata.

        v1 signals this with purge_props=True; v2 computes the explicit
        remove_props list from the currently stored properties.
        """
        self.flags(glance_api_version=ver)
        image_id = mock.sentinel.image_id
        client = mock.Mock(call=mock.Mock())
        service = glance.GlanceImageService(client=client)
        image_meta = {'properties': {'k1': 'v1'}}
        client.call.return_value = {'k1': 'v1'}
        if ver == 2:
            # v2 consults show() to learn which stored props must be removed.
            show.return_value = {'properties': {'k2': 'v2'}}
        translate_from_glance.return_value = image_meta.copy()
        ret = service.update(self.context, image_id, image_meta)
        self.assertDictEqual(image_meta, ret)
        if ver == 2:
            client.call.assert_called_once_with(
                self.context, 'update', image_id, k1='v1', remove_props=['k2'])
        else:
            client.call.assert_called_once_with(
                self.context, 'update', image_id, properties={'k1': 'v1'},
                purge_props=True)
        translate_from_glance.assert_called_once_with(self.context,
                                                      {'k1': 'v1'})
def test_delete(self):
fixture1 = self._make_fixture(name='test image 1')
fixture2 = self._make_fixture(name='test image 2')
fixtures = [fixture1, fixture2]
num_images = len(self.service.detail(self.context))
self.assertEqual(0, num_images)
ids = []
for fixture in fixtures:
new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.detail(self.context))
self.assertEqual(2, num_images)
self.service.delete(self.context, ids[0])
num_images = len(self.service.detail(self.context))
self.assertEqual(1, num_images)
    def test_show_passes_through_to_client(self):
        """show() returns the translated client record for a public image."""
        fixture = self._make_fixture(name='image1', is_public=True)
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        # Unset standard attributes come back as None from show().
        expected = {
            'id': image_id,
            'name': 'image1',
            'is_public': True,
            'protected': None,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        self.assertEqual(expected, image_meta)
def test_show_raises_when_no_authtoken_in_the_context(self):
fixture = self._make_fixture(name='image1',
is_public=False,
protected=False,
properties={'one': 'two'})
image_id = self.service.create(self.context, fixture)['id']
self.context.auth_token = False
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
image_id)
    def test_detail_passes_through_to_client(self):
        """detail() returns the translated client records."""
        fixture = self._make_fixture(name='image10', is_public=True)
        image_id = self.service.create(self.context, fixture)['id']
        image_metas = self.service.detail(self.context)
        # Unset standard attributes come back as None.
        expected = [
            {
                'id': image_id,
                'name': 'image10',
                'is_public': True,
                'protected': None,
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'status': None,
                'properties': {},
                'owner': None,
            },
        ]
        self.assertEqual(expected, image_metas)
def test_show_makes_datetimes(self):
fixture = self._make_datetime_fixture()
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
def test_detail_makes_datetimes(self):
fixture = self._make_datetime_fixture()
self.service.create(self.context, fixture)
image_meta = self.service.detail(self.context)[0]
self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
    def test_download_with_retries(self):
        """download() honours the glance_num_retries config option."""
        tries = [0]
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that fails the first time, then succeeds."""
            def get(self, image_id):
                # ``tries`` is read from the enclosing test method's scope
                # at call time, so rebinding it below resets the stub.
                if tries[0] == 0:
                    tries[0] = 1
                    raise glanceclient.exc.ServiceUnavailable('')
                else:
                    return {}
        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        # When retries are disabled, we should get an exception
        self.flags(glance_num_retries=0)
        self.assertRaises(exception.GlanceConnectionFailed,
                          service.download,
                          self.context,
                          image_id,
                          writer)
        # Now lets enable retries. No exception should happen now.
        tries = [0]
        self.flags(glance_num_retries=1)
        service.download(self.context, image_id, writer)
def test_client_forbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
def get(self, image_id):
raise glanceclient.exc.Forbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, writer)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPForbidden exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPForbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, writer)
def test_client_notfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a NotFound exception."""
def get(self, image_id):
raise glanceclient.exc.NotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, writer)
def test_client_httpnotfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPNotFound exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPNotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, writer)
    @mock.patch('six.moves.builtins.open')
    @mock.patch('shutil.copyfileobj')
    @mock.patch('cinder.image.glance.get_api_servers',
                return_value=itertools.cycle([(False, 'localhost:9292')]))
    def test_download_from_direct_file(self, api_servers,
                                       mock_copyfileobj, mock_open):
        """A file:// location is streamed directly when the scheme is allowed."""
        fixture = self._make_fixture(name='test image',
                                     locations=[{'url': 'file:///tmp/test'}])
        image_id = self.service.create(self.context, fixture)['id']
        writer = NullWriter()
        self.flags(allowed_direct_url_schemes=['file'])
        self.flags(glance_api_version=2)
        self.service.download(self.context, image_id, writer)
        # The local file must be copied straight into the writer.
        mock_copyfileobj.assert_called_once_with(mock.ANY, writer)
    @mock.patch('six.moves.builtins.open')
    @mock.patch('shutil.copyfileobj')
    @mock.patch('cinder.image.glance.get_api_servers',
                return_value=itertools.cycle([(False, 'localhost:9292')]))
    def test_download_from_direct_file_non_file(self, api_servers,
                                                mock_copyfileobj, mock_open):
        """A non-file direct URL must not use the direct-copy path."""
        fixture = self._make_fixture(name='test image',
                                     direct_url='swift+http://test/image')
        image_id = self.service.create(self.context, fixture)['id']
        writer = NullWriter()
        self.flags(allowed_direct_url_schemes=['file'])
        self.flags(glance_api_version=2)
        self.service.download(self.context, image_id, writer)
        # swift+http is not in allowed_direct_url_schemes, so no direct copy.
        self.assertIsNone(mock_copyfileobj.call_args)
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
(_service, same_id) = glance.get_remote_image_service(self.context,
image_id)
self.assertEqual(same_id, image_id)
    def test_glance_client_image_ref(self):
        """get_remote_image_service parses the host out of an image URL."""
        fixture = self._make_fixture(name='test image')
        image_id = self.service.create(self.context, fixture)['id']
        image_url = 'http://something-less-likely/%s' % image_id
        (service, same_id) = glance.get_remote_image_service(self.context,
                                                             image_url)
        self.assertEqual(same_id, image_id)
        self.assertEqual('something-less-likely', service._client.netloc)
        # IPv6 hosts (bracketed or not, with or without a port) must survive
        # URL parsing unchanged.
        for ipv6_url in ('[::1]', '::1', '[::1]:444'):
            image_url = 'http://%s/%s' % (ipv6_url, image_id)
            (service, same_id) = glance.get_remote_image_service(self.context,
                                                                 image_url)
            self.assertEqual(same_id, image_id)
            self.assertEqual(ipv6_url, service._client.netloc)
    def test_extracting_missing_attributes(self):
        """Verify behavior from glance objects that are missing attributes.

        This fakes the image class and is missing the checksum and name
        attribute as the client would return if they're not set in the
        database. Regression test for bug #1308058.
        """
        class MyFakeGlanceImage(glance_stubs.FakeImage):
            def __init__(self, metadata):
                # Seed every known attribute with None, then overlay the
                # supplied metadata — 'name' and 'checksum' stay absent.
                IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
                                    'container_format', 'id', 'created_at',
                                    'updated_at', 'deleted', 'status',
                                    'min_disk', 'min_ram', 'is_public',
                                    'visibility', 'protected']
                raw = dict.fromkeys(IMAGE_ATTRIBUTES)
                raw.update(metadata)
                self.__dict__['raw'] = raw
        metadata = {
            'id': 1,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
        }
        image = MyFakeGlanceImage(metadata)
        # Missing attributes must come back as None/{} rather than raising.
        actual = glance._extract_attributes(image)
        expected = {
            'id': 1,
            'name': None,
            'is_public': None,
            'protected': None,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        self.assertEqual(expected, actual)
    @mock.patch('cinder.image.glance.CONF')
    def test_v2_passes_visibility_param(self, config):
        """v2 translation keeps 'visibility' rather than 'is_public'."""
        config.glance_api_version = 2
        config.glance_num_retries = 0
        metadata = {
            'id': 1,
            'size': 2,
            'visibility': 'public',
        }
        image = glance_stubs.FakeImage(metadata)
        client = glance_stubs.StubGlanceClient()
        service = self._create_image_service(client)
        service._image_schema = glance_stubs.FakeSchema()
        actual = service._translate_from_glance('fake_context', image)
        expected = {
            'id': 1,
            'name': None,
            'visibility': 'public',
            'protected': None,
            'size': 2,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
            'created_at': None,
            'updated_at': None
        }
        self.assertEqual(expected, actual)
    @mock.patch('cinder.image.glance.CONF')
    def test_extracting_v2_boot_properties(self, config):
        """v2 translation lifts kernel_id/ramdisk_id into 'properties'."""
        config.glance_api_version = 2
        config.glance_num_retries = 0
        metadata = {
            'id': 1,
            'size': 2,
            'min_disk': 2,
            'min_ram': 2,
            'kernel_id': 'foo',
            'ramdisk_id': 'bar',
        }
        image = glance_stubs.FakeImage(metadata)
        client = glance_stubs.StubGlanceClient()
        service = self._create_image_service(client)
        service._image_schema = glance_stubs.FakeSchema()
        actual = service._translate_from_glance('fake_context', image)
        expected = {
            'id': 1,
            'name': None,
            'visibility': None,
            'protected': None,
            'size': 2,
            'min_disk': 2,
            'min_ram': 2,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'deleted': None,
            'status': None,
            # Non-schema keys are gathered into the 'properties' dict.
            'properties': {'kernel_id': 'foo',
                           'ramdisk_id': 'bar'},
            'owner': None,
            'created_at': None,
            'updated_at': None
        }
        self.assertEqual(expected, actual)
def test_translate_to_glance(self):
self.flags(glance_api_version=1)
client = glance_stubs.StubGlanceClient()
service = self._create_image_service(client)
metadata = {
'id': 1,
'size': 2,
'min_disk': 2,
'min_ram': 2,
'properties': {'kernel_id': 'foo',
'ramdisk_id': 'bar',
'x_billinginfo': '123'},
}
actual = service._translate_to_glance(metadata)
expected = metadata
self.assertEqual(expected, actual)
    def test_translate_to_glance_v2(self):
        """v2 translation flattens 'properties' into top-level keys."""
        self.flags(glance_api_version=2)
        client = glance_stubs.StubGlanceClient()
        service = self._create_image_service(client)
        metadata = {
            'id': 1,
            'size': 2,
            'min_disk': 2,
            'min_ram': 2,
            'properties': {'kernel_id': 'foo',
                           'ramdisk_id': 'bar',
                           'x_billinginfo': '123'},
        }
        actual = service._translate_to_glance(metadata)
        # The nested 'properties' dict is merged into the top level.
        expected = {
            'id': 1,
            'size': 2,
            'min_disk': 2,
            'min_ram': 2,
            'kernel_id': 'foo',
            'ramdisk_id': 'bar',
            'x_billinginfo': '123',
        }
        self.assertEqual(expected, actual)
class TestGlanceClientVersion(test.TestCase):
    """Tests the version of the glance client generated."""
    @mock.patch('cinder.image.glance.glanceclient.Client')
    def test_glance_version_by_flag(self, _mockglanceclient):
        """Test glance version set by flag is honoured."""
        glance.GlanceClientWrapper('fake', 'fake_host', 9292)
        self.assertEqual('1', _mockglanceclient.call_args[0][0])
        self.flags(glance_api_version=2)
        glance.GlanceClientWrapper('fake', 'fake_host', 9292)
        self.assertEqual('2', _mockglanceclient.call_args[0][0])
        # Restore default config so later tests see version 1 again.
        CONF.reset()
    @mock.patch('cinder.image.glance.glanceclient.Client')
    def test_glance_version_by_arg(self, _mockglanceclient):
        """Test glance version set by arg to GlanceClientWrapper"""
        glance.GlanceClientWrapper('fake', 'fake_host', 9292, version=1)
        self.assertEqual('1', _mockglanceclient.call_args[0][0])
        glance.GlanceClientWrapper('fake', 'fake_host', 9292, version=2)
        self.assertEqual('2', _mockglanceclient.call_args[0][0])
    @mock.patch('cinder.image.glance.glanceclient.Client')
    @mock.patch('cinder.image.glance.get_api_servers',
                return_value=itertools.cycle([(False, 'localhost:9292')]))
    def test_call_glance_version_by_arg(self, api_servers, _mockglanceclient):
        """Test glance version set by arg to GlanceClientWrapper.call"""
        glance_wrapper = glance.GlanceClientWrapper()
        glance_wrapper.call('fake_context', 'method', version=2)
        self.assertEqual('2', _mockglanceclient.call_args[0][0])
    @mock.patch('cinder.image.glance.glanceclient.Client')
    @mock.patch('cinder.image.glance.get_api_servers',
                return_value=itertools.cycle([(False, 'localhost:9292')]))
    def test_call_glance_over_quota(self, api_servers, _mockglanceclient):
        """HTTPOverLimit from glance must map to ImageLimitExceeded."""
        glance_wrapper = glance.GlanceClientWrapper()
        fake_client = mock.Mock()
        fake_client.images.method = mock.Mock(
            side_effect=glanceclient.exc.HTTPOverLimit)
        self.mock_object(glance_wrapper, 'client', fake_client)
        self.assertRaises(exception.ImageLimitExceeded,
                          glance_wrapper.call, 'fake_context', 'method',
                          version=2)
def _create_failing_glance_client(info):
    """Return a stub glance client whose first get() raises
    ServiceUnavailable and whose subsequent calls succeed.

    ``info['num_calls']`` is incremented on every get() so callers can
    assert on the number of attempts made.
    """
    class MyGlanceStubClient(glance_stubs.StubGlanceClient):
        """A client that fails the first time, then succeeds."""
        def get(self, image_id):
            info['num_calls'] += 1
            if info['num_calls'] == 1:
                raise glanceclient.exc.ServiceUnavailable('')
            return {}
    return MyGlanceStubClient()
class TestGlanceImageServiceClient(test.TestCase):
    """Tests for _create_glance_client configuration handling."""
    def setUp(self):
        super(TestGlanceImageServiceClient, self).setUp()
        self.context = context.RequestContext('fake', 'fake', auth_token=True)
        # Neutralise any retry back-off sleeps during these tests.
        self.mock_object(glance.time, 'sleep', return_value=None)
    def test_create_glance_client(self):
        """Keystone auth forwards the token and timeout to the client."""
        self.flags(auth_strategy='keystone')
        self.flags(glance_request_timeout=60)
        class MyGlanceStubClient(object):
            # ``inst`` is used instead of ``self`` so the enclosing test
            # case's assert* methods stay reachable through ``self``.
            def __init__(inst, version, *args, **kwargs):
                self.assertEqual('1', version)
                self.assertEqual("http://fake_host:9292", args[0])
                self.assertTrue(kwargs['token'])
                self.assertEqual(60, kwargs['timeout'])
        self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient)
        client = glance._create_glance_client(self.context, 'fake_host:9292',
                                              False)
        self.assertIsInstance(client, MyGlanceStubClient)
    def test_create_glance_client_auth_strategy_is_not_keystone(self):
        """Non-keystone auth must not pass a token to the client."""
        self.flags(auth_strategy='noauth')
        self.flags(glance_request_timeout=60)
        class MyGlanceStubClient(object):
            def __init__(inst, version, *args, **kwargs):
                self.assertEqual('1', version)
                self.assertEqual('http://fake_host:9292', args[0])
                self.assertNotIn('token', kwargs)
                self.assertEqual(60, kwargs['timeout'])
        self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient)
        client = glance._create_glance_client(self.context, 'fake_host:9292',
                                              False)
        self.assertIsInstance(client, MyGlanceStubClient)
    def test_create_glance_client_glance_request_default_timeout(self):
        """With no configured timeout, none is passed to the client."""
        self.flags(auth_strategy='keystone')
        self.flags(glance_request_timeout=None)
        class MyGlanceStubClient(object):
            def __init__(inst, version, *args, **kwargs):
                self.assertEqual("1", version)
                self.assertEqual("http://fake_host:9292", args[0])
                self.assertTrue(kwargs['token'])
                self.assertNotIn('timeout', kwargs)
        self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient)
        client = glance._create_glance_client(self.context, 'fake_host:9292',
                                              False)
        self.assertIsInstance(client, MyGlanceStubClient)
| {
"content_hash": "240b63d392750b0e47246233de8e5b76",
"timestamp": "",
"source": "github",
"line_count": 941,
"max_line_length": 79,
"avg_line_length": 38.48990435706695,
"alnum_prop": 0.5501256246721334,
"repo_name": "ge0rgi/cinder",
"id": "3370b2603dfede2e07e466d0f65bae267fd969d4",
"size": "36856",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/image/test_glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
} |
"""Tests for treadmill.rest.api.*"""
| {
"content_hash": "e16b62ffc4a3aab46aa8ec4928eaf141",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 36,
"avg_line_length": 37,
"alnum_prop": 0.6486486486486487,
"repo_name": "ThoughtWorksInc/treadmill",
"id": "7b7ac17bcf8e558056f646ca6c01efd0c9d98e86",
"size": "37",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/rest/api/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "2244673"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "56861"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class spilloverpolicy_lbvserver_binding(base_resource) :
    """Binding class showing the lbvserver that can be bound to
    spilloverpolicy.
    """
    def __init__(self) :
        self._boundto = ""
        self._priority = 0
        self._activepolicy = 0
        self._gotopriorityexpression = ""
        self._labeltype = ""
        self._labelname = ""
        self._name = ""
        self.___count = 0

    @property
    def boundto(self) :
        """The name of the entity to which the policy is bound."""
        try :
            return self._boundto
        except Exception as e:
            raise e

    @boundto.setter
    def boundto(self, boundto) :
        """The name of the entity to which the policy is bound."""
        try :
            self._boundto = boundto
        except Exception as e:
            raise e

    @property
    def name(self) :
        """Name of the spillover policy."""
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        """Name of the spillover policy."""
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def priority(self) :
        """Specifies the priority of the policy."""
        try :
            return self._priority
        except Exception as e:
            raise e

    @property
    def labelname(self) :
        """Name of the label to invoke if the current policy rule evaluates
        to TRUE.
        """
        try :
            return self._labelname
        except Exception as e:
            raise e

    @property
    def gotopriorityexpression(self) :
        """Expression specifying the priority of the next policy which will
        get evaluated if the current policy rule evaluates to TRUE.
        """
        try :
            return self._gotopriorityexpression
        except Exception as e:
            raise e

    @property
    def labeltype(self) :
        """Type of policy label invocation.<br/>Possible values = reqvserver,
        resvserver, policylabel.
        """
        try :
            return self._labeltype
        except Exception as e:
            raise e

    @property
    def activepolicy(self) :
        """Indicates whether policy is bound or not."""
        try :
            return self._activepolicy
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """Convert a nitro response into an object array (for get requests)."""
        try :
            result = service.payload_formatter.string_to_resource(spilloverpolicy_lbvserver_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                if (result.errorcode == 444) :
                    # Session expired on the appliance: drop the local session.
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.spilloverpolicy_lbvserver_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """Return the value of the object identifier argument (the policy
        name), or None when no name is set.
        """
        try :
            if (self.name) :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, name) :
        """Use this API to fetch spilloverpolicy_lbvserver_binding resources."""
        try :
            obj = spilloverpolicy_lbvserver_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """Use this API to fetch a filtered set of
        spilloverpolicy_lbvserver_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try :
            obj = spilloverpolicy_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, name) :
        """Use this API to count spilloverpolicy_lbvserver_binding resources
        configured on NetScaler.
        """
        try :
            obj = spilloverpolicy_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """Use this API to count the filtered set of
        spilloverpolicy_lbvserver_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try :
            obj = spilloverpolicy_lbvserver_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    class Labeltype:
        # Allowed values for the labeltype attribute.
        reqvserver = "reqvserver"
        resvserver = "resvserver"
        policylabel = "policylabel"
class spilloverpolicy_lbvserver_binding_response(base_response) :
    """Response wrapper holding deserialized binding resources."""
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Fix: the original bound an empty list here and then immediately
        # rebound the attribute — a single pre-sized assignment suffices.
        self.spilloverpolicy_lbvserver_binding = [spilloverpolicy_lbvserver_binding() for _ in range(length)]
| {
"content_hash": "cc9e4ffcae58f415b5c62756a79db671",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 135,
"avg_line_length": 26.634146341463413,
"alnum_prop": 0.6961538461538461,
"repo_name": "mahabs/nitro",
"id": "168a4aa99b3ede305d99d77100a539e0c79d2aa2",
"size": "6074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/spillover/spilloverpolicy_lbvserver_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
from models import Record
from db import session
from flask_restful import reqparse
from flask_restful import abort
from flask_restful import Resource
from flask_restful import fields
from flask_restful import marshal_with
# Marshalling schema: how a Record row is rendered in API responses.
record_fields = {
    'id': fields.Integer,
    'data': fields.String,
    # 'records' must match the endpoint name registered for the resource.
    'uri': fields.Url('records', absolute=True),
}
# Request parser shared by POST/PUT; only the 'data' field is accepted.
parser = reqparse.RequestParser()
parser.add_argument('data', type=str)
class RecordResource(Resource):
    """REST resource for a single Record, addressed by id."""

    @marshal_with(record_fields)
    def get(self, id):
        """Return the record with the given id; 404 if it does not exist."""
        record = session.query(Record).filter(Record.id == id).first()
        if not record:
            abort(404, message="Record {} doesn't exist".format(id))
        return record

    def delete(self, id):
        """Delete the record with the given id; 204 on success, 404 if absent."""
        record = session.query(Record).filter(Record.id == id).first()
        if not record:
            abort(404, message="Record {} doesn't exist".format(id))
        session.delete(record)
        session.commit()
        return {}, 204

    @marshal_with(record_fields)
    def put(self, id):
        """Update the record's data; 404 if the record does not exist."""
        parsed_args = parser.parse_args()
        record = session.query(Record).filter(Record.id == id).first()
        # Fix: previously a PUT on a missing id raised AttributeError
        # (HTTP 500) instead of a clean 404 like get/delete.
        if not record:
            abort(404, message="Record {} doesn't exist".format(id))
        record.data = parsed_args['data']
        session.add(record)
        session.commit()
        return record, 201
class RecordListResource(Resource):
    """REST resource for the Record collection (list / create)."""

    @marshal_with(record_fields)
    def get(self):
        """Return all records."""
        return session.query(Record).all()

    @marshal_with(record_fields)
    def post(self):
        """Create a new record from the request's 'data' field."""
        parsed_args = parser.parse_args()
        record = Record(data=parsed_args['data'])
        # Removed a stray debug print(record) left over from development.
        session.add(record)
        session.commit()
        return record, 201
| {
"content_hash": "5df5bef44352c34f614930690b626910",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 70,
"avg_line_length": 28.689655172413794,
"alnum_prop": 0.6382211538461539,
"repo_name": "RunnerPro/ProApi",
"id": "f913810457aec9ce3ea56f43614646a7755566ac",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "167726"
},
{
"name": "HTML",
"bytes": "4371"
},
{
"name": "JavaScript",
"bytes": "48122"
},
{
"name": "Makefile",
"bytes": "1504"
},
{
"name": "Python",
"bytes": "5297"
}
],
"symlink_target": ""
} |
import paramiko
from mistral.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _read_paramimko_stream(recv_func):
result = ''
buf = recv_func(1024)
while buf != '':
result += buf
buf = recv_func(1024)
return result
def _connect(host, username, password):
    """Open and return a password-authenticated paramiko SSH client."""
    LOG.debug('Creating SSH connection to %s' % host)
    client = paramiko.SSHClient()
    # Accept previously-unseen host keys instead of failing.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=username, password=password)
    return client
def _cleanup(ssh):
    """Close the SSH client and release its transport."""
    ssh.close()
def execute_command(cmd, host, username, password,
                    get_stderr=False, raise_when_error=True):
    """Run *cmd* on *host* over SSH and return its exit code and output.

    :param cmd: shell command to execute remotely.
    :param host: host to connect to.
    :param username: SSH user name.
    :param password: SSH password.
    :param get_stderr: when True, also return the captured stderr.
    :param raise_when_error: when True, raise RuntimeError on a non-zero
                             exit code.
    :return: ``(ret_code, stdout)`` or ``(ret_code, stdout, stderr)``.
    :raises RuntimeError: if the command fails and raise_when_error is True.
    """
    ssh = _connect(host, username, password)
    # Fix: the command was not passed to the logger, so the literal string
    # "Executing command %s" was logged.
    LOG.debug("Executing command %s", cmd)
    try:
        chan = ssh.get_transport().open_session()
        chan.exec_command(cmd)
        # TODO (nmakhotkin): that could hang if stderr buffer overflows
        stdout = _read_paramimko_stream(chan.recv)
        stderr = _read_paramimko_stream(chan.recv_stderr)
        ret_code = chan.recv_exit_status()
        if ret_code and raise_when_error:
            raise RuntimeError("Cmd: %s\nReturn code: %s\nstdout: %s"
                               % (cmd, ret_code, stdout))
        if get_stderr:
            return ret_code, stdout, stderr
        else:
            return ret_code, stdout
    finally:
        # Always close the connection, even when the command failed.
        _cleanup(ssh)
| {
"content_hash": "19e2f695f241ccfd97fd9a0098cb31a1",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 25.818181818181817,
"alnum_prop": 0.6119718309859155,
"repo_name": "dmitryilyin/mistral",
"id": "a774ec6ac8df6e26d79b057edfd5f6043314a4f1",
"size": "2054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mistral/utils/ssh_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from collections import Iterable
import numpy as np
# Raised/shown whenever 3D functionality is requested from the 2D-only core.
Menpo3dErrorMessage = ("In order to keep menpo's dependencies simple, menpo "
                       "does not contain 3D importing and visualization code. "
                       "Please install menpo3d to view 3D meshes.")
class Renderer(object):
    r"""
    Abstract class for rendering visualizations. Framework specific
    implementations of these classes are made in order to separate
    implementation cleanly from the rest of the code.

    It is assumed that the renderers follow some form of stateful pattern for
    rendering to Figures. Therefore, the major interface for rendering involves
    providing a `figure_id` or a `bool` about whether a new figure should be
    used. If neither are provided then the default state of the rendering engine
    is assumed to be maintained.

    Providing both a ``figure_id`` and ``new_figure == True`` is not a valid
    state.

    Parameters
    ----------
    figure_id : `object`
        A figure id. Could be any valid object that identifies a figure in a
        given framework (`str`, `int`, `float`, etc.).
    new_figure : `bool`
        Whether the rendering engine should create a new figure.

    Raises
    ------
    ValueError
        It is not valid to provide a figure id AND request a new figure to
        be rendered on.
    """
    def __init__(self, figure_id, new_figure):
        if figure_id is not None and new_figure:
            raise ValueError("Conflicting arguments. figure_id cannot be "
                             "specified if the new_figure flag is True")
        self.figure_id = figure_id
        self.new_figure = new_figure
        # Subclasses resolve the actual figure object (and may update
        # figure_id) via their get_figure() implementation.
        self.figure = self.get_figure()
    def render(self, **kwargs):
        r"""
        Abstract method to be overridden by the renderer. This will implement
        the actual rendering code for a given object class.

        Parameters
        ----------
        kwargs : `dict`
            Passed through to specific rendering engine.

        Returns
        -------
        viewer : :map:`Renderer`
            Pointer to `self`.
        """
        pass
    def get_figure(self):
        r"""
        Abstract method for getting the correct figure to render on. Should
        also set the correct `figure_id` for the figure.

        Returns
        -------
        figure : `object`
            The figure object that the renderer will render on.
        """
        pass
    def save_figure(self, **kwargs):
        r"""
        Abstract method for saving the figure of the current `figure_id` to
        file. It will implement the actual saving code for a given object class.

        Parameters
        ----------
        kwargs : `dict`
            Options to be set when saving the figure to file.
        """
        pass
class viewwrapper(object):
    r"""
    This class abuses the Python descriptor protocol in order to dynamically
    change the view method at runtime. Although this is more obviously achieved
    through inheritance, the view methods practically amount to syntactic sugar
    and so we want to maintain a single view method per class. We do not want
    to add the mental overhead of implementing different 2D and 3D PointCloud
    classes for example, since, outside of viewing, their implementations would
    be identical.

    Also note that we could have separated out viewing entirely and made the
    check there, but the view method is an important paradigm in menpo that
    we want to maintain.

    Therefore, this function cleverly (and obscurely) returns the correct
    view method for the dimensionality of the given object.
    """
    def __init__(self, wrapped_func):
        # Derive the dimension-specific method names from the wrapped
        # function's name, e.g. 'view' -> '_view_2d' / '_view_3d'.
        fname = wrapped_func.__name__
        self._2d_fname = '_{}_2d'.format(fname)
        self._3d_fname = '_{}_3d'.format(fname)
    def __get__(self, instance, instancetype):
        # Dispatch on the dimensionality of the instance at attribute
        # lookup time (requires the instance to expose n_dims).
        if instance.n_dims == 2:
            return getattr(instance, self._2d_fname)
        elif instance.n_dims == 3:
            return getattr(instance, self._3d_fname)
        else:
            def raise_not_supported(self):
                r"""
                Viewing of objects with greater than 3 dimensions is not
                currently possible.
                """
                raise ValueError('Viewing of objects with greater than 3 '
                                 'dimensions is not currently possible.')
            return raise_not_supported
class Viewable(object):
    r"""
    Abstract interface for objects that can visualize themselves. This assumes
    that the class has dimensionality as the view method checks the ``n_dims``
    property to wire up the correct view method.
    """
    @viewwrapper
    def view(self):
        r"""
        Abstract method for viewing. See the :map:`viewwrapper` documentation
        for an explanation of how the `view` method works.
        """
        pass
    def _view_2d(self, **kwargs):
        # Overridden by concrete 2D-capable subclasses.
        raise NotImplementedError('2D Viewing is not supported.')
    def _view_3d(self, **kwargs):
        # Overridden by concrete 3D-capable subclasses (menpo3d).
        raise NotImplementedError('3D Viewing is not supported.')
class LandmarkableViewable(object):
    r"""
    Mixin for :map:`Landmarkable` and :map:`Viewable` objects. Provides a
    single helper method for viewing Landmarks and `self` on the same figure.
    """
    @viewwrapper
    def view_landmarks(self, **kwargs):
        r"""
        Abstract method for viewing landmarks together with `self`;
        dispatched by dimensionality via :map:`viewwrapper`.
        """
        pass
    def _view_landmarks_2d(self, **kwargs):
        # Overridden by concrete 2D-capable subclasses.
        raise NotImplementedError('2D Landmark Viewing is not supported.')
    def _view_landmarks_3d(self, **kwargs):
        # Overridden by concrete 3D-capable subclasses (menpo3d).
        raise NotImplementedError('3D Landmark Viewing is not supported.')
from menpo.visualize.viewmatplotlib import (
MatplotlibImageViewer2d, MatplotlibImageSubplotsViewer2d,
MatplotlibLandmarkViewer2d, MatplotlibAlignmentViewer2d,
MatplotlibGraphPlotter, MatplotlibMultiImageViewer2d,
MatplotlibMultiImageSubplotsViewer2d, MatplotlibPointGraphViewer2d)
# Default viewer types: matplotlib is the only 2D rendering backend shipped
# with menpo, so every abstract viewer name is bound to its matplotlib
# implementation here.
PointGraphViewer2d = MatplotlibPointGraphViewer2d
LandmarkViewer2d = MatplotlibLandmarkViewer2d
ImageViewer2d = MatplotlibImageViewer2d
ImageSubplotsViewer2d = MatplotlibImageSubplotsViewer2d
AlignmentViewer2d = MatplotlibAlignmentViewer2d
GraphPlotter = MatplotlibGraphPlotter
MultiImageViewer2d = MatplotlibMultiImageViewer2d
MultiImageSubplotsViewer2d = MatplotlibMultiImageSubplotsViewer2d
class ImageViewer(object):
    r"""
    Base :map:`Image` viewer that abstracts away dimensionality. It can
    visualize multiple channels of an image in subplots.

    Parameters
    ----------
    figure_id : `object`
        A figure id. Could be any valid object that identifies a figure in a
        given framework (`str`, `int`, `float`, etc.).
    new_figure : `bool`
        Whether the rendering engine should create a new figure.
    dimensions : {``2``, ``3``} `int`
        The number of dimensions in the image.
    pixels : ``(N, D)`` `ndarray`
        The pixels to render.
    channels: `int` or `list` or ``'all'`` or `None`
        A specific selection of channels to render. The user can choose either
        a single or multiple channels. If ``'all'``, render all channels in
        subplot mode. If `None` and image is not greyscale or RGB, render all
        channels in subplots. If `None` and image is greyscale or RGB, then do
        not plot channels in different subplots.
    mask: ``(N, D)`` `ndarray`
        A `bool` mask to be applied to the image. All points outside the
        mask are set to just above the image max so they render white
        (see ``_masked_pixels``).
    """
    def __init__(self, figure_id, new_figure, dimensions, pixels,
                 channels=None, mask=None):
        # Copy so that masking below never mutates the caller's array.
        pixels = pixels.copy()
        self.figure_id = figure_id
        self.new_figure = new_figure
        self.dimensions = dimensions
        pixels, self.use_subplots = \
            self._parse_channels(channels, pixels)
        self.pixels = self._masked_pixels(pixels, mask)
        self._flip_image_channels()
    def _flip_image_channels(self):
        # Matplotlib expects channels as the last axis; menpo stores them
        # first, so move them to the back before rendering.
        if self.pixels.ndim == 3:
            from menpo.image.base import channels_to_back
            self.pixels = channels_to_back(self.pixels)
    def _parse_channels(self, channels, pixels):
        r"""
        Parse `channels` parameter. If `channels` is `int` or `list`, keep it as
        is. If `channels` is ``'all'``, return a `list` of all the image's
        channels. If `channels` is `None`, return the minimum between an
        `upper_limit` and the image's number of channels. If image is greyscale
        or RGB and `channels` is `None`, then do not plot channels in different
        subplots.

        Parameters
        ----------
        channels : `int` or `list` or ``'all'`` or `None`
            A specific selection of channels to render.
        pixels : ``(N, D)`` `ndarray`
            The image's pixels to render.

        Returns
        -------
        pixels : ``(N, D)`` `ndarray`
            The pixels to be visualized.
        use_subplots : `bool`
            Whether to visualize using subplots.
        """
        # Flag to trigger ImageSubplotsViewer2d or ImageViewer2d
        use_subplots = True
        n_channels = pixels.shape[0]
        if channels is None:
            # Greyscale (1 channel) or RGB (3 channels) render as a single
            # image; anything else falls through to subplots.
            if n_channels == 1:
                pixels = pixels[0, ...]
                use_subplots = False
            elif n_channels == 3:
                use_subplots = False
        elif channels != 'all':
            if isinstance(channels, Iterable):
                if len(channels) == 1:
                    pixels = pixels[channels[0], ...]
                    use_subplots = False
                else:
                    pixels = pixels[channels, ...]
            else:
                # A single (non-iterable) channel index.
                pixels = pixels[channels, ...]
                use_subplots = False
        return pixels, use_subplots
    def _masked_pixels(self, pixels, mask):
        r"""
        Return the masked pixels using a given `bool` mask. In order to make
        sure that the non-masked pixels are visualized in white, their value
        is set to just above the maximum of pixels.

        Parameters
        ----------
        pixels : ``(N, D)`` `ndarray`
            The image's pixels to render.
        mask: ``(N, D)`` `ndarray`
            A `bool` mask to be applied to the image. All points outside the
            mask are set to the image max. If mask is `None`, then the initial
            pixels are returned.

        Returns
        -------
        masked_pixels : ``(N, D)`` `ndarray`
            The masked pixels.
        """
        if mask is not None:
            # nanmax ignores NaNs; the 1% bump pushes unmasked points just
            # past the data range so they render as the brightest value.
            nanmax = np.nanmax(pixels)
            pixels[..., ~mask] = nanmax + (0.01 * nanmax)
        return pixels
    def render(self, **kwargs):
        r"""
        Select the correct type of image viewer for the given image
        dimensionality.

        Parameters
        ----------
        kwargs : `dict`
            Passed through to image viewer.

        Returns
        -------
        viewer : :map:`Renderer`
            The rendering object.

        Raises
        ------
        ValueError
            Only 2D images are supported.
        """
        if self.dimensions == 2:
            if self.use_subplots:
                return ImageSubplotsViewer2d(self.figure_id, self.new_figure,
                                             self.pixels).render(**kwargs)
            else:
                return ImageViewer2d(self.figure_id, self.new_figure,
                                     self.pixels).render(**kwargs)
        else:
            raise ValueError("Only 2D images are currently supported")
def view_image_landmarks(image, channels, masked, group,
                         with_labels, without_labels, figure_id, new_figure,
                         interpolation, cmap_name, alpha, render_lines,
                         line_colour, line_style, line_width,
                         render_markers, marker_style, marker_size,
                         marker_face_colour, marker_edge_colour,
                         marker_edge_width, render_numbering,
                         numbers_horizontal_align, numbers_vertical_align,
                         numbers_font_name, numbers_font_size,
                         numbers_font_style, numbers_font_weight,
                         numbers_font_colour, render_legend, legend_title,
                         legend_font_name, legend_font_style, legend_font_size,
                         legend_font_weight, legend_marker_scale,
                         legend_location, legend_bbox_to_anchor,
                         legend_border_axes_pad, legend_n_columns,
                         legend_horizontal_spacing, legend_vertical_spacing,
                         legend_border, legend_border_padding, legend_shadow,
                         legend_rounded_corners, render_axes, axes_font_name,
                         axes_font_size, axes_font_style, axes_font_weight,
                         axes_x_limits, axes_y_limits, figure_size):
    r"""
    Render an image together with one of its landmark groups.

    This is a helper method that abstracts away the fact that viewing
    images and masked images is identical apart from the mask. Therefore,
    we do the class check in this method and then proceed identically whether
    the image is masked or not.

    See the documentation for _view_2d on Image or _view_2d on MaskedImage
    for information about the parameters. Raises ``ValueError`` if the
    image has no landmarks attached. Returns the landmark viewer of the
    last rendered axis.
    """
    import matplotlib.pyplot as plt
    if not image.has_landmarks:
        raise ValueError('Image does not have landmarks attached, unable '
                         'to view landmarks.')
    # Render self
    from menpo.image import MaskedImage
    # Only MaskedImage.view accepts the `masked` kwarg, hence the branch.
    if isinstance(image, MaskedImage):
        self_view = image.view(figure_id=figure_id, new_figure=new_figure,
                               channels=channels, masked=masked,
                               interpolation=interpolation,
                               cmap_name=cmap_name,
                               alpha=alpha)
    else:
        self_view = image.view(figure_id=figure_id, new_figure=new_figure,
                               channels=channels,
                               interpolation=interpolation,
                               cmap_name=cmap_name,
                               alpha=alpha)
    # Make sure axes are constrained to the image size
    if axes_x_limits is None:
        axes_x_limits = [0, image.width - 1]
    if axes_y_limits is None:
        axes_y_limits = [0, image.height - 1]
    # Render landmarks
    landmark_view = None # initialize viewer object
    # useful in order to visualize the legend only for the last axis object
    render_legend_tmp = False
    for i, ax in enumerate(self_view.axes_list):
        # set current axis
        plt.sca(ax)
        # show legend only for the last axis object
        if i == len(self_view.axes_list) - 1:
            render_legend_tmp = render_legend
        # viewer
        # All styling kwargs are forwarded verbatim to the landmark viewer.
        landmark_view = image.landmarks[group].view(
            with_labels=with_labels, without_labels=without_labels,
            figure_id=self_view.figure_id, new_figure=False,
            image_view=True, render_lines=render_lines,
            line_colour=line_colour, line_style=line_style,
            line_width=line_width, render_markers=render_markers,
            marker_style=marker_style, marker_size=marker_size,
            marker_face_colour=marker_face_colour,
            marker_edge_colour=marker_edge_colour,
            marker_edge_width=marker_edge_width,
            render_numbering=render_numbering,
            numbers_horizontal_align=numbers_horizontal_align,
            numbers_vertical_align=numbers_vertical_align,
            numbers_font_name=numbers_font_name,
            numbers_font_size=numbers_font_size,
            numbers_font_style=numbers_font_style,
            numbers_font_weight=numbers_font_weight,
            numbers_font_colour=numbers_font_colour,
            render_legend=render_legend_tmp, legend_title=legend_title,
            legend_font_name=legend_font_name,
            legend_font_style=legend_font_style,
            legend_font_size=legend_font_size,
            legend_font_weight=legend_font_weight,
            legend_marker_scale=legend_marker_scale,
            legend_location=legend_location,
            legend_bbox_to_anchor=legend_bbox_to_anchor,
            legend_border_axes_pad=legend_border_axes_pad,
            legend_n_columns=legend_n_columns,
            legend_horizontal_spacing=legend_horizontal_spacing,
            legend_vertical_spacing=legend_vertical_spacing,
            legend_border=legend_border,
            legend_border_padding=legend_border_padding,
            legend_shadow=legend_shadow,
            legend_rounded_corners=legend_rounded_corners,
            render_axes=render_axes, axes_font_name=axes_font_name,
            axes_font_size=axes_font_size, axes_font_style=axes_font_style,
            axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
            axes_y_limits=axes_y_limits, figure_size=figure_size)
    return landmark_view
class MultipleImageViewer(ImageViewer):
    r"""
    Viewer for a list of images that share the same channel selection and
    mask. The first image seeds the base :map:`ImageViewer` state; every
    image then has the same channel parsing and masking applied.
    """
    def __init__(self, figure_id, new_figure, dimensions, pixels_list,
                 channels=None, mask=None):
        super(MultipleImageViewer, self).__init__(
            figure_id, new_figure, dimensions, pixels_list[0],
            channels=channels, mask=mask)
        # Apply the identical channel selection and mask to every image.
        pixels_list = [self._parse_channels(channels, p)[0]
                       for p in pixels_list]
        self.pixels_list = [self._masked_pixels(p, mask)
                            for p in pixels_list]

    def render(self, **kwargs):
        r"""
        Render the images with the appropriate 2D multi-image viewer.

        Returns
        -------
        viewer : :map:`Renderer`
            The rendering object.

        Raises
        ------
        ValueError
            Only 2D images are supported.
        """
        if self.dimensions == 2:
            if self.use_subplots:
                # Bug fix: the rendered viewer was previously not returned
                # from this branch (unlike the branch below and
                # ImageViewer.render), so callers received None.
                return MultiImageSubplotsViewer2d(
                    self.figure_id, self.new_figure,
                    self.pixels_list).render(**kwargs)
            else:
                return MultiImageViewer2d(self.figure_id, self.new_figure,
                                          self.pixels_list).render(**kwargs)
        else:
            raise ValueError("Only 2D images are currently supported")
| {
"content_hash": "6c5e68584660f15a75fb2c45cb1da628",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 80,
"avg_line_length": 39.008620689655174,
"alnum_prop": 0.5992265193370165,
"repo_name": "mozata/menpo",
"id": "2073eae1388dd19b9aacfe304a5c1d080337aa3d",
"size": "18100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menpo/visualize/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "C",
"bytes": "70100"
},
{
"name": "C++",
"bytes": "44577"
},
{
"name": "Makefile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "1728478"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
} |
from _mysql import OperationalError
import sys
import os
from DataBaseSchemaSync.SchemaComparison import SchemaComparison
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DataBaseSchemaSync.db.MysqlDataBaseConnector import MysqlDataBaseConnector
import json
from DataBaseSchemaSync.util.configer import Configer
import sys
class DataBaseSchemaSyncer(object):
    """Synchronise a target database's schema with a source database.

    Construction does all the work: load the JSON configuration, open
    both database connections, fetch both schemas and run the
    comparison/sync. Exits the process (status 1) on a missing config
    file or an unreachable database.
    """

    def __init__(self, conf_path=None):
        try:
            configure = Configer(conf_path)
        except IOError:
            sys.stderr.write('file not exist')
            sys.exit(1)
        self.db = json.loads(configure.get_config())
        # Both endpoints were previously opened with duplicated
        # try/except blocks; _connect deduplicates that logic.
        self.source = self._connect(self.db['source'])
        self.target = self._connect(self.db['target'])
        self.source.fetch()
        self.target.fetch()
        self.compare()

    @staticmethod
    def _connect(settings):
        # Open a connection, exiting with the original error message if
        # the database is unreachable.
        try:
            return MysqlDataBaseConnector(settings)
        except OperationalError:
            sys.stderr.write('database connect error')
            sys.exit(1)

    def compare(self):
        """Compute schema differences and apply them to the target."""
        mcmp = SchemaComparison(self.source, self.target)
        diffs = mcmp.compare(self.source.database, self.target.database)
        mcmp.sync(diffs)
if __name__ == "__main__":
    # Optional first CLI argument: path to the configuration file.
    conf_path = sys.argv[1] if len(sys.argv) > 1 else None
    DataBaseSchemaSyncer(conf_path)
| {
"content_hash": "c67c3d958de612d5dc5762bda10e5944",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 28.098039215686274,
"alnum_prop": 0.6454989532449407,
"repo_name": "lyrl/database_schema_sync",
"id": "6354e0e4861c5bcecef90096c65739bcedfa8ac5",
"size": "1475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DataBaseSchemaSync/DataBaseSchemaSyncer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13201"
}
],
"symlink_target": ""
} |
# Interning table: one methodcaller instance per method name.
_method_cache = {}


class methodcaller(object):
    """
    Return a callable object that calls the given method on its operand.

    Unlike the builtin `operator.methodcaller`, instances of this class are
    serializable, and equal method names share a single interned instance.
    """

    __slots__ = ('method',)
    func = property(lambda self: self.method)  # For `funcname` to work

    def __new__(cls, method):
        # Intern: reuse the cached instance for this method name.
        cached = _method_cache.get(method)
        if cached is not None:
            return cached
        inst = object.__new__(cls)
        inst.method = method
        _method_cache[method] = inst
        return inst

    def __call__(self, obj, *args, **kwargs):
        return getattr(obj, self.method)(*args, **kwargs)

    def __reduce__(self):
        # Pickle by method name; unpickling re-interns.
        return (methodcaller, (self.method,))

    def __str__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.method)

    __repr__ = __str__
class MethodCache(object):
    """Attribute access on this object returns a methodcaller for that
    attribute.

    Examples
    --------
    >>> a = [1, 3, 3]
    >>> M.count(a, 3) == a.count(3)
    True
    """

    def __getattr__(self, name):
        return methodcaller(name)

    def __dir__(self):
        # Every method name ever requested.
        return list(_method_cache)


# Module-level convenience singleton.
M = MethodCache()
| {
"content_hash": "f682b18247d998c0bc6ceb902cbde9ff",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 24.163265306122447,
"alnum_prop": 0.5734797297297297,
"repo_name": "mrocklin/streams",
"id": "4e1538da7c4506a9bf7fed145c07d2eb9fdde2bc",
"size": "1184",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "streamz/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "43372"
}
],
"symlink_target": ""
} |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class AddressValidationRequest(object):
    """Client wrapper for the Mozu commerce address-validation endpoint."""

    def __init__(self, apiContext: ApiContext = None, mozuClient = None):
        # Fall back to the module-level default client when none is supplied.
        self.client = mozuClient or default_client()
        context = apiContext if apiContext is not None else ApiContext()
        self.client.withApiContext(context)

    def validateAddress(self,addressValidationRequest, responseFields = None):
        """ Validates the customer address supplied in the request.

        Args:
            | addressValidationRequest(addressValidationRequest) - Properties of the address used for validation of the account's full address. This content may include multiple lines of an address, city, state/province, zip/postal code, and country.
            | responseFields (string) - Use this field to include those fields which are not included by default.

        Returns:
            | AddressValidationResponse

        Raises:
            | ApiException
        """
        url = MozuUrl("/api/commerce/customer/addressvalidation/?responseFields={responseFields}", "POST", UrlLocation.TenantPod, False)
        url.formatUrl("responseFields", responseFields)
        self.client.withResourceUrl(url).withBody(addressValidationRequest).execute()
        return self.client.result()
| {
"content_hash": "d9c7b826680b66581267f6a6594adbbc",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 241,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.7604712041884817,
"repo_name": "sanjaymandadi/mozu-python-sdk",
"id": "8646c39d631fd35f4aa1fe5a4e5e8e49e728d154",
"size": "1529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mozurestsdk/commerce/customer/addressvalidationrequest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "649189"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an optional ``mobile_number`` CharField to the Speaker model."""
    dependencies = [
        ('speakers', '0003_remove_speaker_sessions_preference'),
    ]
    operations = [
        # blank=True keeps the field optional; existing rows get the
        # implicit empty-string default for CharField.
        migrations.AddField(
            model_name='speaker',
            name='mobile_number',
            field=models.CharField(help_text='Your mobile number, that we can use to contact you at PyCon if your talk has been accepted and put on the schedule but we cannot find you when it is time.', max_length=40, blank=True),
        ),
    ]
| {
"content_hash": "49d7d80e3da69c2b6e70cf29fda46013",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 230,
"avg_line_length": 32.27777777777778,
"alnum_prop": 0.657487091222031,
"repo_name": "PyCon/pycon",
"id": "61f85d594b52a6bdc966a1df154f001dc91da320",
"size": "605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "symposion/speakers/migrations/0004_speaker_mobile_number.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "80909"
},
{
"name": "Dockerfile",
"bytes": "163"
},
{
"name": "HTML",
"bytes": "313093"
},
{
"name": "JavaScript",
"bytes": "161207"
},
{
"name": "Makefile",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "993540"
},
{
"name": "Shell",
"bytes": "14094"
},
{
"name": "Smarty",
"bytes": "7379"
}
],
"symlink_target": ""
} |
import zipfile
def unzip(filename, dir):
    """Extract every member of the zip archive `filename` into `dir`."""
    # Use a context manager so the archive handle is closed
    # deterministically instead of being leaked.
    with zipfile.ZipFile(filename) as zf:
        zf.extractall(dir)
def unzip1(filename, dir):
    """Extract the single archive member named `dir` from `filename`.

    NOTE(review): despite the parameter name, ``ZipFile.extract`` treats
    its first positional argument as a *member name*, not a destination
    directory; the member is written to the current working directory.
    Confirm the intended semantics against callers.
    """
    # Close the archive deterministically instead of leaking the handle.
    with zipfile.ZipFile(filename) as zf:
        zf.extract(dir)
| {
"content_hash": "02213e2ce91f81cfd085d52a61bac627",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 34,
"avg_line_length": 18.9,
"alnum_prop": 0.671957671957672,
"repo_name": "github/codeql",
"id": "b6ae48e1d88611b1e5cc2b56314263989592e6b5",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/ql/src/experimental/Security/CWE-022/zipslip_good.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
    """Stream settings for an ``ohlc`` trace (auto-generated Plotly code).

    Exposes the two streaming properties, ``maxpoints`` and ``token``;
    validation and storage are delegated to the
    ``_BaseTraceHierarchyType`` machinery via ``self[...]`` item access.
    """
    # class properties
    # --------------------
    _parent_path_str = "ohlc"
    _path_str = "ohlc.stream"
    # Property names accepted by this node; anything else is rejected or
    # skipped depending on `skip_invalid`.
    _valid_props = {"maxpoints", "token"}
    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self["maxpoints"]
    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val
    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://chart-studio.plotly.com/settings for more
        details.

        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["token"]
    @token.setter
    def token(self, val):
        self["token"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """
    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.ohlc.Stream`
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")
        # Internal fast path: when constructed by a parent node, adopt it
        # and skip all validation below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        # Coerce `arg` to a plain dict: copy dicts, serialize instances.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.Stream`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("maxpoints", None)
        _v = maxpoints if maxpoints is not None else _v
        if _v is not None:
            self["maxpoints"] = _v
        _v = arg.pop("token", None)
        _v = token if token is not None else _v
        if _v is not None:
            self["token"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| {
"content_hash": "654141f7bcddef715a8468edc06f15c2",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 82,
"avg_line_length": 29.364285714285714,
"alnum_prop": 0.5254196059352956,
"repo_name": "plotly/python-api",
"id": "8bf7dfcd6dc619b808657d5b918816f9d280695b",
"size": "4111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/ohlc/_stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import base64
import cloudstorage
import datetime
import json
import logging
import tba_config
import traceback
from google.appengine.ext import ndb
from consts.event_type import EventType
from controllers.api.api_status_controller import ApiStatusController
from datafeeds.datafeed_base import DatafeedBase
from models.district import District
from models.event import Event
from models.event_team import EventTeam
from models.sitevar import Sitevar
from parsers.fms_api.fms_api_awards_parser import FMSAPIAwardsParser
from parsers.fms_api.fms_api_district_list_parser import FMSAPIDistrictListParser
from parsers.fms_api.fms_api_district_rankings_parser import FMSAPIDistrictRankingsParser
from parsers.fms_api.fms_api_event_alliances_parser import FMSAPIEventAlliancesParser
from parsers.fms_api.fms_api_event_list_parser import FMSAPIEventListParser
from parsers.fms_api.fms_api_event_rankings_parser import FMSAPIEventRankingsParser, FMSAPIEventRankings2Parser
from parsers.fms_api.fms_api_match_parser import FMSAPIHybridScheduleParser, FMSAPIMatchDetailsParser
from parsers.fms_api.fms_api_team_details_parser import FMSAPITeamDetailsParser
from parsers.fms_api.fms_api_team_avatar_parser import FMSAPITeamAvatarParser
class DatafeedFMSAPI(object):
EVENT_SHORT_EXCEPTIONS = {
'arc': 'archimedes',
'cars': 'carson',
'carv': 'carver',
'cur': 'curie',
'dal': 'daly',
'dar': 'darwin',
'gal': 'galileo',
'hop': 'hopper',
'new': 'newton',
'roe': 'roebling',
'tes': 'tesla',
'tur': 'turing',
}
SUBDIV_TO_DIV = { # 2015, 2016
'arc': 'arte',
'cars': 'gaca',
'carv': 'cuca',
'cur': 'cuca',
'gal': 'gaca',
'hop': 'neho',
'new': 'neho',
'tes': 'arte',
}
SUBDIV_TO_DIV_2017 = { # 2017+
'arc': 'arda',
'cars': 'cate',
'carv': 'cane',
'cur': 'cuda',
'dal': 'arda',
'dar': 'cuda',
'gal': 'garo',
'hop': 'hotu',
'new': 'cane',
'roe': 'garo',
'tes': 'cate',
'tur': 'hotu',
}
SAVED_RESPONSE_DIR_PATTERN = '/tbatv-prod-hrd.appspot.com/frc-api-response/{}/' # % (url)
    def __init__(self, version, sim_time=None, save_response=False):
        """Configure auth and URL patterns for the given FRC API version.

        version : 'v1.0' or 'v2.0' (anything else raises).
        sim_time : when set, responses are replayed from cloud storage
            instead of fetched live; disables saving.
        save_response : archive raw responses to cloud storage.
        """
        self._sim_time = sim_time
        # Never save while simulating - we would be re-archiving replays.
        self._save_response = save_response and sim_time is None
        fms_api_secrets = Sitevar.get_by_id('fmsapi.secrets')
        if fms_api_secrets is None:
            # Credentials are only required for live fetches.
            if self._sim_time is None:
                raise Exception("Missing sitevar: fmsapi.secrets. Can't access FMS API.")
        else:
            fms_api_username = fms_api_secrets.contents['username']
            fms_api_authkey = fms_api_secrets.contents['authkey']
            # HTTP Basic auth token for the FRC API.
            self._fms_api_authtoken = base64.b64encode('{}:{}'.format(fms_api_username, fms_api_authkey))
        # Sitevar tracking whether the upstream API is currently down.
        self._is_down_sitevar = Sitevar.get_by_id('apistatus.fmsapi_down')
        if not self._is_down_sitevar:
            self._is_down_sitevar = Sitevar(id="apistatus.fmsapi_down", description="Is FMSAPI down?")
        self.FMS_API_DOMAIN = 'https://frc-api.firstinspires.org/'
        # URL layouts differ between API versions; pick the right set.
        if version == 'v1.0':
            FMS_API_URL_BASE = self.FMS_API_DOMAIN + 'api/v1.0'
            self.FMS_API_AWARDS_URL_PATTERN = FMS_API_URL_BASE + '/awards/%s/%s' # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/schedule/%s/%s/qual/hybrid' # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/schedule/%s/%s/playoff/hybrid' # (year, event_short)
            self.FMS_API_EVENT_RANKINGS_URL_PATTERN = FMS_API_URL_BASE + '/rankings/%s/%s' # (year, event_short)
            self.FMS_API_EVENT_ALLIANCES_URL_PATTERN = FMS_API_URL_BASE + '/alliances/%s/%s' # (year, event_short)
            self.FMS_API_TEAM_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/teams/%s/?teamNumber=%s' # (year, teamNumber)
            self.FMS_API_TEAM_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?teamNumber=%s' # (year, teamNumber)
            self.FMS_API_EVENT_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?eventCode=%s&page=%s' # (year, eventCode, page)
            self.FMS_API_EVENT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/events/season=%s'
            self.FMS_API_EVENTTEAM_LIST_URL_PATTERN = FMS_API_URL_BASE + '/teams/?season=%s&eventCode=%s&page=%s' # (year, eventCode, page)
        elif version == 'v2.0':
            FMS_API_URL_BASE = self.FMS_API_DOMAIN + 'v2.0'
            self.FMS_API_AWARDS_URL_PATTERN = FMS_API_URL_BASE + '/%s/awards/%s' # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/%s/schedule/%s/qual/hybrid' # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/%s/schedule/%s/playoff/hybrid' # (year, event_short)
            self.FMS_API_MATCH_DETAILS_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/%s/scores/%s/qual' # (year, event_short)
            self.FMS_API_MATCH_DETAILS_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/%s/scores/%s/playoff' # (year, event_short)
            self.FMS_API_EVENT_RANKINGS_URL_PATTERN = FMS_API_URL_BASE + '/%s/rankings/%s' # (year, event_short)
            self.FMS_API_EVENT_ALLIANCES_URL_PATTERN = FMS_API_URL_BASE + '/%s/alliances/%s' # (year, event_short)
            self.FMS_API_TEAM_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/%s/teams/?teamNumber=%s' # (year, teamNumber)
            self.FMS_API_TEAM_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?teamNumber=%s' # (year, teamNumber)
            self.FMS_API_EVENT_AVATAR_URL_PATTERN = FMS_API_URL_BASE + '/%s/avatars/?eventCode=%s&page=%s' # (year, eventCode, page)
            self.FMS_API_EVENT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/events' # year
            self.FMS_API_EVENT_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/%s/events?eventCode=%s' # (year, event_short)
            self.FMS_API_EVENTTEAM_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/teams/?eventCode=%s&page=%s' # (year, eventCode, page)
            self.FMS_API_DISTRICT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/districts' # (year)
            self.FMS_API_DISTRICT_RANKINGS_PATTERN = FMS_API_URL_BASE + '/%s/rankings/district?districtCode=%s&page=%s' # (year, district abbreviation, page)
        else:
            raise Exception("Unknown FMS API version: {}".format(version))
def _get_event_short(self, event_short, event=None):
# First, check if we've manually set the FRC API key
if event and event.first_code:
return event.first_code
# Otherwise, check hard-coded exceptions
return self.EVENT_SHORT_EXCEPTIONS.get(event_short, event_short)
    @ndb.tasklet
    def _parse_async(self, url, parser):
        """Fetch `url` (or replay an archived response) and run `parser`.

        Yields the parser's result, a list of results when `parser` is a
        list, or None on any fetch/parse failure. Also maintains the
        'apistatus.fmsapi_down' sitevar and optionally archives raw
        responses to cloud storage.
        """
        # For URLFetches
        context = ndb.get_context()
        # Prep for saving/reading raw API response into/from cloudstorage
        gcs_dir_name = self.SAVED_RESPONSE_DIR_PATTERN.format(url.replace(self.FMS_API_DOMAIN, ''))
        if self._save_response and tba_config.CONFIG['save-frc-api-response']:
            try:
                gcs_dir_contents = cloudstorage.listbucket(gcs_dir_name) # This is async
            except Exception, exception:
                logging.error("Error prepping for saving API response for: {}".format(url))
                logging.error(traceback.format_exc())
                gcs_dir_contents = []
        if self._sim_time:
            """
            Simulate FRC API response at a given time
            """
            content = None
            # Get list of responses
            file_prefix = 'frc-api-response/{}/'.format(url.replace(self.FMS_API_DOMAIN, ''))
            bucket_list_url = 'https://www.googleapis.com/storage/v1/b/bucket/o?bucket=tbatv-prod-hrd.appspot.com&prefix={}'.format(file_prefix)
            try:
                result = yield context.urlfetch(bucket_list_url)
            except Exception, e:
                logging.error("URLFetch failed for: {}".format(bucket_list_url))
                logging.info(e)
                raise ndb.Return(None)
            # Find appropriate timed response
            # Archived files are named by timestamp; pick the newest file
            # at or before the simulated time.
            last_file_url = None
            for item in json.loads(result.content)['items']:
                filename = item['name']
                time_str = filename.replace(file_prefix, '').replace('.json', '').strip()
                file_time = datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S.%f")
                if file_time <= self._sim_time:
                    last_file_url = item['mediaLink']
                else:
                    break
            # Fetch response
            if last_file_url:
                try:
                    result = yield context.urlfetch(last_file_url)
                except Exception, e:
                    logging.error("URLFetch failed for: {}".format(last_file_url))
                    logging.info(e)
                    raise ndb.Return(None)
                content = result.content
            if content is None:
                raise ndb.Return(None)
            # Fake a urlfetch result object so the code below can treat
            # replayed and live responses identically.
            result = type('DummyResult', (object,), {"status_code": 200, "content": content})
        else:
            """
            Make fetch to FRC API
            """
            headers = {
                'Authorization': 'Basic {}'.format(self._fms_api_authtoken),
                'Accept': 'application/json',
                'Cache-Control': 'no-cache, max-age=10',
                'Pragma': 'no-cache',
            }
            try:
                result = yield context.urlfetch(url, headers=headers)
            except Exception, e:
                logging.error("URLFetch failed for: {}".format(url))
                logging.info(e)
                raise ndb.Return(None)
        old_status = self._is_down_sitevar.contents
        if result.status_code == 200:
            # API is reachable again - clear the "down" flag if it was set.
            if old_status == True:
                self._is_down_sitevar.contents = False
                self._is_down_sitevar.put()
            ApiStatusController.clear_cache_if_needed(old_status, self._is_down_sitevar.contents)
            # Save raw API response into cloudstorage
            if self._save_response and tba_config.CONFIG['save-frc-api-response']:
                try:
                    # Check for last response
                    last_item = None
                    for last_item in gcs_dir_contents:
                        pass
                    write_new = True
                    if last_item is not None:
                        with cloudstorage.open(last_item.filename, 'r') as last_json_file:
                            if last_json_file.read() == result.content:
                                write_new = False # Do not write if content didn't change
                    if write_new:
                        file_name = gcs_dir_name + '{}.json'.format(datetime.datetime.now())
                        with cloudstorage.open(file_name, 'w') as json_file:
                            json_file.write(result.content)
                except Exception, exception:
                    logging.error("Error saving API response for: {}".format(url))
                    logging.error(traceback.format_exc())
            try:
                # Strip a UTF-8 BOM and trailing NULs before decoding.
                json_content = json.loads(result.content.lstrip('\xef\xbb\xbf').rstrip('\x00'))
            except Exception, exception:
                logging.error("Error parsing: {}".format(url))
                logging.error(traceback.format_exc())
                raise ndb.Return(None)
            if type(parser) == list:
                raise ndb.Return([p.parse(json_content) for p in parser])
            else:
                raise ndb.Return(parser.parse(json_content))
        elif result.status_code % 100 == 5:
            # 5XX error - something is wrong with the server
            logging.warning('URLFetch for %s failed; Error code %s' % (url, result.status_code))
            if old_status == False:
                self._is_down_sitevar.contents = True
                self._is_down_sitevar.put()
            ApiStatusController.clear_cache_if_needed(old_status, self._is_down_sitevar.contents)
            raise ndb.Return(None)
        else:
            logging.warning('URLFetch for %s failed; Error code %s' % (url, result.status_code))
            raise ndb.Return(None)
    @ndb.toplevel
    def _parse(self, url, parser):
        # Synchronous wrapper: @ndb.toplevel drives the async tasklet to
        # completion and returns its result to the caller.
        result = yield self._parse_async(url, parser)
        raise ndb.Return(result)
def getAwards(self, event):
awards = []
if event.event_type_enum == EventType.CMP_DIVISION and event.year >= 2015: # 8 subdivisions from 2015+ have awards listed under 4 divisions
event_team_keys = EventTeam.query(EventTeam.event == event.key).fetch(keys_only=True)
valid_team_nums = set([int(etk.id().split('_')[1][3:]) for etk in event_team_keys])
if event.year >= 2017:
division = self.SUBDIV_TO_DIV_2017[event.event_short]
else:
division = self.SUBDIV_TO_DIV[event.event_short]
awards += self._parse(self.FMS_API_AWARDS_URL_PATTERN % (event.year, self._get_event_short(division)), FMSAPIAwardsParser(event, valid_team_nums))
awards += self._parse(self.FMS_API_AWARDS_URL_PATTERN % (event.year, self._get_event_short(event.event_short, event)), FMSAPIAwardsParser(event))
return awards
def getEventAlliances(self, event_key):
year = int(event_key[:4])
event_short = event_key[4:]
event = Event.get_by_id(event_key)
alliances = self._parse(self.FMS_API_EVENT_ALLIANCES_URL_PATTERN % (year, self._get_event_short(event_short, event)), FMSAPIEventAlliancesParser())
return alliances
def getMatches(self, event_key):
year = int(event_key[:4])
event_short = event_key[4:]
event = Event.get_by_id(event_key)
hs_parser = FMSAPIHybridScheduleParser(year, event_short)
detail_parser = FMSAPIMatchDetailsParser(year, event_short)
qual_matches_future = self._parse_async(self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN % (year, self._get_event_short(event_short, event)), hs_parser)
playoff_matches_future = self._parse_async(self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN % (year, self._get_event_short(event_short, event)), hs_parser)
qual_details_future = self._parse_async(self.FMS_API_MATCH_DETAILS_QUAL_URL_PATTERN % (year, self._get_event_short(event_short, event)), detail_parser)
playoff_details_future = self._parse_async(self.FMS_API_MATCH_DETAILS_PLAYOFF_URL_PATTERN % (year, self._get_event_short(event_short, event)), detail_parser)
matches_by_key = {}
qual_matches = qual_matches_future.get_result()
if qual_matches is not None:
for match in qual_matches[0]:
matches_by_key[match.key.id()] = match
playoff_matches = playoff_matches_future.get_result()
if playoff_matches is not None:
for match in playoff_matches[0]:
matches_by_key[match.key.id()] = match
qual_details = qual_details_future.get_result()
qual_details_items = qual_details.items() if qual_details is not None else []
playoff_details = playoff_details_future.get_result()
playoff_details_items = playoff_details.items() if playoff_details is not None else []
for match_key, match_details in qual_details_items + playoff_details_items:
match_key = playoff_matches[1].get(match_key, match_key)
if match_key in matches_by_key:
matches_by_key[match_key].score_breakdown_json = json.dumps(match_details)
return filter(
lambda m: not FMSAPIHybridScheduleParser.is_blank_match(m),
matches_by_key.values())
def getEventRankings(self, event_key):
year = int(event_key[:4])
event_short = event_key[4:]
event = Event.get_by_id(event_key)
result = self._parse(
self.FMS_API_EVENT_RANKINGS_URL_PATTERN % (year, self._get_event_short(event_short, event)),
[FMSAPIEventRankingsParser(year), FMSAPIEventRankings2Parser(year)])
if result:
return result
else:
return None, None
def getTeamDetails(self, year, team_key):
    """Fetch season details for a single team ('frcXXXX' key).

    Returns the first parsed result, or None when nothing came back.
    """
    team_number = team_key[3:]  # everything after 'frc'
    parsed = self._parse(
        self.FMS_API_TEAM_DETAILS_URL_PATTERN % (year, team_number),
        FMSAPITeamDetailsParser(year))
    return parsed[0] if parsed else None
def getTeamAvatar(self, year, team_key):
    """Fetch a single team's avatar for a season.

    Returns (avatar, keys_to_delete); avatar is None when the team has none.
    """
    team_number = team_key[3:]  # everything after 'frc'
    # NOTE(review): unlike the paginated fetchers below, this unpacks the
    # parse result directly -- a None result from _parse would raise a
    # TypeError here; confirm _parse cannot return None for this endpoint.
    avatars, keys_to_delete, _ = self._parse(self.FMS_API_TEAM_AVATAR_URL_PATTERN % (year, team_number), FMSAPITeamAvatarParser(year))
    if avatars:
        return avatars[0], keys_to_delete
    else:
        return None, keys_to_delete
# Returns a tuple: (list(Event), list(District))
def getEventList(self, year):
    """Fetch all events for a season; empty lists when the fetch fails."""
    parsed = self._parse(
        self.FMS_API_EVENT_LIST_URL_PATTERN % (year),
        FMSAPIEventListParser(year))
    return parsed if parsed else ([], [])
# Returns a list of districts
def getDistrictList(self, year):
    """Fetch the list of districts for a season (parser output, may be falsy)."""
    return self._parse(self.FMS_API_DISTRICT_LIST_URL_PATTERN % (year),
                       FMSAPIDistrictListParser(year))
def getDistrictRankings(self, district_key):
    """Fetch district rankings, following the paginated API, and store the
    accumulated advancement data on the District model.

    Returns a single-element list containing the updated District, or None
    when the district does not exist.
    """
    district = District.get_by_id(district_key)
    if not district:
        return None
    year = int(district_key[:4])
    district_short = district_key[4:]
    advancement = {}
    # `advancement` is threaded through the pages: handed to the parser and
    # replaced by the updated dict the parser returns.
    for page in range(1, 15):  # Ensure this won't loop forever
        url = self.FMS_API_DISTRICT_RANKINGS_PATTERN % (year, district_short.upper(), page)
        result = self._parse(url, FMSAPIDistrictRankingsParser(advancement))
        if not result:
            break
        advancement, more_pages = result
        if not more_pages:
            break
    district.advancement = advancement
    return [district]
# Returns a tuple: (list(Event), list(District))
def getEventDetails(self, event_key):
    """Fetch details for one event; empty lists when the fetch fails."""
    season = int(event_key[:4])
    short_code = event_key[4:]
    event = Event.get_by_id(event_key)
    api_code = self._get_event_short(short_code, event)
    parsed = self._parse(
        self.FMS_API_EVENT_DETAILS_URL_PATTERN % (season, api_code),
        FMSAPIEventListParser(season, short=short_code))
    return parsed if parsed else ([], [])
# Returns a list(Media)
def getEventTeamAvatars(self, event_key):
    """Fetch team avatars for an event across all result pages.

    Returns (avatars, keys_to_delete) where keys_to_delete is the set of
    avatar keys the parser flagged for removal.
    """
    year = int(event_key[:4])
    event_short = event_key[4:]
    event = Event.get_by_id(event_key)
    parser = FMSAPITeamAvatarParser(year, short=event_short)
    api_event_short = self._get_event_short(event_short, event)
    avatars = []
    keys_to_delete = set()
    for page in range(1, 9):  # Ensure this won't loop forever. 8 pages should be more than enough
        url = self.FMS_API_EVENT_AVATAR_URL_PATTERN % (year, api_event_short, page)
        result = self._parse(url, parser)
        if result is None:
            break
        partial_avatars, partial_keys_to_delete, more_pages = result
        avatars.extend(partial_avatars)
        keys_to_delete = keys_to_delete.union(partial_keys_to_delete)
        if not more_pages:
            break
    return avatars, keys_to_delete
# Returns list of tuples (team, districtteam, robot)
def getEventTeams(self, event_key):
    """Fetch all teams attending an event, following pagination.

    Returns a list of (team, districtteam, robot) model tuples.
    """
    year = int(event_key[:4])
    event = Event.get_by_id(event_key)
    # Fix: the short code was previously run through _get_event_short twice
    # (once without the event object, then again with it when building each
    # URL). Resolve it exactly once, with the event, matching the pattern
    # used by getEventTeamAvatars.
    api_event_short = self._get_event_short(event_key[4:], event)
    parser = FMSAPITeamDetailsParser(year)
    models = []  # will be list of tuples (team, districtteam, robot) model
    for page in range(1, 9):  # Ensure this won't loop forever. 8 pages should be more than enough
        url = self.FMS_API_EVENTTEAM_LIST_URL_PATTERN % (year, api_event_short, page)
        result = self._parse(url, parser)
        if result is None:
            break
        partial_models, more_pages = result
        models.extend(partial_models)
        if not more_pages:
            break
    return models
| {
"content_hash": "79e08385938932e0438cc7e5fe235cb0",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 165,
"avg_line_length": 46.85876993166287,
"alnum_prop": 0.5950610082154489,
"repo_name": "bdaroz/the-blue-alliance",
"id": "cacedc34fd92301e4902de66a50906c8129c16b8",
"size": "20571",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "datafeeds/datafeed_fms_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "340944"
},
{
"name": "Dockerfile",
"bytes": "1510"
},
{
"name": "HTML",
"bytes": "910114"
},
{
"name": "JavaScript",
"bytes": "512382"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2744849"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "13901"
}
],
"symlink_target": ""
} |
from route53.change_set import ChangeSet
from route53.exceptions import AlreadyDeletedError
from route53.resource_record_set import AResourceRecordSet, AAAAResourceRecordSet, CNAMEResourceRecordSet, MXResourceRecordSet, NSResourceRecordSet, PTRResourceRecordSet, SOAResourceRecordSet, SPFResourceRecordSet, SRVResourceRecordSet, TXTResourceRecordSet
class HostedZone(object):
    """
    A hosted zone is a collection of resource record sets hosted by Route 53.
    Like a traditional DNS zone file, a hosted zone represents a collection of
    resource record sets that are managed together under a single domain name.
    Each hosted zone has its own metadata and configuration information.

    .. warning:: Do not instantiate this directly yourself. Go through
        one of the methods on :py:class:`route53.connection.Route53Connection`.
    """

    def __init__(self, connection, id, name, caller_reference,
                 resource_record_set_count, comment):
        """
        :param Route53Connection connection: The connection instance that
            was used to query the Route53 API, leading to this object's
            creation.
        :param str id: Route53's unique ID for this hosted zone.
        :param str name: The name of the domain.
        :param str caller_reference: A unique string that identifies the
            request to create the hosted zone.
        :param int resource_record_set_count: The number of resource record
            sets in the hosted zone.
        :param comment: An optional human-readable comment on the zone.
        """
        self.connection = connection
        self.id = id
        self.name = name
        self.caller_reference = caller_reference
        self.resource_record_set_count = int(resource_record_set_count)
        self.comment = comment
        # Don't access this directly, we use it for lazy loading.
        self._nameservers = []
        # This is set to True when this HostedZone has been deleted in Route53.
        self._is_deleted = False

    def __str__(self):
        return '<HostedZone: %s -- %s>' % (self.name, self.id)

    @property
    def nameservers(self):
        """
        :rtype: list
        :returns: A list of nameserver strings for this hosted zone.
        """
        # If this HostedZone was instantiated by ListHostedZones, the nameservers
        # attribute didn't get populated. If the user requests it, we'll
        # lazy load by querying it in after the fact. It's safe to cache like
        # this since these nameserver values won't change.
        if not self._nameservers:
            # We'll just snatch the nameserver values from a fresh copy
            # via GetHostedZone.
            hosted_zone = self.connection.get_hosted_zone_by_id(self.id)
            self._nameservers = hosted_zone._nameservers
        return self._nameservers

    @property
    def record_sets(self):
        """
        Queries for the Resource Record Sets that are under this HostedZone.
        This is typically the way to go to find specific record sets, or
        to list them all.

        We don't currently implement any filtering convenience method,
        since it is very easy to do this yourself, catered to your own needs.
        For example, if you find your match, you may choose to stop iterating
        on the generator, potentially saving yourself extra API queries
        (behind the scenes).

        .. warning:: This result set can get pretty large if you have a ton
            of records.

        :rtype: generator
        :returns: A generator of ResourceRecordSet sub-classes.
        """
        for rrset in self.connection._list_resource_record_sets_by_zone_id(self.id):
            yield rrset

    def delete(self, force=False):
        """
        Deletes this hosted zone. After this method is ran, you won't be able
        to add records, or do anything else with the zone. You'd need to
        re-create it, as zones are read-only after creation.

        :keyword bool force: If ``True``, delete the
            :py:class:`HostedZone <route53.hosted_zone.HostedZone>`, even if it
            means nuking all associated record sets. If ``False``, an
            exception is raised if this
            :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
            has record sets.
        :rtype: dict
        :returns: A dict of change info, which contains some details about
            the request.
        """
        self._halt_if_already_deleted()
        if force:
            # Forcing deletion by cleaning up all record sets first. We'll
            # do it all in one change set.
            cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
            for rrset in self.record_sets:
                # You can delete a HostedZone if there are only SOA and NS
                # entries left. So delete everything but SOA/NS entries.
                if rrset.rrset_type not in ['SOA', 'NS']:
                    cset.add_change('DELETE', rrset)
            if cset.deletions or cset.creations:
                # Bombs away.
                self.connection._change_resource_record_sets(cset)
        # Now delete the HostedZone.
        retval = self.connection.delete_hosted_zone_by_id(self.id)
        # Used to protect against modifying a deleted HostedZone.
        self._is_deleted = True
        return retval

    def _halt_if_already_deleted(self):
        """
        Convenience method used to raise an AlreadyDeletedError exception if
        this HostedZone has been deleted.

        :raises: AlreadyDeletedError
        """
        if self._is_deleted:
            raise AlreadyDeletedError("Can't manipulate a deleted zone.")

    def _add_record(self, record_set_class, name, values, ttl=60, weight=None,
                    region=None, set_identifier=None, alias_hosted_zone_id=None,
                    alias_dns_name=None):
        """
        Convenience method for creating ResourceRecordSets. Most of the calls
        are basically the same, this saves on repetition.

        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created ResourceRecordSet sub-class
            instance.
        """
        self._halt_if_already_deleted()
        rrset_kwargs = dict(
            connection=self.connection,
            zone_id=self.id,
            name=name,
            ttl=ttl,
            records=values,
            weight=weight,
            region=region,
            set_identifier=set_identifier,
        )
        # Alias kwargs are only passed through when the caller supplied them,
        # since not all record set classes accept them.
        if alias_hosted_zone_id or alias_dns_name:
            rrset_kwargs.update(dict(
                alias_hosted_zone_id=alias_hosted_zone_id,
                alias_dns_name=alias_dns_name
            ))
        rrset = record_set_class(**rrset_kwargs)
        cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
        cset.add_change('CREATE', rrset)
        change_info = self.connection._change_resource_record_sets(cset)
        return rrset, change_info

    def create_a_record(self, name, values, ttl=60, weight=None, region=None,
                        set_identifier=None, alias_hosted_zone_id=None,
                        alias_dns_name=None):
        """
        Creates and returns an A record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :keyword int weight: *For weighted record sets only*. Among resource record
            sets that have the same combination of DNS name and type, a value
            that determines what portion of traffic for the current resource
            record set is routed to the associated location. Ranges from 0-255.
        :keyword str region: *For latency-based record sets*. The Amazon EC2 region
            where the resource that is specified in this resource record set
            resides.
        :keyword str set_identifier: *For weighted and latency resource record
            sets only*. An identifier that differentiates among multiple
            resource record sets that have the same combination of DNS name
            and type. 1-128 chars.
        :keyword str alias_hosted_zone_id: Alias A records have this specified.
            It appears to be the hosted zone ID for the ELB the Alias points at.
        :keyword str alias_dns_name: Alias A records have this specified. It is
            the DNS name for the ELB that the Alias points to.
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created
            :py:class:`AResourceRecordSet <route53.resource_record_set.AResourceRecordSet>`
            instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        # NOTE: locals() captures every parameter of this method (including
        # the `values` list, which stays available under the 'values' key),
        # so the dict maps 1:1 onto _add_record's keyword arguments.
        values = locals()
        del values['self']
        return self._add_record(AResourceRecordSet, **values)

    def create_aaaa_record(self, name, values, ttl=60, weight=None, region=None,
                           set_identifier=None):
        """
        Creates an AAAA record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :keyword int weight: *For weighted record sets only*. Among resource record
            sets that have the same combination of DNS name and type, a value
            that determines what portion of traffic for the current resource
            record set is routed to the associated location. Ranges from 0-255.
        :keyword str region: *For latency-based record sets*. The Amazon EC2 region
            where the resource that is specified in this resource record set
            resides.
        :keyword str set_identifier: *For weighted and latency resource record
            sets only*. An identifier that differentiates among multiple
            resource record sets that have the same combination of DNS name
            and type. 1-128 chars.
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created AAAAResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(AAAAResourceRecordSet, **values)

    def create_cname_record(self, name, values, ttl=60, weight=None, region=None,
                            set_identifier=None):
        """
        Creates a CNAME record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :keyword int weight: *For weighted record sets only*. Among resource record
            sets that have the same combination of DNS name and type, a value
            that determines what portion of traffic for the current resource
            record set is routed to the associated location. Ranges from 0-255.
        :keyword str region: *For latency-based record sets*. The Amazon EC2 region
            where the resource that is specified in this resource record set
            resides.
        :keyword str set_identifier: *For weighted and latency resource record
            sets only*. An identifier that differentiates among multiple
            resource record sets that have the same combination of DNS name
            and type. 1-128 chars.
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created CNAMEResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(CNAMEResourceRecordSet, **values)

    def create_mx_record(self, name, values, ttl=60):
        """
        Creates a MX record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created MXResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(MXResourceRecordSet, **values)

    def create_ns_record(self, name, values, ttl=60):
        """
        Creates a NS record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created NSResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(NSResourceRecordSet, **values)

    def create_ptr_record(self, name, values, ttl=60):
        """
        Creates a PTR record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created PTRResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(PTRResourceRecordSet, **values)

    def create_spf_record(self, name, values, ttl=60):
        """
        Creates a SPF record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created SPFResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(SPFResourceRecordSet, **values)

    def create_srv_record(self, name, values, ttl=60):
        """
        Creates a SRV record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created SRVResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(SRVResourceRecordSet, **values)

    def create_txt_record(self, name, values, ttl=60, weight=None, region=None,
                          set_identifier=None):
        """
        Creates a TXT record attached to this hosted zone.

        :param str name: The fully qualified name of the record to add.
        :param list values: A list of value strings for the record.
        :keyword int ttl: The time-to-live of the record (in seconds).
        :keyword int weight: *For weighted record sets only*. Among resource record
            sets that have the same combination of DNS name and type, a value
            that determines what portion of traffic for the current resource
            record set is routed to the associated location. Ranges from 0-255.
        :keyword str region: *For latency-based record sets*. The Amazon EC2 region
            where the resource that is specified in this resource record set
            resides.
        :keyword str set_identifier: *For weighted and latency resource record
            sets only*. An identifier that differentiates among multiple
            resource record sets that have the same combination of DNS name
            and type. 1-128 chars.
        :rtype: tuple
        :returns: A tuple in the form of ``(rrset, change_info)``, where
            ``rrset`` is the newly created TXTResourceRecordSet instance.
        """
        self._halt_if_already_deleted()
        # Grab the params/kwargs here for brevity's sake.
        values = locals()
        del values['self']
        return self._add_record(TXTResourceRecordSet, **values)
"content_hash": "5602d9698f52bbaa1a5933a34acdd980",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 257,
"avg_line_length": 42.22463768115942,
"alnum_prop": 0.6316572278473771,
"repo_name": "EricSchles/python-route53",
"id": "2c9332524d412088a947fe4021a6b0ec5eadd042",
"size": "17481",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "route53/hosted_zone.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5596"
},
{
"name": "Python",
"bytes": "84593"
}
],
"symlink_target": ""
} |
""" Sahana Eden Automated Test - HRM003 Create Staff Training
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
import unittest
from tests.web2unittest import SeleniumUnitTest
from selenium.common.exceptions import NoSuchElementException
from s3 import s3_debug
from tests import *
#import unittest, re, time
import time
class CreateStaffTraining(SeleniumUnitTest):
    def test_hrm003_create_staff_training(self):
        """
        @case: HRM003
        @description: Create a Staff Training
        * Create Course
        * Create Training Event
        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        @ToDo:
        * Add Staff Participants
        """
        browser = self.browser

        # Create the course first so the training event can reference it.
        course_fields = [
            ("code", "32329408",),
            ("name", "Emergency First Aid"),
        ]
        self.login(account="admin", nexturl="hrm/course/create")
        self.create("hrm_course", course_fields)

        # Now schedule a training event against that course.
        event_fields = [
            ("course_id", "Emergency First Aid", "option"),
            ("start_date", "2012-08-01"),
            ("hours", "12"),
            ("site_id", "Bucharest RFAAT Centre", "autocomplete"),
            ("comments", "Testing comments"),
        ]
        self.login(account="admin", nexturl="hrm/training_event/create")
        self.create("hrm_training_event", event_fields)
"content_hash": "fb7540ad6810a33831682ad3aba25af8",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 110,
"avg_line_length": 38.51948051948052,
"alnum_prop": 0.5906945380984491,
"repo_name": "mrGeen/eden",
"id": "66189b6608fe3d342d2e6a165a7f7a7eaf591542",
"size": "2966",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/tests/hrm/create_staff_training.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "1070670"
},
{
"name": "HTML",
"bytes": "358005"
},
{
"name": "JavaScript",
"bytes": "14790995"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "22735063"
},
{
"name": "XSLT",
"bytes": "1263876"
}
],
"symlink_target": ""
} |
import logging
class _AnsiColorStreamHandler(logging.StreamHandler):
DEFAULT = '\x1b[0m'
RED = '\x1b[31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
CYAN = '\x1b[36m'
CRITICAL = RED
ERROR = RED
WARNING = YELLOW
INFO = GREEN
DEBUG = CYAN
@classmethod
def _get_color(cls, level):
if level >= logging.CRITICAL: return cls.CRITICAL
elif level >= logging.ERROR: return cls.ERROR
elif level >= logging.WARNING: return cls.WARNING
elif level >= logging.INFO: return cls.INFO
elif level >= logging.DEBUG: return cls.DEBUG
else: return cls.DEFAULT
def __init__(self, stream=None):
logging.StreamHandler.__init__(self, stream)
def format(self, record):
text = logging.StreamHandler.format(self, record)
color = self._get_color(record.levelno)
return color + text + self.DEFAULT
class _WinColorStreamHandler(logging.StreamHandler):
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
DEFAULT = FOREGROUND_WHITE
CRITICAL = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
ERROR = FOREGROUND_RED | FOREGROUND_INTENSITY
WARNING = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
INFO = FOREGROUND_GREEN
DEBUG = FOREGROUND_CYAN
@classmethod
def _get_color(cls, level):
if level >= logging.CRITICAL: return cls.CRITICAL
elif level >= logging.ERROR: return cls.ERROR
elif level >= logging.WARNING: return cls.WARNING
elif level >= logging.INFO: return cls.INFO
elif level >= logging.DEBUG: return cls.DEBUG
else: return cls.DEFAULT
def _set_color(self, code):
import ctypes
ctypes.windll.kernel32.SetConsoleTextAttribute(self._outhdl, code)
def __init__(self, stream):
logging.StreamHandler.__init__(self, stream)
# get file handle for the stream
import ctypes, ctypes.util
crtname = ctypes.util.find_msvcrt()
if not crtname:
crtname = ctypes.util.find_library("msvcrt")
crtlib = ctypes.cdll.LoadLibrary(crtname)
self._outhdl = crtlib._get_osfhandle(stream.fileno())
def emit(self, record):
color = self._get_color(record.levelno)
self._set_color(color)
logging.StreamHandler.emit(self, record)
self._set_color(self.FOREGROUND_WHITE)
# select ColorStreamHandler based on platform
import platform

# Windows consoles need Win32 console attributes; everything else gets ANSI.
ColorStreamHandler = (_WinColorStreamHandler if platform.system() == 'Windows'
                      else _AnsiColorStreamHandler)
| {
"content_hash": "2bc2cbb5c8d752cb83c373762dd56879",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 95,
"avg_line_length": 35.22680412371134,
"alnum_prop": 0.6376938835235587,
"repo_name": "FichteFoll/TelegramIRCImageProxy",
"id": "e04f28570a2d6ff9528c97485a9a30f063057754",
"size": "4713",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "colorstreamhandler/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38538"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import prettyplotlib as ppl
import itertools
import sklearn
import asyncio
import random
import math
import numpy as np
def data_gen():
status = {True:'running', False:'sleeping'}
return [{'battery':random.randint(0,5), 'status':status[random.random()>0.9]} for i in range(5)]
# The slices will be ordered and plotted counter-clockwise.
def add_pie(fig, ax, center, percent, status):
    """Draw a donut gauge at `center`, filled to `percent`.

    `status` picks the color scheme: 'sleeping' renders the filled slice in
    dark grey, 'running'/'pending' in orange; the remainder is light grey.
    Returns the (fig, ax) pair for chaining.
    """
    sizes = [percent, 100-percent]
    colors = ['#F05F40', '#d9d9d9']
    sleep_colors = ['#252525', '#d9d9d9']
    c = {'running':colors, 'sleeping':sleep_colors, 'pending':colors}
    ax.pie(sizes, colors=c[status],wedgeprops = {'linewidth':0, 'zorder':1}, center = center, radius = 1, startangle = 10)
    #draw a circle at the center of pie to make it look like a donut
    centre_circle = plt.Circle(center,0.75,color='white', fc='white',linewidth=1.25)
    #fig = plt.gcf()
    ax.add_artist(centre_circle)
    # Set aspect ratio to be equal so that pie is drawn as a circle.
    ax.axis('equal')
    return fig, ax
def add_line(ax, list_of_pairs):
    """Draw a thin dark line on `ax` for each pair of (x, y) endpoints.

    :param list_of_pairs: iterable of point pairs, e.g. [((0, 0), (1, 0))].
    """
    #e.g. [(0,0),(1,0)]
    for i in list_of_pairs:
        xs = [j[0] for j in i]
        ys = [j[1] for j in i]
        ppl.plot(ax, xs,ys, color = '#252525', linewidth = 1.0, zorder=0)
def update(fig, ax, centres, new_data, max_batt):
    """Redraw one frame: connect every non-sleeping sensor with lines, then
    draw each sensor's battery level as a donut gauge at its centre."""
    ###centres is a list of centre coordinates
    ###new data is a list of corresponding update, {'battery': , 'status':}
    on_pairs = [e for i, e in enumerate(centres) if new_data[i]['status']!='sleeping']
    print(on_pairs)  # debug output
    pairs = itertools.combinations(on_pairs,2)
    add_line(ax, pairs)
    for ind,centr in enumerate(centres):
        # Clamp at 5% so a near-empty battery still renders a visible sliver.
        battery = max(100*new_data[ind]['battery']/max_batt, 5)
        status = new_data[ind]['status']
        print('status',status)  # debug output
        fig, ax = add_pie(fig, ax, centr,battery, status)
    #plt.show()
def data_generator(single_series, num_sensors, max_batt):
    """Replay a time-ordered series of per-sensor updates as full snapshots.

    Every sensor starts as {'status': 'running', 'battery': max_batt}. Each
    item of `single_series` is a (timestamp, record) pair whose record
    carries an '_id' indexing the sensor it replaces; after applying each
    record, the full state of all sensors is yielded.

    :param single_series: iterable of (timestamp, record) pairs.
    :param int num_sensors: number of sensors tracked.
    :param max_batt: initial battery level for every sensor.
    :returns: generator of per-step snapshot lists.
    """
    snapshot = [{'status': 'running', 'battery': max_batt, '_id': i}
                for i in range(num_sensors)]
    for update in single_series:
        record = update[1]
        snapshot[record['_id']] = record
        # Fix: yield a shallow copy instead of the same mutable list every
        # step -- otherwise consumers that keep references (e.g. list(gen))
        # end up with N aliases of the final state.
        yield list(snapshot)
def distance(xy1, xy2):
    """Euclidean distance between two (x, y) points."""
    dx = xy2[0] - xy1[0]
    dy = xy2[1] - xy1[1]
    return math.sqrt(dx * dx + dy * dy)
def safe(candidate, existing_one, radius):
    """True when `candidate` keeps a clearance of radius+2 from `existing_one`."""
    clearance = radius + 2
    return distance(candidate, existing_one) >= clearance
def safe_vs_all(candidate, all_existing, radius):
    """True when `candidate` is safe against every point in `all_existing`."""
    for existing in all_existing:
        if not safe(candidate, existing, radius):
            return False
    return True
def gen_centre(x, y):
#generate random within [0,x], [0,y]
candidate = (random.gauss(x/2,1 ), random.gauss(y/2, 1))
return candidate
def gen_safe_centre(radius, x, y, existing):
    """Keep sampling centres until one clears every existing centre.

    Loops forever if no safe placement exists for the given radius.
    """
    candidate = gen_centre(x, y)
    while not safe_vs_all(candidate, existing, radius):
        candidate = gen_centre(x, y)
    return candidate
def gen_centres(radius, x, y, number):
    """Build `number` mutually-safe centres, seeded with (0, 0)."""
    centres = [(0, 0)]
    while len(centres) < number:
        centres.append(gen_safe_centre(radius, x, y, centres))
    return centres
if __name__ == '__main__':
    # Demo driver: lay out sensors, replay a recorded battery series, and
    # render every second snapshot to a numbered PNG frame under plot/.
    num_sensors = 3 #one more than there is a record for
    num_records = int((num_sensors-1)/2)
    print('num records', num_records)
    centres = gen_centres(1, 10,10, num_sensors)
    # NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
    # on modern installs this should be `import joblib` -- confirm the pinned
    # scikit-learn version before upgrading.
    all_data = [sklearn.externals.joblib.load('data/batt_0')]
    all_data = [i[400:500] for i in all_data]
    merged = sorted(list(itertools.chain(*all_data)))
    streamer = data_generator(merged, num_sensors, 5)
    count = 0
    for index, new_data in enumerate(streamer):
        if index%2 == 1:
            fig, ax = plt.subplots(1)
            update(fig, ax, centres, new_data, 5)
            fig.savefig('plot/coop%d.png'%count, format = 'png')
            count+=1
| {
"content_hash": "2e2e359b5298ef31259792952e2a562d",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 122,
"avg_line_length": 38.72043010752688,
"alnum_prop": 0.6467647875590113,
"repo_name": "jamesjarlathlong/resourceful",
"id": "6a646b891a6b5690fc8a459b18f99904e1d05117",
"size": "3601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualise_stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2652"
},
{
"name": "JavaScript",
"bytes": "5496"
},
{
"name": "Jupyter Notebook",
"bytes": "2267185"
},
{
"name": "Python",
"bytes": "80130"
}
],
"symlink_target": ""
} |
"""DB related custom exceptions."""
import six
from ironic.openstack.common.gettextutils import _
class DBError(Exception):
    """Base database exception; wraps an implementation-specific exception."""

    def __init__(self, inner_exception=None):
        # Seed Exception's message with the stringified driver error, and
        # keep the original object around for callers that need details.
        super(DBError, self).__init__(six.text_type(inner_exception))
        self.inner_exception = inner_exception
class DBDuplicateEntry(DBError):
    """Duplicate-key violation; wraps the driver's integrity error.

    :ivar columns: names of the columns involved in the unique constraint.
    """

    def __init__(self, columns=None, inner_exception=None):
        # Fix: the previous mutable default (columns=[]) was shared across
        # all instances, so mutating one instance's `columns` leaked into
        # every other default-constructed instance. Materialize a fresh
        # list per instance instead. Passing an explicit list behaves
        # exactly as before.
        self.columns = columns if columns is not None else []
        super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
    """Raised when the database detects a deadlock.

    No __init__ override is needed: DBError's constructor already accepts
    the optional inner_exception, so the previous method was a useless
    super delegation (pylint W0235) and has been removed. Behavior is
    unchanged.
    """
class DBInvalidUnicodeParameter(Exception):
    # Raised when a unicode value is passed to a database that does not
    # support unicode. `message` is translated via the project's gettext `_`.
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
class DbMigrationError(DBError):
    """Wraps migration specific exception."""
    def __init__(self, message=None):
        # NOTE(review): `message` is forwarded as DBError's inner_exception,
        # so str(exc) becomes the message text.
        super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
    """Wraps connection specific exception."""
    # No extra state beyond DBError; the class exists so callers can catch
    # connection failures separately from other DB errors.
    pass
| {
"content_hash": "3a2cd5b539f98a5d4f610630159f1c81",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 69,
"avg_line_length": 28.5,
"alnum_prop": 0.6824561403508772,
"repo_name": "varunarya10/ironic",
"id": "d4b8ff442e5f5fbfa5c6b58e5f6f3b3b33852e72",
"size": "1872",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/openstack/common/db/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1640165"
}
],
"symlink_target": ""
} |
"""
ROS Master API.
L{ROSMasterHandler} provides the API implementation of the
Master. Python allows an API to be introspected from a Python class,
so the handler has a 1-to-1 mapping with the actual XMLRPC API.
API return convention: (statusCode, statusMessage, returnValue)
- statusCode: an integer indicating the completion condition of the method.
- statusMessage: a human-readable string message for debugging
- returnValue: the return value of the method; method-specific.
Current status codes:
- -1: ERROR: Error on the part of the caller, e.g. an invalid parameter
- 0: FAILURE: Method was attempted but failed to complete correctly.
- 1: SUCCESS: Method completed successfully.
Individual methods may assign additional meaning/semantics to statusCode.
"""
from __future__ import print_function
import os
import sys
import logging
import threading
import time
import traceback
from rosgraph.xmlrpc import XmlRpcHandler
import rosgraph.names
from rosgraph.names import resolve_name
import rosmaster.paramserver
import rosmaster.threadpool
from rosmaster.util import xmlrpcapi
from rosmaster.registrations import RegistrationManager
from rosmaster.validators import non_empty, non_empty_str, not_none, is_api, is_topic, is_service, valid_type_name, valid_name, empty_or_valid_name, ParameterInvalid
NUM_WORKERS = 3 #number of threads we use to send publisher_update notifications

# Indices into the [statusCode, statusMessage, returnValue] triple that
# every master API method returns.
STATUS = 0
MSG = 1
VAL = 2

_logger = logging.getLogger("rosmaster.master")

# When True, every validated API call and its return value is logged at
# DEBUG level (see apivalidate); off by default for performance.
LOG_API = False
def mloginfo(msg, *args):
    """Log an info-level master message.

    Lives in core so that both master and masterdata can reach it.  These
    messages may be shown on screen, so keep them user-readable.

    :param msg: message string, optionally a ``%`` format string
    :param args: arguments interpolated into ``msg`` when it is a format string
    """
    _logger.info(msg, *args)
def mlogwarn(msg, *args):
    """Log a warn-level master message and echo it to the console.

    Lives in core so that both master and masterdata can reach it.  These
    messages may be shown on screen, so keep them user-readable.

    :param msg: message string, optionally a ``%`` format string
    :param args: arguments interpolated into ``msg`` when it is a format string
    """
    _logger.warn(msg, *args)
    text = (msg % args) if args else str(msg)
    print("WARN: " + text)
def apivalidate(error_return_value, validators=()):
    """
    ROS master/slave arg-checking decorator. Applies the specified
    validator to the corresponding argument and also remaps each
    argument to be the value returned by the validator.  Thus,
    arguments can be simultaneously validated and canonicalized prior
    to actual function call.
    @param error_return_value: API value to return if call unexpectedly fails
    @param validators: sequence of validators to apply to each
        arg. None means no validation for the parameter is required. As all
        api methods take caller_id as the first parameter, the validators
        start with the second param.
    @type  validators: sequence
    """
    def check_validates(f):
        # Support both Python 3 (__code__/__name__) and Python 2
        # (func_code/func_name) function attribute names.
        try:
            func_code = f.__code__
            func_name = f.__name__
        except AttributeError:
            func_code = f.func_code
            func_name = f.func_name
        assert len(validators) == func_code.co_argcount - 2, "%s failed arg check"%f #ignore self and caller_id
        def validated_f(*args, **kwds):
            if LOG_API:
                _logger.debug("%s%s", func_name, str(args[1:]))
                #print "%s%s"%(func_name, str(args[1:]))
            if len(args) == 1:
                # BUG FIX: log message misspelled "paramter"
                _logger.error("%s invoked without caller_id parameter" % func_name)
                return -1, "missing required caller_id parameter", error_return_value
            elif len(args) != func_code.co_argcount:
                return -1, "Error: bad call arity", error_return_value

            instance = args[0]
            caller_id = args[1]

            def isstring(s):
                """Small helper version to check an object is a string in
                a way that works for both Python 2 and 3
                """
                try:
                    return isinstance(s, basestring)
                except NameError:
                    return isinstance(s, str)

            if not isstring(caller_id):
                _logger.error("%s: invalid caller_id param type", func_name)
                return -1, "caller_id must be a string", error_return_value

            newArgs = [instance, caller_id] #canonicalized args
            try:
                # Run each validator; validators both check and canonicalize
                # (the validated/remapped value is what the API method sees).
                for (v, a) in zip(validators, args[2:]):
                    if v:
                        try:
                            newArgs.append(v(a, caller_id))
                        except ParameterInvalid as e:
                            _logger.error("%s: invalid parameter: %s", func_name, str(e) or 'error')
                            return -1, str(e) or 'error', error_return_value
                    else:
                        newArgs.append(a)

                if LOG_API:
                    retval = f(*newArgs, **kwds)
                    _logger.debug("%s%s returns %s", func_name, args[1:], retval)
                    return retval
                else:
                    # API conventions require a non-None return triple.
                    code, msg, val = f(*newArgs, **kwds)
                    if val is None:
                        return -1, "Internal error (None value returned)", error_return_value
                    return code, msg, val
            except TypeError as te: #most likely wrong arg number
                _logger.error(traceback.format_exc())
                return -1, "Error: invalid arguments: %s"%te, error_return_value
            except Exception as e: #internal failure
                _logger.error(traceback.format_exc())
                return 0, "Internal failure: %s"%e, error_return_value
        # Preserve the wrapped function's identity for introspection
        # (manual equivalent of functools.wraps, kept for py2 compat).
        try:
            validated_f.__name__ = func_name
        except AttributeError:
            validated_f.func_name = func_name
        validated_f.__doc__ = f.__doc__ #preserve doc
        return validated_f
    return check_validates
def publisher_update_task(api, topic, pub_uris):
    """
    Contact api.publisherUpdate with specified parameters.  Runs on a
    thread-pool worker (see ROSMasterHandler._notify).
    @param api: XML-RPC URI of node to contact
    @type  api: str
    @param topic: Topic name to send to node
    @type  topic: str
    @param pub_uris: list of publisher APIs to send to node
    @type  pub_uris: [str]
    """
    mloginfo("publisherUpdate[%s] -> %s", topic, api)
    #TODO: check return value for errors so we can unsubscribe if stale
    xmlrpcapi(api).publisherUpdate('/master', topic, pub_uris)
def service_update_task(api, service, uri):
    """
    Contact api.serviceUpdate with specified parameters.  Runs on a
    thread-pool worker (see ROSMasterHandler._notify).
    @param api: XML-RPC URI of node to contact
    @type  api: str
    @param service: Service name to send to node
    @type  service: str
    @param uri: URI to send to node
    @type  uri: str
    """
    mloginfo("serviceUpdate[%s, %s] -> %s",service, uri, api)
    xmlrpcapi(api).serviceUpdate('/master', service, uri)
###################################################
# Master Implementation
class ROSMasterHandler(object):
"""
XML-RPC handler for ROS master APIs.
API routines for the ROS Master Node. The Master Node is a
superset of the Slave Node and contains additional API methods for
creating and monitoring a graph of slave nodes.
By convention, ROS nodes take in caller_id as the first parameter
of any API call. The setting of this parameter is rarely done by
client code as ros::msproxy::MasterProxy automatically inserts
this parameter (see ros::client::getMaster()).
"""
def __init__(self, num_workers=NUM_WORKERS):
    """ctor.

    @param num_workers: size of the thread pool used to deliver
        publisher/parameter update notifications asynchronously.
    @type  num_workers: int
    """
    self.uri = None           # XML-RPC URI, set by _ready()
    self.done = False         # set True once _shutdown() has run
    self.thread_pool = rosmaster.threadpool.MarkedThreadPool(num_workers)
    # pub/sub/providers: dict { topicName : [publishers/subscribers names] }
    self.ps_lock = threading.Condition(threading.Lock())
    self.reg_manager = RegistrationManager(self.thread_pool)
    # maintain refs to reg_manager fields
    self.publishers = self.reg_manager.publishers
    self.subscribers = self.reg_manager.subscribers
    self.services = self.reg_manager.services
    self.param_subscribers = self.reg_manager.param_subscribers
    self.topics_types = {} #dict { topicName : type }
    # parameter server dictionary
    self.param_server = rosmaster.paramserver.ParamDictionary(self.reg_manager)
def _shutdown(self, reason=''):
    """Tear down the handler: release the worker pool and mark us done.

    @param reason: human-readable explanation of the shutdown (unused here,
        kept for API symmetry with callers that pass one).
    @type  reason: str
    """
    pool = self.thread_pool
    if pool is not None:
        # Don't wait for queued notifications or worker threads to finish.
        pool.join_all(wait_for_tasks=False, wait_for_threads=False)
        self.thread_pool = None
    self.done = True
def _ready(self, uri):
    """
    Initialize the handler with the XMLRPC URI. This is a standard callback from the XmlRpcNode API.

    @param uri: XML-RPC URI
    @type  uri: str
    """
    self.uri = uri
def _ok(self):
    # Liveness probe for the XmlRpcNode loop: False once _shutdown() ran.
    return not self.done
###############################################################################
# EXTERNAL API
@apivalidate(0, (None, ))
def shutdown(self, caller_id, msg=''):
    """
    Stop this server.
    @param caller_id: ROS caller id
    @type  caller_id: str
    @param msg: a message describing why the node is being shutdown.
    @type  msg: str
    @return: [code, msg, 0]
    @rtype: [int, str, int]
    """
    if msg:
        print("shutdown request: %s" % msg, file=sys.stdout)
    else:
        # BUG FIX: user-facing message misspelled "requst"
        print("shutdown request", file=sys.stdout)
    self._shutdown('external shutdown request from [%s]: %s'%(caller_id, msg))
    return 1, "shutdown", 0
@apivalidate('')
def getUri(self, caller_id):
    """
    Get the XML-RPC URI of this server.
    @param caller_id str: ROS caller id
    @return [int, str, str]: [1, "", xmlRpcUri]
    """
    # self.uri is populated by _ready(); None until the node is up.
    return 1, "", self.uri
@apivalidate(-1)
def getPid(self, caller_id):
    """
    Get the PID of this server.
    @param caller_id: ROS caller id
    @type  caller_id: str
    @return: [1, "", serverProcessPID]
    @rtype: [int, str, int]
    """
    return 1, "", os.getpid()
################################################################
# PARAMETER SERVER ROUTINES
@apivalidate(0, (non_empty_str('key'),))
def deleteParam(self, caller_id, key):
    """
    Parameter Server: delete a parameter.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param key: parameter name
    @type  key: str
    @return: [code, msg, 0]
    @rtype: [int, str, int]
    """
    try:
        key = resolve_name(key, caller_id)
        self.param_server.delete_param(key, self._notify_param_subscribers)
    except KeyError:
        return -1, "parameter [%s] is not set"%key, 0
    mloginfo("-PARAM [%s] by %s",key, caller_id)
    return 1, "parameter %s deleted"%key, 0
@apivalidate(0, (non_empty_str('key'), not_none('value')))
def setParam(self, caller_id, key, value):
    """
    Parameter Server: set parameter.  NOTE: if value is a
    dictionary it will be treated as a parameter tree, where key
    is the parameter namespace. For example:::
      {'x':1,'y':2,'sub':{'z':3}}
    will set key/x=1, key/y=2, and key/sub/z=3. Furthermore, it
    will replace all existing parameters in the key parameter
    namespace with the parameters in value. You must set
    parameters individually if you wish to perform a union update.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param key: parameter name
    @type  key: str
    @param value: parameter value.
    @type  value: XMLRPCLegalValue
    @return: [code, msg, 0]
    @rtype: [int, str, int]
    """
    # Resolve relative/private names against the caller's namespace.
    key = resolve_name(key, caller_id)
    # Subscribers to this key (or its namespace) are notified asynchronously.
    self.param_server.set_param(key, value, self._notify_param_subscribers)
    mloginfo("+PARAM [%s] by %s",key, caller_id)
    return 1, "parameter %s set"%key, 0
@apivalidate(0, (non_empty_str('key'),))
def getParam(self, caller_id, key):
    """
    Retrieve a parameter value from the server.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param key: parameter to lookup. If key is a namespace,
        getParam() will return a parameter tree.
    @type  key: str
    @return: [code, statusMessage, parameterValue]. If code is not
        1, parameterValue should be ignored. If key is a namespace,
        the return value will be a dictionary, where each key is a
        parameter in that namespace. Sub-namespaces are also
        represented as dictionaries.
    @rtype: [int, str, XMLRPCLegalValue]
    """
    try:
        key = resolve_name(key, caller_id)
        value = self.param_server.get_param(key)
    except KeyError:
        return -1, "Parameter [%s] is not set"%key, 0
    return 1, "Parameter [%s]"%key, value
@apivalidate(0, (non_empty_str('key'),))
def searchParam(self, caller_id, key):
    """
    Search for parameter key on parameter server. Search starts in caller's namespace and proceeds
    upwards through parent namespaces until Parameter Server finds a matching key.

    searchParam's behavior is to search for the first partial match.
    For example, imagine that there are two 'robot_description' parameters::
       /robot_description
         /robot_description/arm
         /robot_description/base
       /pr2/robot_description
         /pr2/robot_description/base

    If I start in the namespace /pr2/foo and search for
    'robot_description', searchParam will match
    /pr2/robot_description. If I search for 'robot_description/arm'
    it will return /pr2/robot_description/arm, even though that
    parameter does not exist (yet).

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param key: parameter key to search for.
    @type  key: str
    @return: [code, statusMessage, foundKey]. If code is not 1, foundKey should be
        ignored.
    @rtype: [int, str, str]
    """
    found = self.param_server.search_param(caller_id, key)
    if not found:
        return -1, "Cannot find parameter [%s] in an upwards search"%key, ''
    return 1, "Found [%s]"%found, found
@apivalidate(0, (is_api('caller_api'), non_empty_str('key'),))
def subscribeParam(self, caller_id, caller_api, key):
    """
    Retrieve a parameter value and subscribe to future updates of that
    parameter.  See paramUpdate() in the Node API.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param key: parameter to lookup.
    @type  key: str
    @param caller_api: API URI for paramUpdate callbacks.
    @type  caller_api: str
    @return: [code, statusMessage, parameterValue]. If code is not
       1, parameterValue should be ignored. parameterValue is an empty dictionary if the parameter
       has not been set yet.
    @rtype: [int, str, XMLRPCLegalValue]
    """
    key = resolve_name(key, caller_id)
    # ps_lock has precedence and is required due to potential
    # self.reg_manager modification
    with self.ps_lock:
        val = self.param_server.subscribe_param(key, (caller_id, caller_api))
    return 1, "Subscribed to parameter [%s]"%key, val
@apivalidate(0, (is_api('caller_api'), non_empty_str('key'),))
def unsubscribeParam(self, caller_id, caller_api, key):
    """
    Unsubscribe the caller from updates to the specified parameter.  See
    paramUpdate() in the Node API.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param key: parameter to unsubscribe from.
    @type  key: str
    @param caller_api: API URI that was registered for paramUpdate callbacks.
    @type  caller_api: str
    @return: [code, statusMessage, numUnsubscribed].
       If numUnsubscribed is zero it means that the caller was not subscribed to the parameter.
    @rtype: [int, str, int]
    """
    key = resolve_name(key, caller_id)
    try:
        # ps_lock is required due to potential self.reg_manager modification
        self.ps_lock.acquire()
        retval = self.param_server.unsubscribe_param(key, (caller_id, caller_api))
    finally:
        self.ps_lock.release()
    # BUG FIX: previously returned a hard-coded 1, discarding `retval` and
    # contradicting the documented numUnsubscribed contract.
    return 1, "Unsubscribe to parameter [%s]"%key, retval
@apivalidate(False, (non_empty_str('key'),))
def hasParam(self, caller_id, key):
    """
    Check whether a parameter is stored on the server.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param key: parameter to check
    @type  key: str
    @return: [code, statusMessage, hasParam]
    @rtype: [int, str, bool]
    """
    key = resolve_name(key, caller_id)
    return 1, key, bool(self.param_server.has_param(key))
@apivalidate([])
def getParamNames(self, caller_id):
    """
    Get list of all parameter names stored on this server.
    This does not adjust parameter names for caller's scope.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @return: [code, statusMessage, parameterNameList]
    @rtype: [int, str, [str]]
    """
    return 1, "Parameter names", self.param_server.get_param_names()
##################################################################################
# NOTIFICATION ROUTINES
def _notify(self, registrations, task, key, value, node_apis):
    """
    Generic implementation of callback notification: queue one `task`
    invocation per node API on the worker pool.
    @param registrations: Registrations
    @type  registrations: L{Registrations}
    @param task: task to queue
    @type  task: fn
    @param key: registration key
    @type  key: str
    @param value: value to pass to task
    @type  value: Any
    @param node_apis: XML-RPC URIs of the nodes to notify
    @type  node_apis: [str]
    """
    # cache thread_pool for thread safety
    thread_pool = self.thread_pool
    if not thread_pool:
        return
    try:
        for node_api in node_apis:
            # use the api as a marker so that we limit one thread per subscriber
            thread_pool.queue_task(node_api, task, (node_api, key, value))
    except KeyError:
        # BUG FIX: the old message interpolated an undefined name `s`,
        # raising NameError inside this exception handler.
        _logger.warn('subscriber data stale (key [%s]): node API unknown' % key)
def _notify_param_subscribers(self, updates):
    """
    Notify parameter subscribers of new parameter value.
    @param updates: [(subscribers, param_key, param_value)*]
    @type  updates: [([str], str, any)*]
    """
    # cache thread_pool for thread safety
    thread_pool = self.thread_pool
    if not thread_pool:
        return
    for subscribers, key, value in updates:
        # use the api as a marker so that we limit one thread per subscriber
        for caller_id, caller_api in subscribers:
            # BUG FIX: use the cached `thread_pool` -- reading
            # self.thread_pool again races with _shutdown() setting it
            # to None, which would raise AttributeError here.
            thread_pool.queue_task(caller_api, self.param_update_task,
                                   (caller_id, caller_api, key, value))
def param_update_task(self, caller_id, caller_api, param_key, param_value):
    """
    Contact api.paramUpdate with specified parameters.  Runs on a
    thread-pool worker.  If the remote node reports an error, the
    subscription is assumed stale and is removed.
    @param caller_id: caller ID
    @type  caller_id: str
    @param caller_api: XML-RPC URI of node to contact
    @type  caller_api: str
    @param param_key: parameter key to pass to node
    @type  param_key: str
    @param param_value: parameter value to pass to node
    @type  param_value: str
    """
    mloginfo("paramUpdate[%s]", param_key)
    code, _, _ = xmlrpcapi(caller_api).paramUpdate('/master', param_key, param_value)
    if code == -1:
        try:
            # ps_lock is required due to potential self.reg_manager modification
            self.ps_lock.acquire()
            # reverse lookup to figure out who we just called, since
            # multiple caller_ids may map onto the same caller_api
            matches = self.reg_manager.reverse_lookup(caller_api)
            for m in matches:
                retval = self.param_server.unsubscribe_param(param_key, (m.id, caller_api))
        finally:
            self.ps_lock.release()
def _notify_topic_subscribers(self, topic, pub_uris, sub_uris):
    """
    Notify subscribers with new publisher list.
    @param topic: name of topic
    @type  topic: str
    @param pub_uris: list of URIs of publishers.
    @type  pub_uris: [str]
    @param sub_uris: list of URIs of the subscribers to notify.
    @type  sub_uris: [str]
    """
    self._notify(self.subscribers, publisher_update_task, topic, pub_uris, sub_uris)
##################################################################################
# SERVICE PROVIDER
@apivalidate(0, ( is_service('service'), is_api('service_api'), is_api('caller_api')))
def registerService(self, caller_id, service, service_api, caller_api):
    """
    Register the caller as a provider of the specified service.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param service: Fully-qualified name of service
    @type  service: str
    @param service_api: Service URI
    @type  service_api: str
    @param caller_api: XML-RPC URI of caller node
    @type  caller_api: str
    @return: (code, message, ignore)
    @rtype: (int, str, int)
    """
    with self.ps_lock:
        self.reg_manager.register_service(service, caller_id, caller_api, service_api)
        mloginfo("+SERVICE [%s] %s %s", service, caller_id, caller_api)
    return 1, "Registered [%s] as provider of [%s]"%(caller_id, service), 1
@apivalidate(0, (is_service('service'),))
def lookupService(self, caller_id, service):
    """
    Lookup the provider of a particular service.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param service: fully-qualified name of service to lookup.
    @type  service: str
    @return: (code, message, serviceUrl). service URL is provider's
       ROSRPC URI with address and port.  Fails if there is no provider.
    @rtype: (int, str, str)
    """
    with self.ps_lock:
        service_url = self.services.get_service_api(service)
    if not service_url:
        return -1, "no provider", ''
    return 1, "rosrpc URI: [%s]"%service_url, service_url
@apivalidate(0, ( is_service('service'), is_api('service_api')))
def unregisterService(self, caller_id, service, service_api):
    """
    Unregister the caller as a provider of the specified service.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param service: Fully-qualified name of service
    @type  service: str
    @param service_api: API URI of service to unregister. Unregistration will only occur if current
       registration matches.
    @type  service_api: str
    @return: (code, message, numUnregistered). Number of unregistrations (either 0 or 1).
       If this is zero it means that the caller was not registered as a service provider.
       The call still succeeds as the intended final state is reached.
    @rtype: (int, str, int)
    """
    with self.ps_lock:
        retval = self.reg_manager.unregister_service(service, caller_id, service_api)
        mloginfo("-SERVICE [%s] %s %s", service, caller_id, service_api)
        return retval
##################################################################################
# PUBLISH/SUBSCRIBE
@apivalidate(0, ( is_topic('topic'), valid_type_name('topic_type'), is_api('caller_api')))
def registerSubscriber(self, caller_id, topic, topic_type, caller_api):
    """
    Subscribe the caller to the specified topic. In addition to receiving
    a list of current publishers, the subscriber will also receive notifications
    of new publishers via the publisherUpdate API.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param topic: Fully-qualified name of topic to subscribe to.
    @type  topic: str
    @param topic_type: Datatype for topic. Must be a package-resource name, i.e. the .msg name.
    @type  topic_type: str
    @param caller_api: XML-RPC URI of caller node for new publisher notifications
    @type  caller_api: str
    @return: (code, message, publishers). Publishers is a list of XMLRPC API URIs
       for nodes currently publishing the specified topic.
    @rtype: (int, str, [str])
    """
    #NOTE: subscribers do not get to set topic type
    with self.ps_lock:
        self.reg_manager.register_subscriber(topic, caller_id, caller_api)
        # ROS 1.1: a subscriber may set the type if it is not already
        # known -- but never let the '*' wildcard squash valid typing
        if topic not in self.topics_types and topic_type != rosgraph.names.ANYTYPE:
            self.topics_types[topic] = topic_type
        mloginfo("+SUB [%s] %s %s",topic, caller_id, caller_api)
        pub_uris = self.publishers.get_apis(topic)
    return 1, "Subscribed to [%s]"%topic, pub_uris
@apivalidate(0, (is_topic('topic'), is_api('caller_api')))
def unregisterSubscriber(self, caller_id, topic, caller_api):
    """
    Unregister the caller as a subscriber of the topic.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param topic: Fully-qualified name of topic to unregister.
    @type  topic: str
    @param caller_api: API URI of service to unregister. Unregistration will only occur if current
       registration matches.
    @type  caller_api: str
    @return: (code, statusMessage, numUnsubscribed).
      If numUnsubscribed is zero it means that the caller was not registered as a subscriber.
      The call still succeeds as the intended final state is reached.
    @rtype: (int, str, int)
    """
    with self.ps_lock:
        retval = self.reg_manager.unregister_subscriber(topic, caller_id, caller_api)
        mloginfo("-SUB [%s] %s %s",topic, caller_id, caller_api)
        return retval
@apivalidate(0, ( is_topic('topic'), valid_type_name('topic_type'), is_api('caller_api')))
def registerPublisher(self, caller_id, topic, topic_type, caller_api):
    """
    Register the caller as a publisher of the topic.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param topic: Fully-qualified name of topic to register.
    @type  topic: str
    @param topic_type: Datatype for topic. Must be a
       package-resource name, i.e. the .msg name.
    @type  topic_type: str
    @param caller_api: ROS caller XML-RPC API URI
    @type  caller_api: str
    @return: (code, statusMessage, subscriberApis).
       List of current subscribers of topic in the form of XMLRPC URIs.
    @rtype: (int, str, [str])
    """
    #NOTE: we need topic_type for getPublishedTopics.
    try:
        self.ps_lock.acquire()
        self.reg_manager.register_publisher(topic, caller_id, caller_api)
        # don't let '*' type squash valid typing
        if topic_type != rosgraph.names.ANYTYPE or not topic in self.topics_types:
            self.topics_types[topic] = topic_type
        pub_uris = self.publishers.get_apis(topic)
        sub_uris = self.subscribers.get_apis(topic)
        self._notify_topic_subscribers(topic, pub_uris, sub_uris)
        mloginfo("+PUB [%s] %s %s",topic, caller_id, caller_api)
        # FIX: removed a redundant second self.subscribers.get_apis(topic)
        # call -- the subscriber set cannot change while ps_lock is held,
        # so sub_uris is already current.
    finally:
        self.ps_lock.release()
    return 1, "Registered [%s] as publisher of [%s]"%(caller_id, topic), sub_uris
@apivalidate(0, (is_topic('topic'), is_api('caller_api')))
def unregisterPublisher(self, caller_id, topic, caller_api):
    """
    Unregister the caller as a publisher of the topic.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param topic: Fully-qualified name of topic to unregister.
    @type  topic: str
    @param caller_api: API URI of service to
       unregister. Unregistration will only occur if current
       registration matches.
    @type  caller_api: str
    @return: (code, statusMessage, numUnregistered).
       If numUnregistered is zero it means that the caller was not registered as a publisher.
       The call still succeeds as the intended final state is reached.
    @rtype: (int, str, int)
    """
    try:
        self.ps_lock.acquire()
        retval = self.reg_manager.unregister_publisher(topic, caller_id, caller_api)
        # Only notify subscribers if something was actually unregistered
        # (retval[VAL] is the numUnregistered slot of the return triple).
        if retval[VAL]:
            self._notify_topic_subscribers(topic, self.publishers.get_apis(topic), self.subscribers.get_apis(topic))
        mloginfo("-PUB [%s] %s %s",topic, caller_id, caller_api)
    finally:
        self.ps_lock.release()
    return retval
##################################################################################
# GRAPH STATE APIS
@apivalidate('', (valid_name('node'),))
def lookupNode(self, caller_id, node_name):
    """
    Get the XML-RPC URI of the node with the associated
    name/caller_id.  This API is for looking up information about
    publishers and subscribers. Use lookupService instead to lookup
    ROS-RPC URIs.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param node_name: name of node to lookup
    @type  node_name: str
    @return: (code, msg, URI)
    @rtype: (int, str, str)
    """
    with self.ps_lock:
        node = self.reg_manager.get_node(node_name)
        if node is None:
            return -1, "unknown node [%s]"%node_name, ''
        return 1, "node api", node.api
@apivalidate(0, (empty_or_valid_name('subgraph'),))
def getPublishedTopics(self, caller_id, subgraph):
    """
    Get list of topics that can be subscribed to. This does not return topics that have no publishers.
    See L{getSystemState()} to get more comprehensive list.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @param subgraph: Restrict topic names to match within the specified subgraph. Subgraph namespace
       is resolved relative to the caller's namespace. Use '' to specify all names.
    @type  subgraph: str
    @return: (code, msg, [[topic1, type1]...[topicN, typeN]])
    @rtype: (int, str, [[str, str],])
    """
    try:
        self.ps_lock.acquire()
        # force subgraph to be a namespace with trailing slash
        if subgraph and subgraph[-1] != rosgraph.names.SEP:
            subgraph = subgraph + rosgraph.names.SEP
        #we don't bother with subscribers as subscribers don't report topic types. also, the intended
        #use case is for subscribe-by-topic-type
        # NOTE(review): iterkeys() is a py2-style API -- presumably the
        # project's Registrations class implements it itself; confirm
        # before running under Python 3.
        retval = [[t, self.topics_types[t]] for t in self.publishers.iterkeys() if t.startswith(subgraph)]
    finally:
        self.ps_lock.release()
    return 1, "current topics", retval
@apivalidate([])
def getTopicTypes(self, caller_id):
    """
    Retrieve list of topic names and their types.

    @param caller_id: ROS caller id
    @type  caller_id: str
    @rtype: (int, str, [[str,str]] )
    @return: (code, statusMessage, topicTypes). topicTypes is a list of [topicName, topicType] pairs.
    """
    try:
        self.ps_lock.acquire()
        retval = list(self.topics_types.items())
    finally:
        self.ps_lock.release()
    # FIX: status message was "current system state", copy-pasted from
    # getSystemState(); callers only inspect the code/value slots.
    return 1, "current topic types", retval
@apivalidate([[],[], []])
def getSystemState(self, caller_id):
    """
    Retrieve list representation of system state (i.e. publishers, subscribers, and services).

    @param caller_id: ROS caller id
    @type  caller_id: str
    @rtype: (int, str, [[str,[str]], [str,[str]], [str,[str]]])
    @return: (code, statusMessage, systemState).

       System state is in list representation::
         [publishers, subscribers, services].

       publishers is of the form::
         [ [topic1, [topic1Publisher1...topic1PublisherN]] ... ]

       subscribers is of the form::
         [ [topic1, [topic1Subscriber1...topic1SubscriberN]] ... ]

       services is of the form::
         [ [service1, [service1Provider1...service1ProviderN]] ... ]
    """
    # FIX: removed unused local `edges = []` (dead code).
    try:
        self.ps_lock.acquire()
        retval = [r.get_state() for r in (self.publishers, self.subscribers, self.services)]
    finally:
        self.ps_lock.release()
    return 1, "current system state", retval
| {
"content_hash": "cc29dfc26dacd817742a06f05addfb71",
"timestamp": "",
"source": "github",
"line_count": 846,
"max_line_length": 165,
"avg_line_length": 40.523640661938536,
"alnum_prop": 0.5858005425429513,
"repo_name": "MangoMangoDevelopment/neptune",
"id": "bf78a889e0c527d83a45cedd5c525f3b9534112c",
"size": "35905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ros_comm-1.12.0/tools/rosmaster/src/rosmaster/master_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "640"
},
{
"name": "Batchfile",
"bytes": "118"
},
{
"name": "C",
"bytes": "80165"
},
{
"name": "C#",
"bytes": "692379"
},
{
"name": "C++",
"bytes": "1997489"
},
{
"name": "CMake",
"bytes": "327588"
},
{
"name": "EmberScript",
"bytes": "44956"
},
{
"name": "GLSL",
"bytes": "9781"
},
{
"name": "HTML",
"bytes": "12555"
},
{
"name": "Makefile",
"bytes": "25846"
},
{
"name": "Objective-C",
"bytes": "11275"
},
{
"name": "Python",
"bytes": "4265203"
},
{
"name": "Shell",
"bytes": "31793"
}
],
"symlink_target": ""
} |
"""Packaging script for BacklogPy (setuptools)."""
import sys

from setuptools import setup

# Packages shipped in the distribution.
packages = ['BacklogPy', 'BacklogPy.api']

# Runtime dependencies.
requires = [
    'requests',
    'Deprecated'
]

test_requirements = ['mock', 'nose']

# unittest2 backport is only needed on Python < 2.7.
if sys.version_info < (2, 7, 0):
    test_requirements.append('unittest2')

# Read license and long description from the repository root.
with open('LICENSE', 'r') as f:
    _license = f.read()

with open('README.rst', 'r') as f:
    readme = f.read()

# Version is derived from SCM tags and written to BacklogPy/version.py.
# NOTE(review): use_scm_version normally requires setuptools_scm in
# setup_requires or pyproject.toml -- confirm it is declared elsewhere.
use_scm_version = {'write_to': 'BacklogPy/version.py'}

setup(
    name='BacklogPy',
    use_scm_version=use_scm_version,
    extras_require={'deploy': ['wheel', 'twine']},
    description='Backlog API v2 Client Library',
    long_description=readme,
    author='Koudai Aono',
    author_email='koxudaxi@gmail.com',
    url='https://github.com/koxudaxi/BacklogPy',
    packages=packages,
    data_files=[('', ['LICENSE', 'README.rst'])],
    package_dir={'BacklogPy': 'BacklogPy'},
    include_package_data=True,
    install_requires=requires,
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    setup_requires=['nose>=1.0'],
    tests_require=test_requirements,
)
| {
"content_hash": "c395af048adb417e865fa53ea2aa75b5",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 70,
"avg_line_length": 28.982142857142858,
"alnum_prop": 0.6149106592729513,
"repo_name": "koxudaxi/BacklogPy",
"id": "0c29c4d8d0dd93aa2c8c3b746a294373b537dca0",
"size": "1648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226976"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.