import pickle
import time
import datetime
import unittest
import locale
from nose.plugins import skip
from tests import utils
from freezegun import freeze_time
from freezegun.api import FakeDatetime, FakeDate, real_date
class temp_locale(object):
"""Temporarily change the locale."""
def __init__(self, *targets):
self.targets = targets
def __enter__(self):
self.old = locale.setlocale(locale.LC_ALL)
for target in self.targets:
try:
locale.setlocale(locale.LC_ALL, target)
return
except locale.Error:
pass
msg = 'could not set locale to any of: %s' % ', '.join(self.targets)
raise skip.SkipTest(msg)
def __exit__(self, *args):
locale.setlocale(locale.LC_ALL, self.old)
# Small sample of locales where '%x' expands to a dd/mm/yyyy string,
# which can cause trouble when parsed with dateutil.
_dd_mm_yyyy_locales = ['da_DK.UTF-8', 'de_DE.UTF-8', 'fr_FR.UTF-8']
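# A minimal, hedged illustration of why these locales matter (helper only, not a
# collected test): under a day-first locale '%x' renders e.g. '02/01/2012' for
# January 2nd, which a month-first parser such as dateutil's default parse()
# would read as February 1st.
def _demo_dd_mm_yyyy_format():
    with temp_locale(*_dd_mm_yyyy_locales):
        return datetime.date(2012, 1, 2).strftime('%x')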
def test_simple_api():
# time to freeze is always provided in UTC
freezer = freeze_time("2012-01-14")
# expected timestamp must be a timestamp, corresponding to 2012-01-14 UTC
local_time = datetime.datetime(2012, 1, 14)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
freezer.start()
assert time.time() == expected_timestamp
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14)
assert datetime.date.today() == datetime.date(2012, 1, 14)
assert datetime.datetime.now().today() == datetime.datetime(2012, 1, 14)
freezer.stop()
assert time.time() != expected_timestamp
assert datetime.datetime.now() != datetime.datetime(2012, 1, 14)
assert datetime.datetime.utcnow() != datetime.datetime(2012, 1, 14)
freezer = freeze_time("2012-01-10 13:52:01")
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 10, 13, 52, 1)
freezer.stop()
def test_tz_offset():
freezer = freeze_time("2012-01-14 03:21:34", tz_offset=-4)
# expected timestamp must be a timestamp,
# corresponding to 2012-01-14 03:21:34 UTC
# and it doesn't depend on tz_offset
local_time = datetime.datetime(2012, 1, 14, 3, 21, 34)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
freezer.start()
assert datetime.datetime.now() == datetime.datetime(2012, 1, 13, 23, 21, 34)
assert datetime.datetime.utcnow() == datetime.datetime(2012, 1, 14, 3, 21, 34)
assert time.time() == expected_timestamp
freezer.stop()
def test_tz_offset_with_today():
freezer = freeze_time("2012-01-14", tz_offset=-4)
freezer.start()
assert datetime.date.today() == datetime.date(2012, 1, 13)
freezer.stop()
assert datetime.date.today() != datetime.date(2012, 1, 13)
def test_zero_tz_offset_with_time():
# we expect the system to behave like a system with UTC timezone
# at the beginning of the Epoch
freezer = freeze_time('1970-01-01')
freezer.start()
assert datetime.date.today() == datetime.date(1970, 1, 1)
assert datetime.datetime.now() == datetime.datetime(1970, 1, 1)
assert datetime.datetime.utcnow() == datetime.datetime(1970, 1, 1)
assert time.time() == 0.0
freezer.stop()
def test_tz_offset_with_time():
# we expect the system to behave like a system with UTC-4 timezone
# at the beginning of the Epoch (wall clock should be 4 hrs late)
freezer = freeze_time('1970-01-01', tz_offset=-4)
freezer.start()
assert datetime.date.today() == datetime.date(1969, 12, 31)
assert datetime.datetime.now() == datetime.datetime(1969, 12, 31, 20)
assert datetime.datetime.utcnow() == datetime.datetime(1970, 1, 1)
assert time.time() == 0.0
freezer.stop()
def test_time_with_microseconds():
freezer = freeze_time(datetime.datetime(1970, 1, 1, 0, 0, 1, 123456))
freezer.start()
assert time.time() == 1.123456
freezer.stop()
def test_bad_time_argument():
try:
freeze_time("2012-13-14", tz_offset=-4)
except ValueError:
pass
else:
assert False, "Bad values should raise a ValueError"
def test_date_object():
frozen_date = datetime.date(year=2012, month=11, day=10)
date_freezer = freeze_time(frozen_date)
regular_freezer = freeze_time('2012-11-10')
assert date_freezer.time_to_freeze == regular_freezer.time_to_freeze
def test_date_with_locale():
with temp_locale(*_dd_mm_yyyy_locales):
frozen_date = datetime.date(year=2012, month=1, day=2)
date_freezer = freeze_time(frozen_date)
assert date_freezer.time_to_freeze.date() == frozen_date
def test_invalid_type():
try:
freeze_time(int(4))
except TypeError:
pass
else:
assert False, "Bad types should raise a TypeError"
def test_datetime_object():
frozen_datetime = datetime.datetime(year=2012, month=11, day=10,
hour=4, minute=15, second=30)
datetime_freezer = freeze_time(frozen_datetime)
regular_freezer = freeze_time('2012-11-10 04:15:30')
assert datetime_freezer.time_to_freeze == regular_freezer.time_to_freeze
def test_datetime_with_locale():
with temp_locale(*_dd_mm_yyyy_locales):
frozen_datetime = datetime.datetime(year=2012, month=1, day=2)
date_freezer = freeze_time(frozen_datetime)
assert date_freezer.time_to_freeze == frozen_datetime
@freeze_time("2012-01-14")
def test_decorator():
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
@freeze_time("2012-01-14")
class Tester(object):
def test_the_class(self):
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
def test_still_the_same(self):
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
@freeze_time("Jan 14th, 2012")
def test_nice_datetime():
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
def test_context_manager():
with freeze_time("2012-01-14"):
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
assert datetime.datetime.now() != datetime.datetime(2012, 1, 14)
def test_nested_context_manager():
with freeze_time("2012-01-14"):
with freeze_time("2012-12-25"):
_assert_datetime_date_and_time_are_all_equal(datetime.datetime(2012, 12, 25))
_assert_datetime_date_and_time_are_all_equal(datetime.datetime(2012, 1, 14))
assert datetime.datetime.now() > datetime.datetime(2013, 1, 1)
def _assert_datetime_date_and_time_are_all_equal(expected_datetime):
assert datetime.datetime.now() == expected_datetime
assert datetime.date.today() == expected_datetime.date()
datetime_from_time = datetime.datetime.fromtimestamp(time.time())
timezone_adjusted_datetime = datetime_from_time + datetime.timedelta(seconds=time.timezone)
assert timezone_adjusted_datetime == expected_datetime
def test_nested_context_manager_with_tz_offsets():
with freeze_time("2012-01-14 23:00:00", tz_offset=2):
with freeze_time("2012-12-25 19:00:00", tz_offset=6):
assert datetime.datetime.now() == datetime.datetime(2012, 12, 26, 1)
assert datetime.date.today() == datetime.date(2012, 12, 26)
#no assertion for time.time() since it's not affected by tz_offset
assert datetime.datetime.now() == datetime.datetime(2012, 1, 15, 1)
assert datetime.date.today() == datetime.date(2012, 1, 15)
assert datetime.datetime.now() > datetime.datetime(2013, 1, 1)
@freeze_time("Jan 14th, 2012")
def test_isinstance_with_active():
now = datetime.datetime.now()
assert utils.is_fake_datetime(now)
today = datetime.date.today()
assert utils.is_fake_date(today)
def test_isinstance_without_active():
now = datetime.datetime.now()
assert isinstance(now, datetime.datetime)
assert isinstance(now, datetime.date)
today = datetime.date.today()
assert isinstance(today, datetime.date)
@freeze_time('2013-04-09')
class TestUnitTestClassDecorator(unittest.TestCase):
def test_class_decorator_works_on_unittest(self):
self.assertEqual(datetime.date(2013,4,9), datetime.date.today())
@freeze_time('2013-04-09')
class TestUnitTestClassDecoratorWithSetup(unittest.TestCase):
def setUp(self):
pass
def test_class_decorator_works_on_unittest(self):
self.assertEqual(datetime.date(2013,4,9), datetime.date.today())
def assert_class_of_datetimes(right_class, wrong_class):
datetime.datetime.min.__class__.should.equal(right_class)
datetime.datetime.max.__class__.should.equal(right_class)
datetime.date.min.__class__.should.equal(right_class)
datetime.date.max.__class__.should.equal(right_class)
datetime.datetime.min.__class__.shouldnt.equal(wrong_class)
datetime.datetime.max.__class__.shouldnt.equal(wrong_class)
datetime.date.min.__class__.shouldnt.equal(wrong_class)
datetime.date.max.__class__.shouldnt.equal(wrong_class)
def test_min_and_max():
freezer = freeze_time("2012-01-14")
real_datetime = datetime.datetime  # keep a reference to the real class before freezing
freezer.start()
datetime.datetime.min.__class__.should.equal(FakeDatetime)
datetime.datetime.max.__class__.should.equal(FakeDatetime)
datetime.date.min.__class__.should.equal(FakeDate)
datetime.date.max.__class__.should.equal(FakeDate)
datetime.datetime.min.__class__.shouldnt.equal(real_datetime)
datetime.datetime.max.__class__.shouldnt.equal(real_datetime)
datetime.date.min.__class__.shouldnt.equal(real_date)
datetime.date.max.__class__.shouldnt.equal(real_date)
freezer.stop()
datetime.datetime.min.__class__.should.equal(datetime.datetime)
datetime.datetime.max.__class__.should.equal(datetime.datetime)
datetime.date.min.__class__.should.equal(datetime.date)
datetime.date.max.__class__.should.equal(datetime.date)
datetime.datetime.min.__class__.shouldnt.equal(FakeDatetime)
datetime.datetime.max.__class__.shouldnt.equal(FakeDatetime)
datetime.date.min.__class__.shouldnt.equal(FakeDate)
datetime.date.max.__class__.shouldnt.equal(FakeDate)
def assert_pickled_datetimes_equal_original():
min_datetime = datetime.datetime.min
max_datetime = datetime.datetime.max
min_date = datetime.date.min
max_date = datetime.date.max
now = datetime.datetime.now()
today = datetime.date.today()
utc_now = datetime.datetime.utcnow()
assert pickle.loads(pickle.dumps(min_datetime)) == min_datetime
assert pickle.loads(pickle.dumps(max_datetime)) == max_datetime
assert pickle.loads(pickle.dumps(min_date)) == min_date
assert pickle.loads(pickle.dumps(max_date)) == max_date
assert pickle.loads(pickle.dumps(now)) == now
assert pickle.loads(pickle.dumps(today)) == today
assert pickle.loads(pickle.dumps(utc_now)) == utc_now
def test_pickle():
freezer = freeze_time("2012-01-14")
freezer.start()
assert_pickled_datetimes_equal_original()
freezer.stop()
assert_pickled_datetimes_equal_original()
@freeze_time("2014-07-30T01:00:00Z")
def test_freeze_with_timezone_aware_datetime_in_utc():
"""
utcnow() should always return a timezone naive datetime
"""
utc_now = datetime.datetime.utcnow()
assert utc_now.tzinfo == None
@freeze_time("1970-01-01T00:00:00-04:00")
def test_freeze_with_timezone_aware_datetime_in_non_utc():
"""
we expect the system to behave like a system with UTC-4 timezone
at the beginning of the Epoch (wall clock should be 4 hrs late)
"""
utc_now = datetime.datetime.utcnow()
assert utc_now.tzinfo == None
assert utc_now == datetime.datetime(1969, 12, 31, 20)
#! /usr/bin/env python
# Note: this file (model.py) is the same as that in Benchmarks/Pilot1/Uno/uno_baseline_keras2.py except for the following change:
#
#- unoBmk = benchmark.BenchmarkUno(benchmark.file_path, 'uno_default_model.txt', 'keras',
#+ #mymodel_common = candle.Benchmark(file_path,os.getenv("DEFAULT_PARAMS_FILE"),'keras',prog='myprog',desc='My model')
#+ unoBmk = benchmark.BenchmarkUno(benchmark.file_path, os.getenv("DEFAULT_PARAMS_FILE"), 'keras',
from __future__ import division, print_function
import argparse
import collections
import logging
import os
import random
import threading
import numpy as np
import pandas as pd
import keras
from keras import backend as K
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Dropout
from keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, TensorBoard
from keras.utils import get_custom_objects
from keras.utils.vis_utils import plot_model
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from scipy.stats.stats import pearsonr
# For non-interactive plotting
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import uno as benchmark
import candle_keras as candle
import uno_data
from uno_data import CombinedDataLoader, CombinedDataGenerator
logger = logging.getLogger(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def set_seed(seed):
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
if K.backend() == 'tensorflow':
import tensorflow as tf
tf.set_random_seed(seed)
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
# Uncomment when running on an optimized tensorflow where NUM_INTER_THREADS and
# NUM_INTRA_THREADS env vars are set.
# session_conf = tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
# intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS']))
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
def verify_path(path):
folder = os.path.dirname(path)
if folder and not os.path.exists(folder):
os.makedirs(folder)
def set_up_logger(logfile, verbose):
verify_path(logfile)
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if verbose else logging.INFO)
for log in [logger, uno_data.logger]:
log.setLevel(logging.DEBUG)
log.addHandler(fh)
log.addHandler(sh)
def extension_from_parameters(args):
"""Construct string for saving model with annotation of parameters"""
ext = ''
ext += '.A={}'.format(args.activation)
ext += '.B={}'.format(args.batch_size)
ext += '.E={}'.format(args.epochs)
ext += '.O={}'.format(args.optimizer)
# ext += '.LEN={}'.format(args.maxlen)
ext += '.LR={}'.format(args.learning_rate)
ext += '.CF={}'.format(''.join([x[0] for x in sorted(args.cell_features)]))
ext += '.DF={}'.format(''.join([x[0] for x in sorted(args.drug_features)]))
if args.feature_subsample > 0:
ext += '.FS={}'.format(args.feature_subsample)
if args.drop > 0:
ext += '.DR={}'.format(args.drop)
if args.warmup_lr:
ext += '.wu_lr'
if args.reduce_lr:
ext += '.re_lr'
if args.residual:
ext += '.res'
if args.use_landmark_genes:
ext += '.L1000'
if args.no_gen:
ext += '.ng'
for i, n in enumerate(args.dense):
if n > 0:
ext += '.D{}={}'.format(i+1, n)
if args.dense_feature_layers != args.dense:
for i, n in enumerate(args.dense):
if n > 0:
ext += '.FD{}={}'.format(i+1, n)
return ext
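# Hedged, self-contained sketch of what extension_from_parameters() produces for a
# set of hypothetical arguments (the values below are illustrative, not a shipped config).
def _demo_extension_from_parameters():
    class _Args(object):
        activation = 'relu'
        batch_size = 32
        epochs = 10
        optimizer = 'sgd'
        learning_rate = 0.01
        cell_features = ['rnaseq']
        drug_features = ['descriptors', 'fingerprints']
        feature_subsample = 0
        drop = 0.1
        warmup_lr = False
        reduce_lr = True
        residual = False
        use_landmark_genes = True
        no_gen = False
        dense = [1000, 1000, 1000]
        dense_feature_layers = [1000, 1000, 1000]
    # Expected result:
    # '.A=relu.B=32.E=10.O=sgd.LR=0.01.CF=r.DF=df.DR=0.1.re_lr.L1000.D1=1000.D2=1000.D3=1000'
    return extension_from_parameters(_Args())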
def discretize(y, bins=5):
percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]
thresholds = [np.percentile(y, x) for x in percentiles]
classes = np.digitize(y, thresholds)
return classes
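# Worked example for discretize(): for y = [0, 1, ..., 9] and bins=5 the interior
# percentiles are [20, 40, 60, 80], giving thresholds [1.8, 3.6, 5.4, 7.2], so
# np.digitize assigns classes [0, 0, 1, 1, 2, 2, 3, 3, 4, 4] (two samples per bin).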
def r2(y_true, y_pred):
SS_res = K.sum(K.square(y_true - y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res/(SS_tot + K.epsilon()))
def mae(y_true, y_pred):
return keras.metrics.mean_absolute_error(y_true, y_pred)
def evaluate_prediction(y_true, y_pred):
mse = mean_squared_error(y_true, y_pred)
mae = mean_absolute_error(y_true, y_pred)
r2 = r2_score(y_true, y_pred)
corr, _ = pearsonr(y_true, y_pred)
return {'mse': mse, 'mae': mae, 'r2': r2, 'corr': corr}
def log_evaluation(metric_outputs, description='Comparing y_true and y_pred:'):
logger.info(description)
for metric, value in metric_outputs.items():
logger.info(' {}: {:.4f}'.format(metric, value))
def plot_history(out, history, metric='loss', title=None):
title = title or 'model {}'.format(metric)
val_metric = 'val_{}'.format(metric)
plt.figure(figsize=(8, 6))
plt.plot(history.history[metric], marker='o')
plt.plot(history.history[val_metric], marker='d')
plt.title(title)
plt.ylabel(metric)
plt.xlabel('epoch')
plt.legend(['train_{}'.format(metric), 'val_{}'.format(metric)], loc='upper center')
png = '{}.plot.{}.png'.format(out, metric)
plt.savefig(png, bbox_inches='tight')
class LoggingCallback(Callback):
def __init__(self, print_fcn=print):
Callback.__init__(self)
self.print_fcn = print_fcn
def on_epoch_end(self, epoch, logs={}):
msg = "[Epoch: %i] %s" % (epoch, ", ".join("%s: %f" % (k, v) for k, v in sorted(logs.items())))
self.print_fcn(msg)
class PermanentDropout(Dropout):
def __init__(self, rate, **kwargs):
super(PermanentDropout, self).__init__(rate, **kwargs)
self.uses_learning_phase = False
def call(self, x, mask=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(x)
x = K.dropout(x, self.rate, noise_shape)
return x
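# PermanentDropout applies dropout at inference time as well as during training
# (standard Dropout is bypassed outside the learning phase), so repeated predict()
# calls on the same input generally differ. A minimal hedged sketch, assuming the
# same Keras 2.x functional API used elsewhere in this file:
def _demo_permanent_dropout():
    inp = Input(shape=(4,))
    out = PermanentDropout(0.5)(Dense(8)(inp))
    m = Model(inp, out)
    x = np.ones((1, 4))
    # With ordinary Dropout these two calls would be identical; with
    # PermanentDropout each call samples a fresh dropout mask.
    return m.predict(x), m.predict(x)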
class ModelRecorder(Callback):
def __init__(self, save_all_models=False):
Callback.__init__(self)
self.save_all_models = save_all_models
get_custom_objects()['PermanentDropout'] = PermanentDropout
def on_train_begin(self, logs={}):
self.val_losses = []
self.best_val_loss = np.Inf
self.best_model = None
def on_epoch_end(self, epoch, logs={}):
val_loss = logs.get('val_loss')
self.val_losses.append(val_loss)
if val_loss < self.best_val_loss:
self.best_model = keras.models.clone_model(self.model)
self.best_val_loss = val_loss
def build_feature_model(input_shape, name='', dense_layers=[1000, 1000],
activation='relu', residual=False,
dropout_rate=0, permanent_dropout=True):
x_input = Input(shape=input_shape)
h = x_input
for i, layer in enumerate(dense_layers):
x = h
h = Dense(layer, activation=activation)(h)
if dropout_rate > 0:
if permanent_dropout:
h = PermanentDropout(dropout_rate)(h)
else:
h = Dropout(dropout_rate)(h)
if residual:
try:
h = keras.layers.add([h, x])
except ValueError:
pass
model = Model(x_input, h, name=name)
return model
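# Note on the residual branch in build_feature_model (and again in build_model
# below): keras.layers.add() raises ValueError when consecutive dense layers have
# different widths, so the skip connection is simply omitted for those layers
# rather than failing the model build.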
def build_model(loader, args, permanent_dropout=True, silent=False):
input_models = {}
dropout_rate = args.drop
for fea_type, shape in loader.feature_shapes.items():
base_type = fea_type.split('.')[0]
if base_type in ['cell', 'drug']:
box = build_feature_model(input_shape=shape, name=fea_type,
dense_layers=args.dense_feature_layers,
dropout_rate=dropout_rate, permanent_dropout=permanent_dropout)
if not silent:
logger.debug('Feature encoding submodel for %s:', fea_type)
box.summary(print_fn=logger.debug)
input_models[fea_type] = box
inputs = []
encoded_inputs = []
for fea_name, fea_type in loader.input_features.items():
shape = loader.feature_shapes[fea_type]
fea_input = Input(shape, name='input.'+fea_name)
inputs.append(fea_input)
if fea_type in input_models:
input_model = input_models[fea_type]
encoded = input_model(fea_input)
else:
encoded = fea_input
encoded_inputs.append(encoded)
merged = keras.layers.concatenate(encoded_inputs)
h = merged
for i, layer in enumerate(args.dense):
x = h
h = Dense(layer, activation=args.activation)(h)
if dropout_rate > 0:
if permanent_dropout:
h = PermanentDropout(dropout_rate)(h)
else:
h = Dropout(dropout_rate)(h)
if args.residual:
try:
h = keras.layers.add([h, x])
except ValueError:
pass
output = Dense(1)(h)
return Model(inputs, output)
def initialize_parameters():
# Build benchmark object
#mymodel_common = candle.Benchmark(file_path,os.getenv("DEFAULT_PARAMS_FILE"),'keras',prog='myprog',desc='My model')
unoBmk = benchmark.BenchmarkUno(benchmark.file_path, os.getenv("DEFAULT_PARAMS_FILE"), 'keras',
prog='uno_baseline', desc='Build neural network based models to predict tumor response to single and paired drugs.')
# Initialize parameters
gParameters = candle.initialize_parameters(unoBmk)
#benchmark.logger.info('Params: {}'.format(gParameters))
return gParameters
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
def run(params):
args = Struct(**params)
set_seed(args.rng_seed)
ext = extension_from_parameters(args)
verify_path(args.save)
prefix = args.save + ext
logfile = args.logfile if args.logfile else prefix+'.log'
set_up_logger(logfile, args.verbose)
logger.info('Params: {}'.format(params))
loader = CombinedDataLoader(seed=args.rng_seed)
loader.load(cache=args.cache,
ncols=args.feature_subsample,
cell_features=args.cell_features,
drug_features=args.drug_features,
drug_median_response_min=args.drug_median_response_min,
drug_median_response_max=args.drug_median_response_max,
use_landmark_genes=args.use_landmark_genes,
use_filtered_genes=args.use_filtered_genes,
preprocess_rnaseq=args.preprocess_rnaseq,
single=args.single,
train_sources=args.train_sources,
test_sources=args.test_sources,
embed_feature_source=not args.no_feature_source,
encode_response_source=not args.no_response_source,
)
val_split = args.validation_split
train_split = 1 - val_split
if args.export_data:
fname = args.export_data
loader.partition_data(cv_folds=args.cv, train_split=train_split, val_split=val_split,
cell_types=args.cell_types, by_cell=args.by_cell, by_drug=args.by_drug)
train_gen = CombinedDataGenerator(loader, batch_size=args.batch_size, shuffle=args.shuffle)
val_gen = CombinedDataGenerator(loader, partition='val', batch_size=args.batch_size, shuffle=args.shuffle)
x_train_list, y_train = train_gen.get_slice(size=train_gen.size, dataframe=True, single=args.single)
x_val_list, y_val = val_gen.get_slice(size=val_gen.size, dataframe=True, single=args.single)
df_train = pd.concat([y_train] + x_train_list, axis=1)
df_val = pd.concat([y_val] + x_val_list, axis=1)
df = pd.concat([df_train, df_val]).reset_index(drop=True)
if args.growth_bins > 1:
df = uno_data.discretize(df, 'Growth', bins=args.growth_bins)
df.to_csv(fname, sep='\t', index=False, float_format="%.3g")
return
loader.partition_data(cv_folds=args.cv, train_split=train_split, val_split=val_split,
cell_types=args.cell_types, by_cell=args.by_cell, by_drug=args.by_drug)
model = build_model(loader, args)
logger.info('Combined model:')
model.summary(print_fn=logger.info)
# plot_model(model, to_file=prefix+'.model.png', show_shapes=True)
if args.cp:
model_json = model.to_json()
with open(prefix+'.model.json', 'w') as f:
print(model_json, file=f)
def warmup_scheduler(epoch):
lr = args.learning_rate or base_lr * args.batch_size/100
if epoch <= 5:
K.set_value(model.optimizer.lr, (base_lr * (5-epoch) + lr * epoch) / 5)
logger.debug('Epoch {}: lr={:.5g}'.format(epoch, K.get_value(model.optimizer.lr)))
return K.get_value(model.optimizer.lr)
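# Worked example of the warm-up schedule above (hypothetical values): with
# base_lr=0.001 and a target lr=0.01, epochs 0..5 get
# 0.001, 0.0028, 0.0046, 0.0064, 0.0082, 0.01 -- a linear ramp from base_lr to lr;
# after epoch 5 the scheduler just reports the optimizer's current learning rate.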
df_pred_list = []
cv_ext = ''
cv = args.cv if args.cv > 1 else 1
for fold in range(cv):
if args.cv > 1:
logger.info('Cross validation fold {}/{}:'.format(fold+1, cv))
cv_ext = '.cv{}'.format(fold+1)
model = build_model(loader, args, silent=True)
optimizer = optimizers.deserialize({'class_name': args.optimizer, 'config': {}})
base_lr = args.base_lr or K.get_value(optimizer.lr)
if args.learning_rate:
K.set_value(optimizer.lr, args.learning_rate)
model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])
# calculate trainable and non-trainable params
params.update(candle.compute_trainable_params(model))
candle_monitor = candle.CandleRemoteMonitor(params=params)
timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)
warmup_lr = LearningRateScheduler(warmup_scheduler)
checkpointer = ModelCheckpoint(prefix+cv_ext+'.weights.h5', save_best_only=True, save_weights_only=True)
tensorboard = TensorBoard(log_dir="tb/tb{}{}".format(ext, cv_ext))
history_logger = LoggingCallback(logger.debug)
model_recorder = ModelRecorder()
# callbacks = [history_logger, model_recorder]
callbacks = [candle_monitor, timeout_monitor, history_logger, model_recorder]
if args.reduce_lr:
callbacks.append(reduce_lr)
if args.warmup_lr:
callbacks.append(warmup_lr)
if args.cp:
callbacks.append(checkpointer)
if args.tb:
callbacks.append(tensorboard)
train_gen = CombinedDataGenerator(loader, fold=fold, batch_size=args.batch_size, shuffle=args.shuffle)
val_gen = CombinedDataGenerator(loader, partition='val', fold=fold, batch_size=args.batch_size, shuffle=args.shuffle)
df_val = val_gen.get_response(copy=True)
y_val = df_val['Growth'].values
y_shuf = np.random.permutation(y_val)
log_evaluation(evaluate_prediction(y_val, y_shuf),
description='Between random pairs in y_val:')
if args.no_gen:
x_train_list, y_train = train_gen.get_slice(size=train_gen.size, single=args.single)
x_val_list, y_val = val_gen.get_slice(size=val_gen.size, single=args.single)
history = model.fit(x_train_list, y_train,
batch_size=args.batch_size,
epochs=args.epochs,
callbacks=callbacks,
validation_data=(x_val_list, y_val))
else:
logger.info('Data points per epoch: train = %d, val = %d',train_gen.size, val_gen.size)
logger.info('Steps per epoch: train = %d, val = %d',train_gen.steps, val_gen.steps)
history = model.fit_generator(train_gen.flow(single=args.single), train_gen.steps,
epochs=args.epochs,
callbacks=callbacks,
validation_data=val_gen.flow(single=args.single),
validation_steps=val_gen.steps)
if args.cp:
model.load_weights(prefix+cv_ext+'.weights.h5')
# model = model_recorder.best_model
if args.no_gen:
y_val_pred = model.predict(x_val_list, batch_size=args.batch_size)
else:
val_gen.reset()
y_val_pred = model.predict_generator(val_gen.flow(single=args.single), val_gen.steps)
y_val_pred = y_val_pred[:val_gen.size]
y_val_pred = y_val_pred.flatten()
scores = evaluate_prediction(y_val, y_val_pred)
log_evaluation(scores)
df_val = df_val.assign(PredictedGrowth=y_val_pred, GrowthError=y_val_pred-y_val)
df_pred_list.append(df_val)
plot_history(prefix, history, 'loss')
plot_history(prefix, history, 'r2')
pred_fname = prefix + '.predicted.tsv'
df_pred = pd.concat(df_pred_list)
df_pred.sort_values(['Source', 'Sample', 'Drug1', 'Drug2', 'Dose1', 'Dose2', 'Growth'], inplace=True)
df_pred.to_csv(pred_fname, sep='\t', index=False, float_format='%.4g')
if args.cv > 1:
scores = evaluate_prediction(df_pred['Growth'], df_pred['PredictedGrowth'])
log_evaluation(scores, description='Combining cross validation folds:')
for test_source in loader.test_sep_sources:
test_gen = CombinedDataGenerator(loader, partition='test', batch_size=args.batch_size, source=test_source)
df_test = test_gen.get_response(copy=True)
y_test = df_test['Growth'].values
n_test = len(y_test)
if n_test == 0:
continue
if args.no_gen:
x_test_list, y_test = test_gen.get_slice(size=test_gen.size, single=args.single)
y_test_pred = model.predict(x_test_list, batch_size=args.batch_size)
else:
y_test_pred = model.predict_generator(test_gen.flow(single=args.single), test_gen.steps)
y_test_pred = y_test_pred[:test_gen.size]
y_test_pred = y_test_pred.flatten()
scores = evaluate_prediction(y_test, y_test_pred)
log_evaluation(scores, description='Testing on data from {} ({})'.format(test_source, n_test))
if K.backend() == 'tensorflow':
K.clear_session()
logger.handlers = []
return history
def main():
params = initialize_parameters()
run(params)
if __name__ == '__main__':
main()
if K.backend() == 'tensorflow':
K.clear_session()
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import re
from pystachio import Empty, Ref
from twitter.common.lang import Compatibility
from apache.aurora.config.schema.base import HealthCheckConfig, MesosContext, MesosTaskInstance
from apache.thermos.config.loader import ThermosTaskValidator
from gen.apache.aurora.api.constants import AURORA_EXECUTOR_NAME, GOOD_IDENTIFIER_PATTERN_PYTHON
from gen.apache.aurora.api.ttypes import (
Constraint,
Container,
CronCollisionPolicy,
DockerContainer,
DockerParameter,
ExecutorConfig,
Identity,
JobConfiguration,
JobKey,
LimitConstraint,
MesosContainer,
Metadata,
TaskConfig,
TaskConstraint,
ValueConstraint
)
__all__ = (
'InvalidConfig',
'convert'
)
class InvalidConfig(ValueError):
pass
def constraints_to_thrift(constraints):
"""Convert a python dictionary to a set of Constraint thrift objects."""
result = set()
for attribute, constraint_value in constraints.items():
assert isinstance(attribute, Compatibility.string) and (
isinstance(constraint_value, Compatibility.string)), (
"Both attribute name and value in constraints must be string")
constraint = Constraint()
constraint.name = attribute
task_constraint = TaskConstraint()
if constraint_value.startswith('limit:'):
task_constraint.limit = LimitConstraint()
try:
task_constraint.limit.limit = int(constraint_value.replace('limit:', '', 1))
except ValueError:
print('%s is not a valid limit value, must be integer' % constraint_value)
raise
else:
# Strip off the leading negation if present.
negated = constraint_value.startswith('!')
if negated:
constraint_value = constraint_value[1:]
task_constraint.value = ValueConstraint(negated, set(constraint_value.split(',')))
constraint.constraint = task_constraint
result.add(constraint)
return result
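# Hedged illustration (hypothetical constraint values): a config dict such as
# {'host': 'limit:1', 'rack': '!rack1,rack2', 'zone': 'east'} becomes three
# Constraint objects -- a LimitConstraint(limit=1) on 'host', a negated
# ValueConstraint over {'rack1', 'rack2'} on 'rack', and a non-negated
# ValueConstraint over {'east'} on 'zone'.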
def task_instance_from_job(job, instance, hostname):
instance_context = MesosContext(instance=instance, hostname=hostname)
health_check_config = HealthCheckConfig()
if job.has_health_check_config():
health_check_config = job.health_check_config()
ti = MesosTaskInstance(task=job.task(),
role=job.role(),
health_check_config=health_check_config,
instance=instance)
if job.has_announce():
ti = ti(announce=job.announce())
if job.has_environment():
ti = ti(environment=job.environment())
if job.has_lifecycle():
ti = ti(lifecycle=job.lifecycle())
return ti.bind(mesos=instance_context)
def fully_interpolated(pystachio_object, coerce_fn=lambda i: i):
# Extract a fully-interpolated unwrapped object from pystachio_object or raise InvalidConfig.
#
# TODO(ksweeney): Remove this once Pystachio 1.0 changes the behavior of interpolate() to return
# unwrapped objects and fail when there are any unbound refs.
if not pystachio_object.check().ok():
raise InvalidConfig(pystachio_object.check().message())
# If an object type-checks, it's okay to use the raw value from the wrapped object returned by
# interpolate. Without the previous check value.get() could return a string with mustaches
# instead of an object of the expected type.
value, _ = pystachio_object.interpolate()
return coerce_fn(value.get())
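# Hedged sketch of fully_interpolated() on pystachio's simple value types (examples
# only, not used by this module): fully_interpolated(String('hello')) returns the
# plain str 'hello'; fully_interpolated(Integer(5), float) returns 5.0 via coerce_fn;
# and an object whose check() is not ok -- e.g. one still containing an unbound
# {{ref}} -- raises InvalidConfig instead of leaking un-expanded mustaches.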
def parse_enum(enum_type, value):
enum_value = enum_type._NAMES_TO_VALUES.get(value.get().upper())
if enum_value is None:
raise InvalidConfig('Invalid %s type: %s' % (enum_type, value.get()))
return enum_value
def select_cron_policy(cron_policy):
return parse_enum(CronCollisionPolicy, cron_policy)
def select_service_bit(job):
return fully_interpolated(job.service(), bool)
def create_container_config(container):
if container is Empty:
return Container(MesosContainer(), None)
elif container.docker() is not Empty:
params = list()
if container.docker().parameters() is not Empty:
for p in fully_interpolated(container.docker().parameters()):
params.append(DockerParameter(p['name'], p['value']))
return Container(None, DockerContainer(fully_interpolated(container.docker().image()), params))
else:
raise InvalidConfig('If a container is specified it must set one type.')
# TODO(wickman): We should revert to using the MesosTaskInstance.
#
# Using the MesosJob instead of the MesosTaskInstance was to allow for
# planned future use of fields such as 'cluster' and to allow for conversion
# from Job=>Task to be done entirely on the executor, but instead this had
# made it impossible to run idempotent updates.
#
# In the meantime, we are erasing fields of the Job that are controversial.
# This achieves roughly the same effect as using the MesosTaskInstance.
ALIASED_FIELDS = (
'update_config',
'instances'
)
def filter_aliased_fields(job):
return job(**dict((key, Empty) for key in ALIASED_FIELDS))
def assert_valid_field(field, identifier):
VALID_IDENTIFIER = re.compile(GOOD_IDENTIFIER_PATTERN_PYTHON)
if not isinstance(identifier, Compatibility.string):
raise InvalidConfig("%s must be a string" % field)
if not VALID_IDENTIFIER.match(identifier):
raise InvalidConfig("Invalid %s '%s'" % (field, identifier))
return identifier
MESOS_INSTANCE_REF = Ref.from_address('mesos.instance')
MESOS_HOSTNAME_REF = Ref.from_address('mesos.hostname')
THERMOS_PORT_SCOPE_REF = Ref.from_address('thermos.ports')
THERMOS_TASK_ID_REF = Ref.from_address('thermos.task_id')
def convert(job, metadata=frozenset(), ports=frozenset()):
"""Convert a Pystachio MesosJob to an Aurora Thrift JobConfiguration."""
owner = Identity(role=fully_interpolated(job.role()), user=getpass.getuser())
key = JobKey(
role=assert_valid_field('role', fully_interpolated(job.role())),
environment=assert_valid_field('environment', fully_interpolated(job.environment())),
name=assert_valid_field('name', fully_interpolated(job.name())))
task_raw = job.task()
MB = 1024 * 1024
task = TaskConfig()
def not_empty_or(item, default):
return default if item is Empty else fully_interpolated(item)
# job components
task.jobName = fully_interpolated(job.name())
task.environment = fully_interpolated(job.environment())
task.production = fully_interpolated(job.production(), bool)
task.isService = select_service_bit(job)
task.maxTaskFailures = fully_interpolated(job.max_task_failures())
task.priority = fully_interpolated(job.priority())
task.contactEmail = not_empty_or(job.contact(), None)
task.tier = not_empty_or(job.tier(), None)
# Add metadata to a task, to display in the scheduler UI.
task.metadata = frozenset(Metadata(key=str(key), value=str(value)) for key, value in metadata)
# task components
if not task_raw.has_resources():
raise InvalidConfig('Task must specify resources!')
if (fully_interpolated(task_raw.resources().ram()) == 0
or fully_interpolated(task_raw.resources().disk()) == 0):
raise InvalidConfig('Must specify ram and disk resources, got ram:%r disk:%r' % (
fully_interpolated(task_raw.resources().ram()),
fully_interpolated(task_raw.resources().disk())))
task.numCpus = fully_interpolated(task_raw.resources().cpu())
task.ramMb = fully_interpolated(task_raw.resources().ram()) / MB
task.diskMb = fully_interpolated(task_raw.resources().disk()) / MB
if task.numCpus <= 0 or task.ramMb <= 0 or task.diskMb <= 0:
raise InvalidConfig('Task has invalid resources. cpu/ramMb/diskMb must all be positive: '
'cpu:%r ramMb:%r diskMb:%r' % (task.numCpus, task.ramMb, task.diskMb))
task.job = key
task.owner = owner
task.requestedPorts = ports
task.taskLinks = {} # See AURORA-739
task.constraints = constraints_to_thrift(not_empty_or(job.constraints(), {}))
task.container = create_container_config(job.container())
underlying, refs = job.interpolate()
# need to fake an instance id for the sake of schema checking
underlying_checked = underlying.bind(mesos={'instance': 31337, 'hostname': ''})
try:
ThermosTaskValidator.assert_valid_task(underlying_checked.task())
except ThermosTaskValidator.InvalidTaskError as e:
raise InvalidConfig('Task is invalid: %s' % e)
if not underlying_checked.check().ok():
raise InvalidConfig('Job not fully specified: %s' % underlying.check().message())
unbound = []
for ref in refs:
if ref in (THERMOS_TASK_ID_REF, MESOS_INSTANCE_REF, MESOS_HOSTNAME_REF) or (
Ref.subscope(THERMOS_PORT_SCOPE_REF, ref)):
continue
unbound.append(ref)
if unbound:
raise InvalidConfig('Config contains unbound variables: %s' % ' '.join(map(str, unbound)))
task.executorConfig = ExecutorConfig(
name=AURORA_EXECUTOR_NAME,
data=filter_aliased_fields(underlying).json_dumps())
return JobConfiguration(
key=key,
owner=owner,
cronSchedule=not_empty_or(job.cron_schedule(), None),
cronCollisionPolicy=select_cron_policy(job.cron_collision_policy()),
taskConfig=task,
instanceCount=fully_interpolated(job.instances()))
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.compute import claims
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova.db import api as db
from nova import exception
from nova.network.neutronv2 import api as neutron_api
from nova import objects
from nova import test
from nova.tests.unit.compute import test_compute
from nova.tests.unit.image import fake as fake_image
CONF = nova.conf.CONF
def _fake_resources():
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0
}
return objects.ComputeNode(**resources)
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'snapshot')
@mock.patch.object(nova.compute.manager.ComputeManager, '_get_power_state')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def _shelve_instance(self, shelved_offload_time, mock_notify,
mock_notify_instance_usage, mock_get_power_state,
mock_snapshot, mock_power_off, mock_terminate,
mock_get_bdms, clean_shutdown=True,
guest_power_state=power_state.RUNNING):
mock_get_power_state.return_value = 123
CONF.set_override('shelved_offload_time', shelved_offload_time)
host = 'fake-mini'
instance = self._create_fake_instance_obj(
params={'host': host, 'power_state': guest_power_state})
image_id = 'fake_image_id'
host = 'fake-mini'
self.useFixture(utils_fixture.TimeFixture())
instance.task_state = task_states.SHELVING
instance.save()
fake_bdms = None
if shelved_offload_time == 0:
fake_bdms = objects.BlockDeviceMappingList()
mock_get_bdms.return_value = fake_bdms
tracking = {'last_state': instance.vm_state}
def check_save(expected_task_state=None):
self.assertEqual(123, instance.power_state)
if tracking['last_state'] == vm_states.ACTIVE:
if CONF.shelved_offload_time == 0:
self.assertEqual(task_states.SHELVING_OFFLOADING,
instance.task_state)
else:
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED, instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING],
expected_task_state)
self.assertIn('shelved_at', instance.system_metadata)
self.assertEqual(image_id,
instance.system_metadata['shelved_image_id'])
self.assertEqual(host,
instance.system_metadata['shelved_host'])
tracking['last_state'] = instance.vm_state
elif (tracking['last_state'] == vm_states.SHELVED and
CONF.shelved_offload_time == 0):
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED_OFFLOADED,
instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_OFFLOADING],
expected_task_state)
tracking['last_state'] = instance.vm_state
elif (tracking['last_state'] == vm_states.SHELVED_OFFLOADED and
CONF.shelved_offload_time == 0):
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
self.assertIsNone(expected_task_state)
else:
self.fail('Unexpected save!')
with test.nested(
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute.network_api,
'cleanup_instance_network_on_host')) as (
mock_save, mock_cleanup
):
mock_save.side_effect = check_save
self.compute.shelve_instance(self.context, instance,
image_id=image_id,
clean_shutdown=clean_shutdown)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shelve', phase='start', bdms=fake_bdms),
mock.call(self.context, instance, 'fake-mini',
action='shelve', phase='end', bdms=fake_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'shelve.start'),
mock.call(self.context, instance, 'shelve.end')]
mock_power_off_call_list = []
mock_get_power_state_call_list = [
mock.call(self.context, instance)]
mock_cleanup_call_list = []
if clean_shutdown:
if guest_power_state == power_state.PAUSED:
mock_power_off_call_list.append(mock.call(instance, 0, 0))
else:
mock_power_off_call_list.append(
mock.call(instance, CONF.shutdown_timeout,
CONF.compute.shutdown_retry_interval))
else:
mock_power_off_call_list.append(mock.call(instance, 0, 0))
if CONF.shelved_offload_time == 0:
mock_notify_instance_usage_call_list.extend([
mock.call(self.context, instance, 'shelve_offload.start'),
mock.call(self.context, instance, 'shelve_offload.end')])
mock_power_off_call_list.append(mock.call(instance, 0, 0))
mock_get_power_state_call_list.append(mock.call(self.context,
instance))
# instance.host is replaced with host because the
# original instance.host is cleared after
# ComputeManager.shelve_instance executes with
# shelved_offload_time == 0
mock_cleanup_call_list.append(mock.call(self.context, instance,
host))
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
mock_power_off.assert_has_calls(mock_power_off_call_list)
mock_cleanup.assert_has_calls(mock_cleanup_call_list)
mock_snapshot.assert_called_once_with(self.context, instance,
'fake_image_id', mock.ANY)
mock_get_power_state.assert_has_calls(mock_get_power_state_call_list)
if CONF.shelved_offload_time == 0:
self.assertTrue(mock_terminate.called)
def test_shelve(self):
self._shelve_instance(-1)
def test_shelve_forced_shutdown(self):
self._shelve_instance(-1, clean_shutdown=False)
def test_shelve_and_offload(self):
self._shelve_instance(0)
def test_shelve_paused_instance(self):
self._shelve_instance(-1, guest_power_state=power_state.PAUSED)
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
def test_shelve_offload(self, mock_power_off):
instance = self._shelve_offload()
mock_power_off.assert_called_once_with(instance,
CONF.shutdown_timeout, CONF.compute.shutdown_retry_interval)
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'power_off')
def test_shelve_offload_forced_shutdown(self, mock_power_off):
instance = self._shelve_offload(clean_shutdown=False)
mock_power_off.assert_called_once_with(instance, 0, 0)
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'delete_allocation_for_shelve_offloaded_instance')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_update_resource_tracker')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_get_power_state', return_value=123)
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
mock_get_power_state, mock_update_resource_tracker,
mock_delete_alloc, mock_terminate, mock_get_bdms,
mock_event, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
instance.save()
self.useFixture(utils_fixture.TimeFixture())
fake_bdms = objects.BlockDeviceMappingList()
mock_get_bdms.return_value = fake_bdms
with mock.patch.object(instance, 'save'):
self.compute.shelve_offload_instance(self.context, instance,
clean_shutdown=clean_shutdown)
mock_notify.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='shelve_offload', phase='start',
bdms=fake_bdms),
mock.call(self.context, instance, 'fake-mini',
action='shelve_offload', phase='end',
bdms=fake_bdms)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertTrue(mock_terminate.called)
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'shelve_offload.start'),
mock.call(self.context, instance, 'shelve_offload.end')]
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
# instance.host is replaced with host because the
# original instance.host is cleared after
# ComputeManager.shelve_offload_instance executes
mock_get_power_state.assert_called_once_with(
self.context, instance)
mock_update_resource_tracker.assert_called_once_with(self.context,
instance)
mock_delete_alloc.assert_called_once_with(self.context, instance)
mock_event.assert_called_once_with(self.context,
'compute_shelve_offload_instance',
CONF.host,
instance.uuid)
return instance
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_prep_block_device', return_value='fake_bdm')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_get_power_state', return_value=123)
@mock.patch.object(neutron_api.API, 'setup_instance_network_on_host')
def test_unshelve(self, mock_setup_network,
mock_get_power_state, mock_spawn,
mock_prep_block_device, mock_notify_instance_usage,
mock_notify_instance_action,
mock_get_bdms):
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
instance.task_state = task_states.UNSHELVING
instance.save()
image = {'id': uuids.image_id}
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
host = 'fake-mini'
cur_time = timeutils.utcnow()
# Adding shelved_* keys in system metadata to verify
# whether those are deleted after the unshelve call.
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = cur_time.isoformat()
sys_meta['shelved_image_id'] = image['id']
sys_meta['shelved_host'] = host
instance.system_metadata = sys_meta
self.deleted_image_id = None
def fake_delete(self2, ctxt, image_id):
self.deleted_image_id = image_id
def fake_claim(context, instance, node, limits):
instance.host = self.compute.host
requests = objects.InstancePCIRequests(requests=[])
return claims.Claim(context, instance, test_compute.NODENAME,
self.rt, _fake_resources(),
requests)
tracking = {
'last_state': instance.task_state,
'spawned': False,
}
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
if tracking['spawned']:
self.assertIsNone(instance.task_state)
else:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['spawned'] = True
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
fake_image.stub_out_image_service(self)
self.stub_out('nova.tests.unit.image.fake._FakeImageService.delete',
fake_delete)
with mock.patch.object(self.rt, 'instance_claim',
side_effect=fake_claim), \
mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(
self.context, instance, image=image,
filter_properties=filter_properties,
node=node)
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='start', bdms=mock_bdms),
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='end', bdms=mock_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'unshelve.start'),
mock.call(self.context, instance, 'unshelve.end')]
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
mock_prep_block_device.assert_called_once_with(self.context,
instance, mock.ANY)
mock_setup_network.assert_called_once_with(self.context, instance,
self.compute.host)
mock_spawn.assert_called_once_with(self.context, instance,
test.MatchType(objects.ImageMeta), injected_files=[],
admin_password=None, allocations={}, network_info=[],
block_device_info='fake_bdm')
self.mock_get_allocs.assert_called_once_with(self.context,
instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
self.assertNotIn('shelved_at', instance.system_metadata)
self.assertNotIn('shelved_image_id', instance.system_metadata)
self.assertNotIn('shelved_host', instance.system_metadata)
self.assertEqual(image['id'], self.deleted_image_id)
self.assertEqual(instance.host, self.compute.host)
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertEqual(self.compute.host, instance.host)
self.assertFalse(instance.auto_disk_config)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
'instance_claim')
@mock.patch.object(neutron_api.API, 'setup_instance_network_on_host')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_get_power_state', return_value=123)
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_prep_block_device', return_value='fake_bdm')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.utils.get_image_from_system_metadata')
def test_unshelve_volume_backed(self, mock_image_meta,
mock_notify_instance_usage,
mock_prep_block_device, mock_spawn,
mock_get_power_state,
mock_setup_network, mock_instance_claim,
mock_notify_instance_action,
mock_get_bdms):
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
instance.task_state = task_states.UNSHELVING
instance.save()
image_meta = {'properties': {'base_image_ref': uuids.image_id}}
mock_image_meta.return_value = image_meta
tracking = {'last_state': instance.task_state}
def fake_claim(context, instance, node, limits):
instance.host = self.compute.host
requests = objects.InstancePCIRequests(requests=[])
return claims.Claim(context, instance, test_compute.NODENAME,
self.rt, _fake_resources(),
requests)
mock_instance_claim.side_effect = fake_claim
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertFalse(instance.auto_disk_config)
self.assertIsNone(instance.task_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node)
mock_notify_instance_action.assert_has_calls([
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='start', bdms=mock_bdms),
mock.call(self.context, instance, 'fake-mini',
action='unshelve', phase='end', bdms=mock_bdms)])
# prepare expect call lists
mock_notify_instance_usage_call_list = [
mock.call(self.context, instance, 'unshelve.start'),
mock.call(self.context, instance, 'unshelve.end')]
mock_notify_instance_usage.assert_has_calls(
mock_notify_instance_usage_call_list)
mock_prep_block_device.assert_called_once_with(self.context, instance,
mock.ANY)
mock_setup_network.assert_called_once_with(self.context, instance,
self.compute.host)
mock_instance_claim.assert_called_once_with(self.context, instance,
test_compute.NODENAME,
limits)
mock_spawn.assert_called_once_with(self.context, instance,
test.MatchType(objects.ImageMeta),
injected_files=[], admin_password=None,
allocations={}, network_info=[], block_device_info='fake_bdm')
self.mock_get_allocs.assert_called_once_with(self.context,
instance.uuid)
mock_get_power_state.assert_called_once_with(self.context, instance)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.compute.resource_tracker.ResourceTracker,
'instance_claim')
@mock.patch.object(neutron_api.API, 'setup_instance_network_on_host')
@mock.patch.object(nova.virt.fake.SmallFakeDriver, 'spawn',
side_effect=test.TestingException('oops!'))
@mock.patch.object(nova.compute.manager.ComputeManager,
'_prep_block_device', return_value='fake_bdm')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_notify_about_instance_usage')
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(nova.compute.manager.ComputeManager,
'_terminate_volume_connections')
def test_unshelve_spawn_fails_cleanup_volume_connections(
self, mock_terminate_volume_connections, mock_image_meta,
mock_notify_instance_usage, mock_prep_block_device, mock_spawn,
mock_setup_network, mock_instance_claim,
mock_notify_instance_action, mock_get_bdms):
"""Tests error handling when a instance fails to unshelve and makes
sure that volume connections are cleaned up from the host
and that the host/node values are unset on the instance.
"""
mock_bdms = mock.Mock()
mock_get_bdms.return_value = mock_bdms
instance = self._create_fake_instance_obj()
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
instance.task_state = task_states.UNSHELVING
instance.save()
image_meta = {'properties': {'base_image_ref': uuids.image_id}}
mock_image_meta.return_value = image_meta
tracking = {'last_state': instance.task_state}
def fake_claim(context, instance, node, limits):
instance.host = self.compute.host
instance.node = node
requests = objects.InstancePCIRequests(requests=[])
return claims.Claim(context, instance, node,
self.rt, _fake_resources(),
requests, limits=limits)
mock_instance_claim.side_effect = fake_claim
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
# This is before we've failed.
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
# This is after we've failed.
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
self.assertIsNone(instance.task_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.assertRaises(test.TestingException,
self.compute.unshelve_instance,
self.context, instance, image=None,
filter_properties=filter_properties, node=node)
mock_notify_instance_action.assert_called_once_with(
self.context, instance, 'fake-mini', action='unshelve',
phase='start', bdms=mock_bdms)
mock_notify_instance_usage.assert_called_once_with(
self.context, instance, 'unshelve.start')
mock_prep_block_device.assert_called_once_with(
self.context, instance, mock_bdms)
mock_setup_network.assert_called_once_with(self.context, instance,
self.compute.host)
mock_instance_claim.assert_called_once_with(self.context, instance,
test_compute.NODENAME,
limits)
mock_spawn.assert_called_once_with(
self.context, instance, test.MatchType(objects.ImageMeta),
injected_files=[], admin_password=None,
allocations={}, network_info=[], block_device_info='fake_bdm')
mock_terminate_volume_connections.assert_called_once_with(
self.context, instance, mock_bdms)
@mock.patch.object(objects.InstanceList, 'get_by_filters')
def test_shelved_poll_none_offloaded(self, mock_get_by_filters):
# Test instances are not offloaded when shelved_offload_time is -1
self.flags(shelved_offload_time=-1)
self.compute._poll_shelved_instances(self.context)
self.assertEqual(0, mock_get_by_filters.call_count)
@mock.patch('oslo_utils.timeutils.is_older_than')
def test_shelved_poll_none_exist(self, mock_older):
self.flags(shelved_offload_time=1)
mock_older.return_value = False
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
self.compute._poll_shelved_instances(self.context)
self.assertFalse(soi.called)
@mock.patch('oslo_utils.timeutils.is_older_than')
def test_shelved_poll_not_timedout(self, mock_older):
mock_older.return_value = False
self.flags(shelved_offload_time=1)
shelved_time = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time))
time_fixture.advance_time_seconds(CONF.shelved_offload_time - 1)
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED
instance.task_state = None
instance.host = self.compute.host
sys_meta = instance.system_metadata
sys_meta['shelved_at'] = shelved_time.isoformat()
instance.save()
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
self.compute._poll_shelved_instances(self.context)
self.assertFalse(soi.called)
self.assertTrue(mock_older.called)
def test_shelved_poll_timedout(self):
self.flags(shelved_offload_time=1)
shelved_time = timeutils.utcnow()
time_fixture = self.useFixture(utils_fixture.TimeFixture(shelved_time))
time_fixture.advance_time_seconds(CONF.shelved_offload_time + 1)
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED
instance.task_state = None
instance.host = self.compute.host
sys_meta = instance.system_metadata
sys_meta['shelved_at'] = shelved_time.isoformat()
instance.save()
data = []
def fake_soi(context, instance, **kwargs):
data.append(instance.uuid)
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
soi.side_effect = fake_soi
self.compute._poll_shelved_instances(self.context)
self.assertTrue(soi.called)
self.assertEqual(instance.uuid, data[0])
@mock.patch('oslo_utils.timeutils.is_older_than')
@mock.patch('oslo_utils.timeutils.parse_strtime')
def test_shelved_poll_filters_task_state(self, mock_parse, mock_older):
self.flags(shelved_offload_time=1)
mock_older.return_value = True
instance1 = self._create_fake_instance_obj()
instance1.task_state = task_states.SPAWNING
instance1.vm_state = vm_states.SHELVED
instance1.host = self.compute.host
instance1.system_metadata = {'shelved_at': ''}
instance1.save()
instance2 = self._create_fake_instance_obj()
instance2.task_state = None
instance2.vm_state = vm_states.SHELVED
instance2.host = self.compute.host
instance2.system_metadata = {'shelved_at': ''}
instance2.save()
data = []
def fake_soi(context, instance, **kwargs):
data.append(instance.uuid)
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
soi.side_effect = fake_soi
self.compute._poll_shelved_instances(self.context)
self.assertTrue(soi.called)
self.assertEqual([instance2.uuid], data)
@mock.patch('oslo_utils.timeutils.is_older_than')
@mock.patch('oslo_utils.timeutils.parse_strtime')
def test_shelved_poll_checks_task_state_on_save(self, mock_parse,
mock_older):
self.flags(shelved_offload_time=1)
mock_older.return_value = True
instance = self._create_fake_instance_obj()
instance.task_state = None
instance.vm_state = vm_states.SHELVED
instance.host = self.compute.host
instance.system_metadata = {'shelved_at': ''}
instance.save()
def fake_parse_hook(timestring):
instance.task_state = task_states.SPAWNING
instance.save()
mock_parse.side_effect = fake_parse_hook
with mock.patch.object(self.compute, 'shelve_offload_instance') as soi:
self.compute._poll_shelved_instances(self.context)
self.assertFalse(soi.called)
class ShelveComputeAPITestCase(test_compute.BaseTestCase):
def _get_vm_states(self, exclude_states=None):
vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
if not exclude_states:
exclude_states = set()
return vm_state - exclude_states
def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False,
clean_shutdown=True):
# Ensure instance can be shelved.
params = dict(task_state=None, vm_state=vm_state, display_name='vm01')
fake_instance = self._create_fake_instance_obj(params=params)
instance = fake_instance
self.assertIsNone(instance['task_state'])
with test.nested(
mock.patch.object(compute_utils, 'is_volume_backed_instance',
return_value=boot_from_volume),
mock.patch.object(self.compute_api, '_create_image',
return_value=dict(id='fake-image-id')),
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_instance'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_offload_instance')
) as (
volume_backed_inst, create_image, instance_save,
record_action_start, rpcapi_shelve_instance,
rpcapi_shelve_offload_instance
):
self.compute_api.shelve(self.context, instance,
clean_shutdown=clean_shutdown)
self.assertEqual(instance.task_state, task_states.SHELVING)
# assert our mock calls
volume_backed_inst.assert_called_once_with(
self.context, instance)
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.SHELVE)
if boot_from_volume:
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=instance,
clean_shutdown=clean_shutdown)
else:
rpcapi_shelve_instance.assert_called_once_with(
self.context, instance=instance, image_id='fake-image-id',
clean_shutdown=clean_shutdown)
db.instance_destroy(self.context, instance['uuid'])
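    # Descriptive note on the branch exercised above: volume-backed instances
    # skip the snapshot and go straight to the shelve_offload_instance RPC,
    # while image-backed instances get a snapshot image id from _create_image
    # and are sent through the shelve_instance RPC.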
def test_shelve(self):
self._test_shelve()
def test_shelves_stopped(self):
self._test_shelve(vm_state=vm_states.STOPPED)
def test_shelves_paused(self):
self._test_shelve(vm_state=vm_states.PAUSED)
def test_shelves_suspended(self):
self._test_shelve(vm_state=vm_states.SUSPENDED)
def test_shelves_boot_from_volume(self):
self._test_shelve(boot_from_volume=True)
def test_shelve_forced_shutdown(self):
self._test_shelve(clean_shutdown=False)
def test_shelve_boot_from_volume_forced_shutdown(self):
self._test_shelve(boot_from_volume=True,
clean_shutdown=False)
def _test_shelve_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
fake_instance = self._create_fake_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve,
self.context, fake_instance)
def test_shelve_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED]))
for state in invalid_vm_states:
self._test_shelve_invalid_state(state)
def _test_shelve_offload(self, clean_shutdown=True):
params = dict(task_state=None, vm_state=vm_states.SHELVED)
fake_instance = self._create_fake_instance_obj(params=params)
with test.nested(
mock.patch.object(fake_instance, 'save'),
mock.patch.object(self.compute_api.compute_rpcapi,
'shelve_offload_instance'),
mock.patch('nova.compute.api.API._record_action_start')
) as (
instance_save, rpcapi_shelve_offload_instance, record
):
self.compute_api.shelve_offload(self.context, fake_instance,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.SHELVING_OFFLOADING,
fake_instance.task_state)
instance_save.assert_called_once_with(expected_task_state=[None])
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=fake_instance,
clean_shutdown=clean_shutdown)
record.assert_called_once_with(self.context, fake_instance,
instance_actions.SHELVE_OFFLOAD)
def test_shelve_offload(self):
self._test_shelve_offload()
def test_shelve_offload_forced_shutdown(self):
self._test_shelve_offload(clean_shutdown=False)
def _test_shelve_offload_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
fake_instance = self._create_fake_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve_offload,
self.context, fake_instance)
def test_shelve_offload_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.SHELVED]))
for state in invalid_vm_states:
self._test_shelve_offload_invalid_state(state)
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
def test_unshelve(self, get_by_instance_uuid):
# Ensure instance can be unshelved.
instance = self._create_fake_instance_obj()
self.assertIsNone(instance['task_state'])
self.compute_api.shelve(self.context, instance)
instance.task_state = None
instance.vm_state = vm_states.SHELVED
instance.save()
fake_spec = objects.RequestSpec()
get_by_instance_uuid.return_value = fake_spec
with mock.patch.object(self.compute_api.compute_task_api,
'unshelve_instance') as unshelve:
self.compute_api.unshelve(self.context, instance)
get_by_instance_uuid.assert_called_once_with(self.context,
instance.uuid)
unshelve.assert_called_once_with(self.context, instance, fake_spec)
self.assertEqual(instance.task_state, task_states.UNSHELVING)
db.instance_destroy(self.context, instance['uuid'])
|
|
import collections.abc
import math
import cairocffi
import xcffib.xproto
from . import pangocffi
from . import utils
class TextLayout(object):
def __init__(self, drawer, text, colour, font_family, font_size,
font_shadow, wrap=True, markup=False):
self.drawer, self.colour = drawer, colour
layout = drawer.ctx.create_layout()
layout.set_alignment(pangocffi.ALIGN_CENTER)
if not wrap: # pango wraps by default
layout.set_ellipsize(pangocffi.ELLIPSIZE_END)
desc = pangocffi.FontDescription()
desc.set_family(font_family)
desc.set_absolute_size(pangocffi.units_from_double(font_size))
layout.set_font_description(desc)
self.font_shadow = font_shadow
self.layout = layout
self.markup = markup
self.text = text
self._width = None
@property
def text(self):
return self.layout.get_text()
@text.setter
def text(self, value):
if self.markup:
# pangocffi doesn't like None here, so we use "".
if value is None:
value = ''
attrlist, value, accel_char = pangocffi.parse_markup(value)
self.layout.set_attributes(attrlist)
return self.layout.set_text(utils.scrub_to_utf8(value))
@property
def width(self):
if self._width is not None:
return self._width
else:
return self.layout.get_pixel_size()[0]
@width.setter
def width(self, value):
self._width = value
self.layout.set_width(pangocffi.units_from_double(value))
@width.deleter
def width(self):
self._width = None
self.layout.set_width(-1)
@property
def height(self):
return self.layout.get_pixel_size()[1]
def fontdescription(self):
return self.layout.get_font_description()
@property
def font_family(self):
d = self.fontdescription()
return d.get_family()
@font_family.setter
def font_family(self, font):
d = self.fontdescription()
d.set_family(font)
self.layout.set_font_description(d)
@property
def font_size(self):
d = self.fontdescription()
return d.get_size()
@font_size.setter
def font_size(self, size):
d = self.fontdescription()
d.set_size(size)
d.set_absolute_size(pangocffi.units_from_double(size))
self.layout.set_font_description(d)
def draw(self, x, y):
if self.font_shadow is not None:
self.drawer.set_source_rgb(self.font_shadow)
self.drawer.ctx.move_to(x + 1, y + 1)
self.drawer.ctx.show_layout(self.layout)
self.drawer.set_source_rgb(self.colour)
self.drawer.ctx.move_to(x, y)
self.drawer.ctx.show_layout(self.layout)
def framed(self, border_width, border_color, pad_x, pad_y):
return TextFrame(self, border_width, border_color, pad_x, pad_y)
class TextFrame:
def __init__(self, layout, border_width, border_color, pad_x, pad_y):
self.layout = layout
self.border_width = border_width
self.border_color = border_color
self.drawer = self.layout.drawer
        if isinstance(pad_x, collections.abc.Iterable):
self.pad_left = pad_x[0]
self.pad_right = pad_x[1]
else:
self.pad_left = self.pad_right = pad_x
        if isinstance(pad_y, collections.abc.Iterable):
self.pad_top = pad_y[0]
self.pad_bottom = pad_y[1]
else:
self.pad_top = self.pad_bottom = pad_y
def draw(self, x, y, rounded=True, fill=False):
self.drawer.set_source_rgb(self.border_color)
opts = [
x, y,
self.layout.width + self.pad_left + self.pad_right,
self.layout.height + self.pad_top + self.pad_bottom,
self.border_width
]
if fill:
if rounded:
self.drawer.rounded_fillrect(*opts)
else:
self.drawer.fillrect(*opts)
else:
if rounded:
self.drawer.rounded_rectangle(*opts)
else:
self.drawer.rectangle(*opts)
self.drawer.ctx.stroke()
self.layout.draw(
x + self.pad_left,
y + self.pad_top
)
def draw_fill(self, x, y, rounded=True):
self.draw(x, y, rounded, fill=True)
@property
def height(self):
return self.layout.height + self.pad_top + self.pad_bottom
@property
def width(self):
return self.layout.width + self.pad_left + self.pad_right
class Drawer:
"""
A helper class for drawing and text layout.
We have a drawer object for each widget in the bar. The underlying
surface is a pixmap with the same size as the bar itself. We draw to
the pixmap starting at offset 0, 0, and when the time comes to display
to the window, we copy the appropriate portion of the pixmap onto the
window.
"""
def __init__(self, qtile, wid, width, height):
self.qtile = qtile
self.wid, self.width, self.height = wid, width, height
self.pixmap = self.qtile.conn.conn.generate_id()
self.gc = self.qtile.conn.conn.generate_id()
self.qtile.conn.conn.core.CreatePixmap(
self.qtile.conn.default_screen.root_depth,
self.pixmap,
self.wid,
self.width,
self.height
)
self.qtile.conn.conn.core.CreateGC(
self.gc,
self.wid,
xcffib.xproto.GC.Foreground | xcffib.xproto.GC.Background,
[
self.qtile.conn.default_screen.black_pixel,
self.qtile.conn.default_screen.white_pixel
]
)
self.surface = cairocffi.XCBSurface(
qtile.conn.conn,
self.pixmap,
self.find_root_visual(),
self.width,
self.height,
)
self.ctx = self.new_ctx()
self.clear((0, 0, 1))
def __del__(self):
self.qtile.conn.conn.core.FreeGC(self.gc)
self.qtile.conn.conn.core.FreePixmap(self.pixmap)
def _rounded_rect(self, x, y, width, height, linewidth):
aspect = 1.0
corner_radius = height / 10.0
radius = corner_radius / aspect
degrees = math.pi / 180.0
self.ctx.new_sub_path()
delta = radius + linewidth / 2
self.ctx.arc(x + width - delta, y + delta, radius,
-90 * degrees, 0 * degrees)
self.ctx.arc(x + width - delta, y + height - delta,
radius, 0 * degrees, 90 * degrees)
self.ctx.arc(x + delta, y + height - delta, radius,
90 * degrees, 180 * degrees)
self.ctx.arc(x + delta, y + delta, radius,
180 * degrees, 270 * degrees)
self.ctx.close_path()
def rounded_rectangle(self, x, y, width, height, linewidth):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def rounded_fillrect(self, x, y, width, height, linewidth):
self._rounded_rect(x, y, width, height, linewidth)
self.ctx.fill()
def rectangle(self, x, y, width, height, linewidth=2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.stroke()
def fillrect(self, x, y, width, height, linewidth=2):
self.ctx.set_line_width(linewidth)
self.ctx.rectangle(x, y, width, height)
self.ctx.fill()
self.ctx.stroke()
def draw(self, offset, width):
"""
offset: the X offset to start drawing at.
width: the portion of the canvas to draw at the starting point.
"""
self.qtile.conn.conn.core.CopyArea(
self.pixmap,
self.wid,
self.gc,
0, 0, # srcx, srcy
offset, 0, # dstx, dsty
width, self.height
)
def find_root_visual(self):
for i in self.qtile.conn.default_screen.allowed_depths:
for v in i.visuals:
if v.visual_id == self.qtile.conn.default_screen.root_visual:
return v
def new_ctx(self):
return pangocffi.CairoContext(cairocffi.Context(self.surface))
def set_source_rgb(self, colour):
if type(colour) == list:
linear = cairocffi.LinearGradient(0.0, 0.0, 0.0, self.height)
step_size = 1.0 / (len(colour) - 1)
step = 0.0
for c in colour:
                rgb_col = list(utils.rgb(c))
                if len(rgb_col) < 4:
                    # utils.rgb may only give (r, g, b); default alpha to opaque
                    rgb_col.append(1)
linear.add_color_stop_rgba(step, *rgb_col)
step += step_size
self.ctx.set_source(linear)
else:
self.ctx.set_source_rgba(*utils.rgb(colour))
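    # Illustrative only (the accepted colour formats come from utils.rgb):
    # a list selects a vertical linear gradient, anything else is treated as a
    # single solid colour.
    #   drawer.set_source_rgb("ff0000")               # solid
    #   drawer.set_source_rgb(["000000", "215578"])   # two-stop gradient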
def clear(self, colour):
self.set_source_rgb(colour)
self.ctx.rectangle(0, 0, self.width, self.height)
self.ctx.fill()
self.ctx.stroke()
def textlayout(self, text, colour, font_family, font_size, font_shadow,
markup=False, **kw):
"""
Get a text layout.
NB: the return value of this function should be saved, and reused
to avoid a huge memory leak in the pygtk bindings. Once this has
been repaired, we can make the semantics easier.
https://bugzilla.gnome.org/show_bug.cgi?id=625287
"""
return TextLayout(self, text, colour, font_family, font_size,
font_shadow, markup=markup, **kw)
_sizelayout = None
def max_layout_size(self, texts, font_family, font_size):
# FIXME: This is incredibly clumsy, to avoid a memory leak in pygtk.
# See comment on textlayout() for details.
if not self._sizelayout:
self._sizelayout = self.textlayout(
"", "ffffff", font_family, font_size, None)
widths, heights = [], []
self._sizelayout.font_family = font_family
self._sizelayout.font_size = font_size
for i in texts:
self._sizelayout.text = i
widths.append(self._sizelayout.width)
heights.append(self._sizelayout.height)
return max(widths), max(heights)
    # Old text layout functions, to be deprecated.
def set_font(self, fontface, size, antialias=True):
self.ctx.select_font_face(fontface)
self.ctx.set_font_size(size)
fo = self.ctx.get_font_options()
fo.set_antialias(cairocffi.ANTIALIAS_SUBPIXEL)
def text_extents(self, text):
return self.ctx.text_extents(utils.scrub_to_utf8(text))
def font_extents(self):
return self.ctx.font_extents()
def fit_fontsize(self, heightlimit):
"""
Try to find a maximum font size that fits any strings within the
height.
"""
self.ctx.set_font_size(heightlimit)
asc, desc, height, _, _ = self.font_extents()
self.ctx.set_font_size(
int(heightlimit * (heightlimit / float(height))))
return self.font_extents()
def fit_text(self, strings, heightlimit):
"""
Try to find a maximum font size that fits all strings within the
height.
"""
self.ctx.set_font_size(heightlimit)
_, _, _, maxheight, _, _ = self.ctx.text_extents("".join(strings))
if not maxheight:
return 0, 0
self.ctx.set_font_size(
int(heightlimit * (heightlimit / float(maxheight))))
maxwidth, maxheight = 0, 0
for i in strings:
_, _, x, y, _, _ = self.ctx.text_extents(i)
maxwidth = max(maxwidth, x)
maxheight = max(maxheight, y)
return maxwidth, maxheight
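    # Worked example of the scaling heuristic above: with heightlimit=20 the
    # strings are first measured at font size 20; if the combined extents come
    # back with a height of 25, the size is reset to int(20 * (20 / 25.0)) == 16
    # before the final per-string measurements.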
def draw_vbar(self, color, x, y1, y2, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x, y1)
self.ctx.line_to(x, y2)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
def draw_hbar(self, color, x1, x2, y, linewidth=1):
self.set_source_rgb(color)
self.ctx.move_to(x1, y)
self.ctx.line_to(x2, y)
self.ctx.set_line_width(linewidth)
self.ctx.stroke()
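# A minimal usage sketch of the classes above, assuming the caller already has
# a qtile object and an X window id (both placeholders here):
#
#   drawer = Drawer(qtile, wid, width=200, height=24)
#   drawer.clear("215578")
#   layout = drawer.textlayout("hello", "ffffff", "sans", 12, font_shadow=None)
#   layout.draw(2, 2)
#   drawer.draw(offset=0, width=200)  # copy the pixmap onto the window
#
# As the textlayout() docstring notes, the TextLayout should be created once
# and reused rather than rebuilt on every draw.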
|
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute using the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
        # First check if any mapping exists for the attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
    # No mapping exists; try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
        ctx.logger.info('Attribute {0} not found, going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
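# Note: the returned map is keyed by '<node_instance_id>_', so callers that
# append a suffix (as done below with 'DB_IP') end up with names of the form
# '<node_instance_id>_DB_IP'.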
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
        ctx.logger.info('Property {0} not found, going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the attribute using the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def parse_output(output):
    # by convention, the last plain output line (one not matching EXPECTED_OUTPUT_*) is kept as the result of the operation
last_output = None
outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
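# Worked example for parse_output, assuming the wrapped script printed:
#   starting configuration
#   EXPECTED_OUTPUT_DB_PORT=3306
#   configuration done
# the returned value would be:
#   {'last_output': 'configuration done', 'outputs': {'DB_PORT': '3306'}}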
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
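# A minimal invocation sketch (script path and output names are hypothetical):
#   parsed = execute(ctx.download_resource('scripts/configure.sh'),
#                    {'env': {'DB_PORT': '3306'}},
#                    'DB_URL;DB_STATUS')
#   parsed['outputs'].get('DB_URL')
# outputNames is a ';'-separated list; when it is not None the script is run
# through the downloaded scriptWrapper, which is expected to echo
# EXPECTED_OUTPUT_<name>=<value> lines that parse_output then picks up.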
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
env_map = {}
env_map['TARGET_NODE'] = ctx.target.node.id
env_map['TARGET_INSTANCE'] = ctx.target.instance.id
env_map['TARGET_INSTANCES'] = get_instance_list(ctx.target.node.id)
env_map['SOURCE_NODE'] = ctx.source.node.id
env_map['SOURCE_INSTANCE'] = ctx.source.instance.id
env_map['SOURCE_INSTANCES'] = get_instance_list(ctx.source.node.id)
env_map['DB_IP'] = get_attribute(ctx.target, 'ip_address')
env_map['DB_PORT'] = r'3306'
env_map['DB_NAME'] = r'wordpress'
env_map['DB_USER'] = r'pass'
env_map['DB_PASSWORD'] = r'pass'
other_instances_map = _all_instances_get_attribute(ctx.target, 'ip_address')
if other_instances_map is not None:
for other_instances_key in other_instances_map:
env_map[other_instances_key + 'DB_IP'] = other_instances_map[other_instances_key]
new_script_process = {'env': env_map}
node_artifacts = {
"configs": [
{
"relative_path": "mysqld_charset.cnf",
"absolute_path": "artifacts/mysql-type/configs/mysqld_charset.cnf"
}
]
}
relationship_artifacts = {
}
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
new_script_process['env'].update(download_artifacts(artifacts, download_dir))
ctx.logger.info('Operation is executed with inputs {0}'.format(inputs))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
new_script_process['env'].update(inputs['process']['env'])
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('artifacts/wordpress-type/scripts/config_wordpress_for_mysql.sh'), new_script_process, operationOutputNames)
for k,v in parsed_output['outputs'].items():
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.source.instance.runtime_properties['_a4c_OO:tosca.interfaces.relationship.Configure:pre_configure_source:{0}'.format(k)] = v
ctx.source.instance.runtime_properties['wordpress_url'] = r'http://' + get_attribute(ctx.source, 'public_ip_address') + r':' + r'80' + r'/'
ctx.source.instance.update()
ctx.target.instance.update()
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Viele\personalWork\2019\onionSkinRenderer\ui_window.ui'
#
# Created: Sun Sep 6 19:00:41 2020
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_onionSkinRenderer(object):
def setupUi(self, onionSkinRenderer):
onionSkinRenderer.setObjectName("onionSkinRenderer")
onionSkinRenderer.resize(488, 684)
self.onionSkinRenderer_mainLayout = QtWidgets.QWidget(onionSkinRenderer)
self.onionSkinRenderer_mainLayout.setObjectName("onionSkinRenderer_mainLayout")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.onionSkinRenderer_mainLayout)
self.verticalLayout_3.setContentsMargins(2, 0, 2, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.onionSkins_grp = QtWidgets.QGroupBox(self.onionSkinRenderer_mainLayout)
self.onionSkins_grp.setTitle("")
self.onionSkins_grp.setObjectName("onionSkins_grp")
self.verticalLayout = QtWidgets.QVBoxLayout(self.onionSkins_grp)
self.verticalLayout.setSpacing(8)
self.verticalLayout.setContentsMargins(4, 9, 4, 4)
self.verticalLayout.setObjectName("verticalLayout")
self.main_scrollArea = QtWidgets.QScrollArea(self.onionSkins_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.main_scrollArea.sizePolicy().hasHeightForWidth())
self.main_scrollArea.setSizePolicy(sizePolicy)
self.main_scrollArea.setMinimumSize(QtCore.QSize(0, 150))
self.main_scrollArea.setFrameShape(QtWidgets.QFrame.NoFrame)
self.main_scrollArea.setFrameShadow(QtWidgets.QFrame.Plain)
self.main_scrollArea.setLineWidth(0)
self.main_scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.main_scrollArea.setWidgetResizable(True)
self.main_scrollArea.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.main_scrollArea.setObjectName("main_scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 474, 592))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollAreaWidgetContents.sizePolicy().hasHeightForWidth())
self.scrollAreaWidgetContents.setSizePolicy(sizePolicy)
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_6.setSpacing(8)
self.verticalLayout_6.setContentsMargins(2, -1, 2, 2)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.targetObjects_grp = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.targetObjects_grp.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.targetObjects_grp.setFlat(True)
self.targetObjects_grp.setCheckable(True)
self.targetObjects_grp.setObjectName("targetObjects_grp")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.targetObjects_grp)
self.horizontalLayout.setContentsMargins(-1, 9, -1, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.targetObjects_list = QtWidgets.QListWidget(self.targetObjects_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.targetObjects_list.sizePolicy().hasHeightForWidth())
self.targetObjects_list.setSizePolicy(sizePolicy)
self.targetObjects_list.setBaseSize(QtCore.QSize(2, 1))
self.targetObjects_list.setFrameShadow(QtWidgets.QFrame.Plain)
self.targetObjects_list.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.targetObjects_list.setObjectName("targetObjects_list")
self.horizontalLayout.addWidget(self.targetObjects_list)
self.targetObjects_btn_layout = QtWidgets.QVBoxLayout()
self.targetObjects_btn_layout.setObjectName("targetObjects_btn_layout")
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.targetObjects_btn_layout.addItem(spacerItem)
self.targetObjects_add_btn = QtWidgets.QPushButton(self.targetObjects_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.targetObjects_add_btn.sizePolicy().hasHeightForWidth())
self.targetObjects_add_btn.setSizePolicy(sizePolicy)
self.targetObjects_add_btn.setMinimumSize(QtCore.QSize(0, 0))
self.targetObjects_add_btn.setObjectName("targetObjects_add_btn")
self.targetObjects_btn_layout.addWidget(self.targetObjects_add_btn)
self.targetObjects_remove_btn = QtWidgets.QPushButton(self.targetObjects_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.targetObjects_remove_btn.sizePolicy().hasHeightForWidth())
self.targetObjects_remove_btn.setSizePolicy(sizePolicy)
self.targetObjects_remove_btn.setMinimumSize(QtCore.QSize(0, 0))
self.targetObjects_remove_btn.setObjectName("targetObjects_remove_btn")
self.targetObjects_btn_layout.addWidget(self.targetObjects_remove_btn)
self.targetObjects_clear_btn = QtWidgets.QPushButton(self.targetObjects_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.targetObjects_clear_btn.sizePolicy().hasHeightForWidth())
self.targetObjects_clear_btn.setSizePolicy(sizePolicy)
self.targetObjects_clear_btn.setMinimumSize(QtCore.QSize(0, 0))
self.targetObjects_clear_btn.setObjectName("targetObjects_clear_btn")
self.targetObjects_btn_layout.addWidget(self.targetObjects_clear_btn)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.targetObjects_btn_layout.addItem(spacerItem1)
self.horizontalLayout.addLayout(self.targetObjects_btn_layout)
self.verticalLayout_6.addWidget(self.targetObjects_grp)
self.onionSkinFrames_grp = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.onionSkinFrames_grp.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.onionSkinFrames_grp.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.onionSkinFrames_grp.setFlat(True)
self.onionSkinFrames_grp.setCheckable(True)
self.onionSkinFrames_grp.setChecked(True)
self.onionSkinFrames_grp.setObjectName("onionSkinFrames_grp")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.onionSkinFrames_grp)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.relative_step_layout = QtWidgets.QHBoxLayout()
self.relative_step_layout.setContentsMargins(5, -1, 5, -1)
self.relative_step_layout.setObjectName("relative_step_layout")
self.relative_step_label = QtWidgets.QLabel(self.onionSkinFrames_grp)
self.relative_step_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.relative_step_label.setObjectName("relative_step_label")
self.relative_step_layout.addWidget(self.relative_step_label)
self.relative_step_spinBox = QtWidgets.QSpinBox(self.onionSkinFrames_grp)
self.relative_step_spinBox.setMinimum(1)
self.relative_step_spinBox.setObjectName("relative_step_spinBox")
self.relative_step_layout.addWidget(self.relative_step_spinBox)
self.gridLayout.addLayout(self.relative_step_layout, 3, 0, 1, 1)
self.relative_frame = QtWidgets.QFrame(self.onionSkinFrames_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.relative_frame.sizePolicy().hasHeightForWidth())
self.relative_frame.setSizePolicy(sizePolicy)
self.relative_frame.setMinimumSize(QtCore.QSize(200, 0))
self.relative_frame.setMaximumSize(QtCore.QSize(100000, 16777215))
self.relative_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.relative_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.relative_frame.setObjectName("relative_frame")
self.relative_frame_layout = QtWidgets.QVBoxLayout(self.relative_frame)
self.relative_frame_layout.setSpacing(3)
self.relative_frame_layout.setContentsMargins(0, 4, 4, 4)
self.relative_frame_layout.setObjectName("relative_frame_layout")
self.gridLayout.addWidget(self.relative_frame, 1, 0, 1, 1)
self.relative_label = QtWidgets.QLabel(self.onionSkinFrames_grp)
self.relative_label.setObjectName("relative_label")
self.gridLayout.addWidget(self.relative_label, 0, 0, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setContentsMargins(5, -1, 5, -1)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_4 = QtWidgets.QLabel(self.onionSkinFrames_grp)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.horizontalLayout_5.addWidget(self.label_4)
self.relative_keyframes_chkbx = QtWidgets.QCheckBox(self.onionSkinFrames_grp)
self.relative_keyframes_chkbx.setText("")
self.relative_keyframes_chkbx.setChecked(True)
self.relative_keyframes_chkbx.setObjectName("relative_keyframes_chkbx")
self.horizontalLayout_5.addWidget(self.relative_keyframes_chkbx)
self.gridLayout.addLayout(self.horizontalLayout_5, 2, 0, 1, 1)
self.absolute_label = QtWidgets.QLabel(self.onionSkinFrames_grp)
self.absolute_label.setObjectName("absolute_label")
self.gridLayout.addWidget(self.absolute_label, 0, 1, 1, 1)
self.absolute_frame = QtWidgets.QFrame(self.onionSkinFrames_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.absolute_frame.sizePolicy().hasHeightForWidth())
self.absolute_frame.setSizePolicy(sizePolicy)
self.absolute_frame.setMinimumSize(QtCore.QSize(200, 0))
self.absolute_frame.setMaximumSize(QtCore.QSize(10000, 16777215))
self.absolute_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.absolute_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.absolute_frame.setObjectName("absolute_frame")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.absolute_frame)
self.verticalLayout_2.setSpacing(3)
self.verticalLayout_2.setContentsMargins(4, 4, 4, 4)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.absolute_list = QtWidgets.QListWidget(self.absolute_frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.absolute_list.sizePolicy().hasHeightForWidth())
self.absolute_list.setSizePolicy(sizePolicy)
self.absolute_list.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.absolute_list.setObjectName("absolute_list")
self.verticalLayout_2.addWidget(self.absolute_list)
self.gridLayout.addWidget(self.absolute_frame, 1, 1, 1, 1)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.absolute_add_btn = QtWidgets.QPushButton(self.onionSkinFrames_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.absolute_add_btn.sizePolicy().hasHeightForWidth())
self.absolute_add_btn.setSizePolicy(sizePolicy)
self.absolute_add_btn.setObjectName("absolute_add_btn")
self.horizontalLayout_3.addWidget(self.absolute_add_btn)
self.absolute_add_spinBox = QtWidgets.QSpinBox(self.onionSkinFrames_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.absolute_add_spinBox.sizePolicy().hasHeightForWidth())
self.absolute_add_spinBox.setSizePolicy(sizePolicy)
self.absolute_add_spinBox.setMinimum(-100000)
self.absolute_add_spinBox.setMaximum(100000)
self.absolute_add_spinBox.setObjectName("absolute_add_spinBox")
self.horizontalLayout_3.addWidget(self.absolute_add_spinBox)
self.gridLayout.addLayout(self.horizontalLayout_3, 2, 1, 1, 1)
self.absolute_add_layout = QtWidgets.QHBoxLayout()
self.absolute_add_layout.setObjectName("absolute_add_layout")
self.absolute_addCrnt_btn = QtWidgets.QPushButton(self.onionSkinFrames_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.absolute_addCrnt_btn.sizePolicy().hasHeightForWidth())
self.absolute_addCrnt_btn.setSizePolicy(sizePolicy)
self.absolute_addCrnt_btn.setMinimumSize(QtCore.QSize(0, 0))
self.absolute_addCrnt_btn.setObjectName("absolute_addCrnt_btn")
self.absolute_add_layout.addWidget(self.absolute_addCrnt_btn)
self.absolute_clear_btn = QtWidgets.QPushButton(self.onionSkinFrames_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.absolute_clear_btn.sizePolicy().hasHeightForWidth())
self.absolute_clear_btn.setSizePolicy(sizePolicy)
self.absolute_clear_btn.setObjectName("absolute_clear_btn")
self.absolute_add_layout.addWidget(self.absolute_clear_btn)
self.gridLayout.addLayout(self.absolute_add_layout, 3, 1, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout)
self.verticalLayout_6.addWidget(self.onionSkinFrames_grp)
self.onionSkinSettings_grp = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.onionSkinSettings_grp.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.onionSkinSettings_grp.setFlat(True)
self.onionSkinSettings_grp.setCheckable(True)
self.onionSkinSettings_grp.setObjectName("onionSkinSettings_grp")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.onionSkinSettings_grp)
self.verticalLayout_5.setContentsMargins(9, -1, -1, -1)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.label = QtWidgets.QLabel(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(90, 20))
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 4, 0, 1, 1)
self.relative_tint_strength_slider = QtWidgets.QSlider(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.relative_tint_strength_slider.sizePolicy().hasHeightForWidth())
self.relative_tint_strength_slider.setSizePolicy(sizePolicy)
self.relative_tint_strength_slider.setMinimumSize(QtCore.QSize(200, 0))
self.relative_tint_strength_slider.setStyleSheet("QSlider{\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: 2px;\n"
"background: rgb(150, 150, 150);\n"
"height: 15px;\n"
"}\n"
"QSlider::handle{\n"
"height: 4px;\n"
"background: rgb(50, 50, 50);\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: -2px -2px;\n"
"}\n"
"QSlider::groove{\n"
"background: grey;\n"
"}\n"
"QSlider::sub-page{\n"
"background: rgb(75, 75, 75);\n"
"}\n"
"QSlider::add-page{\n"
"background: rgb(150, 150, 150);\n"
"}")
self.relative_tint_strength_slider.setMaximum(100)
self.relative_tint_strength_slider.setProperty("value", 100)
self.relative_tint_strength_slider.setOrientation(QtCore.Qt.Horizontal)
self.relative_tint_strength_slider.setObjectName("relative_tint_strength_slider")
self.gridLayout_2.addWidget(self.relative_tint_strength_slider, 3, 1, 1, 1)
self.globalOpacity_slider = QtWidgets.QSlider(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.globalOpacity_slider.sizePolicy().hasHeightForWidth())
self.globalOpacity_slider.setSizePolicy(sizePolicy)
self.globalOpacity_slider.setMinimumSize(QtCore.QSize(200, 0))
self.globalOpacity_slider.setStyleSheet("QSlider{\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: 2px;\n"
"background: rgb(150, 150, 150);\n"
"height: 15px;\n"
"}\n"
"QSlider::handle{\n"
"height: 4px;\n"
"background: rgb(50, 50, 50);\n"
"border: 1px solid rgb(20, 20, 20);\n"
"margin: -2px -2px;\n"
"}\n"
"QSlider::groove{\n"
"background: grey;\n"
"}\n"
"QSlider::sub-page{\n"
"background: rgb(75, 75, 75);\n"
"}\n"
"QSlider::add-page{\n"
"background: rgb(150, 150, 150);\n"
"}")
self.globalOpacity_slider.setMaximum(100)
self.globalOpacity_slider.setProperty("value", 100)
self.globalOpacity_slider.setOrientation(QtCore.Qt.Horizontal)
self.globalOpacity_slider.setObjectName("globalOpacity_slider")
self.gridLayout_2.addWidget(self.globalOpacity_slider, 2, 1, 1, 1)
self.globalOpacity_label = QtWidgets.QLabel(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.globalOpacity_label.sizePolicy().hasHeightForWidth())
self.globalOpacity_label.setSizePolicy(sizePolicy)
self.globalOpacity_label.setMinimumSize(QtCore.QSize(90, 20))
self.globalOpacity_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.globalOpacity_label.setObjectName("globalOpacity_label")
self.gridLayout_2.addWidget(self.globalOpacity_label, 2, 0, 1, 1)
self.onionType_cBox = QtWidgets.QComboBox(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.onionType_cBox.sizePolicy().hasHeightForWidth())
self.onionType_cBox.setSizePolicy(sizePolicy)
self.onionType_cBox.setMinimumSize(QtCore.QSize(80, 0))
self.onionType_cBox.setObjectName("onionType_cBox")
self.onionType_cBox.addItem("")
self.onionType_cBox.addItem("")
self.onionType_cBox.addItem("")
self.gridLayout_2.addWidget(self.onionType_cBox, 0, 1, 1, 1)
self.relative_tint_strength_label = QtWidgets.QLabel(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.relative_tint_strength_label.sizePolicy().hasHeightForWidth())
self.relative_tint_strength_label.setSizePolicy(sizePolicy)
self.relative_tint_strength_label.setMinimumSize(QtCore.QSize(90, 20))
self.relative_tint_strength_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.relative_tint_strength_label.setObjectName("relative_tint_strength_label")
self.gridLayout_2.addWidget(self.relative_tint_strength_label, 3, 0, 1, 1)
self.tint_type_cBox = QtWidgets.QComboBox(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tint_type_cBox.sizePolicy().hasHeightForWidth())
self.tint_type_cBox.setSizePolicy(sizePolicy)
self.tint_type_cBox.setMinimumSize(QtCore.QSize(80, 0))
self.tint_type_cBox.setObjectName("tint_type_cBox")
self.tint_type_cBox.addItem("")
self.tint_type_cBox.addItem("")
self.tint_type_cBox.addItem("")
self.gridLayout_2.addWidget(self.tint_type_cBox, 4, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.onionSkinSettings_grp)
self.label_5.setMinimumSize(QtCore.QSize(0, 20))
self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 1, 0, 1, 1)
self.onionType_label = QtWidgets.QLabel(self.onionSkinSettings_grp)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.onionType_label.sizePolicy().hasHeightForWidth())
self.onionType_label.setSizePolicy(sizePolicy)
self.onionType_label.setMinimumSize(QtCore.QSize(90, 20))
self.onionType_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.onionType_label.setObjectName("onionType_label")
self.gridLayout_2.addWidget(self.onionType_label, 0, 0, 1, 1)
self.drawBehind_chkBx = QtWidgets.QCheckBox(self.onionSkinSettings_grp)
self.drawBehind_chkBx.setText("")
self.drawBehind_chkBx.setChecked(True)
self.drawBehind_chkBx.setObjectName("drawBehind_chkBx")
self.gridLayout_2.addWidget(self.drawBehind_chkBx, 1, 1, 1, 1)
self.constant_col_widget = QtWidgets.QWidget(self.onionSkinSettings_grp)
self.constant_col_widget.setObjectName("constant_col_widget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.constant_col_widget)
self.horizontalLayout_2.setContentsMargins(1, 1, 1, 1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.relative_pastTint_btn = QtWidgets.QPushButton(self.constant_col_widget)
self.relative_pastTint_btn.setStyleSheet("background-color:rgb(255, 26, 75)")
self.relative_pastTint_btn.setObjectName("relative_pastTint_btn")
self.horizontalLayout_2.addWidget(self.relative_pastTint_btn)
self.relative_futureTint_btn = QtWidgets.QPushButton(self.constant_col_widget)
self.relative_futureTint_btn.setStyleSheet("background-color: rgb(20, 255, 114)")
self.relative_futureTint_btn.setObjectName("relative_futureTint_btn")
self.horizontalLayout_2.addWidget(self.relative_futureTint_btn)
self.absolute_tint_btn = QtWidgets.QPushButton(self.constant_col_widget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.absolute_tint_btn.sizePolicy().hasHeightForWidth())
self.absolute_tint_btn.setSizePolicy(sizePolicy)
self.absolute_tint_btn.setMinimumSize(QtCore.QSize(0, 0))
self.absolute_tint_btn.setStyleSheet("background:rgb(200, 200, 50)")
self.absolute_tint_btn.setObjectName("absolute_tint_btn")
self.horizontalLayout_2.addWidget(self.absolute_tint_btn)
self.gridLayout_2.addWidget(self.constant_col_widget, 5, 1, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayout_2)
self.verticalLayout_6.addWidget(self.onionSkinSettings_grp)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem2)
self.main_scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.main_scrollArea)
self.verticalLayout_3.addWidget(self.onionSkins_grp)
self.toggleRenderer_btn = QtWidgets.QPushButton(self.onionSkinRenderer_mainLayout)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.toggleRenderer_btn.sizePolicy().hasHeightForWidth())
self.toggleRenderer_btn.setSizePolicy(sizePolicy)
self.toggleRenderer_btn.setMinimumSize(QtCore.QSize(40, 30))
self.toggleRenderer_btn.setObjectName("toggleRenderer_btn")
self.verticalLayout_3.addWidget(self.toggleRenderer_btn)
onionSkinRenderer.setCentralWidget(self.onionSkinRenderer_mainLayout)
self.onionSkinRenderer_menubar = QtWidgets.QMenuBar(onionSkinRenderer)
self.onionSkinRenderer_menubar.setGeometry(QtCore.QRect(0, 0, 488, 21))
self.onionSkinRenderer_menubar.setObjectName("onionSkinRenderer_menubar")
self.menubar_settings = QtWidgets.QMenu(self.onionSkinRenderer_menubar)
self.menubar_settings.setObjectName("menubar_settings")
onionSkinRenderer.setMenuBar(self.onionSkinRenderer_menubar)
self.statusbar = QtWidgets.QStatusBar(onionSkinRenderer)
self.statusbar.setObjectName("statusbar")
onionSkinRenderer.setStatusBar(self.statusbar)
self.settings_clearBuffer = QtWidgets.QAction(onionSkinRenderer)
self.settings_clearBuffer.setCheckable(False)
self.settings_clearBuffer.setObjectName("settings_clearBuffer")
self.settings_autoClearBuffer = QtWidgets.QAction(onionSkinRenderer)
self.settings_autoClearBuffer.setCheckable(True)
self.settings_autoClearBuffer.setChecked(True)
self.settings_autoClearBuffer.setObjectName("settings_autoClearBuffer")
self.settings_preferences = QtWidgets.QAction(onionSkinRenderer)
self.settings_preferences.setObjectName("settings_preferences")
self.settings_saveSettings = QtWidgets.QAction(onionSkinRenderer)
self.settings_saveSettings.setObjectName("settings_saveSettings")
self.menubar_settings.addAction(self.settings_clearBuffer)
self.menubar_settings.addAction(self.settings_autoClearBuffer)
self.menubar_settings.addAction(self.settings_preferences)
self.menubar_settings.addAction(self.settings_saveSettings)
self.onionSkinRenderer_menubar.addAction(self.menubar_settings.menuAction())
self.retranslateUi(onionSkinRenderer)
self.onionType_cBox.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(onionSkinRenderer)
def retranslateUi(self, onionSkinRenderer):
onionSkinRenderer.setWindowTitle(QtWidgets.QApplication.translate("onionSkinRenderer", "OnionSkinRenderer", None, -1))
self.targetObjects_grp.setTitle(QtWidgets.QApplication.translate("onionSkinRenderer", "Onion Skin Objects", None, -1))
self.targetObjects_add_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Add Selected", None, -1))
self.targetObjects_remove_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Remove Selected", None, -1))
self.targetObjects_clear_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Clear", None, -1))
self.onionSkinFrames_grp.setTitle(QtWidgets.QApplication.translate("onionSkinRenderer", "Onion Skin Frames", None, -1))
self.relative_step_label.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Relative Step", None, -1))
self.relative_label.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Relative", None, -1))
self.label_4.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Keyframes", None, -1))
self.absolute_label.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Absolute", None, -1))
self.absolute_add_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Add Specific", None, -1))
self.absolute_addCrnt_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Add Current", None, -1))
self.absolute_clear_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "ClearAll", None, -1))
self.onionSkinSettings_grp.setTitle(QtWidgets.QApplication.translate("onionSkinRenderer", "Onion Skin Settings", None, -1))
self.label.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Tint Type", None, -1))
self.globalOpacity_label.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Global Opacity", None, -1))
self.onionType_cBox.setItemText(0, QtWidgets.QApplication.translate("onionSkinRenderer", "Shaded", None, -1))
self.onionType_cBox.setItemText(1, QtWidgets.QApplication.translate("onionSkinRenderer", "Shape", None, -1))
self.onionType_cBox.setItemText(2, QtWidgets.QApplication.translate("onionSkinRenderer", "Outline", None, -1))
self.relative_tint_strength_label.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Tint Strength", None, -1))
self.tint_type_cBox.setItemText(0, QtWidgets.QApplication.translate("onionSkinRenderer", "Constant", None, -1))
self.tint_type_cBox.setItemText(1, QtWidgets.QApplication.translate("onionSkinRenderer", "Relative Random", None, -1))
self.tint_type_cBox.setItemText(2, QtWidgets.QApplication.translate("onionSkinRenderer", "Static Random", None, -1))
self.label_5.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Draw Behind", None, -1))
self.onionType_label.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Onion Skin Type", None, -1))
self.relative_pastTint_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Past", None, -1))
self.relative_futureTint_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Future", None, -1))
self.absolute_tint_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Absolute", None, -1))
self.toggleRenderer_btn.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Toggle Renderer", None, -1))
self.menubar_settings.setTitle(QtWidgets.QApplication.translate("onionSkinRenderer", "Settings", None, -1))
self.settings_clearBuffer.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Clear Buffer", None, -1))
self.settings_autoClearBuffer.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Auto Clear Buffer", None, -1))
self.settings_preferences.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Preferences", None, -1))
self.settings_saveSettings.setText(QtWidgets.QApplication.translate("onionSkinRenderer", "Save Settings", None, -1))
|
|
# -*- coding: utf-8 -*-
import functools
import sys
from argparse import ArgumentParser
import tensorflow as tf
from pprint import pformat
from tensorflow.contrib.framework import arg_scope, add_arg_scope
import tfsnippet as spt
from tfsnippet.examples.utils import (MLResults,
save_images_collection,
bernoulli_as_pixel,
print_with_title,
bernoulli_flow)
class ExpConfig(spt.Config):
# model parameters
z_dim = 80
x_dim = 784
# training parameters
result_dir = None
write_summary = False
max_epoch = 3000
max_step = None
batch_size = 128
l2_reg = 0.0001
initial_lr = 0.001
lr_anneal_factor = 0.5
lr_anneal_epoch_freq = 300
lr_anneal_step_freq = None
# evaluation parameters
test_n_z = 500
test_batch_size = 128
config = ExpConfig()
@spt.global_reuse
@add_arg_scope
def q_net(x, observed=None, n_z=None):
net = spt.BayesianNet(observed=observed)
# compute the hidden features
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_x = tf.to_float(x)
h_x = spt.layers.dense(h_x, 500)
h_x = spt.layers.dense(h_x, 500)
# sample z ~ q(z|x)
z_logits = spt.layers.dense(h_x, config.z_dim, name='z_logits')
z = net.add('z', spt.Bernoulli(logits=z_logits), n_samples=n_z,
group_ndims=1)
return net
@spt.global_reuse
@add_arg_scope
def p_net(observed=None, n_z=None):
net = spt.BayesianNet(observed=observed)
# sample z ~ p(z)
z = net.add('z', spt.Bernoulli(tf.zeros([1, config.z_dim])),
group_ndims=1, n_samples=n_z)
# compute the hidden features
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_z = tf.to_float(z)
h_z = spt.layers.dense(h_z, 500)
h_z = spt.layers.dense(h_z, 500)
# sample x ~ p(x|z)
x_logits = spt.layers.dense(h_z, config.x_dim, name='x_logits')
x = net.add('x', spt.Bernoulli(logits=x_logits), group_ndims=1)
return net
@spt.global_reuse
def baseline_net(x):
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_x = tf.to_float(x)
h_x = spt.layers.dense(h_x, 500)
return tf.squeeze(spt.layers.dense(h_x, 1), -1)
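# Note added for clarity: because z is Bernoulli (discrete), the ELBO gradient
# w.r.t. q(z|x) cannot use the reparameterization trick.  The NVIL objective
# constructed in main() therefore uses a score-function (REINFORCE-style)
# estimator, and baseline_net() supplies the input-dependent baseline that
# reduces the variance of that estimator.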
def main():
# parse the arguments
arg_parser = ArgumentParser()
spt.register_config_arguments(config, arg_parser, title='Model options')
spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet',
title='TFSnippet options')
arg_parser.parse_args(sys.argv[1:])
# print the config
print_with_title('Configurations', pformat(config.to_dict()), after='\n')
# open the result object and prepare for result directories
results = MLResults(config.result_dir)
results.save_config(config) # save experiment settings for review
results.make_dirs('plotting', exist_ok=True)
results.make_dirs('train_summary', exist_ok=True)
# input placeholders
input_x = tf.placeholder(
dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
learning_rate = spt.AnnealingVariable(
'learning_rate', config.initial_lr, config.lr_anneal_factor)
# derive the loss and lower-bound for training
with tf.name_scope('training'):
train_q_net = q_net(input_x)
train_chain = train_q_net.chain(p_net, observed={'x': input_x})
baseline = baseline_net(input_x)
nvil_loss = tf.reduce_mean(
train_chain.vi.training.nvil(baseline=baseline))
loss = tf.losses.get_regularization_loss() + nvil_loss
# derive the nll and logits output for testing
with tf.name_scope('testing'):
test_q_net = q_net(input_x, n_z=config.test_n_z)
test_chain = test_q_net.chain(
p_net, latent_axis=0, observed={'x': input_x})
test_nll = -tf.reduce_mean(
test_chain.vi.evaluation.is_loglikelihood())
test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())
# derive the optimizer
with tf.name_scope('optimizing'):
optimizer = tf.train.AdamOptimizer(learning_rate)
params = tf.trainable_variables()
grads = optimizer.compute_gradients(loss, var_list=params)
with tf.control_dependencies(
tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.apply_gradients(grads)
# derive the plotting function
with tf.name_scope('plotting'):
plot_p_net = p_net(n_z=100)
x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))
def plot_samples(loop):
with loop.timeit('plot_time'):
session = spt.utils.get_default_session_or_error()
images = session.run(x_plots)
save_images_collection(
images=images,
filename='plotting/{}.png'.format(loop.epoch),
grid_size=(10, 10),
results=results
)
# prepare for training and testing data
(x_train, y_train), (x_test, y_test) = \
spt.datasets.load_mnist(x_shape=[784])
train_flow = bernoulli_flow(
x_train, config.batch_size, shuffle=True, skip_incomplete=True)
test_flow = bernoulli_flow(
x_test, config.test_batch_size, sample_now=True)
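    # Clarifying note (inferred from the helper's arguments, not original to
    # this script): bernoulli_flow treats pixel intensities as Bernoulli means;
    # the training flow re-samples a binarization on the fly, while
    # sample_now=True binarizes the test set once so evaluation is deterministic.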
with spt.utils.create_session().as_default():
# train the network
with spt.TrainLoop(params,
max_epoch=config.max_epoch,
max_step=config.max_step,
summary_dir=(results.system_path('train_summary')
if config.write_summary else None),
summary_graph=tf.get_default_graph(),
early_stopping=False) as loop:
trainer = spt.Trainer(
loop, train_op, [input_x], train_flow,
metrics={'loss': loss},
summaries=tf.summary.merge_all(spt.GraphKeys.AUTO_HISTOGRAM)
)
trainer.anneal_after(
learning_rate,
epochs=config.lr_anneal_epoch_freq,
steps=config.lr_anneal_step_freq
)
evaluator = spt.Evaluator(
loop,
metrics={'test_nll': test_nll, 'test_lb': test_lb},
inputs=[input_x],
data_flow=test_flow,
time_metric_name='test_time'
)
evaluator.events.on(
spt.EventKeys.AFTER_EXECUTION,
lambda e: results.update_metrics(evaluator.last_metrics_dict)
)
trainer.evaluate_after_epochs(evaluator, freq=10)
trainer.evaluate_after_epochs(
functools.partial(plot_samples, loop), freq=10)
trainer.log_after_epochs(freq=1)
trainer.run()
# print the final metrics and close the results object
print_with_title('Results', results.format_metrics(), before='\n')
results.close()
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""POSIX timestamp implementation."""
import decimal
from dfdatetime import definitions
from dfdatetime import factory
from dfdatetime import interface
class PosixTimeEpoch(interface.DateTimeEpoch):
"""POSIX time epoch."""
def __init__(self):
"""Initializes a POSIX time epoch."""
super(PosixTimeEpoch, self).__init__(1970, 1, 1)
class PosixTime(interface.DateTimeValues):
"""POSIX timestamp.
The POSIX timestamp is a signed integer that contains the number of
seconds since 1970-01-01 00:00:00 (also known as the POSIX epoch).
  Negative values represent dates and times predating the POSIX epoch.
The POSIX timestamp was initially 32-bit though 64-bit variants
are known to be used.
Attributes:
is_local_time (bool): True if the date and time value is in local time.
"""
_EPOCH = PosixTimeEpoch()
def __init__(self, time_zone_offset=None, timestamp=None):
"""Initializes a POSIX timestamp.
Args:
time_zone_offset (Optional[int]): time zone offset in number of minutes
from UTC or None if not set.
timestamp (Optional[int]): POSIX timestamp.
"""
super(PosixTime, self).__init__(time_zone_offset=time_zone_offset)
self._precision = definitions.PRECISION_1_SECOND
self._timestamp = timestamp
@property
def timestamp(self):
"""int: POSIX timestamp or None if not set."""
return self._timestamp
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if self._timestamp is not None:
self._normalized_timestamp = decimal.Decimal(self._timestamp)
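        # The offset is stored in minutes; e.g. a value recorded at UTC+1
        # (offset +60) is shifted back by 3600 seconds to normalize to UTC.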
if self._time_zone_offset:
self._normalized_timestamp -= self._time_zone_offset * 60
return self._normalized_timestamp
def CopyFromDateTimeString(self, time_string):
"""Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
"""
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
time_zone_offset = date_time_values.get('time_zone_offset', 0)
self._timestamp = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
self._time_zone_offset = time_zone_offset
def CopyToDateTimeString(self):
"""Copies the POSIX timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss" or None
if the timestamp is missing.
"""
if self._timestamp is None:
return None
number_of_days, hours, minutes, seconds = self._GetTimeValues(
self._timestamp)
year, month, day_of_month = self._GetDateValuesWithEpoch(
number_of_days, self._EPOCH)
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'.format(
year, month, day_of_month, hours, minutes, seconds)
class PosixTimeInMilliseconds(interface.DateTimeValues):
"""POSIX timestamp in milliseconds.
Variant of the POSIX timestamp in milliseconds.
Attributes:
is_local_time (bool): True if the date and time value is in local time.
"""
_EPOCH = PosixTimeEpoch()
def __init__(self, time_zone_offset=None, timestamp=None):
"""Initializes a POSIX timestamp in milliseconds.
Args:
time_zone_offset (Optional[int]): time zone offset in number of minutes
from UTC or None if not set.
timestamp (Optional[int]): POSIX timestamp in milliseconds.
"""
super(PosixTimeInMilliseconds, self).__init__(
time_zone_offset=time_zone_offset)
self._precision = definitions.PRECISION_1_MILLISECOND
self._timestamp = timestamp
@property
def timestamp(self):
"""int: POSIX timestamp in milliseconds or None if not set."""
return self._timestamp
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if self._timestamp is not None:
self._normalized_timestamp = (
decimal.Decimal(self._timestamp) /
definitions.MILLISECONDS_PER_SECOND)
if self._time_zone_offset:
self._normalized_timestamp -= self._time_zone_offset * 60
return self._normalized_timestamp
def CopyFromDateTimeString(self, time_string):
"""Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
"""
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
microseconds = date_time_values.get('microseconds', 0)
time_zone_offset = date_time_values.get('time_zone_offset', 0)
timestamp = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
timestamp *= definitions.MILLISECONDS_PER_SECOND
if microseconds:
milliseconds, _ = divmod(
microseconds, definitions.MILLISECONDS_PER_SECOND)
timestamp += milliseconds
self._timestamp = timestamp
self._time_zone_offset = time_zone_offset
def CopyToDateTimeString(self):
"""Copies the POSIX timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.######" or
None if the timestamp is missing.
"""
if self._timestamp is None:
return None
timestamp, milliseconds = divmod(
self._timestamp, definitions.MILLISECONDS_PER_SECOND)
number_of_days, hours, minutes, seconds = self._GetTimeValues(timestamp)
year, month, day_of_month = self._GetDateValuesWithEpoch(
number_of_days, self._EPOCH)
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:03d}'.format(
year, month, day_of_month, hours, minutes, seconds, milliseconds)
class PosixTimeInMicroseconds(interface.DateTimeValues):
"""POSIX timestamp in microseconds.
Variant of the POSIX timestamp in microseconds.
Attributes:
is_local_time (bool): True if the date and time value is in local time.
"""
_EPOCH = PosixTimeEpoch()
def __init__(self, time_zone_offset=None, timestamp=None):
"""Initializes a POSIX timestamp in microseconds.
Args:
time_zone_offset (Optional[int]): time zone offset in number of minutes
from UTC or None if not set.
timestamp (Optional[int]): POSIX timestamp in microseconds.
"""
super(PosixTimeInMicroseconds, self).__init__(
time_zone_offset=time_zone_offset)
self._precision = definitions.PRECISION_1_MICROSECOND
self._timestamp = timestamp
@property
def timestamp(self):
"""int: POSIX timestamp in microseconds or None if not set."""
return self._timestamp
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if self._timestamp is not None:
self._normalized_timestamp = (
decimal.Decimal(self._timestamp) /
definitions.MICROSECONDS_PER_SECOND)
if self._time_zone_offset:
self._normalized_timestamp -= self._time_zone_offset * 60
return self._normalized_timestamp
def CopyFromDateTimeString(self, time_string):
"""Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
"""
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
microseconds = date_time_values.get('microseconds', 0)
time_zone_offset = date_time_values.get('time_zone_offset', 0)
timestamp = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
timestamp *= definitions.MICROSECONDS_PER_SECOND
timestamp += microseconds
self._timestamp = timestamp
self._time_zone_offset = time_zone_offset
def CopyToDateTimeString(self):
"""Copies the POSIX timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.######" or
None if the timestamp is missing.
"""
if self._timestamp is None:
return None
timestamp, microseconds = divmod(
self._timestamp, definitions.MICROSECONDS_PER_SECOND)
number_of_days, hours, minutes, seconds = self._GetTimeValues(timestamp)
year, month, day_of_month = self._GetDateValuesWithEpoch(
number_of_days, self._EPOCH)
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:06d}'.format(
year, month, day_of_month, hours, minutes, seconds, microseconds)
class PosixTimeInNanoseconds(interface.DateTimeValues):
"""POSIX timestamp in nanoseconds.
Variant of the POSIX timestamp in nanoseconds.
Attributes:
is_local_time (bool): True if the date and time value is in local time.
"""
_EPOCH = PosixTimeEpoch()
def __init__(self, time_zone_offset=None, timestamp=None):
"""Initializes a POSIX timestamp in nanoseconds.
Args:
time_zone_offset (Optional[int]): time zone offset in number of minutes
from UTC or None if not set.
timestamp (Optional[int]): POSIX timestamp in nanoseconds.
"""
super(PosixTimeInNanoseconds, self).__init__(
time_zone_offset=time_zone_offset)
self._precision = definitions.PRECISION_1_NANOSECOND
self._timestamp = timestamp
@property
def timestamp(self):
"""int: POSIX timestamp or None if not set."""
return self._timestamp
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if self._timestamp is not None:
self._normalized_timestamp = (
decimal.Decimal(self._timestamp) /
definitions.NANOSECONDS_PER_SECOND)
if self._time_zone_offset:
self._normalized_timestamp -= self._time_zone_offset * 60
return self._normalized_timestamp
def _CopyFromDateTimeString(self, time_string):
"""Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
"""
date_time_values = self._CopyDateTimeFromString(time_string)
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
microseconds = date_time_values.get('microseconds', None)
time_zone_offset = date_time_values.get('time_zone_offset', 0)
timestamp = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
timestamp *= definitions.NANOSECONDS_PER_SECOND
if microseconds:
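      # Microseconds are converted to nanoseconds; the factor is 1000, which
      # is why MILLISECONDS_PER_SECOND (== 1000) is used as the multiplier.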
nanoseconds = microseconds * definitions.MILLISECONDS_PER_SECOND
timestamp += nanoseconds
self._normalized_timestamp = None
self._timestamp = timestamp
self._time_zone_offset = time_zone_offset
def CopyFromDateTimeString(self, time_string):
"""Copies a POSIX timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
"""
self._CopyFromDateTimeString(time_string)
def _CopyToDateTimeString(self):
"""Copies the POSIX timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
None if the timestamp is missing or invalid.
"""
if self._timestamp is None:
return None
timestamp, nanoseconds = divmod(
self._timestamp, definitions.NANOSECONDS_PER_SECOND)
number_of_days, hours, minutes, seconds = self._GetTimeValues(timestamp)
year, month, day_of_month = self._GetDateValuesWithEpoch(
number_of_days, self._EPOCH)
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:09d}'.format(
year, month, day_of_month, hours, minutes, seconds, nanoseconds)
def CopyToDateTimeString(self):
"""Copies the POSIX timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
None if the timestamp is missing or invalid.
"""
return self._CopyToDateTimeString()
factory.Factory.RegisterDateTimeValues(PosixTime)
factory.Factory.RegisterDateTimeValues(PosixTimeInMilliseconds)
factory.Factory.RegisterDateTimeValues(PosixTimeInMicroseconds)
factory.Factory.RegisterDateTimeValues(PosixTimeInNanoseconds)
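# Minimal usage sketch (an illustrative addition, not part of the original
# module): 1281643591 seconds after the POSIX epoch is 2010-08-12 20:06:31 UTC.
if __name__ == '__main__':
  posix_time = PosixTime(timestamp=1281643591)
  print(posix_time.CopyToDateTimeString())  # 2010-08-12 20:06:31
  posix_time = PosixTimeInMicroseconds(timestamp=1281643591546875)
  print(posix_time.CopyToDateTimeString())  # 2010-08-12 20:06:31.546875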
|
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descriptor wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error
)
class WalletDescriptorTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-keypool=100']]
self.wallet_names = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_sqlite()
def run_test(self):
if self.is_bdb_compiled():
# Make a legacy wallet and check it is BDB
self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False)
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'bdb')
self.nodes[0].unloadwallet("legacy1")
else:
self.log.warning("Skipping BDB test")
# Make a descriptor wallet
self.log.info("Making a descriptor wallet")
self.nodes[0].createwallet(wallet_name="desc1", descriptors=True)
# A descriptor wallet should have 100 addresses * 3 types = 300 keys
self.log.info("Checking wallet info")
wallet_info = self.nodes[0].getwalletinfo()
assert_equal(wallet_info['format'], 'sqlite')
assert_equal(wallet_info['keypoolsize'], 300)
assert_equal(wallet_info['keypoolsize_hd_internal'], 300)
assert 'keypoololdest' not in wallet_info
# Check that getnewaddress works
self.log.info("Test that getnewaddress and getrawchangeaddress work")
addr = self.nodes[0].getnewaddress("", "legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/0/0')
addr = self.nodes[0].getnewaddress("", "bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/0/0')
# Check that getrawchangeaddress works
addr = self.nodes[0].getrawchangeaddress("legacy")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('pkh(')
assert_equal(addr_info['hdkeypath'], 'm/44\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("p2sh-segwit")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('sh(wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/49\'/1\'/0\'/1/0')
addr = self.nodes[0].getrawchangeaddress("bech32")
addr_info = self.nodes[0].getaddressinfo(addr)
assert addr_info['desc'].startswith('wpkh(')
assert_equal(addr_info['hdkeypath'], 'm/84\'/1\'/0\'/1/0')
# Make a wallet to receive coins at
self.nodes[0].createwallet(wallet_name="desc2", descriptors=True)
recv_wrpc = self.nodes[0].get_wallet_rpc("desc2")
send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
# Generate some coins
send_wrpc.generatetoaddress(101, send_wrpc.getnewaddress())
# Make transactions
self.log.info("Test sending and receiving")
addr = recv_wrpc.getnewaddress()
send_wrpc.sendtoaddress(addr, 10)
# Make sure things are disabled
self.log.info("Test disabled RPCs")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importprivkey, "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW")
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importpubkey, send_wrpc.getaddressinfo(send_wrpc.getnewaddress()))
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importaddress, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importmulti, [])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.addmultisigaddress, 1, [recv_wrpc.getnewaddress()])
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpprivkey, recv_wrpc.getnewaddress())
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.dumpwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.importwallet, 'wallet.dump')
assert_raises_rpc_error(-4, "This type of wallet does not support this command", recv_wrpc.rpc.sethdseed)
self.log.info("Test encryption")
# Get the master fingerprint before encrypt
info1 = send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
# Encrypt wallet 0
send_wrpc.encryptwallet('pass')
send_wrpc.walletpassphrase('pass', 10)
addr = send_wrpc.getnewaddress()
info2 = send_wrpc.getaddressinfo(addr)
assert info1['hdmasterfingerprint'] != info2['hdmasterfingerprint']
send_wrpc.walletlock()
assert 'hdmasterfingerprint' in send_wrpc.getaddressinfo(send_wrpc.getnewaddress())
info3 = send_wrpc.getaddressinfo(addr)
assert_equal(info2['desc'], info3['desc'])
self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet")
for _ in range(500):
send_wrpc.getnewaddress()
self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet")
send_wrpc.walletpassphrase('pass', 10)
send_wrpc.importdescriptors([{
"desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n",
"timestamp": "now",
"range": [0,10],
"active": True
}])
send_wrpc.walletlock()
# Exhaust keypool of 100
for _ in range(100):
send_wrpc.getnewaddress(address_type='bech32')
# This should now error
assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32')
self.log.info("Test born encrypted wallets")
self.nodes[0].createwallet('desc_enc', False, False, 'pass', False, True)
enc_rpc = self.nodes[0].get_wallet_rpc('desc_enc')
enc_rpc.getnewaddress() # Makes sure that we can get a new address from a born encrypted wallet
self.log.info("Test blank descriptor wallets")
self.nodes[0].createwallet(wallet_name='desc_blank', blank=True, descriptors=True)
blank_rpc = self.nodes[0].get_wallet_rpc('desc_blank')
assert_raises_rpc_error(-4, 'This wallet has no available keys', blank_rpc.getnewaddress)
self.log.info("Test descriptor wallet with disabled private keys")
self.nodes[0].createwallet(wallet_name='desc_no_priv', disable_private_keys=True, descriptors=True)
nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv')
assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress)
self.log.info("Test descriptor exports")
self.nodes[0].createwallet(wallet_name='desc_export', descriptors=True)
exp_rpc = self.nodes[0].get_wallet_rpc('desc_export')
self.nodes[0].createwallet(wallet_name='desc_import', disable_private_keys=True, descriptors=True)
imp_rpc = self.nodes[0].get_wallet_rpc('desc_import')
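        # Each tuple is (address type, internal chain?, expected descriptor
        # prefix, expected derivation path, index counted from the end of the
        # descriptor string where the internal/external element '1'/'0' sits).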
addr_types = [('legacy', False, 'pkh(', '44\'/1\'/0\'', -13),
('p2sh-segwit', False, 'sh(wpkh(', '49\'/1\'/0\'', -14),
('bech32', False, 'wpkh(', '84\'/1\'/0\'', -13),
('legacy', True, 'pkh(', '44\'/1\'/0\'', -13),
('p2sh-segwit', True, 'sh(wpkh(', '49\'/1\'/0\'', -14),
('bech32', True, 'wpkh(', '84\'/1\'/0\'', -13)]
for addr_type, internal, desc_prefix, deriv_path, int_idx in addr_types:
int_str = 'internal' if internal else 'external'
self.log.info("Testing descriptor address type for {} {}".format(addr_type, int_str))
if internal:
addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
else:
addr = exp_rpc.getnewaddress(address_type=addr_type)
desc = exp_rpc.getaddressinfo(addr)['parent_desc']
assert_equal(desc_prefix, desc[0:len(desc_prefix)])
idx = desc.index('/') + 1
assert_equal(deriv_path, desc[idx:idx + 9])
if internal:
assert_equal('1', desc[int_idx])
else:
assert_equal('0', desc[int_idx])
self.log.info("Testing the same descriptor is returned for address type {} {}".format(addr_type, int_str))
for i in range(0, 10):
if internal:
addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
else:
addr = exp_rpc.getnewaddress(address_type=addr_type)
test_desc = exp_rpc.getaddressinfo(addr)['parent_desc']
assert_equal(desc, test_desc)
self.log.info("Testing import of exported {} descriptor".format(addr_type))
imp_rpc.importdescriptors([{
'desc': desc,
'active': True,
'next_index': 11,
'timestamp': 'now',
'internal': internal
}])
for i in range(0, 10):
if internal:
exp_addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
imp_addr = imp_rpc.getrawchangeaddress(address_type=addr_type)
else:
exp_addr = exp_rpc.getnewaddress(address_type=addr_type)
imp_addr = imp_rpc.getnewaddress(address_type=addr_type)
assert_equal(exp_addr, imp_addr)
if __name__ == '__main__':
    WalletDescriptorTest().main()
|
|
"""Test stepping over vrs. hitting breakpoints & subsequent stepping in various forms."""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "main.c"
@add_test_categories(['pyapi', 'basic_process'])
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr17932')
@expectedFailureAll(oslist=["linux"], bugnumber="llvm.org/pr14437")
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24777")
@expectedFailureNetBSD
def test_and_python_api(self):
"""Test stepping over vrs. hitting breakpoints & subsequent stepping in various forms."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
breakpoints_to_disable = []
break_1_in_main = target.BreakpointCreateBySourceRegex(
'// frame select 2, thread step-out while stopped at .c.1..',
self.main_source_spec)
self.assertTrue(break_1_in_main, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_1_in_main)
break_in_a = target.BreakpointCreateBySourceRegex(
'// break here to stop in a before calling b', self.main_source_spec)
self.assertTrue(break_in_a, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_a)
break_in_b = target.BreakpointCreateBySourceRegex(
'// thread step-out while stopped at .c.2..', self.main_source_spec)
self.assertTrue(break_in_b, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_b)
break_in_c = target.BreakpointCreateBySourceRegex(
'// Find the line number of function .c. here.', self.main_source_spec)
self.assertTrue(break_in_c, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_c)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_1_in_main)
if len(threads) != 1:
self.fail("Failed to stop at first breakpoint in main.")
thread = threads[0]
# Get the stop id and for fun make sure it increases:
old_stop_id = process.GetStopID()
# Now step over, which should cause us to hit the breakpoint in "a"
thread.StepOver()
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_a)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in a.")
# Check that the stop ID increases:
new_stop_id = process.GetStopID()
self.assertTrue(
new_stop_id > old_stop_id,
"Stop ID increases monotonically.")
thread = threads[0]
# Step over, and we should hit the breakpoint in b:
thread.StepOver()
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_b)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in b.")
thread = threads[0]
# Now try running some function, and make sure that we still end up in the same place
# and with the same stop reason.
frame = thread.GetFrameAtIndex(0)
current_line = frame.GetLineEntry().GetLine()
current_file = frame.GetLineEntry().GetFileSpec()
current_bp = []
current_bp.append(thread.GetStopReasonDataAtIndex(0))
current_bp.append(thread.GetStopReasonDataAtIndex(1))
stop_id_before_expression = process.GetStopID()
stop_id_before_including_expressions = process.GetStopID(True)
frame.EvaluateExpression("(int) printf (print_string)")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
current_line == frame.GetLineEntry().GetLine(),
"The line stayed the same after expression.")
self.assertTrue(
current_file == frame.GetLineEntry().GetFileSpec(),
"The file stayed the same after expression.")
self.assertTrue(
thread.GetStopReason() == lldb.eStopReasonBreakpoint,
"We still say we stopped for a breakpoint.")
self.assertTrue(thread.GetStopReasonDataAtIndex(0) == current_bp[
0] and thread.GetStopReasonDataAtIndex(1) == current_bp[1], "And it is the same breakpoint.")
# Also make sure running the expression didn't change the public stop id
# but did change if we are asking for expression stops as well.
stop_id_after_expression = process.GetStopID()
stop_id_after_including_expressions = process.GetStopID(True)
self.assertTrue(
stop_id_before_expression == stop_id_after_expression,
"Expression calling doesn't change stop ID")
self.assertTrue(
stop_id_after_including_expressions > stop_id_before_including_expressions,
"Stop ID including expressions increments over expression call.")
# Do the same thing with an expression that's going to crash, and make
# sure we are still unchanged.
frame.EvaluateExpression("((char *) 0)[0] = 'a'")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
current_line == frame.GetLineEntry().GetLine(),
"The line stayed the same after expression.")
self.assertTrue(
current_file == frame.GetLineEntry().GetFileSpec(),
"The file stayed the same after expression.")
self.assertTrue(
thread.GetStopReason() == lldb.eStopReasonBreakpoint,
"We still say we stopped for a breakpoint.")
self.assertTrue(thread.GetStopReasonDataAtIndex(0) == current_bp[
0] and thread.GetStopReasonDataAtIndex(1) == current_bp[1], "And it is the same breakpoint.")
# Now continue and make sure we just complete the step:
# Disable all our breakpoints first - sometimes the compiler puts two line table entries in for the
# breakpoint a "b" and we don't want to hit that.
for bkpt in breakpoints_to_disable:
bkpt.SetEnabled(False)
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "a")
self.assertTrue(thread.GetStopReason() == lldb.eStopReasonPlanComplete)
# And one more time should get us back to main:
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "main")
self.assertTrue(thread.GetStopReason() == lldb.eStopReasonPlanComplete)
# Now make sure we can call a function, break in the called function,
# then have "continue" get us back out again:
        frame = thread.GetFrameAtIndex(0)
current_line = frame.GetLineEntry().GetLine()
current_file = frame.GetLineEntry().GetFileSpec()
break_in_b.SetEnabled(True)
options = lldb.SBExpressionOptions()
options.SetIgnoreBreakpoints(False)
options.SetFetchDynamicValue(False)
options.SetUnwindOnError(False)
frame.EvaluateExpression("b (4)", options)
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_b)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in b when calling b.")
thread = threads[0]
# So do a step over here to make sure we can still do that:
thread.StepOver()
# See that we are still in b:
func_name = thread.GetFrameAtIndex(0).GetFunctionName()
self.assertTrue(
func_name == "b",
"Should be in 'b', were in %s" %
(func_name))
# Okay, now if we continue, we will finish off our function call and we
# should end up back in "a" as if nothing had happened:
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(
0).GetLineEntry().GetLine() == current_line)
self.assertTrue(thread.GetFrameAtIndex(
0).GetLineEntry().GetFileSpec() == current_file)
# Now we are going to test step in targeting a function:
break_in_b.SetEnabled(False)
break_before_complex_1 = target.BreakpointCreateBySourceRegex(
'// Stop here to try step in targeting b.', self.main_source_spec)
self.assertTrue(break_before_complex_1, VALID_BREAKPOINT)
break_before_complex_2 = target.BreakpointCreateBySourceRegex(
'// Stop here to try step in targeting complex.', self.main_source_spec)
self.assertTrue(break_before_complex_2, VALID_BREAKPOINT)
break_before_complex_3 = target.BreakpointCreateBySourceRegex(
'// Stop here to step targeting b and hitting breakpoint.', self.main_source_spec)
self.assertTrue(break_before_complex_3, VALID_BREAKPOINT)
break_before_complex_4 = target.BreakpointCreateBySourceRegex(
'// Stop here to make sure bogus target steps over.', self.main_source_spec)
self.assertTrue(break_before_complex_4, VALID_BREAKPOINT)
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_1)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_1.SetEnabled(False)
thread.StepInto("b")
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "b")
# Now continue out and stop at the next call to complex. This time
# step all the way into complex:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_2)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_2.SetEnabled(False)
thread.StepInto("complex")
self.assertTrue(thread.GetFrameAtIndex(
0).GetFunctionName() == "complex")
# Now continue out and stop at the next call to complex. This time
# enable breakpoints in a and c and then step targeting b:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_3)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_3.SetEnabled(False)
break_at_start_of_a = target.BreakpointCreateByName('a')
break_at_start_of_c = target.BreakpointCreateByName('c')
thread.StepInto("b")
threads = lldbutil.get_stopped_threads(
process, lldb.eStopReasonBreakpoint)
self.assertTrue(len(threads) == 1)
thread = threads[0]
stop_break_id = thread.GetStopReasonDataAtIndex(0)
self.assertTrue(stop_break_id == break_at_start_of_a.GetID()
or stop_break_id == break_at_start_of_c.GetID())
break_at_start_of_a.SetEnabled(False)
break_at_start_of_c.SetEnabled(False)
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "b")
# Now continue out and stop at the next call to complex. This time
# enable breakpoints in a and c and then step targeting b:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_4)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_4.SetEnabled(False)
thread.StepInto("NoSuchFunction")
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "main")
|
|
import os
import sys
import time
import math
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.shared_randomstreams
from theano.tensor.signal import downsample
from util import datapy, color, paramgraphics
#from optimization import optimizer
from optimization import optimizer_separated
from layer import FullyConnected, nonlinearity
from layer import GaussianHidden, NoParamsGaussianVisiable, Pegasos
#from layer import ConvMaxPool_GauInit_DNN, UnpoolConvNon_GauInit_DNN
from layer import ConvMaxPool_GauInit_DNN, UnpoolConvNon_GauInit_DNN
def cmmva_6layer_svhn(learning_rate=0.01,
n_epochs=600,
dataset='svhngcn_var',
batch_size=500,
dropout_flag=1,
seed=0,
predir=None,
activation=None,
n_batch=625,
weight_decay=1e-4,
super_predir=None,
super_preepoch=None):
"""
Implementation of convolutional MMVA
"""
'''
svhn
'''
n_channels = 3
colorImg = True
dim_w = 32
dim_h = 32
dim_input=(dim_h, dim_w)
n_classes = 10
D = 1.0
C = 1.0
if os.environ.has_key('C'):
C = np.cast['float32'](float((os.environ['C'])))
if os.environ.has_key('D'):
D = np.cast['float32'](float((os.environ['D'])))
color.printRed('D '+str(D)+' C '+str(C))
first_drop=0.5
if os.environ.has_key('first_drop'):
first_drop = float(os.environ['first_drop'])
last_drop=1
if os.environ.has_key('last_drop'):
last_drop = float(os.environ['last_drop'])
nkerns_1=96
if os.environ.has_key('nkerns_1'):
nkerns_1 = int(os.environ['nkerns_1'])
nkerns_2=96
if os.environ.has_key('nkerns_2'):
nkerns_2 = int(os.environ['nkerns_2'])
n_z=512
if os.environ.has_key('n_z'):
n_z = int(os.environ['n_z'])
opt_med='adam'
if os.environ.has_key('opt_med'):
opt_med = os.environ['opt_med']
train_logvar=True
if os.environ.has_key('train_logvar'):
train_logvar = bool(int(os.environ['train_logvar']))
std = 2e-2
if os.environ.has_key('std'):
        std = float(os.environ['std'])  # cast to float to match the numeric default above
Loss_L = 1
if os.environ.has_key('Loss_L'):
Loss_L = int(os.environ['Loss_L'])
pattern = 'hinge'
if os.environ.has_key('pattern'):
pattern = os.environ['pattern']
#cp->cd->cpd->cd->c
nkerns=[nkerns_1, nkerns_1, nkerns_1, nkerns_2, nkerns_2]
drops=[0, 1, 1, 1, 0, 1]
drop_p=[1, first_drop, first_drop, first_drop, 1, last_drop]
n_hidden=[n_z]
logdir = 'results/supervised/cmmva/svhn/cmmva_6layer_'+dataset+pattern+'_D_'+str(D)+'_C_'+str(C)+'_'#+str(nkerns)+str(n_hidden)+'_'+str(weight_decay)+'_'+str(learning_rate)+'_'
#if predir is not None:
# logdir +='pre_'
#if dropout_flag == 1:
# logdir += ('dropout_'+str(drops)+'_')
# logdir += ('drop_p_'+str(drop_p)+'_')
#logdir += ('trainvar_'+str(train_logvar)+'_')
#logdir += (opt_med+'_')
#logdir += (str(Loss_L)+'_')
#if super_predir is not None:
# logdir += (str(super_preepoch)+'_')
logdir += str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print 'logdir:', logdir, 'predir', predir
print 'cmmva_6layer_svhn_fix', nkerns, n_hidden, seed, dropout_flag, drops, drop_p
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'logdir:', logdir, 'predir', predir
print >>f, 'cmmva_6layer_svhn_fix', nkerns, n_hidden, seed, dropout_flag, drops, drop_p
color.printRed('dataset '+dataset)
datasets = datapy.load_data_svhn(dataset, have_matrix=True)
train_set_x, train_set_y, train_y_matrix = datasets[0]
test_set_x, test_set_y, test_y_matrix = datasets[1]
valid_set_x, valid_set_y, valid_y_matrix = datasets[2]
#datasets = datapy.load_data_svhn(dataset, have_matrix=False)
#train_set_x, train_set_y = datasets[0]
#test_set_x, test_set_y = datasets[1]
#valid_set_x, valid_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
random_z = T.matrix('random_z')
y_matrix = T.imatrix('y_matrix')
drop = T.iscalar('drop')
activation = nonlinearity.relu
rng = np.random.RandomState(seed)
rng_share = theano.tensor.shared_randomstreams.RandomStreams(0)
input_x = x.reshape((batch_size, n_channels, dim_h, dim_w))
recg_layer = []
cnn_output = []
l = []
d = []
#1
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, n_channels, dim_h, dim_w),
filter_shape=(nkerns[0], n_channels, 5, 5),
poolsize=(2, 2),
border_mode='same',
activation=activation,
std=std
))
if drops[0]==1:
cnn_output.append(recg_layer[-1].drop_output(input=input_x, drop=drop, rng=rng_share, p=drop_p[0]))
else:
cnn_output.append(recg_layer[-1].output(input=input_x))
l+=[1, 2]
d+=[1, 0]
#2
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[0], 16, 16),
filter_shape=(nkerns[1], nkerns[0], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation,
std=std
))
if drops[1]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[1]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
#3
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[1], 16, 16),
filter_shape=(nkerns[2], nkerns[1], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation,
std=std
))
if drops[2]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[2]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
#4
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[2], 8, 8),
filter_shape=(nkerns[3], nkerns[2], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation,
std=std
))
if drops[3]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[3]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
#5
recg_layer.append(ConvMaxPool_GauInit_DNN.ConvMaxPool_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[3], 8, 8),
filter_shape=(nkerns[4], nkerns[3], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation,
std=std
))
if drops[4]==1:
cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share, p=drop_p[4]))
else:
cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
l+=[1, 2]
d+=[1, 0]
mlp_input_x = cnn_output[-1].flatten(2)
activations = []
activations.append(mlp_input_x)
classifier = Pegasos.Pegasos(
input= activations[-1],
rng=rng,
n_in=nkerns[-1]*4*4,
n_out=n_classes,
weight_decay=0,
loss=Loss_L,
pattern=pattern
)
l+=[1, 2]
d+=[1, 0]
#stochastic layer
recg_layer.append(GaussianHidden.GaussianHidden(
rng=rng,
input=mlp_input_x,
n_in=4*4*nkerns[-1],
n_out=n_hidden[0],
activation=None
))
l+=[1, 2]
d+=[1, 0]
l+=[1, 2]
d+=[1, 0]
z = recg_layer[-1].sample_z(rng_share)
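    # Clarifying note: z is a sample drawn from the Gaussian recognition model
    # q(z|x); the GaussianHidden layer also exposes logpz and logqz, which are
    # combined with logpx in the variational lower bound computed below.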
gene_layer = []
z_output = []
random_z_output = []
#1
gene_layer.append(FullyConnected.FullyConnected(
rng=rng,
n_in=n_hidden[-1],
n_out=4*4*nkerns[-1],
activation=activation
))
z_output.append(gene_layer[-1].output(input=z))
random_z_output.append(gene_layer[-1].output(input=random_z))
l+=[1, 2]
d+=[1, 0]
input_z = z_output[-1].reshape((batch_size, nkerns[-1], 4, 4))
input_random_z = random_z_output[-1].reshape((n_batch, nkerns[-1], 4, 4))
#1
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-1], 4, 4),
filter_shape=(nkerns[-2], nkerns[-1], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 0]
z_output.append(gene_layer[-1].output(input=input_z))
random_z_output.append(gene_layer[-1].output_random_generation(input=input_random_z, n_batch=n_batch))
#2
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-2], 8, 8),
filter_shape=(nkerns[-3], nkerns[-2], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 0]
z_output.append(gene_layer[-1].output(input=z_output[-1]))
random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
#3
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-3], 8, 8),
filter_shape=(nkerns[-4], nkerns[-3], 3, 3),
poolsize=(2, 2),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 0]
z_output.append(gene_layer[-1].output(input=z_output[-1]))
random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
#4
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-4], 16, 16),
filter_shape=(nkerns[-5], nkerns[-4], 3, 3),
poolsize=(1, 1),
border_mode='same',
activation=activation
))
l+=[1, 2]
d+=[1, 0]
z_output.append(gene_layer[-1].output(input=z_output[-1]))
random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
#5-1 stochastic layer
# for this layer, the activation is None to get a Guassian mean
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-5], 16, 16),
filter_shape=(n_channels, nkerns[-5], 5, 5),
poolsize=(2, 2),
border_mode='same',
activation=None
))
l+=[1, 2]
d+=[1, 0]
x_mean=gene_layer[-1].output(input=z_output[-1])
random_x_mean=gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch)
#5-2 stochastic layer
# for this layer, the activation is None to get logvar
if train_logvar:
gene_layer.append(UnpoolConvNon_GauInit_DNN.UnpoolConvNon_GauInit_DNN(
rng,
image_shape=(batch_size, nkerns[-5], 16, 16),
filter_shape=(n_channels, nkerns[-5], 5, 5),
poolsize=(2, 2),
border_mode='same',
activation=None
))
l+=[1, 2]
d+=[1, 0]
x_logvar=gene_layer[-1].output(input=z_output[-1])
random_x_logvar=gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch)
else:
x_logvar = theano.shared(np.ones((batch_size, n_channels, dim_h, dim_w), dtype='float32'))
random_x_logvar = theano.shared(np.ones((n_batch, n_channels, dim_h, dim_w), dtype='float32'))
gene_layer.append(NoParamsGaussianVisiable.NoParamsGaussianVisiable(
#rng=rng,
#mean=z_output[-1],
#data=input_x,
))
logpx = gene_layer[-1].logpx(mean=x_mean, logvar=x_logvar, data=input_x)
random_x = gene_layer[-1].sample_x(rng_share=rng_share, mean=random_x_mean, logvar=random_x_logvar)
#L = (logpx + logpz - logqz).sum()
lowerbound = (
(logpx + recg_layer[-1].logpz - recg_layer[-1].logqz).mean()
)
hinge_loss = classifier.hinge_loss(10, y, y_matrix)
cost = D * lowerbound - C * hinge_loss
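    # Clarifying note: the objective trades the variational lower bound
    # (weighted by D) against the Pegasos hinge loss (weighted by C).  The
    # "_max" optimizer variants used below maximize this cost, i.e. they
    # maximize the lower bound while penalizing classification errors.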
px = (logpx.mean())
pz = (recg_layer[-1].logpz.mean())
qz = (- recg_layer[-1].logqz.mean())
super_params=[]
for r in recg_layer[:-1]:
super_params+=r.params
super_params+=classifier.params
params=[]
for g in gene_layer:
params+=g.params
for r in recg_layer:
params+=r.params
params+=classifier.params
grads = [T.grad(cost, param) for param in params]
l_r = theano.shared(np.asarray(learning_rate, dtype=np.float32))
#get_optimizer = optimizer.get_adam_optimizer(learning_rate=learning_rate)
if opt_med=='adam':
get_optimizer = optimizer_separated.get_adam_optimizer_max(learning_rate=l_r, decay1 = 0.1, decay2 = 0.001, weight_decay=weight_decay)
elif opt_med=='mom':
get_optimizer = optimizer_separated.get_momentum_optimizer_max(learning_rate=l_r, weight_decay=weight_decay)
updates = get_optimizer(w=params,g=grads, l=l, d=d)
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=[classifier.errors(y), lowerbound, hinge_loss, cost],
#outputs=layer[-1].errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size],
y_matrix: test_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
valid_model = theano.function(
inputs=[index],
outputs=[classifier.errors(y), lowerbound, hinge_loss, cost],
#outputs=layer[-1].errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size],
y_matrix: valid_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
valid_error = theano.function(
inputs=[index],
outputs=classifier.errors(y),
#outputs=layer[-1].errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size],
#y_matrix: valid_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
'''
Save parameters and activations
'''
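# Per-(parameter, gradient) diagnostics collected below: the parameter's max
# value, the mean squared parameter, the mean squared gradient, and the
# scaled ratio sqrt(mean(p**2) / mean(g**2)) / 1e3, giving a rough
# parameter-to-gradient magnitude check per minibatch.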
pog = []
for (p,g) in zip(params, grads):
pog.append(p.max())
pog.append((p**2).mean())
pog.append((g**2).mean())
pog.append((T.sqrt(pog[-2] / pog[-1]))/ 1e3)
paramovergrad = theano.function(
inputs=[index],
outputs=pog,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size],
y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](dropout_flag)
}
)
parameters = theano.function(
inputs=[],
outputs=params,
)
generation_check = theano.function(
inputs=[index],
outputs=[x, x_mean.flatten(2), x_logvar.flatten(2)],
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
#y: train_set_y[index * batch_size: (index + 1) * batch_size],
#y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
train_activations = theano.function(
inputs=[index],
outputs=T.concatenate(activations, axis=1),
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0),
#y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
valid_activations = theano.function(
inputs=[index],
outputs=T.concatenate(activations, axis=1),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0),
#y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
test_activations = theano.function(
inputs=[index],
outputs=T.concatenate(activations, axis=1),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0),
#y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compiling a Theano function `train_model` that returns the cost and, at
# the same time, updates the model parameters based on the rules
# defined in `updates`
debug_model = theano.function(
inputs=[index],
outputs=[classifier.errors(y), lowerbound, px, pz, qz, hinge_loss, cost],
#updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size],
y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](dropout_flag)
}
)
random_generation = theano.function(
inputs=[random_z],
outputs=[random_x_mean.flatten(2), random_x.flatten(2)],
givens={
#drop: np.cast['int32'](0)
}
)
train_bound_without_dropout = theano.function(
inputs=[index],
outputs=[classifier.errors(y), lowerbound, hinge_loss, cost],
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size],
y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](0)
}
)
train_model = theano.function(
inputs=[index],
outputs=[classifier.errors(y), lowerbound, hinge_loss, cost, px, pz, qz, z],
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size],
y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
drop: np.cast['int32'](dropout_flag),
}
)
##################
# Pretrain MODEL #
##################
if predir is not None:
color.printBlue('... setting parameters')
color.printBlue(predir)
pre_train = np.load(predir+'model.npz')
pre_train = pre_train['model']
for (para, pre) in zip(params, pre_train):
para.set_value(pre)
tmp = [debug_model(i) for i in xrange(n_train_batches)]
tmp = (np.asarray(tmp)).mean(axis=0)
print '------------------', tmp
if super_predir is not None:
color.printBlue('... setting parameters')
color.printBlue(super_predir)
pre_train = np.load(super_predir+'svhn_model-'+str(super_preepoch)+'.npz')
pre_train = pre_train['model']
for (para, pre) in zip(super_params, pre_train):
para.set_value(pre)
this_test_losses = [test_model(i) for i in xrange(n_test_batches)]
this_test_score = np.mean(this_test_losses, axis=0)
#print predir
print 'preepoch', super_preepoch, 'pre_test_score', this_test_score
with open(logdir+'hook.txt', 'a') as f:
print >>f, predir
print >>f, 'preepoch', super_preepoch, 'pre_test_score', this_test_score
###############
# TRAIN MODEL #
###############
print '... training'
validation_frequency = n_train_batches
predy_valid_stats = [1, 1, 0]
start_time = time.clock()
NaN_count = 0
epoch = 0
threshold = 0
generation_frequency = 1
if predir is not None:
threshold = 0
color.printRed('threshold, '+str(threshold) +
' generation_frequency, '+str(generation_frequency)
+' validation_frequency, '+str(validation_frequency))
done_looping = False
n_epochs = 80
decay_epochs = 40
record = 0
'''
print 'test initialization...'
pre_model = parameters()
for i in xrange(len(pre_model)):
pre_model[i] = np.asarray(pre_model[i])
print pre_model[i].shape, np.mean(pre_model[i]), np.var(pre_model[i])
print 'end test...'
'''
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
minibatch_avg_cost = 0
train_error = 0
train_lowerbound = 0
train_hinge_loss = 0
_____z = 0
pxx = 0
pzz = 0
qzz = 0
preW = None
currentW = None
tmp_start1 = time.clock()
if epoch == 30:
validation_frequency = n_train_batches/5
if epoch == 50:
validation_frequency = n_train_batches/10
if epoch == 30 or epoch == 50 or epoch == 70 or epoch == 90:
record = epoch
l_r.set_value(np.cast['float32'](l_r.get_value()/3.0))
print '---------', epoch, l_r.get_value()
with open(logdir+'hook.txt', 'a') as f:
print >>f,'---------', epoch, l_r.get_value()
'''
test_epoch = epoch - decay_epochs
if test_epoch > 0 and test_epoch % 5 == 0:
l_r.set_value(np.cast['float32'](l_r.get_value()/3.0))
print '---------------', l_r.get_value()
with open(logdir+'hook.txt', 'a') as f:
print >>f, '---------------', l_r.get_value()
'''
for minibatch_index in xrange(n_train_batches):
e, l, h, ttt, tpx, tpz, tqz, _z = train_model(minibatch_index)
pxx+=tpx
pzz+=tpz
qzz+=tqz
#_____z += (np.asarray(_z)**2).sum() / (n_hidden[-1] * batch_size)
train_error += e
train_lowerbound += l
train_hinge_loss += h
minibatch_avg_cost += ttt
'''
llll = debug_model(minibatch_index)
with open(logdir+'hook.txt', 'a') as f:
print >>f,'[]', llll
'''
if math.isnan(ttt):
color.printRed('--------'+str(epoch)+'--------'+str(minibatch_index))
exit()
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
'''
if (minibatch_index <11):
preW = currentW
currentW = parameters()
for i in xrange(len(currentW)):
currentW[i] = np.asarray(currentW[i]).astype(np.float32)
if preW is not None:
for (c,p) in zip(currentW, preW):
#print minibatch_index, (c**2).mean(), ((c-p)**2).mean(), np.sqrt((c**2).mean()/((c-p)**2).mean())
with open(logdir+'delta_w.txt', 'a') as f:
print >>f,minibatch_index, (c**2).mean(), ((c-p)**2).mean(), np.sqrt((c**2).mean()/((c-p)**2).mean())
'''
# check valid error only, to speed up
'''
if (iter + 1) % validation_frequency != 0 and (iter + 1) %(validation_frequency/10) == 0:
vt = [valid_error(i) for i in xrange(n_valid_batches)]
vt = np.mean(vt)
print 'quick valid error', vt
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'quick valid error', vt
print 'So far best model', predy_valid_stats
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'So far best model', predy_valid_stats
'''
if (iter + 1) % validation_frequency == 0:
print minibatch_index, 'stochastic training error', train_error/float(minibatch_index), train_lowerbound/float(minibatch_index), train_hinge_loss/float(minibatch_index), minibatch_avg_cost /float(minibatch_index), pxx/float(minibatch_index), pzz/float(minibatch_index), qzz/float(minibatch_index)#, 'z_norm', _____z/float(minibatch_index)
with open(logdir+'hook.txt', 'a') as f:
print >>f, minibatch_index, 'stochastic training error', train_error/float(minibatch_index), train_lowerbound/float(minibatch_index), train_hinge_loss/float(minibatch_index), minibatch_avg_cost /float(minibatch_index), pxx/float(minibatch_index), pzz/float(minibatch_index), qzz/float(minibatch_index)#, 'z_norm', _____z/float(minibatch_index)
valid_stats = [valid_model(i) for i in xrange(n_valid_batches)]
this_valid_stats = np.mean(valid_stats, axis=0)
print epoch, minibatch_index, 'validation stats', this_valid_stats
#print tmp
with open(logdir+'hook.txt', 'a') as f:
print >>f, epoch, minibatch_index, 'validation stats', this_valid_stats
print 'So far best model', predy_valid_stats
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'So far best model', predy_valid_stats
if this_valid_stats[0] < predy_valid_stats[0]:
test_stats = [test_model(i) for i in xrange(n_test_batches)]
this_test_stats = np.mean(test_stats, axis=0)
predy_valid_stats[0] = this_valid_stats[0]
predy_valid_stats[1] = this_test_stats[0]
predy_valid_stats[2] = epoch
record = epoch
print 'Update best model', this_test_stats
with open(logdir+'hook.txt', 'a') as f:
print >>f,'Update best model', this_test_stats
model = parameters()
for i in xrange(len(model)):
model[i] = np.asarray(model[i]).astype(np.float32)
#print model[i].shape, np.mean(model[i]), np.var(model[i])
np.savez(logdir+'best-model', model=model)
genezero = generation_check(0)
with open(logdir+'gene_check.txt', 'a') as f:
print >>f, 'epoch-----------------------', epoch
print >>f, 'x', 'x_mean', 'x_logvar'
'''
for i in xrange(len(genezero)):
genezero[i] = np.asarray(genezero[i])
with open(logdir+'gene_check.txt', 'a') as f:
print >>f, genezero[i].max(), genezero[i].min(), genezero[i].mean()
with open(logdir+'gene_check.txt', 'a') as f:
print >>f, 'norm', np.sqrt(((genezero[0]- genezero[1])**2).sum())
'''
if epoch==1:
xxx = genezero[0]
image = paramgraphics.mat_to_img(xxx.T, dim_input, colorImg=colorImg, scale=True)
image.save(logdir+'data.png', 'PNG')
if epoch%1==0:
tail='-'+str(epoch)+'.png'
xxx_now = genezero[1]
image = paramgraphics.mat_to_img(xxx_now.T, dim_input, colorImg=colorImg, scale=True)
image.save(logdir+'data_re'+tail, 'PNG')
if math.isnan(minibatch_avg_cost):
NaN_count+=1
color.printRed("NaN detected. Reverting to saved best parameters")
print '---------------NaN_count:', NaN_count
with open(logdir+'hook.txt', 'a') as f:
print >>f, '---------------NaN_count:', NaN_count
tmp = [debug_model(i) for i in xrange(n_train_batches)]
tmp = (np.asarray(tmp)).mean(axis=0)
print '------------------NaN check:', tmp
with open(logdir+'hook.txt', 'a') as f:
print >>f, '------------------NaN check:', tmp
model = parameters()
for i in xrange(len(model)):
model[i] = np.asarray(model[i]).astype(np.float32)
print model[i].shape, np.mean(model[i]), np.var(model[i])
print np.max(model[i]), np.min(model[i])
print np.all(np.isfinite(model[i])), np.any(np.isnan(model[i]))
with open(logdir+'hook.txt', 'a') as f:
print >>f, model[i].shape, np.mean(model[i]), np.var(model[i])
print >>f, np.max(model[i]), np.min(model[i])
print >>f, np.all(np.isfinite(model[i])), np.any(np.isnan(model[i]))
best_before = np.load(logdir+'model.npz')
best_before = best_before['model']
for (para, pre) in zip(params, best_before):
para.set_value(pre)
tmp = [debug_model(i) for i in xrange(n_train_batches)]
tmp = (np.asarray(tmp)).mean(axis=0)
print '------------------', tmp
return
if epoch%1==0:
model = parameters()
for i in xrange(len(model)):
model[i] = np.asarray(model[i]).astype(np.float32)
np.savez(logdir+'model-'+str(epoch), model=model)
tmp_start4=time.clock()
if epoch % generation_frequency == 0:
tail='-'+str(epoch)+'.png'
random_z = np.random.standard_normal((n_batch, n_hidden[-1])).astype(np.float32)
_x_mean, _x = random_generation(random_z)
#print _x.shape
#print _x_mean.shape
image = paramgraphics.mat_to_img(_x.T, dim_input, colorImg=colorImg, scale=True)
image.save(logdir+'samples'+tail, 'PNG')
image = paramgraphics.mat_to_img(_x_mean.T, dim_input, colorImg=colorImg, scale=True)
image.save(logdir+'mean_samples'+tail, 'PNG')
#print 'generation_time', time.clock() - tmp_start4
#print 'one epoch time', time.clock() - tmp_start1
end_time = time.clock()
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if NaN_count > 0:
print '---------------NaN_count:', NaN_count
with open(logdir+'hook.txt', 'a') as f:
print >>f, '---------------NaN_count:', NaN_count
if __name__ == '__main__':
predir = None
if os.environ.has_key('predir'):
predir = os.environ['predir']
learning_rate=3e-4
if os.environ.has_key('learning_rate'):
learning_rate = float(os.environ['learning_rate'])
weight_decay=4e-3
if os.environ.has_key('weight_decay'):
weight_decay = float(os.environ['weight_decay'])
dropout_flag = 1
if os.environ.has_key('dropout_flag'):
dropout_flag = int(os.environ['dropout_flag'])
dataset = 'svhngcn_var'
if os.environ.has_key('dataset'):
dataset = os.environ['dataset']
super_predir=None
super_preepoch=None
if len(sys.argv) > 2:
super_predir = sys.argv[1]
super_preepoch = int(sys.argv[2])
cmmva_6layer_svhn(predir=predir, dropout_flag=dropout_flag,
weight_decay=weight_decay, learning_rate=learning_rate,
dataset=dataset,super_predir=super_predir,super_preepoch=super_preepoch)
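# Example invocation (a sketch; paths, the epoch number, and the script
# filename are hypothetical, while the environment variables and positional
# arguments mirror the parsing above):
#   predir=./pretrain/ learning_rate=3e-4 weight_decay=4e-3 dropout_flag=1 \
#   dataset=svhngcn_var python cmmva_6layer_svhn.py ./super_pretrain/ 40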
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuelclient.objects import environment as environment_obj
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
def get_endpoint_ip(ep_name, yaml_data):
endpoint = yaml_data['network_scheme']['endpoints'].get(ep_name)
if not endpoint:
return None
net_data = endpoint["IP"][0]
if net_data:
return net_data.split('/')[0]
def get_glance_password(yaml_data):
return yaml_data['glance']['user_password']
def parse_swift_out(output, field):
for line in output.splitlines()[1:-1]:
parts = line.split(': ')
if parts[0].strip() == field:
return parts[1]
raise Exception(
"Field {0} not found in output:\n{1}".format(field, output))
def get_swift_objects(node, tenant, user, password, token, container):
cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
" --os-password {2} --os-auth-token {3} list {4}".format(tenant,
user,
password,
token,
container)
objects_list = ssh.call_output(["sh", "-c", cmd], node=node)
return objects_list.split('\n')[:-1]
def get_object_property(node, tenant, user, password, token, container,
object_id, prop):
cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
" --os-password {2} --os-auth-token {3} stat {4} {5}"\
.format(tenant,
user,
password,
token,
container,
object_id)
object_data = ssh.call_output(["sh", "-c", cmd], node=node)
return parse_swift_out(object_data, prop)
def get_auth_token(node, tenant, user, password):
cmd = ". /root/openrc; keystone --os-tenant-name {0}"\
" --os-username {1} --os-password {2} token-get".format(tenant,
user,
password)
token_info = ssh.call_output(["sh", "-c", cmd], node=node)
return env_util.parse_tenant_get(token_info, 'id')
def download_image(node, tenant, user, password, token, container, object_id):
cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
" --os-password {2} --os-auth-token {3} download {4} {5}"\
.format(tenant,
user,
password,
token,
container,
object_id)
ssh.call(["sh", "-c", cmd], node=node)
LOG.info("Swift %s image has been downloaded" % object_id)
def delete_image(node, tenant, user, password, token, container, object_id):
cmd = ". /root/openrc; swift --os-project-name {0}"\
" --os-username {1} --os-password {2} --os-auth-token {3}"\
" delete {4} {5}".format(tenant, user, password, token,
container, object_id)
ssh.call(["sh", "-c", cmd], node=node)
LOG.info("Swift %s image has been deleted" % object_id)
def transfer_image(node, tenant, user, password, token, container, object_id,
storage_ip, tenant_id):
storage_url = "http://{0}:8080/v1/AUTH_{1}".format(storage_ip, tenant_id)
cmd = ['swift', '--os-project-name', tenant, '--os-username', user,
'--os-password', password, '--os-auth-token', token,
'--os-storage-url', storage_url, 'upload', container,
object_id]
ssh.call(cmd, node=node)
LOG.info("Swift %s image has been transferred" % object_id)
def sync_glance_images(source_env_id, seed_env_id, seed_swift_ep):
"""Sync glance images from original ENV to seed ENV
Args:
source_env_id (int): ID of original ENV.
seed_env_id (int): ID of seed ENV.
seed_swift_ep (str): name of the endpoint that the swift-proxy service
is listening on.
Examples:
sync_glance_images(2, 3, 'br-mgmt')
"""
# set glance username
glance_user = "glance"
# set swift container value
container = "glance"
# choose tenant
tenant = "services"
# get clusters by id
source_env = environment_obj.Environment(source_env_id)
seed_env = environment_obj.Environment(seed_env_id)
# gather cics admin IPs
source_node = next(env_util.get_controllers(source_env))
seed_node = next(env_util.get_controllers(seed_env))
# get cics yaml files
source_yaml = env_util.get_astute_yaml(source_env, source_node)
seed_yaml = env_util.get_astute_yaml(seed_env, seed_node)
# get glance passwords
source_glance_pass = get_glance_password(source_yaml)
seed_glance_pass = get_glance_password(seed_yaml)
# get seed node swift ip
seed_swift_ip = get_endpoint_ip(seed_swift_ep, seed_yaml)
# get service tenant id & lists of objects for source env
source_token = get_auth_token(source_node, tenant, glance_user,
source_glance_pass)
source_swift_list = set(get_swift_objects(source_node,
tenant,
glance_user,
source_glance_pass,
source_token,
container))
# get service tenant id & lists of objects for seed env
seed_token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
seed_swift_list = set(get_swift_objects(seed_node,
tenant,
glance_user,
seed_glance_pass,
seed_token,
container))
# get service tenant for seed env
seed_tenant = env_util.get_service_tenant_id(seed_env)
# check consistency of matched images
source_token = get_auth_token(source_node, tenant, glance_user,
source_glance_pass)
seed_token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
for image in source_swift_list & seed_swift_list:
source_obj_etag = get_object_property(source_node,
tenant,
glance_user,
source_glance_pass,
source_token,
container,
image,
'ETag')
seed_obj_etag = get_object_property(seed_node, tenant,
glance_user, seed_glance_pass,
seed_token, container, image,
'ETag')
if source_obj_etag != seed_obj_etag:
# image should be resynced
delete_image(seed_node, tenant, glance_user, seed_glance_pass,
seed_token, container, image)
LOG.info("Swift %s image should be resynced" % image)
seed_swift_list.remove(image)
# migrate new images
for image in source_swift_list - seed_swift_list:
# download the image to the source node's local drive
source_token = get_auth_token(source_node, tenant, glance_user,
source_glance_pass)
download_image(source_node, tenant, glance_user, source_glance_pass,
source_token, container, image)
# transfer image
source_token = get_auth_token(source_node, tenant,
glance_user, source_glance_pass)
seed_token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
transfer_image(source_node, tenant, glance_user, seed_glance_pass,
seed_token, container, image, seed_swift_ip,
seed_tenant)
# remove transferred image
ssh.sftp(source_node).remove(image)
# delete outdated images
for image in seed_swift_list - source_swift_list:
token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
delete_image(seed_node, tenant, glance_user, seed_glance_pass,
token, container, image)
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
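# Example (a sketch; the script filename is hypothetical):
#   python hfrcoin-rpc.py getinfo
# The first command-line argument selects which RPC command is handled below;
# most commands then prompt interactively for their parameters.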
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Hfrcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Hfrcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
from filebeat import BaseTest
import os
import time
import unittest
"""
Tests for the prospector functionality.
"""
class Test(BaseTest):
def test_ignore_older_files(self):
"""
Should ignore files that were not modified for longer than
the `ignore_older` setting.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignore_older="1s"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 5
for n in range(0, iterations):
file.write("hello world") # 11 chars
file.write("\n") # 1 char
file.close()
# sleep for more than ignore older
time.sleep(2)
proc = self.start_beat()
# wait for the "Skipping file" log message
self.wait_until(
lambda: self.log_contains(
"Ignore file because ignore_older reached"),
max_timeout=10)
proc.check_kill_and_wait()
def test_not_ignore_old_files(self):
"""
Should not ignore files that were modified more recently than
the `ignore_older` setting.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignore_older="15s"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 5
for n in range(0, iterations):
file.write("hello world") # 11 chars
file.write("\n") # 1 char
file.close()
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=iterations), max_timeout=10)
proc.check_kill_and_wait()
objs = self.read_output()
assert len(objs) == 5
def test_stdin(self):
"""
Test stdin input. Checks if reading is continued after the first read.
"""
self.render_config_template(
input_type="stdin"
)
proc = self.start_beat()
self.wait_until(
lambda: self.log_contains(
"Harvester started for file: -"),
max_timeout=10)
iterations1 = 5
for n in range(0, iterations1):
os.write(proc.stdin_write, "Hello World\n")
self.wait_until(
lambda: self.output_has(lines=iterations1),
max_timeout=15)
iterations2 = 10
for n in range(0, iterations2):
os.write(proc.stdin_write, "Hello World\n")
self.wait_until(
lambda: self.output_has(lines=iterations1 + iterations2),
max_timeout=15)
proc.check_kill_and_wait()
objs = self.read_output()
assert len(objs) == iterations1 + iterations2
def test_rotating_close_inactive_larger_write_rate(self):
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignore_older="10s",
close_inactive="1s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
proc = self.start_beat()
time.sleep(1)
rotations = 2
iterations = 3
for r in range(rotations):
with open(testfile, 'w', 0) as file:
for n in range(iterations):
file.write("hello world {}\n".format(r * iterations + n))
time.sleep(0.1)
os.rename(testfile, testfile + str(time.time()))
lines = rotations * iterations
self.wait_until(
# allow for events to be sent multiple times due to log rotation
lambda: self.output_count(lambda x: x >= lines),
max_timeout=15)
proc.check_kill_and_wait()
def test_exclude_files(self):
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
exclude_files=[".gz$"]
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.gz"
file = open(testfile, 'w')
file.write("line in gz file\n")
file.close()
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
file.write("line in log file\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 1 == len(output)
assert output[0]["message"] == "line in log file"
def test_rotating_close_inactive_low_write_rate(self):
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignore_older="10s",
close_inactive="1s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
# log rotate
os.rename(testfile, testfile + ".1")
open(testfile, 'w').close()
# wait for file to be closed due to close_inactive
self.wait_until(
lambda: self.log_contains(
"Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
# wait a bit longer (on 1.0.1 this would cause the harvester
# to get in a state that resulted in it watching the wrong
# inode for changes)
time.sleep(2)
# write second line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
self.wait_until(
# allow for events to be sent multiple times due to log rotation
lambda: self.output_count(lambda x: x >= lines),
max_timeout=5)
filebeat.check_kill_and_wait()
def test_shutdown_no_prospectors(self):
"""
In case no prospectors are defined, filebeat must shut down and report an error
"""
self.render_config_template(
prospectors=False,
)
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"No prospectors defined"),
max_timeout=10)
self.wait_until(
lambda: self.log_contains("No prospectors defined"),
max_timeout=10)
filebeat.check_wait(exit_code=1)
def test_no_paths_defined(self):
"""
In case a prospector is defined but doesn't contain any paths, the prospector must return an error,
which leads to a shutdown of filebeat because of the configuration error.
"""
self.render_config_template(
)
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"No paths were defined for prospector"),
max_timeout=10)
self.wait_until(
lambda: self.log_contains(
"Exiting"),
max_timeout=10)
filebeat.check_wait(exit_code=1)
def test_files_added_late(self):
"""
Tests that prospectors stay running even though no harvesters are started yet
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
)
os.mkdir(self.working_dir + "/log/")
filebeat = self.start_beat()
# wait until events are sent for the first time
self.wait_until(
lambda: self.log_contains(
"Events flushed"),
max_timeout=10)
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'a') as file:
file.write("Hello World1\n")
file.write("Hello World2\n")
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=15)
filebeat.check_kill_and_wait()
def test_close_inactive(self):
"""
Test that close_inactive closes the file but reading
is picked up again after scan_frequency
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignore_older="1h",
close_inactive="1s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
# wait for file to be closed due to close_inactive
self.wait_until(
lambda: self.log_contains(
"Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
# write second line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
self.wait_until(
# allow for events to be sent multiple times due to log rotation
lambda: self.output_count(lambda x: x >= lines),
max_timeout=5)
filebeat.check_kill_and_wait()
def test_close_inactive_file_removal(self):
"""
Test that close_inactive still applies if the file to close was removed
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignore_older="1h",
close_inactive="3s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.remove(testfile)
# wait for file to be closed due to close_inactive
self.wait_until(
lambda: self.log_contains(
"Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
filebeat.check_kill_and_wait()
def test_close_inactive_file_rotation_and_removal(self):
"""
Test that close_inactive still applies if the file to close was rotated and then removed
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
ignore_older="1h",
close_inactive="3s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
renamed_file = self.working_dir + "/log/test_renamed.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.rename(testfile, renamed_file)
os.remove(renamed_file)
# wait for file to be closed due to close_inactive
self.wait_until(
lambda: self.log_contains(
# Still checking for old file name as filename does not change in harvester
"Closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
filebeat.check_kill_and_wait()
def test_close_inactive_file_rotation_and_removal2(self):
"""
Test that close_inactive still applies if the file was rotated,
a new file was created, and the rotated file was removed.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
ignore_older="1h",
close_inactive="3s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
renamed_file = self.working_dir + "/log/test_renamed.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.rename(testfile, renamed_file)
# write second line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.remove(renamed_file)
# Wait until both files are closed
self.wait_until(
lambda: self.log_contains_count(
# Checking if two files were closed
"Stopping harvester, closing file: ") == 2,
max_timeout=10)
filebeat.check_kill_and_wait()
def test_skip_symlinks(self):
"""
Test that symlinks are skipped
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test-2016.log"
symlink_file = self.working_dir + "/log/test.log"
# write first line
with open(testfile, 'a') as file:
file.write("Hello world\n")
if os.name == "nt":
import win32file
win32file.CreateSymbolicLink(symlink_file, testfile, 0)
else:
os.symlink(testfile, symlink_file)
filebeat = self.start_beat()
# wait for file to be skipped
self.wait_until(
lambda: self.log_contains("skipped as it is a symlink"),
max_timeout=10)
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
time.sleep(5)
filebeat.check_kill_and_wait()
data = self.read_output()
# Make sure there is only one entry, which means it didn't follow the symlink
assert len(data) == 1
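# These prospector tests are normally driven by the filebeat system-test
# harness; a typical local run is something like the following (a sketch,
# the exact path and runner are assumptions):
#   nosetests tests/system/test_prospector.py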
|
|
import json
import datetime
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required
from pycon.tutorials.models import PyConTutorialProposal
from pycon.tutorials.utils import process_tutorial_request
from symposion.schedule.forms import SlotEditForm
from symposion.schedule.models import Schedule, Day, Slot, Presentation
from symposion.schedule.timetable import TimeTable
def fetch_schedule(slug):
qs = Schedule.objects.all()
if slug is None:
if qs.count() > 1:
raise Http404()
schedule = next(iter(qs), None)
if schedule is None:
raise Http404()
else:
schedule = get_object_or_404(qs, section__slug=slug)
return schedule
def schedule_conference(request):
days = Day.objects.filter(schedule__published=True)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_conference.html", {
"timetables": timetables,
})
def schedule_detail(request, slug=None):
schedule = fetch_schedule(slug)
if not schedule.published and not request.user.is_staff:
raise Http404()
days = Day.objects.filter(schedule=schedule)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_detail.html", {
"schedule": schedule,
"timetables": timetables,
})
def schedule_list(request, slug=None):
schedule = fetch_schedule(slug)
presentations = Presentation.objects.filter(section=schedule.section)
presentations = presentations.exclude(cancelled=True)
ctx = {
"schedule": schedule,
"presentations": presentations,
}
return render(request, "schedule/schedule_list.html", ctx)
def schedule_list_csv(request, slug=None):
schedule = fetch_schedule(slug)
presentations = Presentation.objects.filter(section=schedule.section)
presentations = presentations.exclude(cancelled=True).order_by("id")
response = HttpResponse(mimetype="text/csv")
if slug:
file_slug = slug
else:
file_slug = "presentations"
response["Content-Disposition"] = 'attachment; filename="%s.csv"' % file_slug
response.write(loader.get_template("schedule/schedule_list.csv").render(Context({
"presentations": presentations,
})))
return response
@login_required
def schedule_edit(request, slug=None):
if not request.user.is_staff:
raise Http404()
schedule = fetch_schedule(slug)
days = Day.objects.filter(schedule=schedule)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_edit.html", {
"schedule": schedule,
"timetables": timetables,
})
@login_required
def schedule_slot_edit(request, slug, slot_pk):
if not request.user.is_staff:
raise Http404()
slot = get_object_or_404(Slot, day__schedule__section__slug=slug, pk=slot_pk)
if request.method == "POST":
form = SlotEditForm(request.POST, slot=slot)
if form.is_valid():
save = False
if "content_override" in form.cleaned_data:
slot.content_override = form.cleaned_data["content_override"]
save = True
if "presentation" in form.cleaned_data:
presentation = form.cleaned_data["presentation"]
if presentation is None:
slot.unassign()
else:
slot.assign(presentation)
if save:
slot.save()
return redirect("schedule_edit", slug)
else:
form = SlotEditForm(slot=slot)
ctx = {
"slug": slug,
"form": form,
"slot": slot,
}
return render(request, "schedule/_slot_edit.html", ctx)
def schedule_presentation_detail(request, pk):
presentation = get_object_or_404(Presentation, pk=pk)
# Tutorials allow for communication between instructor/attendee(s).
# Offload the logic to its utility
if isinstance(presentation.proposal, PyConTutorialProposal) and \
request.method == 'POST':
return process_tutorial_request(request, presentation)
if presentation.slot:
schedule = presentation.slot.day.schedule
else:
schedule = None
ctx = {
"presentation": presentation,
"proposal": presentation.proposal,
"speakers": presentation.speakers,
"schedule": schedule,
}
return render(request, "schedule/presentation_detail.html", ctx)
def json_serializer(obj):
if isinstance(obj, datetime.time):
return obj.strftime("%H:%M")
raise TypeError
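# json_serializer is intended as the `default` hook for json.dumps below, e.g.
#   json.dumps({"start": datetime.time(10, 30)}, default=json_serializer)
# yields '{"start": "10:30"}'; any other unserializable type still raises
# TypeError.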
def schedule_json(request):
"""
Returns information about the schedule.
*No authentication required.*
URL: /<YEAR>/schedule/conference.json
The data returned is in JSON format, and looks like::
[ <slot>, <slot>, ..., <poster>, <poster> ...]
where a slot represents a talk, tutorial, or plenary and looks like::
{
"kind": "talk"|"tutorial"|"plenary",
"name": "Title of talk",
"room": "roomname1, roomname2, ..., roomnameN",
"start": "HH:MM:SS", # ISO format
"end": "HH:MM:SS", # ISO format
"duration": 30, # minutes
"authors" ["author name 1", "author name 2", ..., "author name N"],
"released": true | false, # recording release agreed to
"license": "xx",
"contact": ["email1", "email2", .., "emailN"], # emails of authors
"abstract": "Lorem ipsum and so forth and so on",
"description: "Lorem ipsum and so forth and so on",
"conf_key": 27,
"conf_url": "https://conference_domain/path/to/talk",
"video_url": "https://somehost/path/to/video_of_talk",
"slides_url": "https://somehost/path/to/slides_of_talk",
"assets_url": "https://somehost/path/to/assets_for_talk",
"tags": "tag1, tag2, ..., tagN"
}
and a poster looks like::
{
"kind": "poster",
"name": "Title of poster",
"authors" ["author name 1", "author name 2", ..., "author name N"],
"abstract": "Lorem ipsum and so forth and so on",
"description: "Lorem ipsum and so forth and so on",
"license": "xx",
"room": "roomname1, roomname2, ..., roomnameN",
"start": "HH:MM:SS", # Provided but meaningless, ignore...
"end": "HH:MM:SS", # Provided but meaningless, ignore...
"contact": ["email1", "email2", .., "emailN"], # emails of authors
"conf_key": 1227,
"conf_url": "https://conference_domain/path/to/page/about/talk",
"released": true | false, # recording release agreed to
}
"""
slots = Slot.objects.all().order_by("start")
data = []
for slot in slots:
if slot.kind.label in ["talk", "tutorial", "plenary"] and slot.content:
slot_data = {
"name": slot.content.title,
"room": ", ".join(room["name"] for room in slot.rooms.values()),
"start": slot.start_date.isoformat(),
"end": slot.end_date.isoformat(),
"duration": slot.duration,
"authors": [s.name for s in slot.content.speakers()],
"released": slot.content.proposal.recording_release,
"license": "CC",
"contact": [s.email for s in slot.content.speakers()],
"abstract": getattr(slot.content.abstract, 'raw', slot.content.abstract),
"description": getattr(slot.content.description, 'raw', slot.content.description),
"conf_key": slot.pk,
"conf_url": "https://%s%s" % (
Site.objects.get_current().domain,
reverse("schedule_presentation_detail", args=[slot.content.pk])
),
"kind": slot.kind.label,
"video_url": slot.content.video_url,
"slides_url": slot.content.slides_url,
"assets_url": slot.content.assets_url,
"tags": "",
}
else:
continue
data.append(slot_data)
for poster in Presentation.objects.filter(section__slug="posters", cancelled=False):
poster_data = {
"name": poster.title,
"authors": [s.name for s in poster.speakers()],
"description": getattr(poster.description, 'raw', poster.description),
"abstract": getattr(poster.abstract, 'raw', poster.abstract),
"license": "CC",
"room": "Poster Room",
"start": datetime.datetime(2014, 03, 17, 10).isoformat(),
"end": datetime.datetime(2014, 03, 17, 13, 10).isoformat(),
"contact": [s.email for s in poster.speakers()],
"conf_key": 1000 + poster.pk,
"conf_url": "https://%s%s" % (
Site.objects.get_current().domain,
reverse("schedule_presentation_detail", args=[poster.pk])
),
"kind": "poster",
"released": poster.proposal.recording_release,
}
data.append(poster_data)
return HttpResponse(
json.dumps(data, default=json_serializer),
content_type="application/json"
)
|
|
# -*- test-case-name: examplegame.test.test_mice,examplegame.test.test_japanese -*-
import random
from zope.interface import implements
from axiom import item, attributes
from imaginary import iimaginary, events, objects, action, language
from examplegame import japanese
class Mouse(item.Item):
"""
A silly mouse which squeaks when actors enter the room it is in.
@ivar _callLater: The scheduling function to use. Override in unit
tests only.
"""
implements(iimaginary.IEventObserver)
squeakiness = attributes.integer(doc="""
How likely the mouse is to squeak when intruded upon (0 - 100).
This mouse is so angry that he will pretty much always squeak.
""", default=100)
_callLater = attributes.inmemory()
def activate(self):
from twisted.internet import reactor
self._callLater = reactor.callLater
def prepare(self, concept):
"""
An event was received. Squeak if it represents the arrival of a dude.
"""
if isinstance(concept, events.ArrivalEvent):
return lambda: self._callLater(0, self.squeak)
return lambda: None
def squeak(self):
actor = self.store.findUnique(
objects.Actor,
objects.Actor._enduringIntelligence == self)
evt = events.Success(
actor=actor.thing,
otherMessage=u"SQUEAK!")
evt.broadcast()
class ChallengeCollision(Exception):
"""
Raised when a L{HiraganaMouse} is asked to start issuing challenges when it
is already issuing challenges.
"""
class ChallengeVacuum(Exception):
"""
Raised when a L{HiraganaMouse} is asked to stop issuing challenges when it
is already not issuing challenges.
"""
class HiraganaMouse(item.Item):
"""
A mouse which occasionally challenges those in its location to
transliterate Hiragana.
@ivar _callLater: The scheduling function to use. Defaults to the
reactor's callLater method. This is parameterized for the sake of
unit tests.
"""
implements(iimaginary.IEventObserver)
challenging = attributes.boolean(doc="""
Whether or not this mouse is currently creating random challenges.
""", default=False)
challengeInterval = attributes.integer(doc="""
Number of seconds between challenges.
""", default=15, allowNone=False)
_currentChallenge = attributes.text(doc="""
The Hiragana character which the mouse has most recently issued as a
challenge.
""", default=None)
_callLater = attributes.inmemory()
_currentChallengeCall = attributes.inmemory()
def activate(self):
from twisted.internet import reactor
self._callLater = reactor.callLater
def _actor(self):
"""
Get the h-mouse's associated actor. PRIVATE. WHY DID I DOCUMENT THIS.
"""
return self.store.findUnique(
objects.Actor,
objects.Actor._enduringIntelligence == self)
def _numDudes(self):
"""
Get the number of actors (other than the h-mouse) in the
h-mouse's location. PRIVATE.
"""
actor = self._actor()
numDudes = len([dude
for dude
in actor.thing.findProviders(iimaginary.IActor, 1)
if dude is not actor])
return numDudes
def maybeChallenge(self):
"""
Start challenging if there is anyone around to challenge (and
this h-mouse isn't already challenging).
"""
if not self.challenging and self._numDudes() >= 1:
self.startChallenging()
def prepare(self, concept):
"""
An event was received. Start or stop challenging as
appropriate, based on whether there is anyone to challenge.
"""
if isinstance(concept, events.ArrivalEvent):
self.maybeChallenge()
elif isinstance(concept, events.DepartureEvent) and self._numDudes() == 0:
self.stopChallenging()
elif isinstance(concept, events.SpeechEvent) and concept.speaker is not self._actor().thing:
self.responseReceived(concept.speaker, concept.text)
return lambda: None
def startChallenging(self):
"""
Start shouting hiragana in the hope that someone knows what it means.
@raises ChallengeCollision: If this h-mouse is already challenging.
"""
if self.challenging:
raise ChallengeCollision()
self.challenging = True
self._scheduleChallenge()
def _scheduleChallenge(self):
"""
Schedule a challenge to happen in the number of seconds set in
the instance attribute 'challengeInterval'.
"""
self._currentChallengeCall = self._callLater(self.challengeInterval,
self._challengeAndRepeat)
def stopChallenging(self):
"""
Stop shouting hiragana.
@raises ChallengeVacuum: If this h-mouse is not currently challenging.
"""
if not self.challenging:
raise ChallengeVacuum()
self.challenging = False
self._currentChallenge = None
self._currentChallengeCall.cancel()
self._currentChallengeCall = None
def _challengeAndRepeat(self):
"""
Shout a challenge and then schedule another one.
"""
self.challenge()
self._scheduleChallenge()
def getCurrentChallenge(self):
"""
Return the Hiragana character which is this mouse's current challenge,
if it has one.
@rtype: C{unicode} or C{None}
"""
return self._currentChallenge
def vetteChallengeResponse(self, romajiResponse):
"""
Return True if the given response matches the current challenge, False
otherwise.
"""
hiragana = japanese.romajiToHiragana.get(romajiResponse.upper(), None)
return hiragana is not None and self.getCurrentChallenge() in hiragana
def responseReceived(self, responder, romajiResponse):
"""
Called when some speech is observed.
"""
me = self._actor().thing
if self.vetteChallengeResponse(romajiResponse):
self._currentChallenge = None
verb = u"salute"
else:
verb = u"bite"
evt = events.Success(
actor=me,
target=responder,
actorMessage=language.Sentence(["You ", verb, " ", responder, "."]),
targetMessage=language.Sentence([language.Noun(me).shortName(), " ", verb, "s you!"]),
otherMessage=language.Sentence([me, " ", verb, "s ", responder, "."]))
# Fuck the reactor, Fuck scheduling, why does responseReceived
# need to be concerned with these stupid scheduling details
# when all it wants to do is respond basically-immediately.
self._callLater(0, evt.broadcast)
def challenge(self, character=None):
"""
Say only a single random hiragana character.
"""
if character is None:
character = random.choice(japanese.hiragana.keys())
self._currentChallenge = character
actor = self._actor()
action.Say().do(actor, None, character)
def createMouseCreator(mouseIntelligenceFactory):
"""
Create a createMouse function, which can be called to create a
mouse object. Used for the 'Create' command plugin system.
"""
def createMouse(**kw):
store = kw['store']
mouse = objects.Thing(**kw)
mouseActor = objects.Actor.createFor(mouse)
mousehood = mouseIntelligenceFactory(store=store)
mouseActor.setEnduringIntelligence(mousehood)
return mouse
return createMouse
createMouse = createMouseCreator(Mouse)
createHiraganaMouse = createMouseCreator(HiraganaMouse)
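# Usage sketch: keyword arguments are passed straight through to
# objects.Thing, so anything besides 'store' below is an assumption about
# Thing's constructor.
#   mouse = createHiraganaMouse(store=someStore, name=u"hiragana mouse")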
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen
from tornado.log import app_log
from tornado.stack_context import (StackContext, wrap, NullContext, StackContextInconsistentError,
ExceptionStackContext, run_with_stack_context, _state)
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest
from tornado.web import asynchronous, Application, RequestHandler
import contextlib
import functools
import logging
class TestRequestHandler(RequestHandler):
def __init__(self, app, request, io_loop):
super(TestRequestHandler, self).__init__(app, request)
self.io_loop = io_loop
@asynchronous
def get(self):
logging.debug('in get()')
# call self.part2 without a self.async_callback wrapper. Its
# exception should still get thrown
self.io_loop.add_callback(self.part2)
def part2(self):
logging.debug('in part2()')
# Go through a third layer to make sure that contexts once restored
# are again passed on to future callbacks
self.io_loop.add_callback(self.part3)
def part3(self):
logging.debug('in part3()')
raise Exception('test exception')
def get_error_html(self, status_code, **kwargs):
if 'exception' in kwargs and str(kwargs['exception']) == 'test exception':
return 'got expected exception'
else:
return 'unexpected failure'
class HTTPStackContextTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', TestRequestHandler,
dict(io_loop=self.io_loop))])
def test_stack_context(self):
with ExpectLog(app_log, "Uncaught exception GET /"):
self.http_client.fetch(self.get_url('/'), self.handle_response)
self.wait()
self.assertEqual(self.response.code, 500)
self.assertTrue(b'got expected exception' in self.response.body)
def handle_response(self, response):
self.response = response
self.stop()
class StackContextTest(AsyncTestCase):
def setUp(self):
super(StackContextTest, self).setUp()
self.active_contexts = []
@contextlib.contextmanager
def context(self, name):
self.active_contexts.append(name)
yield
self.assertEqual(self.active_contexts.pop(), name)
# Simulates the effect of an asynchronous library that uses its own
# StackContext internally and then returns control to the application.
def test_exit_library_context(self):
def library_function(callback):
# capture the caller's context before introducing our own
callback = wrap(callback)
with StackContext(functools.partial(self.context, 'library')):
self.io_loop.add_callback(
functools.partial(library_inner_callback, callback))
def library_inner_callback(callback):
self.assertEqual(self.active_contexts[-2:],
['application', 'library'])
callback()
def final_callback():
# implementation detail: the full context stack at this point
# is ['application', 'library', 'application']. The 'library'
# context was not removed, but is no longer innermost so
# the application context takes precedence.
self.assertEqual(self.active_contexts[-1], 'application')
self.stop()
with StackContext(functools.partial(self.context, 'application')):
library_function(final_callback)
self.wait()
def test_deactivate(self):
deactivate_callbacks = []
def f1():
with StackContext(functools.partial(self.context, 'c1')) as c1:
deactivate_callbacks.append(c1)
self.io_loop.add_callback(f2)
def f2():
with StackContext(functools.partial(self.context, 'c2')) as c2:
deactivate_callbacks.append(c2)
self.io_loop.add_callback(f3)
def f3():
with StackContext(functools.partial(self.context, 'c3')) as c3:
deactivate_callbacks.append(c3)
self.io_loop.add_callback(f4)
def f4():
self.assertEqual(self.active_contexts, ['c1', 'c2', 'c3'])
deactivate_callbacks[1]()
# deactivating a context doesn't remove it immediately,
# but it will be missing from the next iteration
self.assertEqual(self.active_contexts, ['c1', 'c2', 'c3'])
self.io_loop.add_callback(f5)
def f5():
self.assertEqual(self.active_contexts, ['c1', 'c3'])
self.stop()
self.io_loop.add_callback(f1)
self.wait()
def test_deactivate_order(self):
# Stack context deactivation has separate logic for deactivation at
# the head and tail of the stack, so make sure it works in any order.
def check_contexts():
# Make sure that the full-context array and the exception-context
# linked lists are consistent with each other.
full_contexts, chain = _state.contexts
exception_contexts = []
while chain is not None:
exception_contexts.append(chain)
chain = chain.old_contexts[1]
self.assertEqual(list(reversed(full_contexts)), exception_contexts)
return list(self.active_contexts)
def make_wrapped_function():
"""Wraps a function in three stack contexts, and returns
the function along with the deactivation functions.
"""
# Remove the test's stack context to make sure we can cover
# the case where the last context is deactivated.
with NullContext():
partial = functools.partial
with StackContext(partial(self.context, 'c0')) as c0:
with StackContext(partial(self.context, 'c1')) as c1:
with StackContext(partial(self.context, 'c2')) as c2:
return (wrap(check_contexts), [c0, c1, c2])
# First make sure the test mechanism works without any deactivations
func, deactivate_callbacks = make_wrapped_function()
self.assertEqual(func(), ['c0', 'c1', 'c2'])
# Deactivate the tail
func, deactivate_callbacks = make_wrapped_function()
deactivate_callbacks[0]()
self.assertEqual(func(), ['c1', 'c2'])
# Deactivate the middle
func, deactivate_callbacks = make_wrapped_function()
deactivate_callbacks[1]()
self.assertEqual(func(), ['c0', 'c2'])
# Deactivate the head
func, deactivate_callbacks = make_wrapped_function()
deactivate_callbacks[2]()
self.assertEqual(func(), ['c0', 'c1'])
def test_isolation_nonempty(self):
# f2 and f3 are a chain of operations started in context c1.
# f2 is incidentally run under context c2, but that context should
# not be passed along to f3.
def f1():
with StackContext(functools.partial(self.context, 'c1')):
wrapped = wrap(f2)
with StackContext(functools.partial(self.context, 'c2')):
wrapped()
def f2():
self.assertIn('c1', self.active_contexts)
self.io_loop.add_callback(f3)
def f3():
self.assertIn('c1', self.active_contexts)
self.assertNotIn('c2', self.active_contexts)
self.stop()
self.io_loop.add_callback(f1)
self.wait()
def test_isolation_empty(self):
# Similar to test_isolation_nonempty, but here the f2/f3 chain
# is started without any context. Behavior should be equivalent
# to the nonempty case (although historically it was not)
def f1():
with NullContext():
wrapped = wrap(f2)
with StackContext(functools.partial(self.context, 'c2')):
wrapped()
def f2():
self.io_loop.add_callback(f3)
def f3():
self.assertNotIn('c2', self.active_contexts)
self.stop()
self.io_loop.add_callback(f1)
self.wait()
def test_yield_in_with(self):
@gen.engine
def f():
with StackContext(functools.partial(self.context, 'c1')):
# This yield is a problem: the generator will be suspended
# and the StackContext's __exit__ is not called yet, so
# the context will be left on _state.contexts for anything
# that runs before the yield resolves.
yield gen.Task(self.io_loop.add_callback)
with self.assertRaises(StackContextInconsistentError):
f()
self.wait()
@gen_test
def test_yield_outside_with(self):
# This pattern avoids the problem in the previous test.
cb = yield gen.Callback('k1')
with StackContext(functools.partial(self.context, 'c1')):
self.io_loop.add_callback(cb)
yield gen.Wait('k1')
def test_yield_in_with_exception_stack_context(self):
# As above, but with ExceptionStackContext instead of StackContext.
@gen.engine
def f():
with ExceptionStackContext(lambda t, v, tb: False):
yield gen.Task(self.io_loop.add_callback)
with self.assertRaises(StackContextInconsistentError):
f()
self.wait()
@gen_test
def test_yield_outside_with_exception_stack_context(self):
cb = yield gen.Callback('k1')
with ExceptionStackContext(lambda t, v, tb: False):
self.io_loop.add_callback(cb)
yield gen.Wait('k1')
def test_run_with_stack_context(self):
@gen.coroutine
def f1():
self.assertEqual(self.active_contexts, ['c1'])
yield run_with_stack_context(
StackContext(functools.partial(self.context, 'c1')),
f2)
self.assertEqual(self.active_contexts, ['c1'])
@gen.coroutine
def f2():
self.assertEqual(self.active_contexts, ['c1', 'c2'])
yield gen.Task(self.io_loop.add_callback)
self.assertEqual(self.active_contexts, ['c1', 'c2'])
self.assertEqual(self.active_contexts, [])
run_with_stack_context(
StackContext(functools.partial(self.context, 'c1')),
f1)
self.assertEqual(self.active_contexts, [])
if __name__ == '__main__':
unittest.main()
|
|
import os
import pytest
import responses
import shutil
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from mock import patch
from unicoremc.models import Project, publish_to_websocket
from unicoremc.tests.base import UnicoremcTestCase
@pytest.mark.django_db
class StatesTestCase(UnicoremcTestCase):
fixtures = ['test_users.json', 'test_social_auth.json']
def setUp(self):
self.mk_test_repos()
self.user = User.objects.get(username='testuser')
post_save.disconnect(publish_to_websocket, sender=Project)
def test_initial_state(self):
p = self.mk_project(repo={'base_url': self.base_repo_sm.repo.git_dir})
self.assertEquals(p.state, 'initial')
@responses.activate
def test_finish_state(self):
def create_db_call_mock(*call_args, **call_kwargs):
cwd = call_kwargs.get('cwd')
env = call_kwargs.get('env')
[args] = call_args
self.assertEqual(cwd, settings.UNICORE_CMS_INSTALL_DIR)
self.assertEqual(
env, {'DJANGO_SETTINGS_MODULE': 'project.ffl_za'})
self.assertTrue('/path/to/bin/python' in args)
self.assertTrue(
os.path.join(settings.UNICORE_CMS_INSTALL_DIR, 'manage.py')
in args)
self.assertTrue('syncdb' in args)
self.assertTrue('--migrate' in args)
self.assertTrue('--noinput' in args)
def init_db_call_mock(*call_args, **call_kwargs):
cwd = call_kwargs.get('cwd')
env = call_kwargs.get('env')
[args] = call_args
self.assertEqual(cwd, settings.UNICORE_CMS_INSTALL_DIR)
self.assertEqual(
env, {'DJANGO_SETTINGS_MODULE': 'project.ffl_za'})
self.assertTrue('/path/to/bin/python' in args)
self.assertTrue(
os.path.join(settings.UNICORE_CMS_INSTALL_DIR, 'manage.py')
in args)
self.assertTrue('import_from_git' in args)
self.assertTrue('--quiet' in args)
self.assertTrue('--push' in args)
self.mock_create_all()
p = self.mk_project(repo={'base_url': self.base_repo_sm.repo.git_dir})
self.addCleanup(lambda: shutil.rmtree(p.repo_path()))
pw = p.get_website_manager().workflow
pw.take_action('create_repo')
pw.take_action('clone_repo')
pw.take_action('create_remote')
pw.take_action('merge_remote')
pw.take_action('push_repo')
pw.take_action('create_webhook')
pw.take_action('init_workspace')
pw.take_action('create_nginx')
pw.take_action('create_hub_app')
pw.take_action('create_pyramid_settings')
pw.take_action('create_cms_settings')
p.db_manager.call_subprocess = create_db_call_mock
pw.take_action('create_db')
p.db_manager.call_subprocess = init_db_call_mock
pw.take_action('init_db')
pw.take_action('create_marathon_app')
pw.take_action('finish')
self.assertEquals(p.state, 'done')
@responses.activate
def test_next(self):
self.mock_create_repo()
p = self.mk_project(repo={'base_url': self.base_repo_sm.repo.git_dir})
self.assertEquals(p.state, 'initial')
pw = p.get_website_manager().workflow
pw.next()
self.assertEquals(p.state, 'repo_created')
@responses.activate
def test_automation_using_next(self):
def call_mock(*call_args, **call_kwargs):
pass
self.mock_create_all()
p = self.mk_project(repo={'base_url': self.base_repo_sm.repo.git_dir})
p.db_manager.call_subprocess = call_mock
self.addCleanup(lambda: shutil.rmtree(p.repo_path()))
self.assertEquals(p.state, 'initial')
pw = p.get_website_manager().workflow
pw.run_all()
self.assertEquals(p.state, 'done')
self.assertEquals(
p.own_repo().url,
self.source_repo_sm.repo.git_dir)
@responses.activate
def test_destroy(self):
cms_db_path = os.path.join(
settings.UNICORE_CMS_INSTALL_DIR,
'django_cms_ffl_za.db')
def call_mock(*call_args, **call_kwargs):
if not os.path.exists(settings.UNICORE_CMS_INSTALL_DIR):
os.makedirs(settings.UNICORE_CMS_INSTALL_DIR)
with open(cms_db_path, 'a'):
os.utime(cms_db_path, None)
self.mock_create_all()
p = self.mk_project(repo={'base_url': self.base_repo_sm.repo.git_dir})
p.db_manager.call_subprocess = call_mock
self.assertEquals(p.state, 'initial')
pw = p.get_website_manager().workflow
pw.run_all()
self.assertEquals(p.state, 'done')
frontend_settings_path = os.path.join(
settings.FRONTEND_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_settings_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.py')
cms_uwsgi_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_nginx_config_path = os.path.join(
settings.NGINX_CONFIGS_PATH,
'cms_ffl_za.conf')
self.assertTrue(os.path.exists(cms_nginx_config_path))
self.assertTrue(os.path.exists(p.repo_path()))
self.assertFalse(os.path.exists(p.frontend_repo_path()))
self.assertTrue(os.path.exists(frontend_settings_path))
self.assertTrue(os.path.exists(cms_settings_path))
self.assertTrue(os.path.exists(cms_uwsgi_path))
self.assertTrue(os.path.exists(cms_db_path))
pw.take_action('destroy')
self.assertFalse(os.path.exists(cms_nginx_config_path))
self.assertFalse(os.path.exists(p.repo_path()))
self.assertFalse(os.path.exists(p.frontend_repo_path()))
self.assertFalse(os.path.exists(frontend_settings_path))
self.assertFalse(os.path.exists(cms_settings_path))
self.assertFalse(os.path.exists(cms_uwsgi_path))
self.assertFalse(os.path.exists(cms_db_path))
@responses.activate
def test_destroy_springboard(self):
cms_db_path = os.path.join(
settings.UNICORE_CMS_INSTALL_DIR,
'django_cms_ffl_za.db')
def call_mock(*call_args, **call_kwargs):
if not os.path.exists(settings.UNICORE_CMS_INSTALL_DIR):
os.makedirs(settings.UNICORE_CMS_INSTALL_DIR)
with open(cms_db_path, 'a'):
os.utime(cms_db_path, None)
self.mock_create_all()
p = self.mk_project(
repo={'base_url': self.base_repo_sm.repo.git_dir},
app_type={'project_type': 'springboard'})
p.db_manager.call_subprocess = call_mock
self.assertEquals(p.state, 'initial')
pw = p.get_website_manager().workflow
pw.run_all()
self.assertEquals(p.state, 'done')
frontend_settings_path = os.path.join(
settings.SPRINGBOARD_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_settings_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.py')
cms_uwsgi_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_nginx_config_path = os.path.join(
settings.NGINX_CONFIGS_PATH,
'cms_ffl_za.conf')
self.assertTrue(os.path.exists(cms_nginx_config_path))
self.assertTrue(os.path.exists(p.repo_path()))
self.assertFalse(os.path.exists(p.frontend_repo_path()))
self.assertTrue(os.path.exists(frontend_settings_path))
self.assertTrue(os.path.exists(cms_settings_path))
self.assertTrue(os.path.exists(cms_uwsgi_path))
self.assertTrue(os.path.exists(cms_db_path))
pw.take_action('destroy')
self.assertFalse(os.path.exists(cms_nginx_config_path))
self.assertFalse(os.path.exists(p.repo_path()))
self.assertFalse(os.path.exists(frontend_settings_path))
self.assertFalse(os.path.exists(cms_settings_path))
self.assertFalse(os.path.exists(cms_uwsgi_path))
self.assertFalse(os.path.exists(cms_db_path))
@responses.activate
def test_full_run_with_unicode(self):
cms_db_path = os.path.join(
settings.UNICORE_CMS_INSTALL_DIR,
'django_cms_ffl_za.db')
def call_mock(*call_args, **call_kwargs):
if not os.path.exists(settings.UNICORE_CMS_INSTALL_DIR):
os.makedirs(settings.UNICORE_CMS_INSTALL_DIR)
with open(cms_db_path, 'a'):
os.utime(cms_db_path, None)
self.mock_create_all()
p = self.mk_project(repo={'base_url': self.base_repo_sm.repo.git_dir})
p.db_manager.call_subprocess = call_mock
self.assertEquals(p.state, 'initial')
pw = p.get_website_manager().workflow
pw.run_all()
self.assertEquals(p.state, 'done')
frontend_settings_path = os.path.join(
settings.FRONTEND_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_settings_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.py')
cms_uwsgi_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_nginx_config_path = os.path.join(
settings.NGINX_CONFIGS_PATH,
'cms_ffl_za.conf')
self.assertTrue(os.path.exists(cms_nginx_config_path))
self.assertTrue(os.path.exists(p.repo_path()))
self.assertFalse(os.path.exists(p.frontend_repo_path()))
self.assertTrue(os.path.exists(frontend_settings_path))
self.assertTrue(os.path.exists(cms_settings_path))
self.assertTrue(os.path.exists(cms_uwsgi_path))
self.assertTrue(os.path.exists(cms_db_path))
pw.take_action('destroy')
self.assertFalse(os.path.exists(cms_nginx_config_path))
self.assertFalse(os.path.exists(p.repo_path()))
self.assertFalse(os.path.exists(p.frontend_repo_path()))
self.assertFalse(os.path.exists(frontend_settings_path))
self.assertFalse(os.path.exists(cms_settings_path))
self.assertFalse(os.path.exists(cms_uwsgi_path))
self.assertFalse(os.path.exists(cms_db_path))
@patch('unicoremc.managers.database.DbManager.call_subprocess')
@responses.activate
def test_non_standalone_project_workflow(self, mock_subprocess):
existing_repo = self.mk_project().own_repo()
p = self.mk_project()
p.own_repo().delete()
p.external_repos.add(existing_repo)
p = Project.objects.get(pk=p.pk)
self.assertIs(p.own_repo(), None)
self.mock_create_all()
pw = p.get_website_manager().workflow
pw.run_all()
frontend_settings_path = os.path.join(
settings.FRONTEND_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_settings_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.py')
cms_uwsgi_path = os.path.join(
settings.CMS_SETTINGS_OUTPUT_PATH,
'ffl_za.ini')
cms_nginx_config_path = os.path.join(
settings.NGINX_CONFIGS_PATH,
'cms_ffl_za.conf')
cms_db_path = os.path.join(
settings.UNICORE_CMS_INSTALL_DIR,
'django_cms_ffl_za.db')
# check that frontend pyramid and nginx configs were created
self.assertTrue(os.path.exists(frontend_settings_path))
# check that unicore.hub and marathon were set up for frontend
self.assertTrue(p.hub_app_id)
self.assertEqual(len(filter(
lambda c: settings.MESOS_MARATHON_HOST in c.request.url,
responses.calls)), 1)
# check that repos and CMS configs were not created
self.assertFalse(os.path.exists(cms_nginx_config_path))
self.assertFalse(os.path.exists(p.repo_path()))
self.assertFalse(os.path.exists(p.frontend_repo_path()))
self.assertFalse(os.path.exists(cms_settings_path))
self.assertFalse(os.path.exists(cms_uwsgi_path))
self.assertFalse(os.path.exists(cms_db_path))
self.assertFalse(filter(
lambda c: settings.GITHUB_HOOKS_API in c.request.url,
responses.calls))
self.assertFalse(filter(
lambda c: settings.UNICORE_DISTRIBUTE_HOST in c.request.url,
responses.calls))
pw.take_action('destroy')
self.assertFalse(os.path.exists(frontend_settings_path))
@patch('unicoremc.managers.database.DbManager.call_subprocess')
@responses.activate
def test_missing_state(self, mock_subprocess):
self.mock_create_all()
p = self.mk_project(repo={'base_url': self.base_repo_sm.repo.git_dir})
self.assertEquals(p.state, 'initial')
pw = p.get_website_manager().workflow
pw.run_all()
self.assertEquals(p.state, 'done')
# nothing should happen on next
pw.next()
self.assertEquals(p.state, 'done')
pw.take_action('missing')
self.assertEquals(p.state, 'missing')
pw.take_action('activate')
self.assertEquals(p.state, 'done')
pw.take_action('destroy')
self.assertEquals(p.state, 'destroyed')
|
|
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_circles
from sklearn.decomposition import PCA, KernelPCA
from sklearn.linear_model import Perceptron
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver). \
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
|
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions that modify resources in protobuf format.
Format reference:
https://cs.android.com/android/platform/superproject/+/master:frameworks/base/tools/aapt2/Resources.proto
"""
import logging
import os
import struct
import sys
import zipfile
from util import build_utils
from util import resource_utils
sys.path[1:1] = [
# `Resources_pb2` module imports `descriptor`, which imports `six`.
os.path.join(build_utils.DIR_SOURCE_ROOT, 'third_party', 'six', 'src'),
# Make sure the pb2 files are able to import google.protobuf
os.path.join(build_utils.DIR_SOURCE_ROOT, 'third_party', 'protobuf',
'python'),
]
from proto import Resources_pb2
# First bytes in an .arsc.flat file.
# uint32: Magic ("AAPT"), version (1), num_entries (1), type (0)
_FLAT_ARSC_HEADER = b'AAPT\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00'
# The package ID hardcoded for shared libraries. See
# _HardcodeSharedLibraryDynamicAttributes() for more details. If this value
# changes make sure to change REQUIRED_PACKAGE_IDENTIFIER in WebLayerImpl.java.
SHARED_LIBRARY_HARDCODED_ID = 36
def _ProcessZip(zip_path, process_func):
"""Filters a .zip file via: new_bytes = process_func(filename, data)."""
has_changes = False
zip_entries = []
with zipfile.ZipFile(zip_path) as src_zip:
for info in src_zip.infolist():
data = src_zip.read(info)
new_data = process_func(info.filename, data)
if new_data is not data:
has_changes = True
data = new_data
zip_entries.append((info, data))
# Overwrite the original zip file.
if has_changes:
with zipfile.ZipFile(zip_path, 'w') as f:
for info, data in zip_entries:
f.writestr(info, data)
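# A minimal usage sketch (the function name and the .json filter below are
# hypothetical, added only to illustrate the process_func contract): return the
# original `data` object untouched to signal "no change"; returning any new
# bytes object marks the zip as changed and triggers a rewrite.
def _ExampleDropJsonEntries(zip_path):
  def drop_json(filename, data):
    # Identity matters here: b'' is a new object, so the entry is rewritten
    # with empty content, while returning `data` leaves the zip untouched.
    return b'' if filename.endswith('.json') else data
  _ProcessZip(zip_path, drop_json)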
def _ProcessProtoItem(item):
if not item.HasField('ref'):
return
# If this is a dynamic attribute (type ATTRIBUTE, package ID 0), hardcode
# the package to SHARED_LIBRARY_HARDCODED_ID.
if item.ref.type == Resources_pb2.Reference.ATTRIBUTE and not (item.ref.id
& 0xff000000):
item.ref.id |= (0x01000000 * SHARED_LIBRARY_HARDCODED_ID)
item.ref.ClearField('is_dynamic')
def _ProcessProtoValue(value):
if value.HasField('item'):
_ProcessProtoItem(value.item)
return
compound_value = value.compound_value
if compound_value.HasField('style'):
for entry in compound_value.style.entry:
_ProcessProtoItem(entry.item)
elif compound_value.HasField('array'):
for element in compound_value.array.element:
_ProcessProtoItem(element.item)
elif compound_value.HasField('plural'):
for entry in compound_value.plural.entry:
_ProcessProtoItem(entry.item)
def _ProcessProtoXmlNode(xml_node):
if not xml_node.HasField('element'):
return
for attribute in xml_node.element.attribute:
_ProcessProtoItem(attribute.compiled_item)
for child in xml_node.element.child:
_ProcessProtoXmlNode(child)
def _SplitLocaleResourceType(_type, allowed_resource_names):
"""Splits locale specific resources out of |_type| and returns them.
Any locale specific resources will be removed from |_type|, and a new
Resources_pb2.Type value will be returned which contains those resources.
Args:
_type: A Resources_pb2.Type value
allowed_resource_names: Names of locale resources that should be kept in the
main type.
"""
locale_entries = []
for entry in _type.entry:
if entry.name in allowed_resource_names:
continue
# First collect all resources values with a locale set.
config_values_with_locale = []
for config_value in entry.config_value:
if config_value.config.locale:
config_values_with_locale.append(config_value)
if config_values_with_locale:
# Remove the locale resources from the original entry
for value in config_values_with_locale:
entry.config_value.remove(value)
# Add locale resources to a new Entry, and save for later.
locale_entry = Resources_pb2.Entry()
locale_entry.CopyFrom(entry)
del locale_entry.config_value[:]
locale_entry.config_value.extend(config_values_with_locale)
locale_entries.append(locale_entry)
if not locale_entries:
return None
# Copy the original type and replace the entries with |locale_entries|.
locale_type = Resources_pb2.Type()
locale_type.CopyFrom(_type)
del locale_type.entry[:]
locale_type.entry.extend(locale_entries)
return locale_type
def _HardcodeInTable(table, is_bundle_module, shared_resources_allowlist):
translations_package = None
if is_bundle_module:
# A separate top level package will be added to the resources, which
# contains only locale specific resources. The package ID of the locale
# resources is hardcoded to SHARED_LIBRARY_HARDCODED_ID. This causes
# resources in locale splits to all get assigned
# SHARED_LIBRARY_HARDCODED_ID as their package ID, which prevents a bug
# in shared library bundles where each split APK gets a separate dynamic
# ID, and cannot be accessed by the main APK.
translations_package = Resources_pb2.Package()
translations_package.package_id.id = SHARED_LIBRARY_HARDCODED_ID
translations_package.package_name = (table.package[0].package_name +
'_translations')
# These resources are allowed in the base resources, since they are needed
# by WebView.
allowed_resource_names = set()
if shared_resources_allowlist:
allowed_resource_names = set(
resource_utils.GetRTxtStringResourceNames(shared_resources_allowlist))
for package in table.package:
for _type in package.type:
for entry in _type.entry:
for config_value in entry.config_value:
_ProcessProtoValue(config_value.value)
if translations_package is not None:
locale_type = _SplitLocaleResourceType(_type, allowed_resource_names)
if locale_type:
translations_package.type.add().CopyFrom(locale_type)
if translations_package is not None:
table.package.add().CopyFrom(translations_package)
def HardcodeSharedLibraryDynamicAttributes(zip_path,
is_bundle_module,
shared_resources_allowlist=None):
"""Hardcodes the package IDs of dynamic attributes and locale resources.
Hardcoding dynamic attribute package IDs is a workaround for b/147674078,
which affects Android versions pre-N. Hardcoding locale resource package IDs
is a workaround for b/155437035, which affects resources built with
  --shared-lib on all Android versions.
Args:
zip_path: Path to proto APK file.
is_bundle_module: True for bundle modules.
shared_resources_allowlist: Set of resource names to not extract out of the
main package.
"""
def process_func(filename, data):
if filename == 'resources.pb':
table = Resources_pb2.ResourceTable()
table.ParseFromString(data)
_HardcodeInTable(table, is_bundle_module, shared_resources_allowlist)
data = table.SerializeToString()
elif filename.endswith('.xml') and not filename.startswith('res/raw'):
xml_node = Resources_pb2.XmlNode()
xml_node.ParseFromString(data)
_ProcessProtoXmlNode(xml_node)
data = xml_node.SerializeToString()
return data
_ProcessZip(zip_path, process_func)
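# Hedged usage sketch (the path below is hypothetical): rewrite a proto-format
# APK in place so that dynamic attribute references and locale resources use
# the hardcoded shared-library package ID.
#
#   HardcodeSharedLibraryDynamicAttributes('out/Release/Base.ap_',
#                                          is_bundle_module=False)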
class _ResourceStripper(object):
def __init__(self, partial_path, keep_predicate):
self.partial_path = partial_path
self.keep_predicate = keep_predicate
self._has_changes = False
@staticmethod
def _IterStyles(entry):
for config_value in entry.config_value:
value = config_value.value
if value.HasField('compound_value'):
compound_value = value.compound_value
if compound_value.HasField('style'):
yield compound_value.style
def _StripStyles(self, entry, type_and_name):
# Strip style entries that refer to attributes that have been stripped.
for style in self._IterStyles(entry):
entries = style.entry
new_entries = []
for entry in entries:
full_name = '{}/{}'.format(type_and_name, entry.key.name)
if not self.keep_predicate(full_name):
logging.debug('Stripped %s/%s', self.partial_path, full_name)
else:
new_entries.append(entry)
if len(new_entries) != len(entries):
self._has_changes = True
del entries[:]
entries.extend(new_entries)
def _StripEntries(self, entries, type_name):
new_entries = []
for entry in entries:
type_and_name = '{}/{}'.format(type_name, entry.name)
if not self.keep_predicate(type_and_name):
logging.debug('Stripped %s/%s', self.partial_path, type_and_name)
else:
new_entries.append(entry)
self._StripStyles(entry, type_and_name)
if len(new_entries) != len(entries):
self._has_changes = True
del entries[:]
entries.extend(new_entries)
def StripTable(self, table):
self._has_changes = False
for package in table.package:
for _type in package.type:
self._StripEntries(_type.entry, _type.name)
return self._has_changes
def _TableFromFlatBytes(data):
# https://cs.android.com/android/platform/superproject/+/master:frameworks/base/tools/aapt2/format/Container.cpp
size_idx = len(_FLAT_ARSC_HEADER)
proto_idx = size_idx + 8
if data[:size_idx] != _FLAT_ARSC_HEADER:
    raise Exception('Error parsing data: missing AAPT container header')
# Size is stored as uint64.
size = struct.unpack('<Q', data[size_idx:proto_idx])[0]
table = Resources_pb2.ResourceTable()
proto_bytes = data[proto_idx:proto_idx + size]
table.ParseFromString(proto_bytes)
return table
def _FlatBytesFromTable(table):
proto_bytes = table.SerializeToString()
size = struct.pack('<Q', len(proto_bytes))
overage = len(proto_bytes) % 4
padding = b'\0' * (4 - overage) if overage else b''
return b''.join((_FLAT_ARSC_HEADER, size, proto_bytes, padding))
def StripUnwantedResources(partial_path, keep_predicate):
"""Removes resources from .arsc.flat files inside of a .zip.
Args:
partial_path: Path to a .zip containing .arsc.flat entries
keep_predicate: Given "$partial_path/$res_type/$res_name", returns
whether to keep the resource.
"""
stripper = _ResourceStripper(partial_path, keep_predicate)
def process_file(filename, data):
if filename.endswith('.arsc.flat'):
table = _TableFromFlatBytes(data)
if stripper.StripTable(table):
data = _FlatBytesFromTable(table)
return data
_ProcessZip(partial_path, process_file)
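# Minimal usage sketch (function and predicate names are hypothetical): strip
# every "color" resource from a .zip of compiled .arsc.flat entries. The
# predicate sees names built by _StripEntries/_StripStyles above (e.g.
# "color/my_color"), so a substring test keeps the example robust.
def _ExampleStripColors(partial_path):
  def keep_non_color(name):
    return 'color/' not in name
  StripUnwantedResources(partial_path, keep_non_color)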
|
|
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from polymorphic import PolymorphicModel
from django.db.models import F
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from celery.exceptions import SoftTimeLimitExceeded
from .jenkins import get_job_status
from .alert import send_alert
from .calendar import get_events
from .graphite import parse_metric
from .tasks import update_service, update_instance
from datetime import datetime, timedelta
from django.utils import timezone
import json
import re
import time
import os
import subprocess
import requests
from celery.utils.log import get_task_logger
RAW_DATA_LIMIT = 5000
logger = get_task_logger(__name__)
CHECK_TYPES = (
('>', 'Greater than'),
('>=', 'Greater than or equal'),
('<', 'Less than'),
('<=', 'Less than or equal'),
('==', 'Equal to'),
)
def serialize_recent_results(recent_results):
if not recent_results:
return ''
def result_to_value(result):
if result.succeeded:
return '1'
else:
return '-1'
vals = [result_to_value(r) for r in recent_results]
vals.reverse()
return ','.join(vals)
def calculate_debounced_passing(recent_results, debounce=0):
"""
    `debounce` is the number of previous results (in addition to the current
    one) that must also be failures before a check is marked as failing.
Returns:
True if passing given debounce factor
False if failing
"""
if not recent_results:
return True
debounce_window = recent_results[:debounce + 1]
for r in debounce_window:
if r.succeeded:
return True
return False
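# A small, self-contained illustration of the debounce rule above (hypothetical
# data; `_R` is a stand-in for StatusCheckResult and is not used by the models).
# The list is most-recent-first, matching StatusCheck.recent_results() ordering.
def _example_debounce_behaviour():
    from collections import namedtuple
    _R = namedtuple('_R', 'succeeded')
    # With debounce=1 a single fresh failure still counts as passing, because
    # an earlier success sits inside the window of debounce + 1 recent results.
    assert calculate_debounced_passing([_R(False), _R(True)], debounce=1)
    # Once the whole window is failures, the check is reported as failing.
    assert not calculate_debounced_passing([_R(False), _R(False)], debounce=1)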
class CheckGroupMixin(models.Model):
class Meta:
abstract = True
PASSING_STATUS = 'PASSING'
WARNING_STATUS = 'WARNING'
ERROR_STATUS = 'ERROR'
CRITICAL_STATUS = 'CRITICAL'
CALCULATED_PASSING_STATUS = 'passing'
CALCULATED_INTERMITTENT_STATUS = 'intermittent'
CALCULATED_FAILING_STATUS = 'failing'
STATUSES = (
(CALCULATED_PASSING_STATUS, CALCULATED_PASSING_STATUS),
(CALCULATED_INTERMITTENT_STATUS, CALCULATED_INTERMITTENT_STATUS),
(CALCULATED_FAILING_STATUS, CALCULATED_FAILING_STATUS),
)
IMPORTANCES = (
(WARNING_STATUS, 'Warning'),
(ERROR_STATUS, 'Error'),
(CRITICAL_STATUS, 'Critical'),
)
name = models.TextField()
users_to_notify = models.ManyToManyField(
User,
blank=True,
help_text='Users who should receive alerts.',
)
alerts_enabled = models.BooleanField(
default=True,
help_text='Alert when this service is not healthy.',
)
status_checks = models.ManyToManyField(
'StatusCheck',
blank=True,
help_text='Checks used to calculate service status.',
)
last_alert_sent = models.DateTimeField(
null=True,
blank=True,
)
email_alert = models.BooleanField(default=False)
hipchat_alert = models.BooleanField(default=True)
sms_alert = models.BooleanField(default=False)
telephone_alert = models.BooleanField(
default=False,
help_text='Must be enabled, and check importance set to Critical, to receive telephone alerts.',
)
overall_status = models.TextField(default=PASSING_STATUS)
old_overall_status = models.TextField(default=PASSING_STATUS)
hackpad_id = models.TextField(
null=True,
blank=True,
verbose_name='Recovery instructions',
help_text='Gist, Hackpad or Refheap js embed with recovery instructions e.g. https://you.hackpad.com/some_document.js'
)
def __unicode__(self):
return self.name
def most_severe(self, check_list):
failures = [c.importance for c in check_list]
if self.CRITICAL_STATUS in failures:
return self.CRITICAL_STATUS
if self.ERROR_STATUS in failures:
return self.ERROR_STATUS
if self.WARNING_STATUS in failures:
return self.WARNING_STATUS
return self.PASSING_STATUS
@property
def is_critical(self):
"""
        Break this logic out separately because it is easy to get wrong.
"""
if self.old_overall_status != self.CRITICAL_STATUS and self.overall_status == self.CRITICAL_STATUS:
return True
return False
def alert(self):
if not self.alerts_enabled:
return
if self.overall_status != self.PASSING_STATUS:
# Don't alert every time
if self.overall_status == self.WARNING_STATUS:
if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.NOTIFICATION_INTERVAL)) < self.last_alert_sent:
return
elif self.overall_status in (self.CRITICAL_STATUS, self.ERROR_STATUS):
if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.ALERT_INTERVAL)) < self.last_alert_sent:
return
self.last_alert_sent = timezone.now()
else:
# We don't count "back to normal" as an alert
self.last_alert_sent = None
self.save()
self.snapshot.did_send_alert = True
self.snapshot.save()
send_alert(self, duty_officers=get_duty_officers())
@property
def recent_snapshots(self):
snapshots = self.snapshots.filter(
time__gt=(timezone.now() - timedelta(minutes=60 * 24)))
snapshots = list(snapshots.values())
for s in snapshots:
s['time'] = time.mktime(s['time'].timetuple())
return snapshots
def graphite_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='graphitestatuscheck')
def http_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='httpstatuscheck')
def jenkins_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='jenkinsstatuscheck')
def active_graphite_status_checks(self):
return self.graphite_status_checks().filter(active=True)
def active_http_status_checks(self):
return self.http_status_checks().filter(active=True)
def active_jenkins_status_checks(self):
return self.jenkins_status_checks().filter(active=True)
def active_status_checks(self):
return self.status_checks.filter(active=True)
def inactive_status_checks(self):
return self.status_checks.filter(active=False)
def all_passing_checks(self):
return self.active_status_checks().filter(calculated_status=self.CALCULATED_PASSING_STATUS)
def all_failing_checks(self):
return self.active_status_checks().exclude(calculated_status=self.CALCULATED_PASSING_STATUS)
class Service(CheckGroupMixin):
def update_status(self):
self.old_overall_status = self.overall_status
# Only active checks feed into our calculation
status_checks_failed_count = self.all_failing_checks().count()
self.overall_status = self.most_severe(self.all_failing_checks())
self.snapshot = ServiceStatusSnapshot(
service=self,
num_checks_active=self.active_status_checks().count(),
num_checks_passing=self.active_status_checks(
).count() - status_checks_failed_count,
num_checks_failing=status_checks_failed_count,
overall_status=self.overall_status,
time=timezone.now(),
)
self.snapshot.save()
self.save()
if not (self.overall_status == Service.PASSING_STATUS and self.old_overall_status == Service.PASSING_STATUS):
self.alert()
instances = models.ManyToManyField(
'Instance',
blank=True,
help_text='Instances this service is running on.',
)
url = models.TextField(
blank=True,
help_text="URL of service."
)
class Meta:
ordering = ['name']
class Instance(CheckGroupMixin):
def duplicate(self):
checks = self.status_checks.all()
new_instance = self
new_instance.pk = None
new_instance.id = None
new_instance.name = u"Copy of %s" % self.name
new_instance.save()
for check in checks:
check.duplicate(inst_set=(new_instance,), serv_set=())
return new_instance.pk
def update_status(self):
self.old_overall_status = self.overall_status
# Only active checks feed into our calculation
status_checks_failed_count = self.all_failing_checks().count()
self.overall_status = self.most_severe(self.all_failing_checks())
self.snapshot = InstanceStatusSnapshot(
instance=self,
num_checks_active=self.active_status_checks().count(),
num_checks_passing=self.active_status_checks(
).count() - status_checks_failed_count,
num_checks_failing=status_checks_failed_count,
overall_status=self.overall_status,
time=timezone.now(),
)
self.snapshot.save()
self.save()
class Meta:
ordering = ['name']
address = models.TextField(
blank=True,
help_text="Address (IP/Hostname) of service."
)
def icmp_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck')
def active_icmp_status_checks(self):
return self.icmp_status_checks().filter(active=True)
def delete(self, *args, **kwargs):
self.icmp_status_checks().delete()
return super(Instance, self).delete(*args, **kwargs)
class Snapshot(models.Model):
class Meta:
abstract = True
time = models.DateTimeField(db_index=True)
num_checks_active = models.IntegerField(default=0)
num_checks_passing = models.IntegerField(default=0)
num_checks_failing = models.IntegerField(default=0)
overall_status = models.TextField(default=Service.PASSING_STATUS)
did_send_alert = models.IntegerField(default=False)
class ServiceStatusSnapshot(Snapshot):
service = models.ForeignKey(Service, related_name='snapshots')
def __unicode__(self):
return u"%s: %s" % (self.service.name, self.overall_status)
class InstanceStatusSnapshot(Snapshot):
instance = models.ForeignKey(Instance, related_name='snapshots')
def __unicode__(self):
return u"%s: %s" % (self.instance.name, self.overall_status)
class StatusCheck(PolymorphicModel):
"""
    Base class for status checks, using django-polymorphic for polymorphism.
    Subclasses inherit via proxy models because that keeps things simple while
    still letting us attach different methods etc. to each subclass.
    You can work out which (sub)class a model is an instance of by accessing
    `instance.polymorphic_ctype.model`.
"""
# Common attributes to all
name = models.TextField()
active = models.BooleanField(
default=True,
help_text='If not active, check will not be used to calculate service status and will not trigger alerts.',
)
importance = models.CharField(
max_length=30,
choices=Service.IMPORTANCES,
default=Service.ERROR_STATUS,
        help_text='Severity level of a failure. Critical alerts are for failures that should wake you up at 2am, Errors are for things you can sleep through but need to fix in the morning, and Warnings are for less important issues.'
)
frequency = models.IntegerField(
default=5,
help_text='Minutes between each check.',
)
debounce = models.IntegerField(
default=0,
null=True,
help_text='Number of successive failures permitted before check will be marked as failed. Default is 0, i.e. fail on first failure.'
)
created_by = models.ForeignKey(User, null=True)
calculated_status = models.CharField(
max_length=50, choices=Service.STATUSES, default=Service.CALCULATED_PASSING_STATUS, blank=True)
last_run = models.DateTimeField(null=True)
cached_health = models.TextField(editable=False, null=True)
# Graphite checks
metric = models.TextField(
null=True,
help_text='fully.qualified.name of the Graphite metric you want to watch. This can be any valid Graphite expression, including wildcards, multiple hosts, etc.',
)
check_type = models.CharField(
choices=CHECK_TYPES,
max_length=100,
null=True,
)
value = models.TextField(
null=True,
help_text='If this expression evaluates to true, the check will fail (possibly triggering an alert).',
)
expected_num_hosts = models.IntegerField(
default=0,
null=True,
help_text='The minimum number of data series (hosts) you expect to see.',
)
# HTTP checks
endpoint = models.TextField(
null=True,
help_text='HTTP(S) endpoint to poll.',
)
username = models.TextField(
blank=True,
null=True,
help_text='Basic auth username.',
)
password = models.TextField(
blank=True,
null=True,
help_text='Basic auth password.',
)
text_match = models.TextField(
blank=True,
null=True,
help_text='Regex to match against source of page.',
)
status_code = models.TextField(
default=200,
null=True,
help_text='Status code expected from endpoint.'
)
timeout = models.IntegerField(
default=30,
null=True,
help_text='Time out after this many seconds.',
)
verify_ssl_certificate = models.BooleanField(
default=True,
        help_text='Set to False to skip SSL certificate verification (default True).',
)
# Jenkins checks
max_queued_build_time = models.IntegerField(
null=True,
blank=True,
help_text='Alert if build queued for more than this many minutes.',
)
class Meta(PolymorphicModel.Meta):
ordering = ['name']
def __unicode__(self):
return self.name
def recent_results(self):
        # Ordering by id is not ideal, but ordering on time_complete was
        # causing lockups, possibly related to the index on that column.
return StatusCheckResult.objects.filter(check=self).order_by('-id').defer('raw_data')[:10]
def last_result(self):
try:
return StatusCheckResult.objects.filter(check=self).order_by('-id').defer('raw_data')[0]
except:
return None
def run(self):
start = timezone.now()
try:
result = self._run()
except SoftTimeLimitExceeded as e:
result = StatusCheckResult(check=self)
result.error = u'Error in performing check: Celery soft time limit exceeded'
result.succeeded = False
except Exception as e:
result = StatusCheckResult(check=self)
result.error = u'Error in performing check: %s' % (e,)
result.succeeded = False
finish = timezone.now()
result.time = start
result.time_complete = finish
result.save()
self.last_run = finish
self.save()
def _run(self):
"""
Implement on subclasses. Should return a `CheckResult` instance.
"""
raise NotImplementedError('Subclasses should implement')
def save(self, *args, **kwargs):
if self.last_run:
recent_results = list(self.recent_results())
if calculate_debounced_passing(recent_results, self.debounce):
self.calculated_status = Service.CALCULATED_PASSING_STATUS
else:
self.calculated_status = Service.CALCULATED_FAILING_STATUS
self.cached_health = serialize_recent_results(recent_results)
try:
updated = StatusCheck.objects.get(pk=self.pk)
except StatusCheck.DoesNotExist as e:
                logger.error('Cannot find myself (check %s) in the database; it has presumably been deleted' % self.pk)
return
else:
self.cached_health = ''
self.calculated_status = Service.CALCULATED_PASSING_STATUS
ret = super(StatusCheck, self).save(*args, **kwargs)
self.update_related_services()
self.update_related_instances()
return ret
def duplicate(self, inst_set=(), serv_set=()):
new_check = self
new_check.pk = None
new_check.id = None
new_check.last_run = None
new_check.save()
for linked in list(inst_set) + list(serv_set):
linked.status_checks.add(new_check)
return new_check.pk
def update_related_services(self):
services = self.service_set.all()
for service in services:
update_service.delay(service.id)
def update_related_instances(self):
instances = self.instance_set.all()
for instance in instances:
update_instance.delay(instance.id)
class ICMPStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "ICMP/Ping Check"
def _run(self):
result = StatusCheckResult(check=self)
instances = self.instance_set.all()
target = self.instance_set.get().address
# We need to read both STDOUT and STDERR because ping can write to both, depending on the kind of error. Thanks a lot, ping.
ping_process = subprocess.Popen("ping -c 1 " + target, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
response = ping_process.wait()
if response == 0:
result.succeeded = True
else:
output = ping_process.stdout.read()
result.succeeded = False
result.error = output
return result
class GraphiteStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "Metric check"
def format_error_message(self, failure_value, actual_hosts):
"""
A summary of why the check is failing for inclusion in hipchat, sms etc
Returns something like:
"5.0 > 4 | 1/2 hosts"
"""
hosts_string = u''
if self.expected_num_hosts > 0:
hosts_string = u' | %s/%s hosts' % (actual_hosts,
self.expected_num_hosts)
if self.expected_num_hosts > actual_hosts:
return u'Hosts missing%s' % hosts_string
if failure_value is None:
return "Failed to get metric from Graphite"
return u"%0.1f %s %0.1f%s" % (
failure_value,
self.check_type,
float(self.value),
hosts_string
)
def _run(self):
series = parse_metric(self.metric, mins_to_check=self.frequency)
failure_value = None
if series['error']:
failed = True
else:
failed = None
result = StatusCheckResult(
check=self,
)
if series['num_series_with_data'] > 0:
result.average_value = series['average_value']
if self.check_type == '<':
failed = float(series['min']) < float(self.value)
if failed:
failure_value = series['min']
elif self.check_type == '<=':
failed = float(series['min']) <= float(self.value)
if failed:
failure_value = series['min']
elif self.check_type == '>':
failed = float(series['max']) > float(self.value)
if failed:
failure_value = series['max']
elif self.check_type == '>=':
failed = float(series['max']) >= float(self.value)
if failed:
failure_value = series['max']
elif self.check_type == '==':
failed = float(self.value) in series['all_values']
if failed:
failure_value = float(self.value)
else:
raise Exception(u'Check type %s not supported' %
self.check_type)
if series['num_series_with_data'] < self.expected_num_hosts:
failed = True
try:
result.raw_data = json.dumps(series['raw'])
except:
result.raw_data = series['raw']
result.succeeded = not failed
if not result.succeeded:
result.error = self.format_error_message(
failure_value,
series['num_series_with_data'],
)
result.actual_hosts = series['num_series_with_data']
result.failure_value = failure_value
return result
class HttpStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "HTTP check"
def _run(self):
result = StatusCheckResult(check=self)
auth = (self.username, self.password)
try:
if self.username or self.password:
resp = requests.get(
self.endpoint,
timeout=self.timeout,
verify=self.verify_ssl_certificate,
auth=auth
)
else:
resp = requests.get(
self.endpoint,
timeout=self.timeout,
verify=self.verify_ssl_certificate,
)
except requests.RequestException as e:
result.error = u'Request error occurred: %s' % (e,)
result.succeeded = False
else:
if self.status_code and resp.status_code != int(self.status_code):
result.error = u'Wrong code: got %s (expected %s)' % (
resp.status_code, int(self.status_code))
result.succeeded = False
result.raw_data = resp.content
elif self.text_match:
if not re.search(self.text_match, resp.content):
result.error = u'Failed to find match regex /%s/ in response body' % self.text_match
result.raw_data = resp.content
result.succeeded = False
else:
result.succeeded = True
else:
result.succeeded = True
return result
class JenkinsStatusCheck(StatusCheck):
class Meta(StatusCheck.Meta):
proxy = True
@property
def check_category(self):
return "Jenkins check"
@property
def failing_short_status(self):
return 'Job failing on Jenkins'
def _run(self):
result = StatusCheckResult(check=self)
try:
status = get_job_status(self.name)
active = status['active']
if status['status_code'] == 404:
result.error = u'Job %s not found on Jenkins' % self.name
result.succeeded = False
return result
elif status['status_code'] > 400:
# Will fall through to next block
raise Exception(u'returned %s' % status['status_code'])
except Exception as e:
# If something else goes wrong, we will *not* fail - otherwise
# a lot of services seem to fail all at once.
# Ugly to do it here but...
result.error = u'Error fetching from Jenkins - %s' % e
result.succeeded = True
return result
if not active:
# We will fail if the job has been disabled
result.error = u'Job "%s" disabled on Jenkins' % self.name
result.succeeded = False
else:
if self.max_queued_build_time and status['blocked_build_time']:
if status['blocked_build_time'] > self.max_queued_build_time * 60:
result.succeeded = False
result.error = u'Job "%s" has blocked build waiting for %ss (> %sm)' % (
self.name,
int(status['blocked_build_time']),
self.max_queued_build_time,
)
else:
result.succeeded = status['succeeded']
else:
result.succeeded = status['succeeded']
if not status['succeeded']:
if result.error:
result.error += u'; Job "%s" failing on Jenkins' % self.name
else:
result.error = u'Job "%s" failing on Jenkins' % self.name
result.raw_data = status
return result
class StatusCheckResult(models.Model):
"""
We use the same StatusCheckResult model for all check types,
because really they are not so very different.
Checks don't have to use all the fields, so most should be
nullable
"""
check = models.ForeignKey(StatusCheck)
time = models.DateTimeField(null=False, db_index=True)
time_complete = models.DateTimeField(null=True, db_index=True)
raw_data = models.TextField(null=True)
succeeded = models.BooleanField(default=False)
error = models.TextField(null=True)
def __unicode__(self):
return '%s: %s @%s' % (self.status, self.check.name, self.time)
@property
def status(self):
if self.succeeded:
return 'succeeded'
else:
return 'failed'
@property
def took(self):
try:
            return (self.time_complete - self.time).total_seconds() * 1000
except:
return None
@property
def short_error(self):
snippet_len = 30
if len(self.error) > snippet_len:
return u"%s..." % self.error[:snippet_len - 3]
else:
return self.error
def save(self, *args, **kwargs):
if isinstance(self.raw_data, basestring):
self.raw_data = self.raw_data[:RAW_DATA_LIMIT]
return super(StatusCheckResult, self).save(*args, **kwargs)
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
mobile_number = models.CharField(max_length=20, blank=True, default='')
hipchat_alias = models.CharField(max_length=50, blank=True, default='')
fallback_alert_user = models.BooleanField(default=False)
def __unicode__(self):
return 'User profile: %s' % self.user.username
def save(self, *args, **kwargs):
if self.mobile_number.startswith('+'):
self.mobile_number = self.mobile_number[1:]
# Enforce uniqueness
if self.fallback_alert_user:
profiles = UserProfile.objects.exclude(id=self.id)
profiles.update(fallback_alert_user=False)
return super(UserProfile, self).save(*args, **kwargs)
@property
def prefixed_mobile_number(self):
return '+%s' % self.mobile_number
class Shift(models.Model):
start = models.DateTimeField()
end = models.DateTimeField()
user = models.ForeignKey(User)
uid = models.TextField()
deleted = models.BooleanField(default=False)
def __unicode__(self):
deleted = ''
if self.deleted:
deleted = ' (deleted)'
return "%s: %s to %s%s" % (self.user.username, self.start, self.end, deleted)
def get_duty_officers(at_time=None):
"""Returns a list of duty officers for a given time or now if none given"""
duty_officers = []
if not at_time:
at_time = timezone.now()
current_shifts = Shift.objects.filter(
deleted=False,
start__lt=at_time,
end__gt=at_time,
)
if current_shifts:
duty_officers = [shift.user for shift in current_shifts]
return duty_officers
else:
try:
u = UserProfile.objects.get(fallback_alert_user=True)
return [u.user]
except UserProfile.DoesNotExist:
return []
def update_shifts():
events = get_events()
users = User.objects.filter(is_active=True)
user_lookup = {}
for u in users:
user_lookup[u.username.lower()] = u
future_shifts = Shift.objects.filter(start__gt=timezone.now())
future_shifts.update(deleted=True)
for event in events:
e = event['summary'].lower().strip()
if e in user_lookup:
user = user_lookup[e]
try:
s = Shift.objects.get(uid=event['uid'])
except Shift.DoesNotExist:
s = Shift(uid=event['uid'])
s.start = event['start']
s.end = event['end']
s.user = user
s.deleted = False
s.save()
|
|
from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
# numpy is used directly in the Metropolis accept/reject step of step_HMC.
import numpy
import time
import logging
from utils import *
def calc_potential(exe, params, label_name, noise_precision, prior_precision):
exe.copy_params_from(params)
exe.forward(is_train=False)
ret = 0.0
ret += (nd.norm(
exe.outputs[0] - exe.arg_dict[label_name]).asscalar() ** 2) / 2.0 * noise_precision
for v in params.values():
ret += (nd.norm(v).asscalar() ** 2) / 2.0 * prior_precision
return ret
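# For reference, calc_potential evaluates (up to an additive constant) the
# negative log posterior used as the HMC potential energy:
#   U(w) = (noise_precision / 2) * ||f(X; w) - Y||^2
#        + (prior_precision / 2) * sum_k ||w_k||^2
# i.e. a Gaussian likelihood around the network output plus a Gaussian prior.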
def calc_grad(exe, exe_grads, params, X, Y, label_name=None, outgrad_f=None):
exe.copy_params_from(params)
exe.arg_dict['data'][:] = X
if outgrad_f is None:
exe.arg_dict[label_name][:] = Y
exe.forward(is_train=True)
exe.backward()
else:
exe.forward(is_train=True)
        exe.backward(outgrad_f(exe.outputs, Y))
for k, v in exe_grads.items():
v.wait_to_read()
def step_HMC(exe, exe_params, exe_grads, label_key, noise_precision, prior_precision, L=10,
eps=1E-6):
init_params = {k: v.copyto(v.context) for k, v in exe_params.items()}
end_params = {k: v.copyto(v.context) for k, v in exe_params.items()}
init_momentums = {k: mx.random.normal(0, 1, v.shape) for k, v in init_params.items()}
end_momentums = {k: v.copyto(v.context) for k, v in init_momentums.items()}
init_potential = calc_potential(exe, init_params, label_key, noise_precision, prior_precision)
# 0. Calculate Initial Energy and Kinetic
init_kinetic = sum([nd.sum(nd.square(momentum)) / 2.0
for momentum in init_momentums.values()]).asscalar()
# 1. Make a half step for momentum at the beginning
exe.copy_params_from(end_params)
exe.forward(is_train=True)
exe.backward()
for k, v in exe_grads.items():
v.wait_to_read()
for k, momentum in end_momentums.items():
momentum[:] = momentum - (eps / 2) * exe_grads[k]
# 2. Alternate full steps for position and momentum
for i in range(L):
# 2.1 Full step for position
for k, param in exe_params.items():
param[:] = param + eps * end_momentums[k]
# 2.2 Full step for the momentum, except at the end of trajectory we perform a half step
exe.forward(is_train=True)
exe.backward()
for v in exe_grads.values():
v.wait_to_read()
if i != L - 1:
for k, momentum in end_momentums.items():
momentum[:] = momentum - eps * exe_grads[k]
else:
for k, momentum in end_momentums.items():
# We should reverse the sign of the momentum at the end
momentum[:] = -(momentum - eps / 2.0 * exe_grads[k])
copy_param(exe, end_params)
# 3. Calculate acceptance ratio and accept/reject the move
end_potential = calc_potential(exe, end_params, label_key, noise_precision, prior_precision)
end_kinetic = sum([nd.sum(nd.square(momentum)) / 2.0
for momentum in end_momentums.values()]).asscalar()
# print init_potential, init_kinetic, end_potential, end_kinetic
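    # Metropolis-Hastings step: accept the proposal with probability
    # min(1, exp((init_potential + init_kinetic) - (end_potential + end_kinetic))),
    # which the comparison against a uniform random draw below implements.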
r = numpy.random.rand(1)
if r < numpy.exp(-(end_potential + end_kinetic) + (init_potential + init_kinetic)):
exe.copy_params_from(end_params)
return end_params, 1
else:
exe.copy_params_from(init_params)
return init_params, 0
def HMC(sym, data_inputs, X, Y, X_test, Y_test, sample_num,
initializer=None, noise_precision=1 / 9.0, prior_precision=0.1,
learning_rate=1E-6, L=10, dev=mx.gpu()):
label_key = list(set(data_inputs.keys()) - set(['data']))[0]
exe, exe_params, exe_grads, _ = get_executor(sym, dev, data_inputs, initializer)
exe.arg_dict['data'][:] = X
exe.arg_dict[label_key][:] = Y
sample_pool = []
accept_num = 0
start = time.time()
for i in range(sample_num):
sample_params, is_accept = step_HMC(exe, exe_params, exe_grads, label_key, noise_precision,
prior_precision, L, learning_rate)
accept_num += is_accept
if (i + 1) % 10 == 0:
sample_pool.append(sample_params)
if (i + 1) % 100000 == 0:
end = time.time()
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:",
sample_test_regression(exe, X=X_test, Y=Y_test, sample_pool=sample_pool,
minibatch_size=Y.shape[0],
save_path='regression_HMC.txt'))
start = time.time()
exe.copy_params_from(sample_params)
print('accept ratio', accept_num / float(sample_num))
return sample_pool
def SGD(sym, data_inputs, X, Y, X_test, Y_test, total_iter_num,
lr=None,
lr_scheduler=None, prior_precision=1,
out_grad_f=None,
initializer=None,
minibatch_size=100, dev=mx.gpu()):
if out_grad_f is None:
label_key = list(set(data_inputs.keys()) - set(['data']))[0]
exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
optimizer = mx.optimizer.create('sgd', learning_rate=lr,
rescale_grad=X.shape[0] / minibatch_size,
lr_scheduler=lr_scheduler,
wd=prior_precision,
arg_names=params.keys())
updater = mx.optimizer.get_updater(optimizer)
start = time.time()
for i in range(total_iter_num):
indices = numpy.random.randint(X.shape[0], size=minibatch_size)
X_batch = X[indices]
Y_batch = Y[indices]
exe.arg_dict['data'][:] = X_batch
if out_grad_f is None:
exe.arg_dict[label_key][:] = Y_batch
exe.forward(is_train=True)
exe.backward()
else:
exe.forward(is_train=True)
exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
for k in params:
updater(k, params_grad[k], params[k])
if (i + 1) % 500 == 0:
end = time.time()
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
sample_test_acc(exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=100)
start = time.time()
return exe, params, params_grad
def SGLD(sym, X, Y, X_test, Y_test, total_iter_num,
data_inputs=None,
learning_rate=None,
lr_scheduler=None, prior_precision=1,
out_grad_f=None,
initializer=None,
minibatch_size=100, thin_interval=100, burn_in_iter_num=1000, task='classification',
dev=mx.gpu()):
if out_grad_f is None:
label_key = list(set(data_inputs.keys()) - set(['data']))[0]
exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
optimizer = mx.optimizer.create('sgld', learning_rate=learning_rate,
rescale_grad=X.shape[0] / minibatch_size,
lr_scheduler=lr_scheduler,
wd=prior_precision)
updater = mx.optimizer.get_updater(optimizer)
sample_pool = []
start = time.time()
for i in range(total_iter_num):
indices = numpy.random.randint(X.shape[0], size=minibatch_size)
X_batch = X[indices]
Y_batch = Y[indices]
exe.arg_dict['data'][:] = X_batch
if out_grad_f is None:
exe.arg_dict[label_key][:] = Y_batch
exe.forward(is_train=True)
exe.backward()
else:
exe.forward(is_train=True)
exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
for k in params:
updater(k, params_grad[k], params[k])
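        # After the burn-in phase, thin the chain by keeping one sample every
        # `thin_interval` iterations; each entry stores the current learning rate
        # together with a copy of the network parameters.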
if i < burn_in_iter_num:
continue
else:
if 0 == (i - burn_in_iter_num) % thin_interval:
if optimizer.lr_scheduler is not None:
lr = optimizer.lr_scheduler(optimizer.num_update)
else:
lr = learning_rate
sample_pool.append([lr, copy_param(exe)])
if (i + 1) % 100000 == 0:
end = time.time()
if task == 'classification':
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
test_correct, test_total, test_acc = \
sample_test_acc(exe, sample_pool=sample_pool, X=X_test, Y=Y_test, label_num=10,
minibatch_size=minibatch_size)
print("Test %d/%d=%f" % (test_correct, test_total, test_acc))
else:
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:",
sample_test_regression(exe=exe, sample_pool=sample_pool,
X=X_test,
Y=Y_test, minibatch_size=minibatch_size,
save_path='regression_SGLD.txt'))
start = time.time()
return exe, sample_pool
def DistilledSGLD(teacher_sym, student_sym,
teacher_data_inputs, student_data_inputs,
X, Y, X_test, Y_test, total_iter_num,
teacher_learning_rate, student_learning_rate,
teacher_lr_scheduler=None, student_lr_scheduler=None,
student_optimizing_algorithm='sgd',
teacher_grad_f=None, student_grad_f=None,
teacher_prior_precision=1, student_prior_precision=0.001,
perturb_deviation=0.001,
student_initializer=None,
teacher_initializer=None,
minibatch_size=100,
task='classification',
dev=mx.gpu()):
teacher_exe, teacher_params, teacher_params_grad, _ = \
get_executor(teacher_sym, dev, teacher_data_inputs, teacher_initializer)
student_exe, student_params, student_params_grad, _ = \
get_executor(student_sym, dev, student_data_inputs, student_initializer)
if teacher_grad_f is None:
teacher_label_key = list(set(teacher_data_inputs.keys()) - set(['data']))[0]
if student_grad_f is None:
student_label_key = list(set(student_data_inputs.keys()) - set(['data']))[0]
teacher_optimizer = mx.optimizer.create('sgld',
learning_rate=teacher_learning_rate,
rescale_grad=X.shape[0] / float(minibatch_size),
lr_scheduler=teacher_lr_scheduler,
wd=teacher_prior_precision)
student_optimizer = mx.optimizer.create(student_optimizing_algorithm,
learning_rate=student_learning_rate,
rescale_grad=1.0 / float(minibatch_size),
lr_scheduler=student_lr_scheduler,
wd=student_prior_precision)
teacher_updater = mx.optimizer.get_updater(teacher_optimizer)
student_updater = mx.optimizer.get_updater(student_optimizer)
start = time.time()
for i in range(total_iter_num):
# 1.1 Draw random minibatch
indices = numpy.random.randint(X.shape[0], size=minibatch_size)
X_batch = X[indices]
Y_batch = Y[indices]
# 1.2 Update teacher
teacher_exe.arg_dict['data'][:] = X_batch
if teacher_grad_f is None:
teacher_exe.arg_dict[teacher_label_key][:] = Y_batch
teacher_exe.forward(is_train=True)
teacher_exe.backward()
else:
teacher_exe.forward(is_train=True)
teacher_exe.backward(
teacher_grad_f(teacher_exe.outputs, nd.array(Y_batch, ctx=dev)))
for k in teacher_params:
teacher_updater(k, teacher_params_grad[k], teacher_params[k])
# 2.1 Draw random minibatch and do random perturbation
if task == 'classification':
indices = numpy.random.randint(X.shape[0], size=minibatch_size)
X_student_batch = X[indices] + numpy.random.normal(0,
perturb_deviation,
X_batch.shape).astype('float32')
else:
X_student_batch = mx.random.uniform(-6, 6, X_batch.shape, mx.cpu())
# 2.2 Get teacher predictions
teacher_exe.arg_dict['data'][:] = X_student_batch
teacher_exe.forward(is_train=False)
teacher_pred = teacher_exe.outputs[0]
teacher_pred.wait_to_read()
# 2.3 Update student
student_exe.arg_dict['data'][:] = X_student_batch
if student_grad_f is None:
student_exe.arg_dict[student_label_key][:] = teacher_pred
student_exe.forward(is_train=True)
student_exe.backward()
else:
student_exe.forward(is_train=True)
student_exe.backward(student_grad_f(student_exe.outputs, teacher_pred))
for k in student_params:
student_updater(k, student_params_grad[k], student_params[k])
if (i + 1) % 2000 == 0:
end = time.time()
if task == 'classification':
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
test_correct, test_total, test_acc = \
sample_test_acc(student_exe, X=X_test, Y=Y_test, label_num=10,
minibatch_size=minibatch_size)
train_correct, train_total, train_acc = \
sample_test_acc(student_exe, X=X, Y=Y, label_num=10,
minibatch_size=minibatch_size)
teacher_test_correct, teacher_test_total, teacher_test_acc = \
sample_test_acc(teacher_exe, X=X_test, Y=Y_test, label_num=10,
minibatch_size=minibatch_size)
teacher_train_correct, teacher_train_total, teacher_train_acc = \
sample_test_acc(teacher_exe, X=X, Y=Y, label_num=10,
minibatch_size=minibatch_size)
print("Student: Test ACC %d/%d=%f, Train ACC %d/%d=%f" % (test_correct, test_total,
test_acc, train_correct, train_total, train_acc))
print("Teacher: Test ACC %d/%d=%f, Train ACC %d/%d=%f" \
% (teacher_test_correct, teacher_test_total, teacher_test_acc,
teacher_train_correct, teacher_train_total, teacher_train_acc))
else:
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:",
sample_test_regression(exe=student_exe, X=X_test, Y=Y_test,
minibatch_size=minibatch_size,
save_path='regression_DSGLD.txt'))
start = time.time()
return student_exe, student_params, student_params_grad
|
|
from main import test
from random import choice
from collections import deque
class BefungeInterpreter(object):
def __init__(self, code, start_compiler=True):
self._queue = deque()
self._code = code
self._direction = ">"
self._pointer = (0, 0)
self._code_map = self._generate_code_map()
self._output = []
self._string_mode = False
if start_compiler: self._compile()
def next(self):
"""
Run current command, advance pointer.
:return:
"""
command = self._code_map.get(self._pointer)
self._execute(command)
self._pointer = self._next_step()
def _compile(self):
"""
Run code.
:return:
"""
while True:
try:
self.next()
except StopIteration:
break
def _generate_code_map(self):
"""
Map code to coordinates.
:return:
"""
x, y, result = 0, 0, {}
for k, v in enumerate(self._code):
if self._code[k:k + 1] == "\n":
y += 1
x = 0
else:
result[(x, y)] = v
x += 1
return result
def _next_step(self, direction=None):
"""
Given a direction and coordinates, find the location of the next step.
:param direction:
:return:
"""
if direction is None: direction = self._direction
(x, y) = self._pointer
max_x = max([_x for _x, _y in self._code_map.keys() if _y == y])
max_y = max([_y for _x, _y in self._code_map.keys() if _x == x])
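        # The playfield wraps around: leaving one edge re-enters on the opposite
        # edge, and in this implementation wrapping also advances the perpendicular
        # coordinate by one (see the _next_step unit tests further down).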
def right(x, y):
if x == max_x:
x = 0
y += 1
y = 0 if y > max_y else y
else:
x += 1
return x, y
def left(x, y):
if x == 0:
x = max_x
y -= 1
y = max_y if y < 0 else y
else:
x -= 1
return x, y
def down(x, y):
if y == max_y:
y = 0
x += 1
x = 0 if x > max_x else x
else:
y += 1
return x, y
def up(x, y):
if y == 0:
y = max_y
x -= 1
x = max_x if x < 0 else x
else:
y -= 1
return x, y
operation = {">": right, "v": down, "<": left, "^": up}
self._pointer = operation[direction](x, y)
return self._pointer
def __str__(self):
"""
Return object as string.
:return:
"""
output = self._output
if not output:
return ""
output = map(str, output)
return "".join(output)
def _execute(self, command):
"""
Execute command at current pointer.
:param command:
:return:
"""
def push_value(_):
self._queue.append(int(command))
def math(command):
a = int(self._queue.pop())
b = int(self._queue.pop())
if command == "+":
self._queue.append(a + b)
if command == "-":
self._queue.append(b - a)
if command == "*":
self._queue.append(a * b)
if command == "/":
                result = 0 if a == 0 else b // a
self._queue.append(result)
if command == "%":
                result = 0 if a == 0 else b % a
self._queue.append(result)
if command == "`":
result = 1 if b > a else 0
self._queue.append(result)
def direction(_):
self._direction = choice(
[">", "v", "<", "^"]) if command == "?" else command
def logical_not(_):
result = self._queue.pop()
result = 1 if result == 0 else 0
self._queue.append(result)
def pop_move(command):
value = self._queue.pop()
if command == "_":
if value == 0:
self._direction = ">"
else:
self._direction = "<"
if command == "|":
if value == 0:
self._direction = "v"
else:
self._direction = "^"
def string_mode_toggle(_):
            self._string_mode = not self._string_mode
def string_push(_):
self._queue.append(ord(command))
def duplicate_top(_):
if len(self._queue) > 0:
value = self._queue[-1]
self._queue.append(value)
else:
self._queue.append(0)
def swap_top(_):
if len(self._queue) < 2:
self._queue.appendleft(0)
a = self._queue.pop()
b = self._queue.pop()
self._queue.append(a)
self._queue.append(b)
def pop_value(command):
value = self._queue.pop()
if command == ".":
self._output.append(int(value))
if command == ",":
self._output.append(chr(value))
def trampoline(_):
self._next_step()
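        # Befunge "p" pops y, x and a value v, then writes chr(v) into the code map
        # at (x, y); "g" pops y, x and pushes the character code stored at (x, y).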
def storage(command):
y = int(self._queue.pop())
x = int(self._queue.pop())
if command == "p":
v = int(self._queue.pop())
self._code_map[(x, y)] = chr(v)
if command == "g":
character = ord(self._code_map.get((x, y)))
self._queue.append(character)
def end(_):
raise StopIteration
def skip(_):
pass
if self._string_mode and command != "\"":
string_push(command)
else:
commands_dict = {"0": push_value, "1": push_value, "2": push_value,
"3": push_value, "4": push_value, "5": push_value,
"6": push_value, "7": push_value, "8": push_value,
"9": push_value, "+": math, "-": math, "*": math,
"/": math, "%": math, "!": logical_not, "`": math,
">": direction, "<": direction, "^": direction,
"v": direction, "?": direction, "_": pop_move,
"|": pop_move, "\"": string_mode_toggle,
":": duplicate_top, "\\": swap_top, "$": pop_value,
".": pop_value, ",": pop_value, "#": trampoline,
"p": storage, "g": storage, "@": end, " ": skip}
commands_dict[command](command)
def interpret(code):
interpreter = BefungeInterpreter(code)
return str(interpreter)
test.it("Builds a code map on init")
test_code = "123\n" \
"456\n" \
"78@"
test_interpreter_1 = BefungeInterpreter(test_code, start_compiler=False)
test_code_map = {(0, 0): '1', (1, 0): '2', (2, 0): '3', (0, 1): '4',
(1, 1): '5', (2, 1): '6', (0, 2): '7', (1, 2): '8',
(2, 2): '@'}
test.assert_equals(test_interpreter_1._code_map, test_code_map)
test_code = "12a\n" \
"4 *\n" \
" \@"
test_interpreter_2 = BefungeInterpreter(test_code, start_compiler=False)
test_code_map = {(0, 0): '1', (1, 0): '2', (2, 0): 'a', (0, 1): '4',
(1, 1): ' ', (2, 1): '*', (0, 2): ' ', (1, 2): '\\',
(2, 2): '@'}
test.assert_equals(test_interpreter_2._code_map, test_code_map)
del test_code_map
test.it("It finds the coordinates of the next step")
test.assert_equals(test_interpreter_1._next_step(), (1, 0))
test.assert_equals(test_interpreter_1._next_step(direction="<"), (0, 0))
test.assert_equals(test_interpreter_1._next_step(direction="<"), (2, 2))
test.assert_equals(test_interpreter_1._next_step(direction="<"), (1, 2))
test.assert_equals(test_interpreter_1._next_step(direction="v"), (2, 0))
test.assert_equals(test_interpreter_1._next_step(direction="v"), (2, 1))
test.assert_equals(test_interpreter_1._next_step(direction="v"), (2, 2))
test.assert_equals(test_interpreter_1._next_step(direction="v"), (0, 0))
test.assert_equals(test_interpreter_1._next_step(direction="^"), (2, 2))
test.assert_equals(test_interpreter_1._next_step(direction="^"), (2, 1))
test.assert_equals(test_interpreter_1._next_step(direction="^"), (2, 0))
test.assert_equals(test_interpreter_1._next_step(direction="^"), (1, 2))
test.assert_equals(test_interpreter_1._next_step(direction="v"), (2, 0))
test.assert_equals(test_interpreter_1._next_step(direction=">"), (0, 1))
test.assert_equals(test_interpreter_1._next_step(direction="^"), (0, 0))
test.it("It executes commands: direction")
test_code = ">>v\n" \
">@?\n" \
"^<<"
test_interpreter_3 = BefungeInterpreter(test_code, start_compiler=False)
test_interpreter_3.next()
test_interpreter_3.next()
test.assert_equals(test_interpreter_3._pointer, (2, 0))
test_interpreter_3.next()
test.assert_equals(test_interpreter_3._direction, "v")
test_interpreter_3.next()
del test_interpreter_1
del test_interpreter_2
del test_interpreter_3
test.describe("Execute Commands")
test.it("push")
test_code = "132@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1, 3, 2]))
test.it("math")
test_code = "12-@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([-1]))
test_code = "12+@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([3]))
test_code = "46*@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([24]))
test_code = "74695*@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([7, 4, 6, 45]))
test_code = "10/@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "01/@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "92/@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([4]))
test_code = "93/@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([3]))
test_code = "93%@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "92%@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1]))
test_code = "97%@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([2]))
test_code = "20%@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "07%@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "32`@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1]))
test_code = "59`@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "55`@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "0!@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1]))
test_code = "1!@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "2!@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test.it("direction")
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._execute("<")
test.assert_equals(test_commands._direction, "<")
test_commands._execute("<")
test.assert_equals(test_commands._direction, "<")
test_commands._execute(">")
test.assert_equals(test_commands._direction, ">")
test_commands._execute("^")
test.assert_equals(test_commands._direction, "^")
test_commands._execute("v")
test.assert_equals(test_commands._direction, "v")
test.it("pop move")
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(1)
test_commands._execute("_")
test.assert_equals(test_commands._direction, "<")
test_commands._queue.append(0)
test_commands._execute("_")
test.assert_equals(test_commands._direction, ">")
test_commands._queue.append(0)
test_commands._execute("|")
test.assert_equals(test_commands._direction, "v")
test_commands._queue.append(1)
test_commands._execute("|")
test.assert_equals(test_commands._direction, "^")
test_code = "0 v \n" \
"@3_4@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([4]))
test_code = "1 v \n" \
"@3_4@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([3]))
test_code = 'v >3@\n' \
'>0| \n' \
' >4@'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([4]))
test_code = 'v >3@\n' \
'>1| \n' \
' >4@'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([3]))
test.it("string mode")
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test.assert_equals(test_commands._string_mode, False)
test_commands._execute("\"")
test.assert_equals(test_commands._string_mode, True)
test_commands._execute("\"")
test.assert_equals(test_commands._string_mode, False)
test_code = '"Test"@'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([84, 101, 115, 116]))
test.it("duplicates value on top of the stack")
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(1)
test_commands._execute(":")
test.assert_equals(test_commands._queue, deque([1, 1]))
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._execute(":")
test.assert_equals(test_commands._queue, deque([0]))
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(1)
test_commands._queue.append(2)
test_commands._execute(":")
test.assert_equals(test_commands._queue, deque([1, 2, 2]))
test_code = "1:@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1, 1]))
test_code = ":@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([0]))
test_code = "12:@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1, 2, 2]))
test.it("Swaps top 2 values")
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(1)
test_commands._queue.append(2)
test.assert_equals(test_commands._queue, deque([1, 2]))
test_commands._execute("\\")
test.assert_equals(test_commands._queue, deque([2, 1]))
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(1)
test.assert_equals(test_commands._queue, deque([1]))
test_commands._execute("\\")
test.assert_equals(test_commands._queue, deque([1, 0]))
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(1)
test_commands._queue.append(2)
test_commands._queue.append(3)
test_commands._queue.append(4)
test_commands._queue.append(5)
test_commands._execute("\\")
test.assert_equals(test_commands._queue, deque([1, 2, 3, 5, 4]))
test_code = "12\@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([2, 1]))
test_code = "1\@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1, 0]))
test_code = "12345\@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1, 2, 3, 5, 4]))
test.it("pops values")
test_code = "1$@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([]))
test.assert_equals(test_commands._output, [])
test_code = "12345$$$@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1, 2]))
test.assert_equals(test_commands._output, [])
test_code = "1.@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([]))
test.assert_equals(test_commands._output, [1])
test_code = "12345...@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([1, 2]))
test.assert_equals(test_commands._output, [5, 4, 3])
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(53)
test_commands._execute(",")
test.assert_equals(test_commands._queue, deque([]))
test.assert_equals(test_commands._output, ['5'])
test_code = "@"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(49)
test_commands._queue.append(50)
test_commands._queue.append(51)
test_commands._queue.append(52)
test_commands._queue.append(53)
test_commands._execute(",")
test_commands._execute(",")
test_commands._execute(",")
test.assert_equals(test_commands._queue, deque([49, 50]))
test.assert_equals(test_commands._output, ['5', '4', '3'])
test_code = "96*5-,@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([]))
test.assert_equals(test_commands._output, ['1'])
test_code = "96*5-96*4-96*3-96*2-96*1-,,,@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([49, 50]))
test.assert_equals(test_commands._output, ['5', '4', '3'])
test.it("trampoline")
test_code = "#1@"
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([]))
test_code = ' v\n' \
' #\n' \
' 1\n' \
' 2\n' \
' @'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([2]))
test_code = '#@v\n' \
' 3#\n' \
' 21\n' \
' # \n' \
' ^<'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([3]))
test_code = '#1#@ #2# #5<'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([5]))
test.it("gets and puts")
test_code = " @"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(49)
test_commands._queue.append(0)
test_commands._queue.append(0)
test.assert_equals(test_commands._code_map.get((0, 0)), ' ')
test_commands._execute("p")
test.assert_equals(test_commands._queue, deque([]))
test.assert_equals(test_commands._code_map.get((0, 0)), '1')
test_code = " @"
test_commands = BefungeInterpreter(test_code, start_compiler=False)
test_commands._queue.append(118)
test_commands._queue.append(1)
test_commands._queue.append(2)
test_commands._execute("p")
test.assert_equals(test_commands._queue, deque([]))
test.assert_equals(test_commands._code_map.get((1, 2)), 'v')
test_code = '#@96*5-12pv\n' \
' \n' \
' 2 \n' \
' ^ <'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._code_map.get((1, 2)), '1')
test.assert_equals(test_commands._queue, deque([1]))
test_code = '#210g@'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([50]))
test_code = '34g@\n' \
' \n' \
' \n' \
' \n' \
' z'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(test_commands._queue, deque([122]))
test.it("Converts output to string")
test_code = '"!olleH",,,,,,@'
test_commands = BefungeInterpreter(test_code)
test.assert_equals(str(test_commands), "Hello!")
del test_commands
test.describe("Sample Programs")
test_code = '"!olleH",,,,,,@'
test.assert_equals(interpret(test_code), "Hello!")
test_code = '>987v>.v\n' \
'v456< :\n' \
'>321 ^ _@'
test.assert_equals(interpret(test_code), '123456789')
test_code = '> v\n' \
'v ,,,,,"Hello"<\n' \
'>48*, v\n' \
'v,,,,,,"World!"<\n' \
'>25*,@'
test.assert_equals(interpret(test_code), 'Hello World!\n', "Hello World!")
test_code = '>25*"!dlrow ,olleH":v \n' \
' v:,_@\n' \
' > ^ '
test.assert_equals(interpret(test_code), 'Hello, world!\n', "Hello, world!")
test_code = '0"!dlroW ,olleH">:#,_@'
test.assert_equals(interpret(test_code), 'Hello, World!')
test_code = '01->1# +# :# 0# g# ,# :# 5# 8# *# 4# +# -# _@'
test.assert_equals(interpret(test_code), test_code)
test_code = '08>:1-:v v *_$.@\n' \
' ^ _$>\:^'
test.assert_equals(interpret(test_code), '40320')
|
|
"""
Module for application's configuration management.
The class is usually a base class for particular command line
interface.
The Config class handles configuration based on a configuration file
while defined command line options override values from the config file.
Instance of Config is a store of application's configuration values.
"""
import os
import ast
import logging
from future import standard_library
standard_library.install_aliases()
from configparser import RawConfigParser
from optparse import OptionParser, TitledHelpFormatter, OptionGroup
class ConfigurationException(Exception):
"""
Erroneous config file or illegal CLI options detected.
"""
pass
class Config(object):
"""
    Class holding various options and settings which are either predefined
    in the configuration file or overridden by command line options.
    The subclass has to populate self.options (a dictionary) and uses
    self.parser to define its command line interface.
"""
def __init__(self, args, configFileLocations=None, usage=None, mandInt=None, mandStr=None):
"""
        configFileLocations are full paths to the configuration files
        to use - the first location at which a file exists is taken, if
        the config file is not specified as a command line argument
        (config option).
"""
if not configFileLocations:
configFileLocations = []
if not mandInt:
mandInt = []
if not mandStr:
mandStr = []
form = TitledHelpFormatter(width=78)
self.parser = OptionParser(usage=usage,
formatter=form,
add_help_option=None)
self.options = {}
self.expertOptions = OptionGroup(self.parser, "Expert Options", "Caution: use these options at your own risk.")
self.mandatoryInt = mandInt
self.mandatoryStr = mandStr
# implemented in the subclass - particular command line interface
self.processCommandLineOptions(args)
        # self.options is now available - modify / add values according
# to the values found in the config file
self.processConfigFile(configFileLocations)
def parseConfigFile(self, fileName):
""" Parse configuration file, otherwise raise exception"""
if not os.path.exists(fileName):
raise ConfigurationException("Config file %s does not exist." %
fileName)
# Raw - doesn't do any interpolation
config = RawConfigParser()
# by default it seems that value names are converted to lower case,
# this way they should be case-sensitive
config.optionxform = str
config.read(fileName) # does not fail even on non existing file
self.options['configcontrols'] = {}
try:
for sectionName in config.sections():
for (name, value) in config.items(sectionName):
# setting only values which do not already exist, if a value
# already exists - it means it was specified on the command
# line and such value takes precedence over configuration file
# beware - attribute may exist from command line parser
# and be None - then set proper value here
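                    # values in the special [configcontrols] section are Python
                    # literals (e.g. dictionaries); they are parsed with
                    # ast.literal_eval and stored under options['configcontrols']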
if sectionName == 'configcontrols':
newVal = {}
try:
newVal = ast.literal_eval(value)
                        except Exception as ex:
                            msg = "Error while parsing %s, reason %s" % (fileName, ex)
raise ConfigurationException(msg)
self.options[sectionName][name] = newVal
continue
if self.get(name) is None:
self.options[name] = value
                        # post-process string values: convert 'True' / 'False'
                        # strings to the boolean values True / False
                        if isinstance(self.get(name), str):
# convert 'True', 'False' strings to bool values
# True, False
if value.lower() == 'true':
self.options[name] = True
if value.lower() == 'false':
self.options[name] = False
# if the configuration value is string and has been defined
# (in config file or CLI) with surrounding " or ', remove that
                # have to check type because among self.mandatoryStr there may be
                # boolean types ...
rVal = self.get(name)
                if isinstance(rVal, str):
if rVal[0] in ("'", '"'):
rVal = rVal[1:]
if rVal[-1] in ("'", '"'):
rVal = rVal[:-1]
self.options[name] = rVal
except Exception as ex:
msg = "Error while parsing %s, reason %s" % (fileName, ex)
raise ConfigurationException(msg)
        # save the location of the file from which the configuration was loaded
# apart from this newly defined config value, there will also be
# 'config' which remains None, unless specific config file
# specified on CLI
self.options["currentConfigFile"] = fileName
def processConfigFile(self, locations):
"""
        The name of the configuration file may be specified
        as a command line option, otherwise a default file is taken.
        At this point, there either is some self.options['config']
        containing the path to the configuration file, or the default
        locations will be used.
"""
if self.options.get("config", None):
self.parseConfigFile(self.options["config"])
else:
fname = ""
for name in locations:
# first existing file is taken
if os.path.exists(name):
fname = name
break
else:
msg = ("No configuration provided / found, tried: %s" %
locations)
raise ConfigurationException(msg)
self.parseConfigFile(fname)
def processCommandLineOptions(self, args):
""" processCommandLineOptions() which is subclassed """
del args
msg = ("processCommandLineOptions() not implemented, Config must be "
"subclassed.")
raise NotImplementedError(msg)
def get(self, what):
""" Custom get from the options """
val = self.options.get(what, None)
# if not defined - return None
return val
def sanitize(self):
"""
Checks that all mandatory configuration values are present and
have sensible values.
"""
# convert integer values to integers
for opt in self.mandatoryInt:
try:
val = self.get(opt)
i = int(val)
self.options[opt] = i
except (ValueError, TypeError):
msg = ("Illegal option '%s', expecting integer, got '%s'" %
(opt, val))
raise ConfigurationException(msg)
# checks only presence
for opt in self.mandatoryInt + self.mandatoryStr:
if self.get(opt) is None: # have to test specifically None
raise ConfigurationException("Mandatory option '%s' not "
"set." % opt)
# debug value is in fact string, need to convert it into proper
# logging level value (and test its validity)
name = self.get("debug")
try:
level = getattr(logging, name)
self.options["debug"] = level
except AttributeError:
raise ConfigurationException("Wrong value of debug output "
"level ('%s')." % name)
|
|
import re
import pytest
# For 'testdir' fixture, mostly
pytest_plugins = "pytester"
class Test_pytest_collect_file(object):
def test_only_loads_dot_py_files(self, testdir):
testdir.makepyfile(
somefile="""
def hello_how_are_you():
pass
"""
)
testdir.makefile(".txt", someotherfile="whatever")
stdout = testdir.runpytest().stdout.str()
# TODO: find it hard to believe pytest lacks strong "x in y" string
# testing, but I cannot find any outside of fnmatch_lines (which is
# specific to this testdir stuff, and also lacks an opposite...)
assert "somefile.py" in stdout
# This wouldn't actually even happen; we'd get an ImportError instead
# as pytest tries importing 'someotherfile'. But eh.
assert "whatever.txt" not in stdout
def test_skips_underscored_files(self, testdir):
testdir.makepyfile(
hastests="""
from _util import helper
def hello_how_are_you():
helper()
"""
)
testdir.makepyfile(
_util="""
def helper():
pass
"""
)
# TODO: why Result.str() and not str(Result)? Seems unPythonic
stdout = testdir.runpytest().stdout.str()
assert "hastests.py" in stdout
assert "_util.py" not in stdout
def test_skips_underscored_directories(self, testdir):
testdir.makepyfile(
hello="""
def hi_im_a_test_function():
pass
"""
)
# NOTE: this appears to work due to impl details of pytester._makefile;
# namely that the kwarg keys are handed directly to tmpdir.join(),
# where tmpdir is a py.path.LocalPath.
testdir.makepyfile(
**{
"_nope/yallo": """
def hi_im_not_a_test_function():
pass
"""
}
)
stdout = testdir.runpytest("-v").stdout.str()
assert "hi im a test function" in stdout
assert "hi im not a test function" not in stdout
def test_does_not_consume_conftest_files(self, testdir):
testdir.makepyfile(
actual_tests="""
def hello_how_are_you():
pass
"""
)
testdir.makepyfile(
conftest="""
def this_does_nothing_useful():
pass
"""
)
stdout = testdir.runpytest().stdout.str()
assert "actual_tests.py" in stdout
assert "conftest.py" not in stdout
class TestRelaxedMixin:
def test_selects_all_non_underscored_members(self, testdir):
testdir.makepyfile(
foo="""
def hello_how_are_you():
pass
def _help_me_understand():
pass
class YupThisIsTests:
def please_test_me_thx(self):
pass
def _helper_method_hi(self):
pass
class NestedTestClassAhoy:
def hello_I_am_a_test_method(self):
pass
def _but_I_am_not(self):
pass
class _NotSureWhyYouWouldDoThisButWhatever:
def this_should_not_appear(self):
pass
class _ForSomeReasonIAmDefinedHereButAmNotATest:
def usually_you_would_just_import_this_but_okay(self):
pass
"""
)
stdout = testdir.runpytest("-v").stdout.str()
for substring in (
"hello how are you",
"please test me thx",
"hello I am a test method",
):
assert substring in stdout
for substring in (
"help me understand",
"helper method hi",
"NotSureWhyYouWouldDoThisButWhatever",
"ForSomeReasonIAmDefinedHereButAmNotATest",
):
assert substring not in stdout
def test_skips_setup_and_teardown(self, testdir):
# TODO: probably other special names we're still missing?
testdir.makepyfile(
foo="""
def setup():
pass
def teardown():
pass
def actual_test_here():
pass
class Outer:
def setup(self):
pass
def teardown(self):
pass
def actual_nested_test_here(self):
pass
"""
)
stdout = testdir.runpytest("-v").stdout.str()
# These skipped. Gotta regex them because the test name includes the
# words 'setup' and 'teardown', heh.
assert not re.match(r"^setup$", stdout)
assert not re.match(r"^teardown$", stdout)
# Real tests not skipped
assert "actual test here" in stdout
assert "actual nested test here" in stdout
def test_setup_given_inner_class_instances_when_inherited(self, testdir):
# NOTE: without this functionality in place, we still see setup()
# called on a per-test-method basis, but where 'self' is the outer
# class, not the inner class! so anything actually touching 'self'
# breaks.
# TODO: should this pattern change to be something like a pytest
# per-class autouse fixture method?
# (https://docs.pytest.org/en/latest/fixture.html#autouse-fixtures-xunit-setup-on-steroids)
testdir.makepyfile(
foo="""
class Outer:
def setup(self):
self.some_attr = 17
class inner:
def actual_nested_test(self):
assert self.some_attr == 17
"""
)
assert testdir.runpytest().ret == 0
class TestSpecModule:
def test_skips_non_callable_items(self, testdir):
testdir.makepyfile(
foo="""
some_uncallable = 17
def some_callable():
pass
"""
)
stdout = testdir.runpytest("-v").stdout.str()
assert "some_uncallable" not in stdout
def test_skips_imported_objects(self, testdir):
testdir.makepyfile(
_util="""
def helper():
pass
class Helper:
pass
class NewHelper(object):
pass
"""
)
testdir.makepyfile(
foo="""
from _util import helper, Helper, NewHelper
def a_test_is_me():
pass
"""
)
stdout = testdir.runpytest("-v").stdout.str()
assert "a test is me" in stdout
assert "helper" not in stdout
assert "Helper" not in stdout
assert "NewHelper" not in stdout
def test_does_not_warn_about_imported_names(self, testdir):
# Trigger is something that appears callable but isn't a real function;
# almost any callable class seems to suffice. (Real world triggers are
# things like invoke/fabric Task objects.)
# Can also be triggered if our collection is buggy and does not
# explicitly reject imported classes (i.e. if we only reject funcs).
testdir.makepyfile(
_util="""
class Callable(object):
def __call__(self):
pass
helper = Callable()
class HelperClass:
def __init__(self):
pass
"""
)
testdir.makepyfile(
foo="""
from _util import helper, HelperClass
def a_test():
pass
"""
)
stdout = testdir.runpytest("-sv").stdout.str()
# TODO: more flexible test in case text changes? eh.
for warning in (
"cannot collect 'helper' because it is not a function",
"cannot collect test class 'HelperClass'",
):
assert warning not in stdout
def test_replaces_class_tests_with_custom_recursing_classes(self, testdir):
testdir.makepyfile(
foo="""
class Outer:
class Middle:
class Inner:
def oh_look_an_actual_test_method(self):
pass
"""
)
stdout = testdir.runpytest("-v").stdout.str()
expected = (
"""
Outer
Middle
Inner
oh look an actual test method
""".lstrip()
)
assert expected in stdout
def test_does_not_collect_test_prefixed_files(self, testdir):
# Incidentally also tests display stripping; the display test suite has
# explicit tests for that too tho.
testdir.makepyfile(
test_something="""
import unittest
class TestMyStuff(unittest.TestCase):
def test_things(self):
pass
"""
)
stdout = testdir.runpytest("-v").stdout.str()
expected = (
"""
MyStuff
things
""".lstrip()
)
assert expected in stdout
# Make sure no warnings were emitted; much of the time, our collection
# bits will cause nasty warnings if they end up consuming unittest
# stuff or otherwise doubling up on already-collected objects.
assert "warnings summary" not in stdout
@pytest.mark.skip
def test_correctly_handles_marked_test_cases(self, testdir):
# I.e. @pytest.mark.someflag objects at the class level...figure out
# how real collectors handle these exactly? the "actual" test class we
# normally care about is inside of it.
pass
class TestSpecInstance:
def test_methods_self_objects_exhibit_class_attributes(self, testdir):
# Mostly a sanity test; pytest seems to get out of the way enough that
# the test is truly a bound method & the 'self' is truly an instance of
# the class.
testdir.makepyfile(
foo="""
class MyClass:
an_attr = 5
def some_test(self):
assert hasattr(self, 'an_attr')
assert self.an_attr == 5
"""
)
# TODO: first thought was "why is this not automatic?", then realized
# "duh, it'd be annoying if you wanted to test failure related behavior
# a lot"...but still want some slightly nicer helper I think
assert testdir.runpytest().ret == 0
def test_nested_self_objects_exhibit_parent_attributes(self, testdir):
# TODO: really starting to think going back to 'real' fixture files
# makes more sense; this is all real python code and is eval'd as such,
# but it is only editable and viewable as a string. No highlighting.
testdir.makepyfile(
foo="""
class MyClass:
an_attr = 5
class Inner:
def inner_test(self):
assert hasattr(self, 'an_attr')
assert self.an_attr == 5
"""
)
assert testdir.runpytest().ret == 0
def test_nesting_is_infinite(self, testdir):
testdir.makepyfile(
foo="""
class MyClass:
an_attr = 5
class Inner:
class Deeper:
class EvenDeeper:
def innermost_test(self):
assert hasattr(self, 'an_attr')
assert self.an_attr == 5
"""
)
assert testdir.runpytest().ret == 0
def test_overriding_works_naturally(self, testdir):
testdir.makepyfile(
foo="""
class MyClass:
an_attr = 5
class Inner:
an_attr = 7
def inner_test(self):
assert self.an_attr == 7
"""
)
assert testdir.runpytest().ret == 0
def test_normal_methods_from_outer_classes_are_not_copied(self, testdir):
testdir.makepyfile(
foo="""
class MyClass:
def outer_test(self):
pass
class Inner:
def inner_test(self):
assert not hasattr(self, 'outer_test')
"""
)
assert testdir.runpytest().ret == 0
def test_private_methods_from_outer_classes_are_copied(self, testdir):
testdir.makepyfile(
foo="""
class MyClass:
def outer_test(self):
pass
def _outer_helper(self):
pass
class Inner:
def inner_test(self):
assert not hasattr(self, 'outer_test')
assert hasattr(self, '_outer_helper')
"""
)
assert testdir.runpytest().ret == 0
def test_module_contents_are_not_copied_into_top_level_classes(
self, testdir
):
testdir.makepyfile(
foo="""
module_constant = 17
class MyClass:
def outer_test(self):
assert not hasattr(self, 'module_constant')
"""
)
assert testdir.runpytest().ret == 0
|
|
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file contains the DataConnector class, defined below."""
import os
import yaml
from threading import RLock
from ext.aboard.model import exceptions as mod_exceptions
from ext.aboard.model.functions import *
class DataConnector:
"""Class representing a data connector, a wrapper to access datas.
The DataConnector is an abstrat class, which SHOULD NOT be
instanciated, but inherited from the usable data connectors.
Each data connector represents a way to access organized datas,
as a SQL driver or alike.
Method to define in the subclass:
__init__(self) -- mainly check (if needed) the driver presence
setup(self) -- setup the data connector
setup_test(self) -- setup the driver with test configurations
close(self) -- close the data connector (close connection if needed)
    clear(self) -- clear the stored data (ERASE ALL)
    destroy(self) -- destroy the data connector and clear all data
record_models(self, models) -- record the given models
    record_model(self, model) -- record a specific model
get_all_objects(self, model) -- return all model's objects
find_object(self, model, pkey_values) -- find an object
add_object(self, object) -- save a new object
update_object(self, object, attribute, old_value) -- update an object
remove_object(self, object) -- delete a stored object
In addition, the created or retrieved objects are stored in cache.
Methods to access or manipulate cached objects:
get_from_cache(self, model, primary_attributes)
cache_object(self, object)
uncache(self, object)
clear_cache(self)
    For more information, see the details of each method.
"""
name = "unspecified"
def __init__(self):
"""Initialize the data connector."""
self.running = False
self.objects_tree = {}
self.models = {}
self.deleted_objects = []
# Locks for threads
self.u_lock = RLock()
def setup(self):
"""Setup the data connector."""
raise NotImplementedError
def setup_test(self):
"""Setup the data connector with test information."""
cfg_dir = "ext/aboard/tests/config/dc"
cfg_path = cfg_dir + "/" + self.name + ".yml"
def_cfg_path = "ext/aboard/dc/" + self.name + "/parameters.yml"
if not os.path.exists(cfg_path):
if not os.path.exists(cfg_dir):
os.makedirs(cfg_dir)
with open(def_cfg_path, "r") as cfg_file:
cfg_content = cfg_file.read()
with open(cfg_path, "w") as cfg_file:
cfg_file.write(cfg_content)
else:
with open(cfg_path, "r") as cfg_file:
cfg_content = cfg_file.read()
        cfg_dict = yaml.safe_load(cfg_content)
self.setup(**cfg_dict)
def close(self):
"""Close the data connector (the database connection for instance)."""
raise NotImplementedError
def clear(self):
"""Clear the stored datas and register the models."""
self.objects_tree = {}
self.record_models(list(self.models.values()))
def destroy(self):
"""Destroy and erase EVERY stored data."""
raise NotImplementedError
def record_models(self, models):
"""Record the given models.
The parameter must be a list of classes. Each class must
be a model.
"""
for model in models:
self.record_model(model)
self.running = True
def record_model(self, model):
"""Record the given model, a subclass of model.Model."""
name = get_name(model)
self.models[name] = model
self.objects_tree[name] = {}
return name
def loop(self):
"""Record some datas or commit some changes if necessary."""
pass
def get_all_objects(self, model):
"""Return all the model's object in a list."""
raise NotImplementedError
def find_object(self, model, pkey_values):
"""Return, if found, the selected object.
Raise a model.exceptions.ObjectNotFound if not found.
"""
raise NotImplementedError
def add_object(self, object):
"""Save the object, issued from a model.
Usually this method should:
- Save the object (in a database, for instance)
- Cache the object.
"""
raise NotImplementedError
def update_object(self, object, attribute, old_value):
"""Update an object."""
raise NotImplementedError
def remove_object(self, object):
"""Delete object from cache."""
raise NotImplementedError
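    # Cache layout: self.objects_tree maps a model name to a dictionary keyed by the
    # object's primary key (a single value for a simple key, a tuple of values for a
    # composite key); the helpers below read and maintain this structure.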
def get_from_cache(self, model, attributes):
"""Return, if found, the cached object.
The expected parameters are:
model -- the model (Model subclass)
attributes -- a dictionary {name1: value1, ...}
"""
name = get_name(model)
pkey_names = get_pkey_names(model)
cache = self.objects_tree.get(name, {})
values = tuple(attributes.get(name) for name in pkey_names)
if len(values) == 1:
values = values[0]
return cache.get(values)
def cache_object(self, object):
"""Save the object in cache."""
pkey = get_pkey_values(object)
if len(pkey) == 1:
pkey = pkey[0]
self.objects_tree[get_name(type(object))][pkey] = object
def uncache_object(self, object):
"""Remove the object from cache."""
name = get_name(type(object))
values = tuple(get_pkey_values(object))
if len(values) == 1:
values = values[0]
cache = self.objects_tree.get(name, {})
if values in cache.keys():
del cache[values]
self.deleted_objects.append((name, values))
def update_cache(self, object, field, old_value):
"""This method is called to update the cache for an object.
If the field is one of the primary keys, then it should be
updated in the cache too.
"""
attr = field.field_name
if old_value is None:
return
if not field.pkey:
return
pkey = get_pkey_values(object)
old_pkey = get_pkey_values(object, {attr: old_value})
if len(pkey) == 1:
pkey = pkey[0]
old_pkey = old_pkey[0]
name = get_name(type(object))
tree = self.objects_tree[name]
if old_pkey in tree:
del tree[old_pkey]
tree[pkey] = object
def clear_cache(self):
"""Clear the cache."""
self.objects_tree = {}
def check_update(self, object):
"""Raise a ValueError if the object was deleted."""
if self.was_deleted(object):
raise mod_exceptions.UpdateDeletedObject(object)
def was_deleted(self, object):
"""Return whether the object was deleted (uncached)."""
name = get_name(type(object))
values = tuple(get_pkey_values(object))
if len(values) == 1:
values = values[0]
return (name, values) in self.deleted_objects
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
import sys
from oslo.config import cfg
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import db
from nova import exception
from nova.image import glance
from nova import notifications
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import timeutils
from nova import servicegroup
LOG = logging.getLogger(__name__)
scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='nova.scheduler.host_manager.HostManager',
help='The scheduler host manager class to use'),
cfg.IntOpt('scheduler_max_attempts',
default=3,
help='Maximum number of attempts to schedule an instance'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
def handle_schedule_error(context, ex, instance_uuid, request_spec):
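    # Mark the instance as ERROR, record the fault and notify on the transition, so
    # operators can see why the request could not be scheduled (typically a
    # NoValidHost condition).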
if not isinstance(ex, exception.NoValidHost):
LOG.exception(_("Exception during scheduler.run_instance"))
state = vm_states.ERROR.upper()
LOG.warning(_('Setting instance to %(state)s state.'),
locals(), instance_uuid=instance_uuid)
# update instance state and notify on the transition
(old_ref, new_ref) = db.instance_update_and_get_original(context,
instance_uuid, {'vm_state': vm_states.ERROR,
'task_state': None})
notifications.send_update(context, old_ref, new_ref,
service="scheduler")
compute_utils.add_instance_fault_from_exc(context,
conductor_api.LocalAPI(),
new_ref, ex, sys.exc_info())
properties = request_spec.get('instance_properties', {})
payload = dict(request_spec=request_spec,
instance_properties=properties,
instance_id=instance_uuid,
state=vm_states.ERROR,
method='run_instance',
reason=ex)
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance', notifier.ERROR, payload)
def instance_update_db(context, instance_uuid, extra_values=None):
'''Clear the host and node - set the scheduled_at field of an Instance.
:returns: An Instance with the updated fields set properly.
'''
now = timeutils.utcnow()
values = {'host': None, 'node': None, 'scheduled_at': now}
if extra_values:
values.update(extra_values)
return db.instance_update(context, instance_uuid, values)
def encode_instance(instance, local=True):
"""Encode locally created instance for return via RPC."""
# TODO(comstud): I would love to be able to return the full
# instance information here, but we'll need some modifications
# to the RPC code to handle datetime conversions with the
# json encoding/decoding. We should be able to set a default
# json handler somehow to do it.
#
# For now, I'll just return the instance ID and let the caller
# do a DB lookup :-/
if local:
return dict(id=instance['id'], _is_precooked=False)
else:
inst = dict(instance)
inst['_is_precooked'] = True
return inst
class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
def __init__(self):
self.host_manager = importutils.import_object(
CONF.scheduler_host_manager)
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
self.image_service = glance.get_default_image_service()
def update_service_capabilities(self, service_name, host, capabilities):
"""Process a capability update from a service node."""
self.host_manager.update_service_capabilities(service_name,
host, capabilities)
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
services = db.service_get_all_by_topic(context, topic)
return [service['host']
for service in services
if self.servicegroup_api.service_is_up(service)]
def group_hosts(self, context, group):
"""Return the list of hosts that have VM's from the group."""
# The system_metadata 'group' will be filtered
members = db.instance_get_all_by_filters(context,
{'deleted': False, 'group': group})
return [member['host']
for member in members
if member.get('host') is not None]
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations):
"""Must override schedule_prep_resize method for scheduler to work."""
msg = _("Driver must implement schedule_prep_resize")
raise NotImplementedError(msg)
def schedule_reserve_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties):
"""Must override schedule_reserve_instance method for scheduler
to work."""
msg = _("Driver must implement schedule_reserve_instance")
raise NotImplementedError(msg)
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties):
"""Must override schedule_run_instance method for scheduler to work."""
msg = _("Driver must implement schedule_run_instance")
raise NotImplementedError(msg)
def select_hosts(self, context, request_spec, filter_properties):
"""Must override select_hosts method for scheduler to work."""
msg = _("Driver must implement select_hosts")
raise NotImplementedError(msg)
def schedule_live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
"""Live migration scheduling method.
:param context:
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, block_migration.
:param disk_over_commit: if True, consider real(not virtual)
disk size.
:return:
The host where instance is running currently.
Then scheduler send request that host.
"""
# Check we can do live migration
self._live_migration_src_check(context, instance)
if dest is None:
# Let scheduler select a dest host, retry next best until success
# or no more valid hosts.
ignore_hosts = [instance['host']]
while dest is None:
dest = self._live_migration_dest_check(context, instance, dest,
ignore_hosts)
try:
self._live_migration_common_check(context, instance, dest)
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_destination(context, instance,
dest,
block_migration,
disk_over_commit)
except exception.Invalid:
ignore_hosts.append(dest)
dest = None
continue
else:
# Test the given dest host
self._live_migration_dest_check(context, instance, dest)
self._live_migration_common_check(context, instance, dest)
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_destination(context, instance, dest,
block_migration,
disk_over_commit)
# Perform migration
src = instance['host']
self.compute_rpcapi.live_migration(context, host=src,
instance=instance, dest=dest,
block_migration=block_migration,
migrate_data=migrate_data)
def _live_migration_src_check(self, context, instance_ref):
"""Live migration check routine (for src host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(johngar) why is this not in the API layer?
# Checking instance is running.
if instance_ref['power_state'] != power_state.RUNNING:
raise exception.InstanceNotRunning(
instance_id=instance_ref['uuid'])
        # Check that the src host exists and is a compute node.
src = instance_ref['host']
try:
service = db.service_get_by_compute_host(context, src)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=src)
# Checking src host is alive.
if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest,
ignore_hosts=None):
"""Live migration check routine (for destination host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
:param ignore_hosts: hosts that should be avoided as dest host
"""
# If dest is not specified, have scheduler pick one.
if dest is None:
instance_type = db.instance_type_get(
context, instance_ref['instance_type_id'])
image = self.image_service.show(context, instance_ref['image_ref'])
request_spec = {'instance_properties': instance_ref,
'instance_type': instance_type,
'instance_uuids': [instance_ref['uuid']],
'image': image}
filter_properties = {'ignore_hosts': ignore_hosts}
return self.select_hosts(context, request_spec,
filter_properties)[0]
        # Check that the host where the instance is currently running
        # and dest are not the same host.
src = instance_ref['host']
if dest == src:
raise exception.UnableToMigrateToSelf(
instance_id=instance_ref['uuid'], host=dest)
        # Check that dest exists and is a compute node.
try:
dservice_ref = db.service_get_by_compute_host(context, dest)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=dest)
# Checking dest host is alive.
if not self.servicegroup_api.service_is_up(dservice_ref):
raise exception.ComputeServiceUnavailable(host=dest)
# Check memory requirements
self._assert_compute_node_has_enough_memory(context,
instance_ref, dest)
return dest
def _live_migration_common_check(self, context, instance_ref, dest):
"""Live migration common check routine.
The following checks are based on
http://wiki.libvirt.org/page/TodoPreMigrationChecks
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
dservice_ref = self._get_compute_info(context, dest)
src = instance_ref['host']
oservice_ref = self._get_compute_info(context, src)
# Checking hypervisor is same.
orig_hypervisor = oservice_ref['hypervisor_type']
dest_hypervisor = dservice_ref['hypervisor_type']
if orig_hypervisor != dest_hypervisor:
raise exception.InvalidHypervisorType()
# Checking hypervisor version.
orig_hypervisor = oservice_ref['hypervisor_version']
dest_hypervisor = dservice_ref['hypervisor_version']
if orig_hypervisor > dest_hypervisor:
raise exception.DestinationHypervisorTooOld()
def _assert_compute_node_has_enough_memory(self, context,
instance_ref, dest):
"""Checks if destination host has enough memory for live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
# Getting total available memory of host
avail = self._get_compute_info(context, dest)['free_ram_mb']
mem_inst = instance_ref['memory_mb']
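        # The migration is refused when the instance memory is unknown or the
        # destination's free RAM does not strictly exceed it.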
if not mem_inst or avail <= mem_inst:
instance_uuid = instance_ref['uuid']
reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
def _get_compute_info(self, context, host):
"""get compute node's information specified by key
:param context: security context
:param host: hostname(must be compute node)
:param key: column name of compute_nodes
:return: value specified by key
"""
service_ref = db.service_get_by_compute_host(context, host)
return service_ref['compute_node'][0]
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.conf import settings
from horizon import exceptions
from openstack_dashboard.api import base as api_base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import glance
from openstack_dashboard.api import keystone
from openstack_dashboard.test import helpers as test
class APIResource(api_base.APIResourceWrapper):
"""Simple APIResource for testing."""
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerObject=None):
if innerObject is None:
class InnerAPIResource(object):
pass
innerObject = InnerAPIResource()
innerObject.foo = 'foo'
innerObject.bar = 'bar'
return APIResource(innerObject)
class APIDict(api_base.APIDictWrapper):
"""Simple APIDict for testing."""
_attrs = ['foo', 'bar', 'baz']
@staticmethod
def get_instance(innerDict=None):
if innerDict is None:
innerDict = {'foo': 'foo',
'bar': 'bar'}
return APIDict(innerDict)
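# For example, APIDict.get_instance().foo and APIDict.get_instance()['foo']
# both return 'foo', while 'baz' (declared in _attrs but missing from the
# wrapped dict) raises AttributeError / KeyError, as the tests below verify.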
# Wrapper classes that only define _attrs don't need extra testing.
class APIResourceWrapperTests(test.TestCase):
def test_get_attribute(self):
resource = APIResource.get_instance()
self.assertEqual('foo', resource.foo)
def test_get_invalid_attribute(self):
resource = APIResource.get_instance()
self.assertNotIn(
'missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
def test_get_inner_missing_attribute(self):
resource = APIResource.get_instance()
with self.assertRaises(AttributeError):
resource.baz
def test_repr(self):
resource = APIResource.get_instance()
        resource_str = repr(resource)
self.assertIn('foo', resource_str)
self.assertIn('bar', resource_str)
self.assertNotIn('baz', resource_str)
class APIDictWrapperTests(test.TestCase):
    # APIDict allows both attribute access and dictionary-style [element]
    # access. Test both.
def test_get_item(self):
resource = APIDict.get_instance()
self.assertEqual('foo', resource.foo)
self.assertEqual('foo', resource['foo'])
def test_get_invalid_item(self):
resource = APIDict.get_instance()
self.assertNotIn(
'missing', resource._attrs,
msg="Test assumption broken. Find new missing attribute")
with self.assertRaises(AttributeError):
resource.missing
with self.assertRaises(KeyError):
resource['missing']
def test_get_inner_missing_attribute(self):
resource = APIDict.get_instance()
with self.assertRaises(AttributeError):
resource.baz
with self.assertRaises(KeyError):
resource['baz']
def test_get_with_default(self):
resource = APIDict.get_instance()
self.assertEqual('foo', resource.get('foo'))
self.assertIsNone(resource.get('baz'))
self.assertEqual('retValue', resource.get('baz', 'retValue'))
def test_get_with_non_str(self):
resource = APIDict.get_instance()
self.assertNotIn(0, resource._attrs,
msg="Test assumption broken. "
"Find new missing attribute.")
self.assertIsNone(resource.get(0))
self.assertEqual('retValue', resource.get(0, 'retValue'))
def test_get_item_non_str(self):
resource = APIDict.get_instance()
self.assertNotIn(0, resource._attrs,
msg="Test assumption broken. "
"Find new missing attribute.")
with self.assertRaises(KeyError):
resource[0]
def test_in_not_there_str(self):
resource = APIDict.get_instance()
self.assertNotIn('missing', resource._attrs,
msg="Test assumption broken. "
"Find new missing attribute.")
# We're primarily interested in this test NOT raising a TypeError.
self.assertFalse('missing' in resource)
def test_in_not_there_non_str(self):
resource = APIDict.get_instance()
self.assertNotIn(0, resource._attrs,
msg="Test assumption broken. "
"Find new missing attribute.")
# We're primarily interested in this test NOT raising a TypeError.
self.assertFalse(0 in resource)
class ApiVersionTests(test.TestCase):
def setUp(self):
super(ApiVersionTests, self).setUp()
self.previous_settings = settings.OPENSTACK_API_VERSIONS
settings.OPENSTACK_API_VERSIONS = {
"data-processing": 1.1,
"identity": "2.0",
"volume": 1
}
# Make sure cached data from other tests doesn't interfere
cinder.VERSIONS.clear_active_cache()
keystone.VERSIONS.clear_active_cache()
glance.VERSIONS.clear_active_cache()
def tearDown(self):
super(ApiVersionTests, self).tearDown()
settings.OPENSTACK_API_VERSIONS = self.previous_settings
# Clear out our bogus data so it doesn't interfere
cinder.VERSIONS.clear_active_cache()
keystone.VERSIONS.clear_active_cache()
glance.VERSIONS.clear_active_cache()
def test_invalid_versions(self):
with self.assertRaises(exceptions.ConfigurationError):
getattr(keystone.VERSIONS, 'active')
with self.assertRaises(exceptions.ConfigurationError):
getattr(cinder.VERSIONS, 'active')
try:
getattr(glance.VERSIONS, 'active')
except exceptions.ConfigurationError:
self.fail("ConfigurationError raised inappropriately.")
class ApiHelperTests(test.TestCase):
"""Tests for functions that don't use one of the api objects."""
def test_url_for(self):
url = api_base.url_for(self.request, 'image')
self.assertEqual('http://public.glance.example.com:9292/v1', url)
url = api_base.url_for(self.request, 'image', endpoint_type='adminURL')
self.assertEqual('http://admin.glance.example.com:9292/v1', url)
url = api_base.url_for(self.request, 'compute')
self.assertEqual('http://public.nova.example.com:8774/v2', url)
url = api_base.url_for(self.request, 'compute',
endpoint_type='adminURL')
self.assertEqual('http://admin.nova.example.com:8774/v2', url)
url = api_base.url_for(self.request, 'volume')
self.assertEqual('http://public.nova.example.com:8776/v1', url)
url = api_base.url_for(self.request, 'volume',
endpoint_type="internalURL")
self.assertEqual('http://int.nova.example.com:8776/v1', url)
url = api_base.url_for(self.request, 'volume',
endpoint_type='adminURL')
self.assertEqual('http://admin.nova.example.com:8776/v1', url)
self.assertNotIn('notAnApi', self.request.user.service_catalog,
'Select a new nonexistent service catalog key')
with self.assertRaises(exceptions.ServiceCatalogException):
url = api_base.url_for(self.request, 'notAnApi')
self.request.user.services_region = "RegionTwo"
url = api_base.url_for(self.request, 'compute')
self.assertEqual('http://public.nova2.example.com:8774/v2', url)
self.request.user.services_region = "RegionTwo"
url = api_base.url_for(self.request, 'compute',
endpoint_type='adminURL')
self.assertEqual('http://admin.nova2.example.com:8774/v2', url)
self.request.user.services_region = "RegionTwo"
with self.assertRaises(exceptions.ServiceCatalogException):
url = api_base.url_for(self.request, 'image')
self.request.user.services_region = "bogus_value"
url = api_base.url_for(self.request, 'identity',
endpoint_type='adminURL')
self.assertEqual('http://admin.keystone.example.com:35357/v2.0', url)
self.request.user.services_region = "bogus_value"
with self.assertRaises(exceptions.ServiceCatalogException):
url = api_base.url_for(self.request, 'image')
class QuotaSetTests(test.TestCase):
def test_quotaset_add_with_plus(self):
quota_dict = {'foo': 1, 'bar': 10}
other_quota_dict = {'my_test': 12}
quota_set = api_base.QuotaSet(quota_dict)
other_quota_set = api_base.QuotaSet(other_quota_dict)
quota_set += other_quota_set
self.assertEqual(3, len(quota_set))
quota_dict.update(other_quota_dict)
for q in quota_set:
self.assertEqual(quota_dict[q.name], q.limit)
def test_quotaset_add_doesnt_override_existing_quota(self):
quota_dict = {'foo': 1, 'bar': 10}
quota_set = api_base.QuotaSet(quota_dict)
other_quota_set = api_base.QuotaSet({'foo': 12})
quota_set += other_quota_set
self.assertEqual(2, len(quota_set))
for q in quota_set:
self.assertEqual(quota_dict[q.name], q.limit)
def test_quotaset_add_method(self):
quota_dict = {'foo': 1, 'bar': 10}
other_quota_dict = {'my_test': 12}
quota_set = api_base.QuotaSet(quota_dict)
other_quota_set = api_base.QuotaSet(other_quota_dict)
quota_set.add(other_quota_set)
self.assertEqual(3, len(quota_set))
quota_dict.update(other_quota_dict)
for q in quota_set:
self.assertEqual(quota_dict[q.name], q.limit)
def test_quotaset_add_with_wrong_type(self):
quota_set = api_base.QuotaSet({'foo': 1, 'bar': 10})
self.assertRaises(ValueError, quota_set.add, {'test': 7})
|
|
from __future__ import absolute_import
import os
os.environ['PYTHONINSPECT'] = '1'
from pdb import pm
import ctypes
import socket
import struct
import subprocess
import random
from pprint import pprint
SO_ATTACH_FILTER = 26
class BpfProgram(ctypes.Structure):
_fields_ = [
('bf_len', ctypes.c_int),
('bf_insns', ctypes.c_void_p)
]
class EthernetHeader(ctypes.BigEndianStructure):
_fields_ = [
        ('dst', ctypes.c_ubyte * 6),   # destination MAC comes first on the wire
        ('src', ctypes.c_ubyte * 6),
('type', ctypes.c_uint16)
]
class IpHeader(ctypes.BigEndianStructure):
_pack_ = 1
_fields_ = [
('version', ctypes.c_ubyte, 4),
('header_length', ctypes.c_ubyte, 4),
('dscp', ctypes.c_ubyte, 6),
('ecn', ctypes.c_ubyte, 2),
('total_length', ctypes.c_uint16),
('ipid', ctypes.c_uint16),
('flags', ctypes.c_uint16, 3),
('frag_offset', ctypes.c_uint16, 13),
('ttl', ctypes.c_uint8),
('protocol', ctypes.c_uint8),
('checksum', ctypes.c_uint16),
('src', ctypes.c_uint32),
('dst', ctypes.c_uint32)
]
class TcpHeader(ctypes.BigEndianStructure):
_pack_ = 1
_fields_ = [
('sport', ctypes.c_uint16),
('dport', ctypes.c_uint16),
('seq', ctypes.c_uint32),
        ('acknum', ctypes.c_uint32),   # renamed: 'ack' would clash with the flag bit below
('data_offset', ctypes.c_uint16, 4),
('reserved', ctypes.c_uint16, 3),
('ns', ctypes.c_uint16, 1),
('cwr', ctypes.c_uint16, 1),
('ece', ctypes.c_uint16, 1),
('urg', ctypes.c_uint16, 1),
('ack', ctypes.c_uint16, 1),
('psh', ctypes.c_uint16, 1),
('rst', ctypes.c_uint16, 1),
('syn', ctypes.c_uint16, 1),
('fin', ctypes.c_uint16, 1),
('window_size', ctypes.c_uint16),
('checksum', ctypes.c_uint16),
('urg_ptr', ctypes.c_uint16),
('options', ctypes.c_ubyte*40)
]
def compile_expr(s):
proc = subprocess.Popen(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
args=['tcpdump', '-ddd', s])
stdout, stderr = proc.communicate()
lines = stdout.splitlines()
if not lines[0].rstrip().isdigit():
raise RuntimeError('tcpdump: ' + stderr)
length = int(lines[0])
return length, ''.join(struct.pack('=HBBL', *map(int, l.split()))
for l in lines[1:])
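# For reference, on an Ethernet interface ``tcpdump -ddd ip`` prints the
# instruction count followed by one "code jt jf k" quadruple per line,
# roughly:
#   4
#   40 0 0 12
#   21 0 1 2048
#   6 0 0 262144
#   6 0 0 0
# Each quadruple is packed above into an 8-byte sock_filter ('=HBBL');
# the exact return values vary between libpcap versions.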
def attach_filter(sock, expr):
length, compiled = compile_expr(expr)
cbuf = ctypes.create_string_buffer(compiled)
prog = BpfProgram()
prog.bf_len = length
prog.bf_insns = ctypes.addressof(cbuf)
sock.setsockopt(socket.SOL_SOCKET, SO_ATTACH_FILTER, buffer(prog))
def dump(o):
flds = [x[0] for x in type(o)._fields_]
print ([(n, getattr(o, n)) for n in flds])
def checksum1(o, s=0):
s += o
s = (s & 0xffff) + (s >> 16)
return ~s & 0xffff
def checksum(ba, start, end, s=0):
    # Ones' complement sum of 16-bit words, taken in network byte order.
    for i in xrange(start, end, 2):
        s += (ba[i] << 8) + ba[i + 1]
        s = (s & 0xffff) + (s >> 16)
    return ~s & 0xffff
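# Sanity-check example (not executed here): for the well-known 20-byte IPv4
# header 45 00 00 73 00 00 40 00 40 11 00 00 c0 a8 00 01 c0 a8 00 c7, with
# the checksum field zeroed, checksum(header, 0, 20) yields 0xb861, the value
# that belongs in the checksum field.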
def tcp_checksum(tcp_len=20):
    # Checksum over the pseudo-header (src/dst IP, protocol, TCP length) and
    # the TCP header itself, with the checksum field zeroed beforehand.
    otcphdr.checksum = 0
    s = socket.IPPROTO_TCP + tcp_len
    for i in xrange(14 + 12, 14 + 20, 2):
        s += (odata[i] << 8) + odata[i + 1]
    for i in xrange(34, 34 + tcp_len, 2):
        s += (odata[i] << 8) + odata[i + 1]
    while s >> 16:
        s = (s & 0xffff) + (s >> 16)
    otcphdr.checksum = ~s & 0xffff
conns = {}
def handshake1(addr):
isn = random.randint(0, 0xffffffff)
tcp = {
'addr': addr,
'func': handshake2,
'risn': itcphdr.seq,
'rseq': itcphdr.seq + 1,
'wisn': isn,
'wseq': isn
}
conns[addr] = tcp
oethhdr.src = iethhdr.dst
oethhdr.dst = iethhdr.src
oethhdr.type = 0x800
oiphdr.version = 4
    oiphdr.header_length = 5    # IHL is in 32-bit words: 5 * 4 = 20-byte header
    oiphdr.total_length = 40    # 20-byte IP header + 20-byte TCP header
oiphdr.ttl = 255
oiphdr.ipid = iiphdr.ipid
oiphdr.dst = iiphdr.src
oiphdr.src = iiphdr.dst
oiphdr.checksum = 0
oiphdr.checksum = checksum(odata, 14, 34)
otcphdr.sport = itcphdr.dport
otcphdr.dport = itcphdr.sport
    otcphdr.seq = tcp['wseq']
    otcphdr.acknum = tcp['rseq']
otcphdr.data_offset = 5
otcphdr.syn = 1
otcphdr.ack = 1
otcphdr.window_size = 0xffff
tcp_checksum()
print 'out',
dump(oethhdr)
print 'out',
dump(oiphdr)
def handshake2(d):
pass
idata = bytearray(4000)
iethhdr = EthernetHeader.from_buffer(idata, 0)
iiphdr = IpHeader.from_buffer(idata, 14)
itcphdr = TcpHeader.from_buffer(idata, 34)
odata = bytearray(4000)
oethhdr = EthernetHeader.from_buffer(odata)
oiphdr = IpHeader.from_buffer(odata, 14)
otcphdr = TcpHeader.from_buffer(odata, 34)
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0800))
attach_filter(sock, 'dst host 178.63.48.10 and port 1800')
sock.bind(('eth0', 0x0800))
while 1:
length = sock.recv_into(idata)
print
dump(iethhdr)
dump(iiphdr)
dump(itcphdr)
addr = (iiphdr.src, itcphdr.sport)
d = conns.get(addr)
if d:
d['func'](d)
else:
handshake1(addr)
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example implementation of code to run on the Cloud ML service.
This file is generic and can be reused by other models without modification.
The only assumption this module makes is that there exists a model module
implementing a create_model() function. That function returns a model object
providing problem-specific implementations of build_train_graph(),
build_eval_graph(), build_prediction_graph() and format_metric_values().
"""
import argparse
import json
import logging
import os
import shutil
import subprocess
import time
import uuid
from . import model as model_lib
import tensorflow as tf
from tensorflow.python.lib.io import file_io
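# The companion model module is assumed (per the module docstring above) to
# provide roughly the following interface; the signatures below are inferred
# from the call sites in this file and are illustrative, not authoritative:
#   def create_model():
#       """Parse model-specific flags; return (model, remaining_argv)."""
#   class Model(object):
#     def build_train_graph(self, data_paths, batch_size): ...
#     def build_eval_graph(self, data_paths, batch_size): ...
#     def build_prediction_graph(self): ...
#     def format_metric_values(self, metric_values): ...
#     def format_prediction_values(self, prediction): ...
#     def export(self, last_checkpoint, output_dir): ...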
class Evaluator(object):
"""Loads variables from latest checkpoint and performs model evaluation."""
def __init__(self, args, model, data_paths, dataset='eval'):
self.eval_batch_size = args.eval_batch_size
self.num_eval_batches = args.eval_set_size // self.eval_batch_size
self.batch_of_examples = []
self.checkpoint_path = train_dir(args.output_path)
self.output_path = os.path.join(args.output_path, dataset)
self.eval_data_paths = data_paths
self.batch_size = args.batch_size
self.stream = args.streaming_eval
self.model = model
def evaluate(self, num_eval_batches=None):
"""Run one round of evaluation, return loss and accuracy."""
num_eval_batches = num_eval_batches or self.num_eval_batches
with tf.Graph().as_default() as graph:
self.tensors = self.model.build_eval_graph(self.eval_data_paths,
self.eval_batch_size)
      # Remove this try/except once TensorFlow 0.12 is standard.
try:
self.summary = tf.contrib.deprecated.merge_all_summaries()
except AttributeError:
self.summary = tf.merge_all_summaries()
self.saver = tf.train.Saver()
      # Remove this try/except once TensorFlow 0.12 is standard.
try:
self.summary_writer = tf.summary.FileWriter(self.output_path)
except AttributeError:
self.summary_writer = tf.train.SummaryWriter(self.output_path)
self.sv = tf.train.Supervisor(
graph=graph,
logdir=self.output_path,
summary_op=None,
global_step=None,
saver=self.saver)
last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
with self.sv.managed_session(
master='', start_standard_services=False) as session:
self.sv.saver.restore(session, last_checkpoint)
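        # In streaming mode the eval data is re-read and parsed from files on
        # every eval cycle (supports very large eval sets); otherwise the first
        # cycle caches the example batches in memory and later cycles feed them
        # back in, which is faster but can use considerably more memory.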
if self.stream:
self.sv.start_queue_runners(session)
for _ in range(num_eval_batches):
session.run(self.tensors.metric_updates)
else:
if not self.batch_of_examples:
self.sv.start_queue_runners(session)
for i in range(num_eval_batches):
self.batch_of_examples.append(session.run(self.tensors.examples))
for i in range(num_eval_batches):
session.run(self.tensors.metric_updates,
{self.tensors.examples: self.batch_of_examples[i]})
metric_values = session.run(self.tensors.metric_values)
global_step = tf.train.global_step(session, self.tensors.global_step)
summary = session.run(self.summary)
self.summary_writer.add_summary(summary, global_step)
self.summary_writer.flush()
return metric_values
def write_predictions(self):
"""Run one round of predictions and write predictions to csv file."""
num_eval_batches = self.num_eval_batches + 1
with tf.Graph().as_default() as graph:
self.tensors = self.model.build_eval_graph(self.eval_data_paths,
self.batch_size)
self.saver = tf.train.Saver()
self.sv = tf.train.Supervisor(
graph=graph,
logdir=self.output_path,
summary_op=None,
global_step=None,
saver=self.saver)
last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
with self.sv.managed_session(
master='', start_standard_services=False) as session:
self.sv.saver.restore(session, last_checkpoint)
with open(os.path.join(self.output_path, 'predictions.csv'), 'wb') as f:
to_run = [self.tensors.keys] + self.tensors.predictions
self.sv.start_queue_runners(session)
last_log_progress = 0
for i in range(num_eval_batches):
progress = i * 100 // num_eval_batches
if progress > last_log_progress:
logging.info('%3d%% predictions processed', progress)
last_log_progress = progress
res = session.run(to_run)
for element in range(len(res[0])):
f.write('%s' % res[0][element])
for i in range(len(self.tensors.predictions)):
f.write(',')
f.write(self.model.format_prediction_values(res[i + 1][element]))
f.write('\n')
class Trainer(object):
"""Performs model training and optionally evaluation."""
def __init__(self, args, model, cluster, task):
self.args = args
self.model = model
self.cluster = cluster
self.task = task
self.evaluator = Evaluator(self.args, self.model, self.args.eval_data_paths,
'eval_set')
self.train_evaluator = Evaluator(self.args, self.model,
self.args.train_data_paths, 'train_set')
self.min_train_eval_rate = args.min_train_eval_rate
def run_training(self):
"""Runs a Master."""
ensure_output_path(self.args.output_path)
self.train_path = train_dir(self.args.output_path)
self.model_path = model_dir(self.args.output_path)
self.is_master = self.task.type != 'worker'
log_interval = self.args.log_interval_secs
self.eval_interval = self.args.eval_interval_secs
if self.is_master and self.task.index > 0:
raise StandardError('Only one replica of master expected')
if self.cluster:
logging.info('Starting %s/%d', self.task.type, self.task.index)
server = start_server(self.cluster, self.task)
target = server.target
device_fn = tf.train.replica_device_setter(
ps_device='/job:ps',
worker_device='/job:%s/task:%d' % (self.task.type, self.task.index),
cluster=self.cluster)
# We use a device_filter to limit the communication between this job
# and the parameter servers, i.e., there is no need to directly
# communicate with the other workers; attempting to do so can result
# in reliability problems.
device_filters = [
'/job:ps', '/job:%s/task:%d' % (self.task.type, self.task.index)
]
config = tf.ConfigProto(device_filters=device_filters)
else:
target = ''
device_fn = ''
config = None
with tf.Graph().as_default() as graph:
with tf.device(device_fn):
# Build the training graph.
self.tensors = self.model.build_train_graph(self.args.train_data_paths,
self.args.batch_size)
# Add the variable initializer Op.
        # Remove this try/except once TensorFlow 0.12 is standard.
try:
init_op = tf.global_variables_initializer()
except AttributeError:
init_op = tf.initialize_all_variables()
# Create a saver for writing training checkpoints.
self.saver = tf.train.Saver()
# Build the summary operation based on the TF collection of Summaries.
        # Remove this try/except once TensorFlow 0.12 is standard.
try:
self.summary_op = tf.contrib.deprecated.merge_all_summaries()
except AttributeError:
self.summary_op = tf.merge_all_summaries()
# Create a "supervisor", which oversees the training process.
self.sv = tf.train.Supervisor(
graph,
is_chief=self.is_master,
logdir=self.train_path,
init_op=init_op,
saver=self.saver,
# Write summary_ops by hand.
summary_op=None,
global_step=self.tensors.global_step,
# No saving; we do it manually in order to easily evaluate immediately
# afterwards.
save_model_secs=0)
should_retry = True
to_run = [self.tensors.global_step, self.tensors.train]
while should_retry:
try:
should_retry = False
with self.sv.managed_session(target, config=config) as session:
self.start_time = start_time = time.time()
self.last_save = self.last_log = 0
self.global_step = self.last_global_step = 0
self.local_step = self.last_local_step = 0
self.last_global_time = self.last_local_time = start_time
# Loop until the supervisor shuts down or args.max_steps have
# completed.
max_steps = self.args.max_steps
while not self.sv.should_stop() and self.global_step < max_steps:
try:
# Run one step of the model.
self.global_step = session.run(to_run)[0]
self.local_step += 1
self.now = time.time()
is_time_to_eval = (self.now - self.last_save) > self.eval_interval
is_time_to_log = (self.now - self.last_log) > log_interval
should_eval = self.is_master and is_time_to_eval
should_log = is_time_to_log or should_eval
if should_log:
self.log(session)
if should_eval:
self.eval(session)
except tf.errors.AbortedError:
should_retry = True
if self.is_master:
# Take the final checkpoint and compute the final accuracy.
self.eval(session)
# Export the model for inference.
self.model.export(
tf.train.latest_checkpoint(self.train_path), self.model_path)
except tf.errors.AbortedError:
should_retry = True
# Ask for all the services to stop.
self.sv.stop()
def log(self, session):
"""Logs training progress."""
logging.info('Train [%s/%d], step %d (%.3f sec) %.1f '
'global steps/s, %.1f local steps/s', self.task.type,
self.task.index, self.global_step,
(self.now - self.start_time),
(self.global_step - self.last_global_step) /
(self.now - self.last_global_time),
(self.local_step - self.last_local_step) /
(self.now - self.last_local_time))
self.last_log = self.now
self.last_global_step, self.last_global_time = self.global_step, self.now
self.last_local_step, self.last_local_time = self.local_step, self.now
def eval(self, session):
"""Runs evaluation loop."""
eval_start = time.time()
self.saver.save(session, self.sv.save_path, self.tensors.global_step)
logging.info(
'Eval, step %d:\n- on train set %s\n-- on eval set %s',
self.global_step,
self.model.format_metric_values(self.train_evaluator.evaluate()),
self.model.format_metric_values(self.evaluator.evaluate()))
now = time.time()
# Make sure eval doesn't consume too much of total time.
eval_time = now - eval_start
train_eval_rate = self.eval_interval / eval_time
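    # For example, with the default min_train_eval_rate of 20, an eval pass
    # that takes 30s pushes eval_interval up to at least 600s, so roughly 20x
    # more wall time is spent training than evaluating.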
if train_eval_rate < self.min_train_eval_rate and self.last_save > 0:
logging.info('Adjusting eval interval from %.2fs to %.2fs',
self.eval_interval, self.min_train_eval_rate * eval_time)
self.eval_interval = self.min_train_eval_rate * eval_time
self.last_save = now
self.last_log = now
def save_summaries(self, session):
self.sv.summary_computed(session,
session.run(self.summary_op), self.global_step)
self.sv.summary_writer.flush()
def main(_):
model, argv = model_lib.create_model()
run(model, argv)
def run(model, argv):
"""Runs the training loop."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_data_paths',
type=str,
action='append',
help='The paths to the training data files. '
      'Can be a comma-separated list of files or a glob pattern.')
parser.add_argument(
'--eval_data_paths',
type=str,
action='append',
help='The path to the files used for evaluation. '
      'Can be a comma-separated list of files or a glob pattern.')
parser.add_argument(
'--output_path',
type=str,
help='The path to which checkpoints and other outputs '
'should be saved. This can be either a local or GCS '
'path.')
parser.add_argument(
'--max_steps',
type=int,)
parser.add_argument(
'--batch_size',
type=int,
help='Number of examples to be processed per mini-batch.')
parser.add_argument(
'--eval_set_size', type=int, help='Number of examples in the eval set.')
parser.add_argument(
'--eval_batch_size', type=int, help='Number of examples per eval batch.')
parser.add_argument(
'--eval_interval_secs',
type=float,
default=5,
help='Minimal interval between calculating evaluation metrics and saving'
' evaluation summaries.')
parser.add_argument(
'--log_interval_secs',
type=float,
default=5,
help='Minimal interval between logging training metrics and saving '
'training summaries.')
parser.add_argument(
'--write_predictions',
action='store_true',
default=False,
help='If set, model is restored from latest checkpoint '
'and predictions are written to a csv file and no training is performed.')
parser.add_argument(
'--min_train_eval_rate',
type=int,
default=20,
help='Minimal train / eval time ratio on master. '
'Default value 20 means that 20x more time is used for training than '
'for evaluation. If evaluation takes more time the eval_interval_secs '
'is increased.')
parser.add_argument(
'--write_to_tmp',
action='store_true',
default=False,
help='If set, all checkpoints and summaries are written to '
'local filesystem (/tmp/) and copied to gcs once training is done. '
'This can speed up training but if training job fails all the summaries '
'and checkpoints are lost.')
parser.add_argument(
'--copy_train_data_to_tmp',
action='store_true',
default=False,
help='If set, training data is copied to local filesystem '
'(/tmp/). This can speed up training but requires extra space on the '
'local filesystem.')
parser.add_argument(
'--copy_eval_data_to_tmp',
action='store_true',
default=False,
help='If set, evaluation data is copied to local filesystem '
'(/tmp/). This can speed up training but requires extra space on the '
'local filesystem.')
parser.add_argument(
'--streaming_eval',
action='store_true',
default=False,
help='If set to True the evaluation is performed in streaming mode. '
'During each eval cycle the evaluation data is read and parsed from '
'files. This allows for having very large evaluation set. '
'If set to False (default) evaluation data is read once and cached in '
'memory. This results in faster evaluation cycle but can potentially '
'use more memory (in streaming mode large per-file read-ahead buffer is '
'used - which may exceed eval data size).')
args, _ = parser.parse_known_args(argv)
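  # When launched by the training service, TF_CONFIG is a JSON object shaped
  # roughly like this (an illustrative sketch):
  #   {"cluster": {"ps": [...], "worker": [...], "master": [...]},
  #    "task": {"type": "worker", "index": 0},
  #    "job": {...}}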
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
# Print the job data as provided by the service.
logging.info('Original job data: %s', env.get('job', {}))
# First find out if there's a task value on the environment variable.
# If there is none or it is empty define a default one.
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task = type('TaskSpec', (object,), task_data)
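  # type() turns the task dict into a lightweight object whose entries are
  # reachable as attributes, e.g. task.type and task.index.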
trial = task_data.get('trial')
if trial is not None:
args.output_path = os.path.join(args.output_path, trial)
if args.write_to_tmp and args.output_path.startswith('gs://'):
output_path = args.output_path
args.output_path = os.path.join('/tmp/', str(uuid.uuid4()))
os.makedirs(args.output_path)
else:
output_path = None
if args.copy_train_data_to_tmp:
args.train_data_paths = copy_data_to_tmp(args.train_data_paths)
if args.copy_eval_data_to_tmp:
args.eval_data_paths = copy_data_to_tmp(args.eval_data_paths)
if not args.eval_batch_size:
# If eval_batch_size not set, use min of batch_size and eval_set_size
args.eval_batch_size = min(args.batch_size, args.eval_set_size)
logging.info("setting eval batch size to %s", args.eval_batch_size)
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
if args.write_predictions:
write_predictions(args, model, cluster, task)
else:
dispatch(args, model, cluster, task)
if output_path and (not cluster or not task or task.type == 'master'):
subprocess.check_call([
'gsutil', '-m', '-q', 'cp', '-r', args.output_path + '/*', output_path
])
shutil.rmtree(args.output_path, ignore_errors=True)
def copy_data_to_tmp(input_files):
"""Copies data to /tmp/ and returns glob matching the files."""
files = []
for e in input_files:
for path in e.split(','):
files.extend(file_io.get_matching_files(path))
for path in files:
if not path.startswith('gs://'):
return input_files
tmp_path = os.path.join('/tmp/', str(uuid.uuid4()))
os.makedirs(tmp_path)
subprocess.check_call(['gsutil', '-m', '-q', 'cp', '-r'] + files + [tmp_path])
return [os.path.join(tmp_path, '*')]
def write_predictions(args, model, cluster, task):
if not cluster or not task or task.type == 'master':
pass # Run locally.
else:
raise ValueError('invalid task_type %s' % (task.type,))
logging.info('Starting to write predictions on %s/%d', task.type, task.index)
  evaluator = Evaluator(args, model, args.eval_data_paths)
evaluator.write_predictions()
logging.info('Done writing predictions on %s/%d', task.type, task.index)
def dispatch(args, model, cluster, task):
if not cluster or not task or task.type == 'master':
# Run locally.
Trainer(args, model, cluster, task).run_training()
elif task.type == 'ps':
run_parameter_server(cluster, task)
elif task.type == 'worker':
Trainer(args, model, cluster, task).run_training()
else:
raise ValueError('invalid task_type %s' % (task.type,))
def run_parameter_server(cluster, task):
logging.info('Starting parameter server %d', task.index)
server = start_server(cluster, task)
server.join()
def start_server(cluster, task):
if not task.type:
raise ValueError('--task_type must be specified.')
if task.index is None:
raise ValueError('--task_index must be specified.')
# Create and start a server.
return tf.train.Server(
tf.train.ClusterSpec(cluster),
protocol='grpc',
job_name=task.type,
task_index=task.index)
def ensure_output_path(output_path):
if not output_path:
raise ValueError('output_path must be specified')
# GCS doesn't have real directories.
if output_path.startswith('gs://'):
return
ensure_dir(output_path)
def ensure_dir(path):
try:
os.makedirs(path)
except OSError as e:
# If the directory already existed, ignore the error.
if e.args[0] == 17:
pass
else:
raise
def train_dir(output_path):
return os.path.join(output_path, 'train')
def eval_dir(output_path):
return os.path.join(output_path, 'eval')
def model_dir(output_path):
return os.path.join(output_path, 'model')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tf.app.run()
|
|
"""Test Konnected setup process."""
from unittest.mock import patch
import pytest
from homeassistant.components import konnected
from homeassistant.components.konnected import config_flow
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
@pytest.fixture(name="mock_panel")
async def mock_panel_fixture():
"""Mock a Konnected Panel bridge."""
with patch("konnected.Client", autospec=True) as konn_client:
def mock_constructor(host, port, websession):
"""Fake the panel constructor."""
konn_client.host = host
konn_client.port = port
return konn_client
konn_client.side_effect = mock_constructor
konn_client.ClientError = config_flow.CannotConnect
konn_client.get_status.return_value = {
"hwVersion": "2.3.0",
"swVersion": "2.3.1",
"heap": 10000,
"uptime": 12222,
"ip": "192.168.1.90",
"port": 9123,
"sensors": [],
"actuators": [],
"dht_sensors": [],
"ds18b20_sensors": [],
"mac": "11:22:33:44:55:66",
"settings": {},
}
yield konn_client
async def test_config_schema(hass):
"""Test that config schema is imported properly."""
config = {
konnected.DOMAIN: {
konnected.CONF_API_HOST: "http://1.1.1.1:8888",
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [{konnected.CONF_ID: "aabbccddeeff"}],
}
}
assert konnected.CONFIG_SCHEMA(config) == {
"konnected": {
"access_token": "abcdefgh",
"api_host": "http://1.1.1.1:8888",
"devices": [
{
"default_options": {
"blink": True,
"api_host": "http://1.1.1.1:8888",
"discovery": True,
"io": {
"1": "Disabled",
"10": "Disabled",
"11": "Disabled",
"12": "Disabled",
"2": "Disabled",
"3": "Disabled",
"4": "Disabled",
"5": "Disabled",
"6": "Disabled",
"7": "Disabled",
"8": "Disabled",
"9": "Disabled",
"alarm1": "Disabled",
"alarm2_out2": "Disabled",
"out": "Disabled",
"out1": "Disabled",
},
},
"id": "aabbccddeeff",
}
],
}
}
# check with host info
config = {
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [
{konnected.CONF_ID: "aabbccddeeff", "host": "192.168.1.1", "port": 1234}
],
}
}
assert konnected.CONFIG_SCHEMA(config) == {
"konnected": {
"access_token": "abcdefgh",
"devices": [
{
"default_options": {
"blink": True,
"api_host": "",
"discovery": True,
"io": {
"1": "Disabled",
"10": "Disabled",
"11": "Disabled",
"12": "Disabled",
"2": "Disabled",
"3": "Disabled",
"4": "Disabled",
"5": "Disabled",
"6": "Disabled",
"7": "Disabled",
"8": "Disabled",
"9": "Disabled",
"alarm1": "Disabled",
"alarm2_out2": "Disabled",
"out": "Disabled",
"out1": "Disabled",
},
},
"id": "aabbccddeeff",
"host": "192.168.1.1",
"port": 1234,
}
],
}
}
# check pin to zone and multiple output
config = {
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [
{
konnected.CONF_ID: "aabbccddeeff",
"binary_sensors": [
{"pin": 2, "type": "door"},
{"zone": 1, "type": "door"},
],
"switches": [
{
"zone": 3,
"name": "Beep Beep",
"momentary": 65,
"pause": 55,
"repeat": 4,
},
{
"zone": 3,
"name": "Warning",
"momentary": 100,
"pause": 100,
"repeat": -1,
},
],
}
],
}
}
assert konnected.CONFIG_SCHEMA(config) == {
"konnected": {
"access_token": "abcdefgh",
"devices": [
{
"default_options": {
"blink": True,
"api_host": "",
"discovery": True,
"io": {
"1": "Binary Sensor",
"10": "Disabled",
"11": "Disabled",
"12": "Disabled",
"2": "Binary Sensor",
"3": "Switchable Output",
"4": "Disabled",
"5": "Disabled",
"6": "Disabled",
"7": "Disabled",
"8": "Disabled",
"9": "Disabled",
"alarm1": "Disabled",
"alarm2_out2": "Disabled",
"out": "Disabled",
"out1": "Disabled",
},
"binary_sensors": [
{"inverse": False, "type": "door", "zone": "2"},
{"inverse": False, "type": "door", "zone": "1"},
],
"switches": [
{
"zone": "3",
"activation": "high",
"name": "Beep Beep",
"momentary": 65,
"pause": 55,
"repeat": 4,
},
{
"zone": "3",
"activation": "high",
"name": "Warning",
"momentary": 100,
"pause": 100,
"repeat": -1,
},
],
},
"id": "aabbccddeeff",
}
],
}
}
async def test_setup_with_no_config(hass):
"""Test that we do not discover anything or try to set up a Konnected panel."""
assert await async_setup_component(hass, konnected.DOMAIN, {})
# No flows started
assert len(hass.config_entries.flow.async_progress()) == 0
# Nothing saved from configuration.yaml
assert hass.data[konnected.DOMAIN][konnected.CONF_ACCESS_TOKEN] is None
assert hass.data[konnected.DOMAIN][konnected.CONF_API_HOST] is None
assert konnected.YAML_CONFIGS not in hass.data[konnected.DOMAIN]
async def test_setup_defined_hosts_known_auth(hass, mock_panel):
"""Test we don't initiate a config entry if configured panel is known."""
MockConfigEntry(
domain="konnected",
unique_id="112233445566",
data={"host": "0.0.0.0", "id": "112233445566"},
).add_to_hass(hass)
MockConfigEntry(
domain="konnected",
unique_id="aabbccddeeff",
data={"host": "1.2.3.4", "id": "aabbccddeeff"},
).add_to_hass(hass)
assert (
await async_setup_component(
hass,
konnected.DOMAIN,
{
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [
{
config_flow.CONF_ID: "aabbccddeeff",
config_flow.CONF_HOST: "0.0.0.0",
config_flow.CONF_PORT: 1234,
}
],
}
},
)
is True
)
assert hass.data[konnected.DOMAIN][konnected.CONF_ACCESS_TOKEN] == "abcdefgh"
assert konnected.YAML_CONFIGS not in hass.data[konnected.DOMAIN]
# Flow aborted
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_setup_defined_hosts_no_known_auth(hass):
"""Test we initiate config entry if config panel is not known."""
assert (
await async_setup_component(
hass,
konnected.DOMAIN,
{
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [{konnected.CONF_ID: "aabbccddeeff"}],
}
},
)
is True
)
# Flow started for discovered bridge
assert len(hass.config_entries.flow.async_progress()) == 1
async def test_setup_multiple(hass):
"""Test we initiate config entry for multiple panels."""
assert (
await async_setup_component(
hass,
konnected.DOMAIN,
{
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "arandomstringvalue",
konnected.CONF_API_HOST: "http://192.168.86.32:8123",
konnected.CONF_DEVICES: [
{
konnected.CONF_ID: "aabbccddeeff",
"binary_sensors": [
{"zone": 4, "type": "motion", "name": "Hallway Motion"},
{
"zone": 5,
"type": "window",
"name": "Master Bedroom Window",
},
{
"zone": 6,
"type": "window",
"name": "Downstairs Windows",
},
],
"switches": [{"zone": "out", "name": "siren"}],
},
{
konnected.CONF_ID: "445566778899",
"binary_sensors": [
{"zone": 1, "type": "motion", "name": "Front"},
{"zone": 2, "type": "window", "name": "Back"},
],
"switches": [
{
"zone": "out",
"name": "Buzzer",
"momentary": 65,
"pause": 55,
"repeat": 4,
}
],
},
],
}
},
)
is True
)
# Flow started for discovered bridge
assert len(hass.config_entries.flow.async_progress()) == 2
# Globals saved
assert (
hass.data[konnected.DOMAIN][konnected.CONF_ACCESS_TOKEN] == "arandomstringvalue"
)
assert (
hass.data[konnected.DOMAIN][konnected.CONF_API_HOST]
== "http://192.168.86.32:8123"
)
async def test_config_passed_to_config_entry(hass):
"""Test that configured options for a host are loaded via config entry."""
entry = MockConfigEntry(
domain=konnected.DOMAIN,
data={config_flow.CONF_ID: "aabbccddeeff", config_flow.CONF_HOST: "0.0.0.0"},
)
entry.add_to_hass(hass)
with patch.object(konnected, "AlarmPanel", autospec=True) as mock_int:
assert (
await async_setup_component(
hass,
konnected.DOMAIN,
{
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [{konnected.CONF_ID: "aabbccddeeff"}],
}
},
)
is True
)
assert len(mock_int.mock_calls) == 3
p_hass, p_entry = mock_int.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
async def test_unload_entry(hass, mock_panel):
"""Test being able to unload an entry."""
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
entry = MockConfigEntry(
domain=konnected.DOMAIN, data={konnected.CONF_ID: "aabbccddeeff"}
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, konnected.DOMAIN, {}) is True
assert hass.data[konnected.DOMAIN]["devices"].get("aabbccddeeff") is not None
assert await konnected.async_unload_entry(hass, entry)
assert hass.data[konnected.DOMAIN]["devices"] == {}
async def test_api(hass, aiohttp_client, mock_panel):
"""Test callback view."""
await async_setup_component(hass, "http", {"http": {}})
device_config = config_flow.CONFIG_ENTRY_SCHEMA(
{
"host": "1.2.3.4",
"port": 1234,
"id": "112233445566",
"model": "Konnected Pro",
"access_token": "abcdefgh",
"api_host": "http://192.168.86.32:8123",
"default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
}
)
device_options = config_flow.OPTIONS_SCHEMA(
{
"api_host": "http://192.168.86.32:8123",
"io": {
"1": "Binary Sensor",
"2": "Binary Sensor",
"3": "Binary Sensor",
"4": "Digital Sensor",
"5": "Digital Sensor",
"6": "Switchable Output",
"out": "Switchable Output",
},
"binary_sensors": [
{"zone": "1", "type": "door"},
{"zone": "2", "type": "window", "name": "winder", "inverse": True},
{"zone": "3", "type": "door"},
],
"sensors": [
{"zone": "4", "type": "dht"},
{"zone": "5", "type": "ds18b20", "name": "temper"},
],
"switches": [
{
"zone": "out",
"name": "switcher",
"activation": "low",
"momentary": 50,
"pause": 100,
"repeat": 4,
},
{"zone": "6"},
],
}
)
entry = MockConfigEntry(
domain="konnected",
title="Konnected Alarm Panel",
data=device_config,
options=device_options,
)
entry.add_to_hass(hass)
assert (
await async_setup_component(
hass,
konnected.DOMAIN,
{konnected.DOMAIN: {konnected.CONF_ACCESS_TOKEN: "globaltoken"}},
)
is True
)
client = await aiohttp_client(hass.http.app)
# Test the get endpoint for switch status polling
resp = await client.get("/api/konnected")
assert resp.status == HTTP_NOT_FOUND # no device provided
resp = await client.get("/api/konnected/223344556677")
assert resp.status == HTTP_NOT_FOUND # unknown device provided
resp = await client.get("/api/konnected/device/112233445566")
assert resp.status == HTTP_NOT_FOUND # no zone provided
result = await resp.json()
assert result == {"message": "Switch on zone or pin unknown not configured"}
resp = await client.get("/api/konnected/device/112233445566?zone=8")
assert resp.status == HTTP_NOT_FOUND # invalid zone
result = await resp.json()
assert result == {"message": "Switch on zone or pin 8 not configured"}
resp = await client.get("/api/konnected/device/112233445566?pin=12")
assert resp.status == HTTP_NOT_FOUND # invalid pin
result = await resp.json()
assert result == {"message": "Switch on zone or pin 12 not configured"}
resp = await client.get("/api/konnected/device/112233445566?zone=out")
assert resp.status == 200
result = await resp.json()
assert result == {"state": 1, "zone": "out"}
resp = await client.get("/api/konnected/device/112233445566?pin=8")
assert resp.status == 200
result = await resp.json()
assert result == {"state": 1, "pin": "8"}
# Test the post endpoint for sensor updates
resp = await client.post("/api/konnected/device", json={"zone": "1", "state": 1})
assert resp.status == HTTP_NOT_FOUND
resp = await client.post(
"/api/konnected/device/112233445566", json={"zone": "1", "state": 1}
)
assert resp.status == 401
result = await resp.json()
assert result == {"message": "unauthorized"}
resp = await client.post(
"/api/konnected/device/223344556677",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "1", "state": 1},
)
assert resp.status == 400
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "15", "state": 1},
)
assert resp.status == 400
result = await resp.json()
assert result == {"message": "unregistered sensor/actuator"}
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "1", "state": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer globaltoken"},
json={"zone": "1", "state": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "4", "temp": 22, "humi": 20},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
# Test the put endpoint for sensor updates
    resp = await client.put(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "1", "state": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
async def test_state_updates_zone(hass, aiohttp_client, mock_panel):
"""Test callback view."""
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
device_config = config_flow.CONFIG_ENTRY_SCHEMA(
{
"host": "1.2.3.4",
"port": 1234,
"id": "112233445566",
"model": "Konnected Pro",
"access_token": "abcdefgh",
"default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
}
)
device_options = config_flow.OPTIONS_SCHEMA(
{
"io": {
"1": "Binary Sensor",
"2": "Binary Sensor",
"3": "Binary Sensor",
"4": "Digital Sensor",
"5": "Digital Sensor",
"6": "Switchable Output",
"out": "Switchable Output",
},
"binary_sensors": [
{"zone": "1", "type": "door"},
{"zone": "2", "type": "window", "name": "winder", "inverse": True},
{"zone": "3", "type": "door"},
],
"sensors": [
{"zone": "4", "type": "dht"},
{"zone": "5", "type": "ds18b20", "name": "temper"},
],
"switches": [
{
"zone": "out",
"name": "switcher",
"activation": "low",
"momentary": 50,
"pause": 100,
"repeat": 4,
},
{"zone": "6"},
],
}
)
entry = MockConfigEntry(
domain="konnected",
title="Konnected Alarm Panel",
data=device_config,
options=device_options,
)
entry.add_to_hass(hass)
# Add empty data field to ensure we process it correctly (possible if entry is ignored)
entry = MockConfigEntry(domain="konnected", title="Konnected Alarm Panel", data={})
entry.add_to_hass(hass)
assert (
await async_setup_component(
hass,
konnected.DOMAIN,
{konnected.DOMAIN: {konnected.CONF_ACCESS_TOKEN: "1122334455"}},
)
is True
)
client = await aiohttp_client(hass.http.app)
# Test updating a binary sensor
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "1", "state": 0},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "off"
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "1", "state": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "on"
    # Test updating the dht sensor
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "4", "temp": 22, "humi": 20},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "20"
assert (
hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "22.0"
)
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "4", "temp": 25, "humi": 23},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "23"
assert (
hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "25.0"
)
# Test updating ds sensor
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "5", "temp": 32, "addr": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.temper_temperature").state == "32.0"
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"zone": "5", "temp": 42, "addr": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.temper_temperature").state == "42.0"
async def test_state_updates_pin(hass, aiohttp_client, mock_panel):
"""Test callback view."""
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
device_config = config_flow.CONFIG_ENTRY_SCHEMA(
{
"host": "1.2.3.4",
"port": 1234,
"id": "112233445566",
"model": "Konnected",
"access_token": "abcdefgh",
"default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
}
)
device_options = config_flow.OPTIONS_SCHEMA(
{
"io": {
"1": "Binary Sensor",
"2": "Binary Sensor",
"3": "Binary Sensor",
"4": "Digital Sensor",
"5": "Digital Sensor",
"6": "Switchable Output",
"out": "Switchable Output",
},
"binary_sensors": [
{"zone": "1", "type": "door"},
{"zone": "2", "type": "window", "name": "winder", "inverse": True},
{"zone": "3", "type": "door"},
],
"sensors": [
{"zone": "4", "type": "dht"},
{"zone": "5", "type": "ds18b20", "name": "temper"},
],
"switches": [
{
"zone": "out",
"name": "switcher",
"activation": "low",
"momentary": 50,
"pause": 100,
"repeat": 4,
},
{"zone": "6"},
],
}
)
entry = MockConfigEntry(
domain="konnected",
title="Konnected Alarm Panel",
data=device_config,
options=device_options,
)
entry.add_to_hass(hass)
# Add empty data field to ensure we process it correctly (possible if entry is ignored)
entry = MockConfigEntry(
domain="konnected",
title="Konnected Alarm Panel",
data={},
)
entry.add_to_hass(hass)
assert (
await async_setup_component(
hass,
konnected.DOMAIN,
{konnected.DOMAIN: {konnected.CONF_ACCESS_TOKEN: "1122334455"}},
)
is True
)
client = await aiohttp_client(hass.http.app)
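    # Non-Pro panels report legacy pin numbers instead of zones; as the
    # assertions below show, pin 1 maps to zone 1, pin 6 to zone 4 and
    # pin 7 to zone 5.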
# Test updating a binary sensor
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"pin": "1", "state": 0},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "off"
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"pin": "1", "state": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "on"
    # Test updating the dht sensor
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"pin": "6", "temp": 22, "humi": 20},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "20"
assert (
hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "22.0"
)
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"pin": "6", "temp": 25, "humi": 23},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "23"
assert (
hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "25.0"
)
# Test updating ds sensor
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"pin": "7", "temp": 32, "addr": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.temper_temperature").state == "32.0"
resp = await client.post(
"/api/konnected/device/112233445566",
headers={"Authorization": "Bearer abcdefgh"},
json={"pin": "7", "temp": 42, "addr": 1},
)
assert resp.status == 200
result = await resp.json()
assert result == {"message": "ok"}
await hass.async_block_till_done()
assert hass.states.get("sensor.temper_temperature").state == "42.0"
|
|
# -*- coding: utf-8 -*-
"""
CuttlePool.
:license: BSD 3-clause, see LICENSE for details.
"""
__version__ = '0.10.0-dev'
try:
import threading
except ImportError:
import dummy_threading as threading
import time
import warnings
import weakref
_OVERFLOW = 0
_TIMEOUT = None
class CuttlePool(object):
"""
A resource pool.
:param func factory: A factory that produces the desired resource.
:param int capacity: Max number of resource instances in the pool.
:param int overflow: The number of extra resource instances that can be
made if the pool is exhausted. Defaults to ``0``.
:param int timeout: Time in seconds to wait for a resource. Defaults to
``None``.
:param resource_wrapper: A Resource subclass.
:param \**kwargs: Keyword arguments that are passed to ``factory`` when
a resource instance is created.
:raises ValueError: If capacity <= 0 or overflow < 0 or timeout < 0.
:raises TypeError: If timeout is not int or ``None``.
"""
def __init__(self,
factory,
capacity,
overflow=_OVERFLOW,
timeout=_TIMEOUT,
resource_wrapper=None,
**kwargs):
if capacity <= 0:
raise ValueError('CuttlePool requires a minimum capacity of 1')
if overflow < 0:
            raise ValueError('Overflow must be a non-negative integer')
if timeout is not None:
            msg = 'Timeout must be a non-negative integer'
if type(timeout) != int:
raise TypeError(msg)
if timeout < 0:
raise ValueError(msg)
self._capacity = capacity
self._overflow = overflow
self._timeout = timeout
self._factory = factory
self._resource_wrapper = resource_wrapper or Resource
self._factory_arguments = kwargs
        # The reference queue is divided into two sections. One section is a
        # queue of resources that are ready for use (the available region).
        # The other section is an unordered mix of resources that are
        # currently in use and ``None`` placeholders (the unavailable region).
self._reference_queue = [None] * self.maxsize
self._resource_start = self._resource_end = 0
# _size is the number of existing resources. _available is the
# number of available resources.
self._size = self._available = 0
# Required for locking the resource pool in multi-threaded
# environments.
self._lock = threading.RLock()
# Notify thread waiting for resource that the queue is not empty when
# a resource is returned to the pool.
self._not_empty = threading.Condition(self._lock)
@property
def capacity(self):
"""
        The maximum number of resources the pool will hold under normal
        circumstances.
"""
return self._capacity
@property
def connection_arguments(self):
"""For compatibility with older versions, will be removed in 1.0."""
warnings.warn(('connection_arguments is deprecated in favor of '
'factory_arguments and will be removed in 1.0'),
DeprecationWarning)
return self.factory_arguments
@property
def factory_arguments(self):
"""
Return a copy of the factory arguments used to create a resource.
"""
return self._factory_arguments.copy()
@property
def maxsize(self):
"""
The maximum possible number of resource instances that can exist at any
one time.
"""
return self._capacity + self._overflow
@property
def overflow(self):
"""
The number of additional resource instances the pool will create when
it is at capacity.
"""
return self._overflow
@property
def size(self):
"""
The number of existing resource instances that have been made by the
pool.
:note: This is not the number of resources *in* the pool, but the
number of existing resources. This includes resources in the
pool and resources in use.
.. warning:: This is not threadsafe. ``size`` can change when context
switches to another thread.
"""
with self._lock:
return self._size
@property
def timeout(self):
"""
The duration to wait for a resource to be returned to the pool when the
pool is depleted.
"""
return self._timeout
def _get(self, timeout, resource_wrapper=None):
"""
        Get a resource from the pool. If ``timeout`` is ``None``, wait
        indefinitely.
:param timeout: Time in seconds to wait for a resource.
:type timeout: int
:param resource_wrapper: A Resource subclass.
:return: A tuple containing a ``_ResourceTracker`` and ``Resource``.
:raises PoolEmptyError: When timeout has elapsed and unable to
retrieve resource.
"""
if resource_wrapper is None:
resource_wrapper = self._resource_wrapper
with self._lock:
if timeout is None:
while self.empty():
self._not_empty.wait()
else:
time_end = time.time() + timeout
while self.empty():
time_left = time_end - time.time()
if time_left < 0:
raise PoolEmptyError
self._not_empty.wait(time_left)
rtracker = self._reference_queue[self._resource_start]
self._resource_start = (self._resource_start + 1) % self.maxsize
self._available -= 1
wrapped_resource = rtracker.wrap_resource(self, resource_wrapper)
return rtracker, wrapped_resource
def _get_tracker(self, resource):
"""
Return the resource tracker that is tracking ``resource``.
:param resource: A resource.
:return: A resource tracker.
:rtype: :class:`_ResourceTracker`
"""
with self._lock:
for rt in self._reference_queue:
if rt is not None and resource is rt.resource:
return rt
raise UnknownResourceError('Resource not created by pool')
def _harvest_lost_resources(self):
"""Return lost resources to pool."""
with self._lock:
for i in self._unavailable_range():
rtracker = self._reference_queue[i]
if rtracker is not None and rtracker.available():
self.put_resource(rtracker.resource)
def _make_resource(self, resource_wrapper=None):
"""
        Create a new resource instance.
:param resource_wrapper: A Resource subclass.
:return: A tuple containing a ``_ResourceTracker`` and ``Resource``.
"""
if resource_wrapper is None:
resource_wrapper = self._resource_wrapper
with self._lock:
for i in self._unavailable_range():
if self._reference_queue[i] is None:
rtracker = _ResourceTracker(
self._factory(**self._factory_arguments))
self._reference_queue[i] = rtracker
self._size += 1
                    # Tell the resource tracker to wrap the resource, then
                    # return both the tracker and the wrapped resource.
wrapped_resource = rtracker.wrap_resource(
self, resource_wrapper)
return rtracker, wrapped_resource
raise PoolFullError
def _put(self, rtracker):
"""
Put a resource back in the queue.
        :param rtracker: A resource tracker.
:type rtracker: :class:`_ResourceTracker`
:raises PoolFullError: If pool is full.
:raises UnknownResourceError: If resource can't be found.
"""
with self._lock:
if self._available < self.capacity:
for i in self._unavailable_range():
if self._reference_queue[i] is rtracker:
# i retains its value and will be used to swap with
# first "empty" space in queue.
break
else:
raise UnknownResourceError
j = self._resource_end
rq = self._reference_queue
rq[i], rq[j] = rq[j], rq[i]
self._resource_end = (self._resource_end + 1) % self.maxsize
self._available += 1
self._not_empty.notify()
else:
raise PoolFullError
def _remove(self, rtracker):
"""
Remove a resource from the pool.
        :param rtracker: A resource tracker.
:type rtracker: :class:`_ResourceTracker`
"""
with self._lock:
i = self._reference_queue.index(rtracker)
self._reference_queue[i] = None
self._size -= 1
def _unavailable_range(self):
"""
Return a generator for the indices of the unavailable region of
``_reference_queue``.
"""
with self._lock:
i = self._resource_end
j = self._resource_start
if j < i or self.empty():
j += self.maxsize
for k in range(i, j):
yield k % self.maxsize
def empty(self):
"""Return ``True`` if pool is empty."""
with self._lock:
return self._available == 0
def get_connection(self, connection_wrapper=None):
"""For compatibility with older versions, will be removed in 1.0."""
warnings.warn(('get_connection() is deprecated in favor of '
'get_resource() and will be removed in 1.0'),
DeprecationWarning)
return self.get_resource(connection_wrapper)
def get_resource(self, resource_wrapper=None):
"""
Returns a ``Resource`` instance.
:param resource_wrapper: A Resource subclass.
:return: A ``Resource`` instance.
:raises PoolEmptyError: If attempt to get resource fails or times
out.
"""
rtracker = None
wrapped_resource = None
if resource_wrapper is None:
resource_wrapper = self._resource_wrapper
if self.empty():
self._harvest_lost_resources()
try:
# Try to get a resource from the pool. Do not wait.
rtracker, wrapped_resource = self._get(0, resource_wrapper)
except PoolEmptyError:
pass
if rtracker is None:
# Could not find resource, try to make one.
try:
rtracker, wrapped_resource = self._make_resource(
resource_wrapper)
except PoolFullError:
pass
if rtracker is None:
# Could not find or make resource, so must wait for a resource
# to be returned to the pool.
try:
rtracker, wrapped_resource = self._get(
self._timeout, resource_wrapper)
except PoolEmptyError:
pass
if rtracker is None:
raise PoolEmptyError
# Ensure resource is active.
if not self.ping(rtracker.resource):
# Lock here to prevent another thread creating a resource in the
# index that will have this resource removed. This ensures there
# will be space for _make_resource() to place a newly created
# resource.
with self._lock:
self._remove(rtracker)
rtracker, wrapped_resource = self._make_resource(
resource_wrapper)
# Ensure all resources leave pool with same attributes.
# normalize_connection() is used since it calls
# normalize_resource(), so if a user implements either one, the
# resource will still be normalized. This will be changed in 1.0 to
# call normalize_resource() when normalize_connection() is
# removed.
self.normalize_connection(rtracker.resource)
return wrapped_resource
def normalize_connection(self, connection):
"""For compatibility with older versions, will be removed in 1.0."""
warnings.warn(('normalize_connection is deprecated in favor of '
'normalize_resource and will be removed in 1.0'),
DeprecationWarning)
return self.normalize_resource(connection)
def normalize_resource(self, resource):
"""
        A user-implemented method that resets the properties of the
        resource instance that was created by ``factory``. This prevents
        unwanted behavior from a resource retrieved from the pool, since it
        could have been changed when previously used.
:param obj resource: A resource instance.
"""
warnings.warn('Failing to implement normalize_resource() can '
'result in unwanted behavior.')
def ping(self, resource):
"""
        A user-implemented method that checks that the ``Resource`` object is
        still open.
:param obj resource: A ``Resource`` object.
:return: A bool indicating if the resource is open (``True``) or
closed (``False``).
"""
warnings.warn('Failing to implement ping() can result in unwanted '
'behavior.')
return True
def put_connection(self, connection):
"""For compatibility with older versions, will be removed in 1.0."""
warnings.warn(('put_connection is deprecated in favor of '
'put_resource and will be removed in 1.0'),
DeprecationWarning)
return self.put_resource(connection)
def put_resource(self, resource):
"""
Adds a resource back to the pool or discards it if the pool is full.
:param resource: A resource object.
:raises UnknownResourceError: If resource was not made by the
pool.
"""
rtracker = self._get_tracker(resource)
try:
self._put(rtracker)
except PoolFullError:
self._remove(rtracker)
class _ResourceTracker(object):
"""
Track if a resource is in use.
:param resource: A resource instance.
"""
def __init__(self, resource):
self.resource = resource
self._weakref = None
def available(self):
"""Determine if resource available for use."""
return self._weakref is None or self._weakref() is None
def wrap_resource(self, pool, resource_wrapper):
"""
Return a resource wrapped in ``resource_wrapper``.
:param pool: A pool instance.
:type pool: :class:`CuttlePool`
:param resource_wrapper: A wrapper class for the resource.
:type resource_wrapper: :class:`Resource`
:return: A wrapped resource.
:rtype: :class:`Resource`
"""
resource = resource_wrapper(self.resource, pool)
self._weakref = weakref.ref(resource)
return resource
class Resource(object):
"""
A wrapper around a resource instance.
:param resource: A resource instance.
:param pool: A resource pool.
"""
def __init__(self, resource, pool):
object.__setattr__(self, '_resource', resource)
object.__setattr__(self, '_pool', pool)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""
Gets attributes of resource object.
"""
return getattr(self._resource, name)
def __setattr__(self, name, value):
"""Sets attributes of resource object."""
if name not in self.__dict__:
setattr(self._resource, name, value)
else:
object.__setattr__(self, name, value)
def close(self):
"""
Returns the resource to the resource pool.
"""
if self._resource is not None:
self._pool.put_resource(self._resource)
self._resource = None
self._pool = None
class CuttlePoolError(Exception):
"""Base class for exceptions in this module."""
class PoolEmptyError(CuttlePoolError):
"""Exception raised when pool timeouts."""
class PoolFullError(CuttlePoolError):
"""Exception raised when there is no space to add a resource."""
class UnknownResourceError(CuttlePoolError):
"""
Exception raised when a resource is returned to the pool that was not
made by the pool.
"""
class PoolConnection(Resource):
"""For compatibility with older versions, will be removed in 1.0."""
def __init__(self, *args, **kwargs):
warnings.warn(('PoolConnection is deprecated in favor of Resource and '
'will be removed in 1.0'), DeprecationWarning)
super(PoolConnection, self).__init__(*args, **kwargs)
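# A minimal usage sketch of the pool API above, assuming a plain list as a
# stand-in resource; a real application would subclass CuttlePool for its own
# resource type and implement ping()/normalize_resource() accordingly.
if __name__ == '__main__':
    class ListPool(CuttlePool):
        def ping(self, resource):
            # A real subclass would verify the resource (e.g. a database
            # connection) is still usable; a list always is.
            return True
        def normalize_resource(self, resource):
            # Reset state so every checkout starts from a clean resource.
            del resource[:]
    pool = ListPool(factory=list, capacity=2, overflow=1, timeout=5)
    with pool.get_resource() as res:
        res.append('hello')  # attribute access is proxied to the list
    # Leaving the ``with`` block returns the resource to the pool.
    print(pool.size)  # -> 1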
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import client as heat_client
from oslo_config import cfg
from oslo_log import log as logging
from conveyor.common import client as url_client
from conveyor import exception
LOG = logging.getLogger(__name__)
heat_opts = [
cfg.StrOpt('heat_url',
default='https://127.0.0.1:8700/v1',
help='Default heat URL',
deprecated_group='DEFAULT',
deprecated_name='heat_url')
]
CONF = cfg.CONF
CONF.register_opts(heat_opts, 'heat')
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces
ENDPOINT_TYPE_TO_INTERFACE = {
'publicURL': 'public',
'internalURL': 'internal',
'adminURL': 'admin',
}
def format_parameters(params):
parameters = {}
for count, p in enumerate(params, 1):
parameters['Parameters.member.%d.ParameterKey' % count] = p
parameters['Parameters.member.%d.ParameterValue' % count] = params[p]
return parameters
def get_service_from_catalog(catalog, service_type):
if catalog:
for service in catalog:
if 'type' not in service:
continue
if service['type'] == service_type:
return service
return None
def get_version_from_service(service):
if service and service.get('endpoints'):
endpoint = service['endpoints'][0]
if 'interface' in endpoint:
return 3
else:
return 2.0
return 2.0
def _get_endpoint_region(endpoint):
"""Common function for getting the region from endpoint.
In Keystone V3, region has been deprecated in favor of
region_id.
This method provides a way to get region that works for
both Keystone V2 and V3.
"""
return endpoint.get('region_id') or endpoint.get('region')
def get_url_for_service(service, endpoint_type, region=None):
if 'type' not in service:
return None
identity_version = get_version_from_service(service)
service_endpoints = service.get('endpoints', [])
if region:
available_endpoints = [endpoint for endpoint in service_endpoints
if region == _get_endpoint_region(endpoint)]
else:
available_endpoints = service_endpoints
"""if we are dealing with the identity service and there is no endpoint
in the current region, it is okay to use the first endpoint for any
identity service endpoints and we can assume that it is global
"""
if service['type'] == 'identity' and not available_endpoints:
available_endpoints = [endpoint for endpoint in service_endpoints]
for endpoint in available_endpoints:
try:
if identity_version < 3:
return endpoint.get(endpoint_type)
else:
interface = \
ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
if endpoint.get('interface') == interface:
return endpoint.get('url')
except (IndexError, KeyError):
"""it could be that the current endpoint just doesn't match the
type, continue trying the next one
"""
pass
return None
def url_for(context, service_type, endpoint_type=None, region=None):
endpoint_type = endpoint_type or getattr(CONF,
'OPENSTACK_ENDPOINT_TYPE',
'publicURL')
fallback_endpoint_type = getattr(CONF, 'SECONDARY_ENDPOINT_TYPE', None)
    region = region or getattr(CONF, 'os_region_name', None)
catalog = context.service_catalog
service = get_service_from_catalog(catalog, service_type)
if service:
url = get_url_for_service(service,
endpoint_type,
region=region)
if not url and fallback_endpoint_type:
url = get_url_for_service(service,
fallback_endpoint_type,
region=region)
if url:
return url
raise exception.ServiceCatalogException(service_type)
def heatclient(context, password=None):
api_version = "1"
insecure = getattr(CONF, 'OPENSTACK_SSL_NO_VERIFY', True)
cacert = getattr(CONF, 'OPENSTACK_SSL_CACERT', None)
try:
endpoint = url_for(context, 'orchestration')
except Exception as e:
LOG.error("HeatClient get URL from context.service_catalog "
"error: %s" % e)
cs = url_client.Client()
endpoint = cs.get_service_endpoint(context, 'orchestration',
region_name=CONF.os_region_name)
LOG.debug("HeatClient get URL from common function: %s" % endpoint)
if not endpoint:
endpoint = CONF.heat.heat_url + '/' + context.project_id
kwargs = {
'token': context.auth_token,
'insecure': insecure,
'ca_file': cacert,
'username': context.user_id,
'password': password
# 'timeout': args.timeout,
# 'ca_file': args.ca_file,
# 'cert_file': args.cert_file,
# 'key_file': args.key_file,
}
client = heat_client.Client(api_version, endpoint, **kwargs)
client.format_parameters = format_parameters
return client
class API(object):
def get_stack(self, context, stack_id, is_dict=True):
stack = heatclient(context).stacks.get(stack_id)
if is_dict:
try:
stack = stack.to_dict()
except Exception:
stack = self._dict_stack(stack)
return stack
def delete_stack(self, context, stack_id):
return heatclient(context).stacks.delete(stack_id)
def create_stack(self, context, password=None, **kwargs):
return heatclient(context, password).stacks.create(**kwargs)
def preview_stack(self, context, password=None, **kwargs):
return heatclient(context, password).stacks.preview(**kwargs)
def validate_template(self, context, **kwargs):
return heatclient(context).stacks.validate(**kwargs)
def resources_list(self, context, stack_name, **kwargs):
return heatclient(context).resources.list(stack_name, **kwargs)
def get_resource(self, context, stack_id, resource_name):
return heatclient(context).resources.get(stack_id, resource_name)
def get_resource_type(self, context, resource_type):
return heatclient(context).resource_types.get(resource_type)
def resource_type_list(self, context):
return heatclient(context).resource_types.list()
def events_list(self, context, stack_id):
return heatclient(context).events.list(stack_id)
def get_event(self, context, stack_id, resource_name, event_id):
return heatclient(context).events.get(stack_id, resource_name,
event_id)
def get_template(self, context, stack_id):
return heatclient(context).stacks.template(stack_id)
def stack_list(self, context, is_dict=True, **kwargs):
stacks = heatclient(context).stacks.list(**kwargs)
if stacks and is_dict:
stack_dict_list = []
for stack in stacks:
stack_dict = self._dict_stack(stack)
stack_dict_list.append(stack_dict)
return stack_dict_list
return stacks
def _dict_stack(self, stack):
stack_name = getattr(stack, 'stack_name', '')
stack_id = getattr(stack, 'id', '')
description = getattr(stack, 'description', '')
creation_time = getattr(stack, 'creation_time', '')
updated_time = getattr(stack, 'updated_time', '')
stack_status = getattr(stack, 'stack_status', '')
disable_rollback = getattr(stack, 'disable_rollback', '')
parameters = getattr(stack, 'parameters', '')
timeout_mins = getattr(stack, 'timeout_mins', '')
parent = getattr(stack, 'parent', '')
stack_dict = {
'id': stack_id,
'stack_name': stack_name,
'description': description,
'creation_time': creation_time,
'updated_time': updated_time,
'stack_status': stack_status,
'disable_rollback': disable_rollback,
'parameters': parameters,
'timeout_mins': timeout_mins,
'parent': parent
}
return stack_dict
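# An illustrative sketch: the catalog helpers above are plain functions, so
# they can be exercised with a toy V3-style catalog entry (assuming the
# module's OpenStack dependencies are importable so the module itself loads;
# the endpoint values below are made up for the example).
if __name__ == '__main__':
    catalog = [{
        'type': 'orchestration',
        'endpoints': [{'interface': 'public',
                       'region_id': 'RegionOne',
                       'url': 'http://heat.example.com:8004/v1'}],
    }]
    service = get_service_from_catalog(catalog, 'orchestration')
    print(get_version_from_service(service))          # -> 3 (V3-style entry)
    print(get_url_for_service(service, 'publicURL'))  # -> the URL above
    print(format_parameters({'flavor': 'm1.small'}))  # -> numbered member keys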
|
|
"""
The :mod:`surprise.prediction_algorithms.algo_base` module defines the base
class :class:`AlgoBase` from which every single prediction algorithm has to
inherit.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .. import similarities as sims
from .predictions import PredictionImpossible
from .predictions import Prediction
from .optimize_baselines import baseline_als
from .optimize_baselines import baseline_sgd
class AlgoBase(object):
"""Abstract class where is defined the basic behavior of a prediction
algorithm.
Keyword Args:
baseline_options(dict, optional): If the algorithm needs to compute a
baseline estimate, the ``baseline_options`` parameter is used to
configure how they are computed. See
:ref:`baseline_estimates_configuration` for usage.
"""
def __init__(self, **kwargs):
self.bsl_options = kwargs.get('bsl_options', {})
self.sim_options = kwargs.get('sim_options', {})
if 'user_based' not in self.sim_options:
self.sim_options['user_based'] = True
def fit(self, trainset):
"""Train an algorithm on a given training set.
This method is called by every derived class as the first basic step
for training an algorithm. It basically just initializes some internal
        structures and sets the self.trainset attribute.
Args:
trainset(:obj:`Trainset <surprise.Trainset>`) : A training
set, as returned by the :meth:`folds
<surprise.dataset.Dataset.folds>` method.
Returns:
self
"""
self.trainset = trainset
# (re) Initialise baselines
self.bu = self.bi = None
return self
def predict(self, uid, iid, r_ui=None, clip=True, verbose=False):
"""Compute the rating prediction for given user and item.
The ``predict`` method converts raw ids to inner ids and then calls the
``estimate`` method which is defined in every derived class. If the
prediction is impossible (e.g. because the user and/or the item is
unknown), the prediction is set according to
:meth:`default_prediction()
<surprise.prediction_algorithms.algo_base.AlgoBase.default_prediction>`.
Args:
uid: (Raw) id of the user. See :ref:`this note<raw_inner_note>`.
iid: (Raw) id of the item. See :ref:`this note<raw_inner_note>`.
r_ui(float): The true rating :math:`r_{ui}`. Optional, default is
``None``.
clip(bool): Whether to clip the estimation into the rating scale.
For example, if :math:`\\hat{r}_{ui}` is :math:`5.5` while the
rating scale is :math:`[1, 5]`, then :math:`\\hat{r}_{ui}` is
set to :math:`5`. Same goes if :math:`\\hat{r}_{ui} < 1`.
Default is ``True``.
verbose(bool): Whether to print details of the prediction. Default
is False.
Returns:
A :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>` object
containing:
- The (raw) user id ``uid``.
- The (raw) item id ``iid``.
- The true rating ``r_ui`` (:math:`r_{ui}`).
- The estimated rating (:math:`\\hat{r}_{ui}`).
- Some additional details about the prediction that might be useful
for later analysis.
"""
# Convert raw ids to inner ids
try:
iuid = self.trainset.to_inner_uid(uid)
except ValueError:
iuid = 'UKN__' + str(uid)
try:
iiid = self.trainset.to_inner_iid(iid)
except ValueError:
iiid = 'UKN__' + str(iid)
details = {}
try:
est = self.estimate(iuid, iiid)
# If the details dict was also returned
if isinstance(est, tuple):
est, details = est
details['was_impossible'] = False
except PredictionImpossible as e:
est = self.default_prediction()
details['was_impossible'] = True
details['reason'] = str(e)
# clip estimate into [lower_bound, higher_bound]
if clip:
lower_bound, higher_bound = self.trainset.rating_scale
est = min(higher_bound, est)
est = max(lower_bound, est)
pred = Prediction(uid, iid, r_ui, est, details)
if verbose:
print(pred)
return pred
def default_prediction(self):
"""Used when the ``PredictionImpossible`` exception is raised during a
call to :meth:`predict()
<surprise.prediction_algorithms.algo_base.AlgoBase.predict>`. By
default, return the global mean of all ratings (can be overridden in
child classes).
Returns:
(float): The mean of all ratings in the trainset.
"""
return self.trainset.global_mean
def test(self, testset, verbose=False):
"""Test the algorithm on given testset, i.e. estimate all the ratings
in the given testset.
Args:
testset: A test set, as returned by a :ref:`cross-validation
                iterator<use_cross_validation_iterators>` or by the
:meth:`build_testset() <surprise.Trainset.build_testset>`
method.
            verbose(bool): Whether to print details for each prediction.
Default is False.
Returns:
A list of :class:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>` objects
that contains all the estimated ratings.
"""
# The ratings are translated back to their original scale.
predictions = [self.predict(uid,
iid,
r_ui_trans,
verbose=verbose)
for (uid, iid, r_ui_trans) in testset]
return predictions
def compute_baselines(self):
"""Compute users and items baselines.
The way baselines are computed depends on the ``bsl_options`` parameter
passed at the creation of the algorithm (see
:ref:`baseline_estimates_configuration`).
This method is only relevant for algorithms using :func:`Pearson
        baseline similarity<surprise.similarities.pearson_baseline>` or the
:class:`BaselineOnly
<surprise.prediction_algorithms.baseline_only.BaselineOnly>` algorithm.
Returns:
A tuple ``(bu, bi)``, which are users and items baselines."""
        # First off, if this method has already been called before on the same
# trainset, then just return. Indeed, compute_baselines may be called
# more than one time, for example when a similarity metric (e.g.
# pearson_baseline) uses baseline estimates.
if self.bu is not None:
return self.bu, self.bi
method = dict(als=baseline_als,
sgd=baseline_sgd)
method_name = self.bsl_options.get('method', 'als')
try:
if getattr(self, 'verbose', False):
print('Estimating biases using', method_name + '...')
self.bu, self.bi = method[method_name](self)
return self.bu, self.bi
except KeyError:
raise ValueError('Invalid method ' + method_name +
' for baseline computation.' +
' Available methods are als and sgd.')
def compute_similarities(self):
"""Build the similarity matrix.
The way the similarity matrix is computed depends on the
``sim_options`` parameter passed at the creation of the algorithm (see
:ref:`similarity_measures_configuration`).
This method is only relevant for algorithms using a similarity measure,
such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`.
Returns:
The similarity matrix."""
construction_func = {'cosine': sims.cosine,
'msd': sims.msd,
'pearson': sims.pearson,
'pearson_baseline': sims.pearson_baseline}
if self.sim_options['user_based']:
n_x, yr = self.trainset.n_users, self.trainset.ir
else:
n_x, yr = self.trainset.n_items, self.trainset.ur
min_support = self.sim_options.get('min_support', 1)
args = [n_x, yr, min_support]
name = self.sim_options.get('name', 'msd').lower()
if name == 'pearson_baseline':
shrinkage = self.sim_options.get('shrinkage', 100)
bu, bi = self.compute_baselines()
if self.sim_options['user_based']:
bx, by = bu, bi
else:
bx, by = bi, bu
args += [self.trainset.global_mean, bx, by, shrinkage]
try:
if getattr(self, 'verbose', False):
print('Computing the {0} similarity matrix...'.format(name))
sim = construction_func[name](*args)
if getattr(self, 'verbose', False):
print('Done computing similarity matrix.')
return sim
except KeyError:
raise NameError('Wrong sim name ' + name + '. Allowed values ' +
'are ' + ', '.join(construction_func.keys()) + '.')
def get_neighbors(self, iid, k):
"""Return the ``k`` nearest neighbors of ``iid``, which is the inner id
of a user or an item, depending on the ``user_based`` field of
``sim_options`` (see :ref:`similarity_measures_configuration`).
As the similarities are computed on the basis of a similarity measure,
this method is only relevant for algorithms using a similarity measure,
such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`.
For a usage example, see the :ref:`FAQ <get_k_nearest_neighbors>`.
Args:
iid(int): The (inner) id of the user (or item) for which we want
the nearest neighbors. See :ref:`this note<raw_inner_note>`.
k(int): The number of neighbors to retrieve.
Returns:
The list of the ``k`` (inner) ids of the closest users (or items)
to ``iid``.
"""
if self.sim_options['user_based']:
all_instances = self.trainset.all_users
else:
all_instances = self.trainset.all_items
others = [(x, self.sim[iid, x]) for x in all_instances() if x != iid]
others.sort(key=lambda tple: tple[1], reverse=True)
k_nearest_neighbors = [j for (j, _) in others[:k]]
return k_nearest_neighbors
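# A minimal sketch (illustrative only, not part of the package API): the
# intended extension point of AlgoBase is estimate(). A subclass maps a pair
# of *inner* ids to a rating estimate and inherits the raw/inner id
# translation, clipping and Prediction packaging from predict() above.
class _GlobalMeanSketch(AlgoBase):
    """Toy algorithm that always predicts the trainset's global mean."""
    def estimate(self, u, i):
        # predict() passes inner ids; returning a bare float is enough. An
        # optional details dict may be returned as a second tuple element.
        return self.trainset.global_mean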
|
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import pmagpy.pmag as pmag
def main():
"""
NAME
make_magic_plots.py
DESCRIPTION
inspects magic directory for available plots.
SYNTAX
make_magic_plots.py [command line options]
INPUT
magic files
OPTIONS
-h prints help message and quits
-f FILE specifies input file name
-fmt [png,eps,svg,jpg,pdf] specify format, default is png
-DM [2,3] define data model
"""
dirlist=['./']
dir_path=os.getcwd()
names=os.listdir(dir_path)
for n in names:
if 'Location' in n:
dirlist.append(n)
if '-fmt' in sys.argv:
ind=sys.argv.index("-fmt")
fmt=sys.argv[ind+1]
else: fmt='png'
if '-f' in sys.argv:
ind=sys.argv.index("-f")
filelist=[sys.argv[ind+1]]
else:
filelist=os.listdir(dir_path)
new_model=0
if '-DM' in sys.argv:
ind=sys.argv.index("-DM")
data_model=sys.argv[ind+1]
if data_model=='3': new_model=1
if new_model:
samp_file='samples.txt'
azimuth_key='azimuth'
meas_file='measurements.txt'
loc_key='location'
method_key='method_codes'
dec_key='dir_dec'
inc_key='dir_inc'
Mkeys=['magnitude','magn_moment','magn_volume','magn_mass']
results_file='sites.txt'
tilt_key='direction_tilt_correction'
hyst_file='specimens.txt'
aniso_file='specimens.txt'
else:
new_model=0
samp_file='er_samples.txt'
azimuth_key='sample_azimuth'
meas_file='magic_measurements.txt'
loc_key='er_location_name'
method_key='magic_method_codes'
dec_key='measurement_dec'
inc_key='measurement_inc'
Mkeys=['measurement_magnitude','measurement_magn_moment','measurement_magn_volume','measurement_magn_mass']
results_file='pmag_results.txt'
tilt_key='tilt_correction'
hyst_file='rmag_hysteresis'
aniso_file='rmag_anisotropy'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
for loc in dirlist:
print('working on: ',loc)
os.chdir(loc) # change working directories to each location
crd='s'
print(samp_file)
if samp_file in filelist: # find coordinate systems
print('found sample file')
samps,file_type=pmag.magic_read(samp_file) # read in data
        Srecs=pmag.get_dictitem(samps,azimuth_key,'','F') # get all non-blank sample orientations
if len(Srecs)>0:
crd='g'
if meas_file in filelist: # start with measurement data
print('working on measurements data')
data,file_type=pmag.magic_read(meas_file) # read in data
if loc == './': data=pmag.get_dictitem(data,loc_key,'','T') # get all the blank location names from data file
# looking for zeq_magic possibilities
        AFZrecs=pmag.get_dictitem(data,method_key,'LT-AF-Z','has') # get all AF demag (LT-AF-Z) records
        TZrecs=pmag.get_dictitem(data,method_key,'LT-T-Z','has') # get all thermal demag (LT-T-Z) records
        MZrecs=pmag.get_dictitem(data,method_key,'LT-M-Z','has') # get all microwave demag (LT-M-Z) records
Drecs=pmag.get_dictitem(data,dec_key,'','F') # get all dec measurements
Irecs=pmag.get_dictitem(data,inc_key,'','F') # get all inc measurements
for key in Mkeys:
Mrecs=pmag.get_dictitem(data,key,'','F') # get intensity data
if len(Mrecs)>0:break
        if (len(AFZrecs)>0 or len(TZrecs)>0 or len(MZrecs)>0) and len(Drecs)>0 and len(Irecs)>0 and len(Mrecs)>0: # potential for stepwise demag curves
if new_model:
CMD = 'zeq_magic3.py -fsp specimens.txt -sav -fmt '+fmt+' -crd '+crd
else:
CMD='zeq_magic.py -fsp pmag_specimens.txt -sav -fmt '+fmt+' -crd '+crd
print(CMD)
os.system(CMD)
# looking for thellier_magic possibilities
if len(pmag.get_dictitem(data,method_key,'LP-PI-TRM','has'))>0:
if new_model:
CMD= 'thellier_magic3.py -fsp specimens.txt -sav -fmt '+fmt
else:
CMD= 'thellier_magic.py -fsp pmag_specimens.txt -sav -fmt '+fmt
print(CMD)
os.system(CMD)
# looking for hysteresis possibilities
if len(pmag.get_dictitem(data,method_key,'LP-HYS','has'))>0: # find hyst experiments
if new_model:
CMD= 'quick_hyst3.py -sav -fmt '+fmt
else:
CMD= 'quick_hyst.py -sav -fmt '+fmt
print(CMD)
os.system(CMD)
if results_file in filelist: # start with measurement data
data,file_type=pmag.magic_read(results_file) # read in data
print('number of datapoints: ',len(data))
if loc == './': data=pmag.get_dictitem(data,loc_key,':','has') # get all the concatenated location names from data file
print('number of datapoints: ',len(data) ,loc)
if new_model:
print('working on site directions')
dec_key='dir_dec'
inc_key='dir_inc'
int_key='int_abs'
else:
print('working on results directions')
dec_key='average_dec'
inc_key='average_inc'
int_key='average_int'
SiteDIs=pmag.get_dictitem(data,dec_key,"",'F') # find decs
SiteDIs=pmag.get_dictitem(SiteDIs,inc_key,"",'F') # find decs and incs
SiteDIs=pmag.get_dictitem(SiteDIs,'data_type','i','has') # only individual results - not poles
print('number of directions: ',len(SiteDIs))
SiteDIs_t=pmag.get_dictitem(SiteDIs,tilt_key,'100','T')# tilt corrected coordinates
        print('number of tilt corrected directions: ',len(SiteDIs_t))
SiteDIs_g=pmag.get_dictitem(SiteDIs,tilt_key,'0','T')# geographic coordinates
SiteDIs_s=pmag.get_dictitem(SiteDIs,'tilt_correction','-1','T')# sample coordinates
SiteDIs_x=pmag.get_dictitem(SiteDIs,'tilt_correction','','T')# no coordinates
if len(SiteDIs_t)>0 or len(SiteDIs_g) >0 or len(SiteDIs_s)>0 or len(SiteDIs_x)>0:
CRD=""
if len(SiteDIs_t)>0:
CRD=' -crd t'
elif len(SiteDIs_g )>0:
CRD=' -crd g'
elif len(SiteDIs_s )>0:
CRD=' -crd s'
if new_model:
CMD= 'eqarea_magic3.py -sav -crd t -fmt '+fmt +CRD
else:
CMD= 'eqarea_magic.py -sav -crd t -fmt '+fmt +CRD
print(CMD)
os.system(CMD)
print('working on VGP map')
VGPs=pmag.get_dictitem(SiteDIs,'vgp_lat',"",'F') # are there any VGPs?
if len(VGPs)>0: # YES!
os.system('vgpmap_magic.py -prj moll -res c -sym ro 5 -sav -fmt png')
print('working on intensities')
if not new_model:
CMD='magic_select.py -f '+results_file+' -key data_type i T -F tmp.txt'
os.system(CMD)
infile=' tmp.txt'
else: infile=results_file
print(int_key)
CMD='magic_select.py -key '+int_key +' 0. has -F tmp1.txt -f '+infile
os.system(CMD)
CMD="grab_magic_key.py -f tmp1.txt -key "+int_key+ " | awk '{print $1*1e6}' >tmp2.txt"
os.system(CMD)
data,file_type=pmag.magic_read('tmp1.txt') # read in data
if new_model:
locations=pmag.get_dictkey(data,loc_key,"")
else:
locations=pmag.get_dictkey(data,loc_key+'s',"")
histfile='LO:_'+locations[0]+'_intensities_histogram:_.'+fmt
os.system("histplot.py -b 1 -xlab 'Intensity (uT)' -sav -f tmp2.txt -F " +histfile)
print("histplot.py -b 1 -xlab 'Intensity (uT)' -sav -f tmp2.txt -F " +histfile)
os.system('rm tmp*.txt')
if hyst_file in filelist: # start with measurement data
print('working on hysteresis')
data,file_type=pmag.magic_read(hyst_file) # read in data
if loc == './': data=pmag.get_dictitem(data,loc_key,'','T') # get all the blank location names from data file
hdata=pmag.get_dictitem(data,'hysteresis_bcr','','F')
hdata=pmag.get_dictitem(hdata,'hysteresis_mr_moment','','F')
hdata=pmag.get_dictitem(hdata,'hysteresis_ms_moment','','F')
hdata=pmag.get_dictitem(hdata,'hysteresis_bc','','F') # there are data for a dayplot
if len(hdata)>0:
print('dayplot_magic.py -sav -fmt '+fmt)
os.system('dayplot_magic.py -sav -fmt '+fmt)
if aniso_file in filelist: # do anisotropy plots if possible
print('working on anisotropy')
data,file_type=pmag.magic_read(aniso_file) # read in data
if loc == './': data=pmag.get_dictitem(data,loc_key,'','T') # get all the blank location names from data file
sdata=pmag.get_dictitem(data,'anisotropy_tilt_correction','-1','T') # get specimen coordinates
gdata=pmag.get_dictitem(data,'anisotropy_tilt_correction','0','T') # get specimen coordinates
tdata=pmag.get_dictitem(data,'anisotropy_tilt_correction','100','T') # get specimen coordinates
CRD=""
if new_model:
CMD= 'aniso_magic3.py -x -B -sav -fmt '+fmt
else:
CMD= 'aniso_magic.py -x -B -sav -fmt '+fmt
if len(sdata)>3:
CMD=CMD+' -crd s'
print(CMD)
os.system(CMD)
if len(gdata)>3:
CMD=CMD+' -crd g'
print(CMD)
os.system(CMD)
if len(tdata)>3:
CMD=CMD+' -crd t'
print(CMD)
os.system(CMD)
if loc!='./':os.chdir('..') # change working directories to each location
if __name__ == "__main__":
main()
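# Typical invocations, per the options documented in main()'s docstring
# (illustrative; the file names are only examples):
#     make_magic_plots.py -fmt pdf
#     make_magic_plots.py -DM 3 -f measurements.txt
# The script then shells out to the individual plotting programs
# (zeq_magic, thellier_magic, quick_hyst, eqarea_magic, ...) with -sav.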
|
|
from __future__ import division
import datetime
from django.conf import settings
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
# from django.contrib.contenttypes import generic
from django.db.models.signals import m2m_changed
from openbudgets.apps.accounts.models import Account
from openbudgets.apps.entities.models import Division, Entity
# from openbudgets.apps.sources.models import ReferenceSource, AuxSource
from openbudgets.commons.mixins.models import TimeStampedMixin, UUIDPKMixin, \
PeriodStartMixin, PeriodicMixin, ClassMethodMixin
from openbudgets.apps.sheets.utilities import is_comparable
class TemplateManager(models.Manager):
"""Exposes additional methods for model query operations.
Open Budgets makes extensive use of related_map and related_map_min methods
for efficient bulk select queries.
"""
def related_map_min(self):
return self.select_related().prefetch_related('divisions', 'sheets')
def related_map(self):
return self.select_related().prefetch_related('divisions', 'sheets', 'nodes')
#TODO: Consider better ways to do this.
def latest_of(self, entity):
return self.filter(sheets__entity=entity).latest('period_start')
class Template(UUIDPKMixin, PeriodStartMixin, TimeStampedMixin, ClassMethodMixin):
"""The Template model describes the structure of a Sheet.
In Open Budgets, Sheets are the modeled representation of budget and actual
data for Entities.
Sheets / SheetItems get their structure from Templates / TemplateNodes.
A Template can and usually does apply for more than one Sheet. This is the
basis of the Open Budgets comparative analysis implementation.
"""
class Meta:
ordering = ['name']
verbose_name = _('template')
verbose_name_plural = _('templates')
objects = TemplateManager()
divisions = models.ManyToManyField(
Division,
verbose_name=_('divisions'),
related_name='templates',)
blueprint = models.ForeignKey(
'self',
blank=True,
null=True,
related_name='instances',
verbose_name=_('blueprint'),)
name = models.CharField(
_('Name'),
db_index=True,
max_length=255,
help_text=_('The name of this template.'),)
description = models.TextField(
_('Description'),
db_index=True,
blank=True,
help_text=_('An overview text for this template.'),)
# referencesources = generic.GenericRelation(
# ReferenceSource,)
# auxsources = generic.GenericRelation(
# AuxSource,)
@property
def node_count(self):
return self.nodes.count()
@property
def period(self):
"""Returns the applicable period of this template.
If the Template instance has divisions (self.divisions.all()),
objects are valid until the next object with a period_start in the
future from this one, or, until 'now' if there is no future object.
        In the current case of multi-period ranges, returns a (start, end)
        tuple of years.
"""
start, end = None, None
ranges = settings.OPENBUDGETS_PERIOD_RANGES
# TODO: Support ranges other than yearly, including multiple ranges.
if len(ranges) == 1 and 'yearly' in ranges:
start = self.period_start.year
if self.is_blueprint:
objs = self.__class__.objects.filter(
divisions__in=self.divisions.all())
for obj in objs:
if obj.period_start.year > self.period_start.year:
end = obj.period_start.year
else:
end = datetime.datetime.now().year
else:
# We have 'implementation' templates that use a blueprint
# as a model, and implement a variant structure based on it.
# Such templates are used by single entities, and may be
# employed by one or many sheets for that entity.
objs = self.sheets.all()
                years = sorted(obj.period_start.year for obj in objs)
if not years:
end = start
else:
end = years[-1]
return start, end
@property
def is_blueprint(self):
"""Returns True if the Template is a blueprint, false otherwise.
Blueprints are Templates that serve as structural models for other
templates.
Blueprints must be assigned to Divisions - they are blueprints for
Sheets of the Entities in their Division(s).
"""
if not self.divisions.all():
return False
return True
@property
def has_sheets(self):
return bool(self.sheets.count())
@models.permalink
def get_absolute_url(self):
return 'template_detail', [self.id]
def __unicode__(self):
return self.name
class AbstractBaseNode(models.Model):
class Meta:
abstract = True
DIRECTIONS = (('REVENUE', _('Revenue')), ('EXPENDITURE', _('Expenditure')),)
PATH_DELIMITER = settings.OPENBUDGETS_IMPORT_INTRA_FIELD_DELIMITER
name = models.CharField(
_('Name'),
db_index=True,
max_length=255,
help_text=_('The name of this template node.'),)
code = models.CharField(
_('Code'),
db_index=True,
max_length=255,
help_text=_('An identifying code for this template node.'),)
comparable = models.BooleanField(
_('Comparable'),
default=is_comparable,
help_text=_('A flag to designate whether this node is suitable for '
'comparison or not.'),)
direction = models.CharField(
_('REVENUE/EXPENDITURE'),
db_index=True,
max_length=15,
choices=DIRECTIONS,
default=DIRECTIONS[0][0],
help_text=_('Template nodes are one of revenue or expenditure.'),)
parent = models.ForeignKey(
'self',
null=True,
blank=True,
related_name='children',)
inverse = models.ManyToManyField(
'self',
symmetrical=True,
null=True,
blank=True,
related_name='inverses',
help_text=_('Inverse relations across revenue and expenditure nodes.'),)
path = models.CharField(
_('Path'),
db_index=True,
max_length=255,
editable=False,
help_text=_('A representation of the path to the root of the template '
'from this template node, using codes.'),)
backwards = models.ManyToManyField(
'self',
null=True,
blank=True,
symmetrical=False,
related_name='forwards',)
@property
def ancestors(self):
ancestors = []
current = self
try:
while current:
parent = current.parent
if parent:
ancestors.append(parent)
current = parent
except TemplateNode.DoesNotExist:
pass
ancestors.reverse()
return ancestors
@property
def depth(self):
branch = self.path.split(',')
return len(branch) - 1
def _get_path_to_root(self):
"""Recursively build a *code* hierarchy from self to top of tree."""
path = [self.code]
if self.parent:
parent_path = self.parent._get_path_to_root()
if parent_path:
path = path + parent_path
return path
def clean(self):
if self.path and self.pk is None:
try:
tmp = self.path.split(self.PATH_DELIMITER)
except ValueError:
raise ValidationError('The delimiter symbol for path appears '
'to be invalid.')
def save(self, *args, **kwargs):
if self.path and self.pk is None:
# The instance creation was passed an explicit path
# Convert it to a list with the delimiter, then, to a
# comma-separated string.
tmp = self.path.split(self.PATH_DELIMITER)
self.path = ','.join(tmp)
else:
# Create the path recursively over parents
self.path = ','.join(self._get_path_to_root())
return super(AbstractBaseNode, self).save(*args, **kwargs)
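# Worked example of the path convention above (illustrative codes only): a
# node with code 'C' whose parent has code 'B' under a root node with code 'A'
# ends up with path == 'C,B,A', so its depth property is
# len('C,B,A'.split(',')) - 1 == 2.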
class TemplateNodeManager(models.Manager):
"""Exposes additional methods for model query operations.
Open Budgets makes extensive use of related_map and related_map_min methods
for efficient bulk select queries.
"""
def related_map_min(self):
return self.select_related('parent')
def related_map(self):
return self.select_related('parent').prefetch_related(
'templates',
'inverse',
'backwards',
'items')
class TemplateNode(UUIDPKMixin, AbstractBaseNode, TimeStampedMixin):
"""The TemplateNode model implements the structure of a Sheet.
In Open Budgets, Sheets are the modeled representation of budget and actual
data for Entities.
Sheets / SheetItems get their structure from Templates / TemplateNodes.
A TemplateNode can and usually does apply for more than one Template.
This is the basis of the Open Budgets comparative analysis implementation.
"""
class Meta:
ordering = ['code', 'name']
verbose_name = _('template node')
verbose_name_plural = _('template nodes')
objects = TemplateNodeManager()
templates = models.ManyToManyField(
Template,
through='TemplateNodeRelation',
related_name='nodes',)
description = models.TextField(
_('description'),
blank=True,
help_text=_('A descriptive text for this template node.'),)
# referencesources = generic.GenericRelation(
# ReferenceSource,)
# auxsources = generic.GenericRelation(
# AuxSource,)
@property
def past(self):
"""Returns a list of past nodes that morph to this one."""
nodes = list(self.backwards.all())
if len(nodes):
for node in nodes:
nodes += node.past
return nodes
@property
def future(self):
"""Returns a list of future nodes that stem from this one."""
nodes = list(self.forwards.all())
if len(nodes):
for node in nodes:
nodes += node.future
return nodes
@property
def with_past(self):
return [self] + self.past
@property
def with_future(self):
return [self] + self.future
def timeline(self, include_future=False):
"""Returns this node's full timeline as a list."""
timeline = self.with_past
if include_future:
timeline += self.future
return timeline
@models.permalink
def get_absolute_url(self):
return 'template_node', [self.id]
def __unicode__(self):
return self.code
def clean(self):
if self.parent and not self.direction == self.parent.direction:
raise ValidationError('A node must have the same direction as its '
'parent.')
if self.parent is self:
raise ValidationError('A node cannot be its own parent.')
def inverse_changed(sender, instance, action, reverse, model, pk_set, **kwargs):
"""Validating m2m relations on TemplateNode."""
if action == 'pre_add':
# validate that inverse never points to self
if instance.pk in pk_set:
raise ValidationError(_('Inverse node can not point to self.'))
# validate that it always points to the opposite `direction`
if model.objects.filter(pk__in=pk_set,
direction=instance.direction).count():
raise ValidationError(_("Inverse node's direction can not be the "
"same as self direction."))
m2m_changed.connect(inverse_changed, sender=TemplateNode.inverse.through)
class TemplateNodeRelationManager(models.Manager):
"""Exposes additional methods for model query operations.
Open Budgets makes extensive use of related_map and related_map_min methods
for efficient bulk select queries.
"""
def related_map(self):
return self.select_related()
# TODO: check where is used, and implement differently.
def has_same_node(self, node, template):
return self.filter(node__code=node.code, node__name=node.name,
node__parent=node.parent, template=template).count()
class TemplateNodeRelation(models.Model):
"""A custom through table for relations between nodes and templates."""
class Meta:
ordering = ['template__name', 'node__name']
verbose_name = _('template/node relation')
        verbose_name_plural = _('template/node relations')
unique_together = (('node', 'template'),)
objects = TemplateNodeRelationManager()
template = models.ForeignKey(
Template,)
node = models.ForeignKey(
TemplateNode,)
def __unicode__(self):
return '{template} -> {node}'.format(template=self.template,
node=self.node)
def validate_unique(self, exclude=None):
"""Custom validation for our use case."""
super(TemplateNodeRelation, self).validate_unique(exclude)
        if self.__class__.objects.has_same_node(
                self.node, self.template):
raise ValidationError(_('Node with name: {name}; code: {code}; '
'parent: {parent}; already exists in '
'template: {template}'.format(
name=self.node.name, code=self.node.code,
parent=self.node.parent,
template=self.template)))
class SheetManager(models.Manager):
"""Exposes additional methods for model query operations.
Open Budgets makes extensive use of related_map and related_map_min methods
for efficient bulk select queries.
"""
def related_map_min(self):
return self.select_related('entity')
def related_map(self):
return self.select_related().prefetch_related('items')
# TODO: Check if we can replace this expensive query
def latest_of(self, entity):
return self.filter(entity=entity).latest('period_start')
class Sheet(UUIDPKMixin, PeriodicMixin, TimeStampedMixin, ClassMethodMixin):
"""The Sheet model describes the declared budgetary data of a given period,
for a given entity.
In Open Budgets, Sheets / SheetItems get their structure from
Templates / TemplateNodes.
A Template can and usually does apply for more than one Sheet. This is the
basis of the Open Budgets comparative analysis implementation.
"""
objects = SheetManager()
class Meta:
ordering = ('entity', 'period_start')
get_latest_by = 'period_start'
verbose_name = _('sheet')
verbose_name_plural = _('sheets')
entity = models.ForeignKey(
Entity,
related_name='sheets',
help_text=_('The entity this sheet belongs to.'),)
template = models.ForeignKey(
Template,
related_name='sheets',
help_text=_('The template used to structure this sheet.'),)
budget = models.DecimalField(
_('budget'),
db_index=True,
max_digits=26,
decimal_places=2,
blank=True,
null=True,
help_text=_('The total budget amount for this sheet.'),)
actual = models.DecimalField(
_('actual'),
db_index=True,
max_digits=26,
decimal_places=2,
blank=True,
null=True,
help_text=_('The total actual amount for this sheet.'),)
description = models.TextField(
_('description'),
db_index=True,
blank=True,
help_text=_('An introductory description for this sheet.'),)
# referencesources = generic.GenericRelation(
# ReferenceSource,)
# auxsources = generic.GenericRelation(
# AuxSource,)
@property
def item_count(self):
value = self.items.all().count()
return value
@property
def variance(self):
"""Returns variance between budget and actual as a percentage."""
if not self.actual or not self.budget:
return None
# Note: we imported division from __future__ for py3 style division
value = round(self.actual / self.budget * 100, 2)
return value
@models.permalink
def get_absolute_url(self):
return 'sheet_detail', [self.id]
def __unicode__(self):
return unicode(self.period)
class AbstractBaseItem(models.Model):
class Meta:
abstract = True
budget = models.DecimalField(
_('budget'),
db_index=True,
max_digits=26,
decimal_places=2,
blank=True,
null=True,
help_text=_('The total budget amount for this item.'),)
actual = models.DecimalField(
_('actual'),
db_index=True,
max_digits=26,
decimal_places=2,
blank=True,
null=True,
help_text=_('The total actual amount for this item.'),)
description = models.TextField(
_('description'),
db_index=True,
blank=True,
help_text=_('An introductory description for this sheet item.'),)
@property
def variance(self):
"""Returns variance between budget and actual as a percentage."""
if not self.actual or not self.budget:
return None
# Note: we imported division from __future__ for py3 style division
value = round(self.actual / self.budget * 100, 2)
return value
@property
def period(self):
return self.sheet.period
class SheetItemManager(models.Manager):
"""Exposes additional methods for model query operations.
Open Budgets makes extensive use of related_map and related_map_min methods
for efficient bulk select queries.
"""
def get_queryset(self):
        return super(SheetItemManager, self).get_queryset().select_related('node')
def related_map_min(self):
return self.select_related()
def related_map(self):
return self.select_related().prefetch_related('parent__parent', 'children', 'discussion')
# TODO: Check this for a more efficient implementation
def timeline(self, node_pks, entity_pk):
nodes = TemplateNode.objects.filter(id__in=node_pks)
timelines = []
if nodes.count():
for node in nodes:
timelines += node.timeline()
else:
raise TemplateNode.DoesNotExist()
return self.filter(node__in=timelines, sheet__entity=entity_pk).select_related('sheet')
class SheetItem(UUIDPKMixin, AbstractBaseItem, TimeStampedMixin, ClassMethodMixin):
"""The SheetItem model describes items of budgetary data of a given period,
for a given entity.
In Open Budgets, Sheets / SheetItems get their structure from
Templates / TemplateNodes.
A Template can and usually does apply for more than one Sheet. This is the
basis of the Open Budgets comparative analysis implementation.
"""
class Meta:
ordering = ['node']
verbose_name = _('sheet item')
verbose_name_plural = _('sheet items')
unique_together = (('sheet', 'node'),)
objects = SheetItemManager()
sheet = models.ForeignKey(
Sheet,
related_name='items',)
node = models.ForeignKey(
TemplateNode,
related_name='items',)
parent = models.ForeignKey(
'self',
null=True,
blank=True,
editable=False,
related_name='children',)
# referencesources = generic.GenericRelation(
# ReferenceSource,)
# auxsources = generic.GenericRelation(
# AuxSource,)
@property
def lookup(self):
return self.node.pk
@property
def name(self):
return self.node.name
@property
def code(self):
return self.node.code
@property
def comparable(self):
return self.node.comparable
@property
def direction(self):
return self.node.direction
@property
def path(self):
return self.node.path
@property
def depth(self):
return self.node.depth
@property
def has_comments(self):
return len(self.description) or self.discussion.exists()
@property
def comment_count(self):
count = 0
if self.description:
count = 1
count += self.discussion.count()
return count
@property
def ancestors(self):
ancestors = []
current = self
try:
while current:
parent = current.parent
if parent:
ancestors.append(parent)
current = parent
except TemplateNode.DoesNotExist:
pass
ancestors.reverse()
return ancestors
@models.permalink
def get_absolute_url(self):
return 'sheet_item_detail', [self.pk]
def __unicode__(self):
return self.node.code
class SheetItemCommentManager(models.Manager):
"""Exposes additional methods for model query operations.
Open Budgets makes extensive use of related_map and related_map_min methods
for efficient bulk select queries.
"""
def get_queryset(self):
        return super(SheetItemCommentManager, self).get_queryset().select_related()
def related_map_min(self):
return self.select_related('user')
def related_map(self):
return self.select_related()
def by_item(self, item_pk):
return self.filter(item=item_pk).related_map_min()
class SheetItemComment(UUIDPKMixin, TimeStampedMixin, ClassMethodMixin):
"""The SheetItemComment model records discussion around particular budget
items.
"""
class Meta:
ordering = ['user', 'last_modified']
verbose_name = _('sheet item comment')
verbose_name_plural = _('sheet item comments')
objects = SheetItemCommentManager()
item = models.ForeignKey(
SheetItem,
related_name='discussion',)
user = models.ForeignKey(
Account,
related_name='item_comments',)
comment = models.TextField(
_('Comment'),
help_text=_('Add your comments to this discussion.'),)
def __unicode__(self):
return self.comment
|
|
from __future__ import print_function
from __future__ import absolute_import
import unittest
import responses
import requests
from .requests_patch import patched_extract_cookies_to_jar
from terminalone import T1, filters
mock_credentials = {
'username': 'user;',
'password': 'password',
'api_key': 'api_key',
}
API_BASE = 'api.mediamath.com'
requests.sessions.extract_cookies_to_jar = patched_extract_cookies_to_jar
requests.adapters.extract_cookies_to_jar = patched_extract_cookies_to_jar
class TestGets(unittest.TestCase):
def setup(self):
"""set up test fixtures"""
with open('tests/fixtures/session.xml') as f:
fixture = f.read()
responses.add(responses.POST, 'https://api.mediamath.com/api/v2.0/login',
body=fixture,
adding_headers={
'Set-Cookie': 'adama_session=1',
},
content_type='application/xml')
self.t1 = T1(auth_method='cookie',
api_base=API_BASE,
**mock_credentials)
@responses.activate
def test_collection(self):
self.setup()
with open('tests/fixtures/advertisers.xml') as f:
fixture = f.read()
responses.add(responses.GET, 'https://api.mediamath.com/api/v2.0/advertisers',
body=fixture,
content_type='application/xml')
advertisers = self.t1.get('advertisers')
number_advertisers = len(list(advertisers))
self.assertEqual(100, number_advertisers)
@responses.activate
def test_counts(self):
self.setup()
with open('tests/fixtures/advertisers.xml') as f:
fixture = f.read()
responses.add(responses.GET, 'https://api.mediamath.com/api/v2.0/advertisers',
body=fixture,
content_type='application/xml')
advertisers, number_advertisers = self.t1.get('advertisers', page_limit=1, count=True)
self.assertEqual(12345, number_advertisers)
advertisers = next(advertisers)
self.assertEqual(advertisers._type, 'advertiser', 'Expected advertiser, got: %r' % advertisers._type)
@responses.activate
def test_get_all(self):
self.setup()
with open('tests/fixtures/organizations.xml') as f:
fixture = f.read()
responses.add(responses.GET, 'https://api.mediamath.com/api/v2.0/organizations',
body=fixture,
content_type='application/xml')
orgs, count = self.t1.get('organizations', count=True, get_all=True)
c = 0
for _ in orgs:
c += 1
self.assertEqual(c, count)
@responses.activate
def test_entity_get_save(self):
self.setup()
with open('tests/fixtures/advertiser.xml') as f:
fixture = f.read()
responses.add(responses.GET, 'https://api.mediamath.com/api/v2.0/advertisers/1',
body=fixture,
content_type='application/xml')
responses.add(responses.POST, 'https://api.mediamath.com/api/v2.0/advertisers/1',
body=fixture,
content_type='application/xml')
adv = self.t1.get('advertisers', 1)
assert adv.id == 1, "Expected ID 1, got: %d" % adv.id
assert all(
hasattr(adv, item) for item in [
'id',
'name',
'status',
'agency_id',
'created_on',
'updated_on',
'ad_server_id',
]
), 'Expected a full record, got: %r' % adv
adv.save()
@responses.activate
def test_full(self):
self.setup()
with open('tests/fixtures/advertisers.xml') as f:
advertisers = f.read()
with open('tests/fixtures/advertiser.xml') as f:
advertiser = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/advertisers?page_limit=1&page_offset=0&sort_by=id',
body=advertisers,
content_type='application/xml',
match_querystring=True)
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/advertisers?full=%2A&page_limit=1&page_offset=0&sort_by=id',
body=advertiser,
content_type='application/xml',
match_querystring=True)
adv = next(self.t1.get('advertisers', page_limit=1))
assert not hasattr(adv, 'status'), 'Expected limited record, got: %r' % adv
adv = next(self.t1.get('advertisers', page_limit=1, full=True))
assert all(
hasattr(adv, item) for item in [
'id',
'name',
'status',
'agency_id',
'created_on',
'updated_on',
'ad_server_id',
]
), 'Expected a full record, got: %r' % adv
@responses.activate
def test_get_creative_approval(self):
self.setup()
with open('tests/fixtures/atomic_creatives_with_creative_approvals.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/atomic_creatives/1000?with=creative_approvals',
body=fixture,
content_type='application/xml',
match_querystring=True)
atomic = self.t1.get('atomic_creatives', 1000, include='creative_approvals')
self.assertEqual(3, len(atomic.creative_approvals))
@responses.activate
def test_limit(self):
self.setup()
with open('tests/fixtures/data_pixel_bundle_full.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/pixel_bundles/limit/advertiser=29'
'?full=pixel_bundle&page_limit=1&page_offset=0&sort_by=id',
body=fixture,
content_type='application/xml',
match_querystring=True)
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/pixel_bundles/limit/agency.organization=100001'
'?full=pixel_bundle&page_limit=1&page_offset=0&sort_by=id',
                      body=fixture,
content_type='application/xml',
match_querystring=True)
pxl = next(self.t1.get('pixel_bundles', limit={'advertiser': 29},
full='pixel_bundle', page_limit=1))
self.assertEqual(29, pxl.advertiser_id)
pxl = next(self.t1.get('pixel_bundles', limit={'agency.organization': 100001},
full='pixel_bundle', page_limit=1))
self.assertNotEqual(pxl.pixel_type, 'event', 'Expected non-event pixel, got: %r' % pxl.pixel_type)
@responses.activate
def test_include(self):
self.setup()
with open('tests/fixtures/pixel_bundle_with_advertiser.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/pixel_bundles/limit/advertiser=29'
'?with=advertiser&full=%2A&page_limit=1&page_offset=0&sort_by=id',
body=fixture,
content_type='application/xml',
match_querystring=True)
pxl = next(self.t1.get('pixel_bundles', limit={'advertiser': 29},
include='advertiser', full=True, page_limit=1))
assert hasattr(pxl, 'advertiser'), 'Expected advertiser included, got: %r' % pxl
assert hasattr(pxl.advertiser, 'id'), 'Expected advertiser instance, got: %r' % pxl.advertiser
@responses.activate
def test_include_traversals(self):
self.setup()
with open('tests/fixtures/pixel_bundle_with_advertiser_agency.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/pixel_bundles/limit/advertiser=29'
'?with=advertiser%2Cagency&full=%2A&page_limit=1&page_offset=0&sort_by=id',
body=fixture,
content_type='application/xml',
match_querystring=True)
pxl = next(self.t1.get('pixel_bundles', limit={'advertiser': 29},
include=[['advertiser', 'agency'], ], full=True, page_limit=1))
assert hasattr(pxl, 'advertiser'), 'Expected advertiser included, got: %r' % pxl
assert hasattr(pxl.advertiser, 'agency'), 'Expected agency instance, got: %r' % pxl.advertiser
@responses.activate
def test_include_plural(self):
self.setup()
with open('tests/fixtures/campaigns_with_strategies.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/campaigns/limit/advertiser=29'
'?page_limit=1&with=strategies&page_offset=0&sort_by=id',
body=fixture,
content_type='application/xml',
match_querystring=True)
camp = next(self.t1.get('campaigns', limit={'advertiser': 29},
include='strategies', page_limit=1))
assert hasattr(camp, 'strategies'), 'Expected strategies included, got: %r' % camp
assert isinstance(camp.strategies, list), 'Expected list of strategies, got: %r' % camp.strategies
assert hasattr(camp.strategies[0], 'id'), 'Expected strategy instances, got: %r' % camp.strategies[0]
@responses.activate
def test_include_multi(self):
self.setup()
with open('tests/fixtures/atomic_creatives_with_advertiser_concept.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/atomic_creatives/limit/advertiser=29'
'?with=advertiser&with=concept&full=%2A&page_limit=1&page_offset=0&sort_by=-concept_id',
body=fixture,
content_type='application/xml',
match_querystring=True)
ac = next(self.t1.get('atomic_creatives', limit={'advertiser': 29},
include=[['advertiser', ], ['concept', ]],
full=True,
page_limit=1,
sort_by='-concept_id'))
assert hasattr(ac, 'advertiser'), 'Expected advertiser included, got: %r' % ac
@responses.activate
def test_find(self):
self.setup()
with open('tests/fixtures/pixel_bundles.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/pixel_bundles'
'?q=%289991%2C9992%2C9993%29&page_limit=100&page_offset=0&sort_by=id',
body=fixture,
content_type='application/xml',
match_querystring=True)
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/campaigns'
'?q=name%3D%3Atest%2A&page_limit=5&page_offset=0&sort_by=id',
body=fixture,
content_type='application/xml',
match_querystring=True)
pxls = self.t1.find('pixel_bundles', 'id', operator=filters.IN,
candidates=[9991, 9992, 9993])
count = len(list(pxls))
assert count == 3, 'Expected 3 entities, got: %d' % count
camps = self.t1.find('campaigns', 'name', filters.CASE_INS_STRING,
'test*', page_limit=5)
names = [c.name for c in camps]
        good = all(n.lower().startswith('pixel bundle') for n in names)
        assert good, 'Expected all names to start with "pixel bundle", got: %r' % names
@responses.activate
def test_permissions(self):
self.setup()
with open('tests/fixtures/permissions.xml') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/api/v2.0/users/10000/permissions',
body=fixture,
content_type='application/xml',
match_querystring=True)
p = self.t1.get('users', 10000, child='permissions')
assert p._type == 'permission', 'Expected permission entity, got: %r' % p
@responses.activate
def test_picard_meta(self):
self.setup()
with open('tests/fixtures/reports_meta.json') as f:
fixture = f.read()
responses.add(responses.GET,
'https://api.mediamath.com/reporting/v1/std/meta',
body=fixture,
content_type='application/json',
match_querystring=True)
r = self.t1.new('report')
md = r.metadata
assert hasattr(md, 'keys'), 'Expected mapping structure, got: %r' % type(md)
assert 'reports' in md, 'Expected overall metadata, got: %r' % md
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import itertools
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
import pytest
from ..core_celestial import _reproject_celestial
from ..core_full import _reproject_full
from ..high_level import reproject_interp
# TODO: add reference comparisons
DATA = os.path.join(os.path.dirname(__file__), '..', '..', 'tests', 'data')
def array_footprint_to_hdulist(array, footprint, header):
hdulist = fits.HDUList()
hdulist.append(fits.PrimaryHDU(array, header))
hdulist.append(fits.ImageHDU(footprint, header, name='footprint'))
return hdulist
@pytest.mark.array_compare()
def test_reproject_celestial_2d_gal2equ():
"""
Test reprojection of a 2D celestial image, which includes a coordinate
system conversion.
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE1'] = 'RA---TAN'
header_out['CTYPE2'] = 'DEC--TAN'
header_out['CRVAL1'] = 266.39311
header_out['CRVAL2'] = -28.939779
array_out, footprint_out = reproject_interp(hdu_in, header_out)
return array_footprint_to_hdulist(array_out, footprint_out, header_out)
# Note that we can't use independent_celestial_slices=True and reorder the
# axes, which is why we need to prepare the combinations in this way.
AXIS_ORDER = list(itertools.permutations((0, 1, 2)))
COMBINATIONS = [(True, (0, 1, 2))]
for axis_order in AXIS_ORDER:
COMBINATIONS.append((False, axis_order))
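# For reference, the parametrization below therefore expands to one "fast" case
# plus the six axis permutations of the "full" method:
#
#     [(True, (0, 1, 2)),
#      (False, (0, 1, 2)), (False, (0, 2, 1)), (False, (1, 0, 2)),
#      (False, (1, 2, 0)), (False, (2, 0, 1)), (False, (2, 1, 0))]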
@pytest.mark.array_compare(single_reference=True)
@pytest.mark.parametrize(('indep_slices', 'axis_order'), tuple(COMBINATIONS))
def test_reproject_celestial_3d_equ2gal(indep_slices, axis_order):
"""
Test reprojection of a 3D cube with celestial components, which includes a
coordinate system conversion (the original header is in equatorial
coordinates). We test using both the 'fast' method which assumes celestial
slices are independent, and the 'full' method. We also scramble the input
dimensions of the data and header to make sure that the reprojection can
deal with this.
"""
# Read in the input cube
hdu_in = fits.open(os.path.join(DATA, 'equatorial_3d.fits'))[0]
# Define the output header - this should be the same for all versions of
# this test to make sure we can use a single reference file.
header_out = hdu_in.header.copy()
header_out['NAXIS1'] = 10
header_out['NAXIS2'] = 9
header_out['CTYPE1'] = 'GLON-SIN'
header_out['CTYPE2'] = 'GLAT-SIN'
header_out['CRVAL1'] = 163.16724
header_out['CRVAL2'] = -15.777405
header_out['CRPIX1'] = 6
header_out['CRPIX2'] = 5
# We now scramble the input axes
if axis_order != (0, 1, 2):
wcs_in = WCS(hdu_in.header)
wcs_in = wcs_in.sub((3 - np.array(axis_order)[::-1]).tolist())
hdu_in.header = wcs_in.to_header()
hdu_in.data = np.transpose(hdu_in.data, axis_order)
array_out, footprint_out = reproject_interp(hdu_in, header_out,
independent_celestial_slices=indep_slices)
return array_footprint_to_hdulist(array_out, footprint_out, header_out)
@pytest.mark.array_compare()
def test_small_cutout():
"""
Test reprojection of a cutout from a larger image (makes sure that the
pre-reprojection cropping works)
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['NAXIS1'] = 10
header_out['NAXIS2'] = 9
header_out['CTYPE1'] = 'RA---TAN'
header_out['CTYPE2'] = 'DEC--TAN'
header_out['CRVAL1'] = 266.39311
header_out['CRVAL2'] = -28.939779
header_out['CRPIX1'] = 5.1
header_out['CRPIX2'] = 4.7
array_out, footprint_out = reproject_interp(hdu_in, header_out)
return array_footprint_to_hdulist(array_out, footprint_out, header_out)
def test_mwpan_car_to_mol():
"""
Test reprojection of the Mellinger Milky Way Panorama from CAR to MOL,
which was returning all NaNs due to a regression that was introduced in
reproject 0.3 (https://github.com/astrofrog/reproject/pull/124).
"""
hdu_in = fits.Header.fromtextfile(os.path.join(DATA, 'mwpan2_RGB_3600.hdr'))
wcs_in = WCS(hdu_in, naxis=2)
    data_in = np.ones((hdu_in['NAXIS2'], hdu_in['NAXIS1']), dtype=float)
header_out = fits.Header()
header_out['NAXIS'] = 2
header_out['NAXIS1'] = 360
header_out['NAXIS2'] = 180
header_out['CRPIX1'] = 180
header_out['CRPIX2'] = 90
header_out['CRVAL1'] = 0
header_out['CRVAL2'] = 0
header_out['CDELT1'] = -2 * np.sqrt(2) / np.pi
header_out['CDELT2'] = 2 * np.sqrt(2) / np.pi
header_out['CTYPE1'] = 'GLON-MOL'
header_out['CTYPE2'] = 'GLAT-MOL'
header_out['RADESYS'] = 'ICRS'
array_out, footprint_out = reproject_interp((data_in, wcs_in), header_out)
assert np.isfinite(array_out).any()
def test_small_cutout_outside():
"""
Test reprojection of a cutout from a larger image - in this case the
cutout is completely outside the region of the input image so we should
take a shortcut that returns arrays of NaNs.
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['NAXIS1'] = 10
header_out['NAXIS2'] = 9
header_out['CTYPE1'] = 'RA---TAN'
header_out['CTYPE2'] = 'DEC--TAN'
header_out['CRVAL1'] = 216.39311
header_out['CRVAL2'] = -21.939779
header_out['CRPIX1'] = 5.1
header_out['CRPIX2'] = 4.7
array_out, footprint_out = reproject_interp(hdu_in, header_out)
assert np.all(np.isnan(array_out))
assert np.all(footprint_out == 0)
def test_celestial_mismatch_2d():
"""
Make sure an error is raised if the input image has celestial WCS
information and the output does not (and vice-versa). This example will
use the _reproject_celestial route.
"""
hdu_in = fits.open(os.path.join(DATA, 'galactic_2d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE1'] = 'APPLES'
header_out['CTYPE2'] = 'ORANGES'
data = hdu_in.data
wcs1 = WCS(hdu_in.header)
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(2, 2))
assert exc.value.args[0] == "Input WCS has celestial components but output WCS does not"
def test_celestial_mismatch_3d():
"""
Make sure an error is raised if the input image has celestial WCS
information and the output does not (and vice-versa). This example will
use the _reproject_full route.
"""
hdu_in = fits.open(os.path.join(DATA, 'equatorial_3d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE1'] = 'APPLES'
header_out['CTYPE2'] = 'ORANGES'
header_out['CTYPE3'] = 'BANANAS'
data = hdu_in.data
wcs1 = WCS(hdu_in.header)
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Input WCS has celestial components but output WCS does not"
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs2), wcs1, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Output WCS has celestial components but input WCS does not"
def test_spectral_mismatch_3d():
"""
Make sure an error is raised if there are mismatches between the presence
or type of spectral axis.
"""
hdu_in = fits.open(os.path.join(DATA, 'equatorial_3d.fits'))[0]
header_out = hdu_in.header.copy()
header_out['CTYPE3'] = 'FREQ'
header_out['CUNIT3'] = 'Hz'
data = hdu_in.data
wcs1 = WCS(hdu_in.header)
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(1, 2, 3))
assert exc.value.args[0] == "The input (VOPT) and output (FREQ) spectral coordinate types are not equivalent."
header_out['CTYPE3'] = 'BANANAS'
wcs2 = WCS(header_out)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs1), wcs2, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Input WCS has a spectral component but output WCS does not"
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs2), wcs1, shape_out=(1, 2, 3))
assert exc.value.args[0] == "Output WCS has a spectral component but input WCS does not"
def test_naxis_mismatch():
"""
Make sure an error is raised if the input and output WCS have a different
number of dimensions.
"""
data = np.ones((3, 2, 2))
wcs_in = WCS(naxis=3)
wcs_out = WCS(naxis=2)
with pytest.raises(ValueError) as exc:
array_out, footprint_out = reproject_interp((data, wcs_in), wcs_out, shape_out=(1, 2))
assert exc.value.args[0] == "Number of dimensions between input and output WCS should match"
def test_slice_reprojection():
"""
Test case where only the slices change and the celestial projection doesn't
"""
inp_cube = np.arange(3, dtype='float').repeat(4 * 5).reshape(3, 4, 5)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_in['NAXIS1'] = 5
header_in['NAXIS2'] = 4
header_in['NAXIS3'] = 3
header_out = header_in.copy()
header_out['NAXIS3'] = 2
header_out['CRPIX3'] -= 0.5
wcs_in = WCS(header_in)
wcs_out = WCS(header_out)
out_cube, out_cube_valid = _reproject_full(inp_cube, wcs_in, wcs_out, (2, 4, 5))
# we expect to be projecting from
# inp_cube = np.arange(3, dtype='float').repeat(4*5).reshape(3,4,5)
# to
# inp_cube_interp = (inp_cube[:-1]+inp_cube[1:])/2.
# which is confirmed by
# map_coordinates(inp_cube.astype('float'), new_coords, order=1, cval=np.nan, mode='constant')
# np.testing.assert_allclose(inp_cube_interp, map_coordinates(inp_cube.astype('float'), new_coords, order=1, cval=np.nan, mode='constant'))
assert out_cube.shape == (2, 4, 5)
assert out_cube_valid.sum() == 40.
# We only check that the *valid* pixels are equal
# but it's still nice to check that the "valid" array works as a mask
np.testing.assert_allclose(out_cube[out_cube_valid.astype('bool')],
((inp_cube[:-1] + inp_cube[1:]) / 2.)[out_cube_valid.astype('bool')])
    # The underlying issue has since been fixed, so the full cubes can be compared directly
np.testing.assert_allclose(out_cube, ((inp_cube[:-1] + inp_cube[1:]) / 2.))
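# For reference, a tiny standalone check (using only numpy) of the expected
# spectral interpolation above: averaging adjacent planes of the arange cube
# gives planes of constant 0.5 and 1.5.
#
#     >>> cube = np.arange(3, dtype=float).repeat(4 * 5).reshape(3, 4, 5)
#     >>> interp = (cube[:-1] + cube[1:]) / 2.
#     >>> np.unique(interp[0]), np.unique(interp[1])
#     (array([0.5]), array([1.5]))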
def test_4d_fails():
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_in['NAXIS'] = 4
header_out = header_in.copy()
w_in = WCS(header_in)
w_out = WCS(header_out)
array_in = np.zeros((2, 3, 4, 5))
with pytest.raises(ValueError) as ex:
x_out, y_out, z_out = reproject_interp((array_in, w_in), w_out, shape_out=[2, 4, 5, 6])
assert str(ex.value) == "Length of shape_out should match number of dimensions in wcs_out"
def test_inequal_wcs_dims():
inp_cube = np.arange(3, dtype='float').repeat(4 * 5).reshape(3, 4, 5)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_out = header_in.copy()
header_out['CTYPE3'] = 'VRAD'
header_out['CUNIT3'] = 'm/s'
header_in['CTYPE3'] = 'STOKES'
header_in['CUNIT3'] = ''
wcs_out = WCS(header_out)
with pytest.raises(ValueError) as ex:
out_cube, out_cube_valid = reproject_interp((inp_cube, header_in), wcs_out, shape_out=(2, 4, 5))
assert str(ex.value) == "Output WCS has a spectral component but input WCS does not"
def test_different_wcs_types():
inp_cube = np.arange(3, dtype='float').repeat(4 * 5).reshape(3, 4, 5)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_out = header_in.copy()
header_out['CTYPE3'] = 'VRAD'
header_out['CUNIT3'] = 'm/s'
header_in['CTYPE3'] = 'VELO'
header_in['CUNIT3'] = 'm/s'
wcs_in = WCS(header_in)
wcs_out = WCS(header_out)
with pytest.raises(ValueError) as ex:
out_cube, out_cube_valid = reproject_interp((inp_cube, header_in), wcs_out, shape_out=(2, 4, 5))
assert str(ex.value) == ("The input (VELO) and output (VRAD) spectral "
"coordinate types are not equivalent.")
# TODO: add a test to check the units are the same.
def test_reproject_3d_celestial_correctness_ra2gal():
inp_cube = np.arange(3, dtype='float').repeat(7 * 8).reshape(3, 7, 8)
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
header_in['NAXIS1'] = 8
header_in['NAXIS2'] = 7
header_in['NAXIS3'] = 3
header_out = header_in.copy()
header_out['CTYPE1'] = 'GLON-TAN'
header_out['CTYPE2'] = 'GLAT-TAN'
header_out['CRVAL1'] = 158.5644791
header_out['CRVAL2'] = -21.59589875
# make the cube a cutout approximately in the center of the other one, but smaller
header_out['NAXIS1'] = 4
header_out['CRPIX1'] = 2
header_out['NAXIS2'] = 3
header_out['CRPIX2'] = 1.5
header_out['NAXIS3'] = 2
header_out['CRPIX3'] -= 0.5
wcs_in = WCS(header_in)
wcs_out = WCS(header_out)
out_cube, out_cube_valid = reproject_interp((inp_cube, wcs_in), wcs_out, shape_out=(2, 3, 4))
assert out_cube.shape == (2, 3, 4)
assert out_cube_valid.sum() == out_cube.size
# only compare the spectral axis
np.testing.assert_allclose(out_cube[:, 0, 0], ((inp_cube[:-1] + inp_cube[1:]) / 2.)[:, 0, 0])
def test_reproject_celestial_3d():
"""
Test both full_reproject and slicewise reprojection. We use a case where the
non-celestial slices are the same and therefore where both algorithms can
work.
"""
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
array_in = np.ones((3, 200, 180))
# TODO: here we can check that if we change the order of the dimensions in
# the WCS, things still work properly
wcs_in = WCS(header_in)
wcs_out = wcs_in.deepcopy()
wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', wcs_in.wcs.ctype[2]]
wcs_out.wcs.crval = [158.0501, -21.530282, wcs_in.wcs.crval[2]]
wcs_out.wcs.crpix = [50., 50., wcs_in.wcs.crpix[2] + 0.4]
out_full, foot_full = _reproject_full(array_in, wcs_in, wcs_out, (3, 160, 170))
out_celestial, foot_celestial = _reproject_celestial(array_in, wcs_in, wcs_out, (3, 160, 170))
np.testing.assert_allclose(out_full, out_celestial)
np.testing.assert_allclose(foot_full, foot_celestial)
def test_reproject_celestial_3d_withoutputarray():
"""
Test both full_reproject and slicewise reprojection. We use a case where the
non-celestial slices are the same and therefore where both algorithms can
work.
"""
header_in = fits.Header.fromtextfile(get_pkg_data_filename('../../tests/data/cube.hdr'))
array_in = np.ones((3, 200, 180))
outshape = (3, 160, 170)
out_full = np.empty(outshape)
out_celestial = np.empty(outshape)
# TODO: here we can check that if we change the order of the dimensions in
# the WCS, things still work properly
wcs_in = WCS(header_in)
wcs_out = wcs_in.deepcopy()
wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', wcs_in.wcs.ctype[2]]
wcs_out.wcs.crval = [158.0501, -21.530282, wcs_in.wcs.crval[2]]
wcs_out.wcs.crpix = [50., 50., wcs_in.wcs.crpix[2] + 0.4]
# TODO when someone learns how to do it: make sure the memory isn't duplicated...
    result = _reproject_full(array_in, wcs_in, wcs_out, shape_out=outshape,
                             array_out=out_full, return_footprint=False)
    assert result is out_full
    result = _reproject_celestial(array_in, wcs_in, wcs_out, shape_out=outshape,
                                  array_out=out_celestial, return_footprint=False)
    assert result is out_celestial
np.testing.assert_allclose(out_full, out_celestial)
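# A minimal usage sketch of the high-level API exercised above. 'input.fits' is
# a placeholder filename, and the import assumes the installed package exposes
# reproject_interp at the top level (as the relative import above suggests):
#
#     >>> from astropy.io import fits
#     >>> from reproject import reproject_interp
#     >>> hdu = fits.open('input.fits')[0]
#     >>> target_header = hdu.header.copy()
#     >>> target_header['CTYPE1'], target_header['CTYPE2'] = 'RA---TAN', 'DEC--TAN'
#     >>> array, footprint = reproject_interp(hdu, target_header)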
|
|
# -*- coding: utf-8 -*-
import httplib as http
from flask import request
from django.core.exceptions import ValidationError
from framework import forms, status
from framework.auth import cas
from framework.auth.core import get_user, generate_verification_key
from framework.auth.decorators import block_bing_preview, collect_auth, must_be_logged_in
from framework.auth.forms import PasswordForm, SetEmailAndPasswordForm
from framework.auth.signals import user_registered
from framework.auth.utils import validate_email, validate_recaptcha
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.sessions import session
from framework.transactions.handlers import no_auto_transaction
from framework.utils import get_timestamp, throttle_period_expired
from osf.models import AbstractNode, OSFUser, PreprintService, PreprintProvider
from osf.utils import sanitize
from osf.utils.permissions import expand_permissions, ADMIN
from website import mails, language, settings
from website.notifications.utils import check_if_all_global_subscriptions_are_none
from website.profile import utils as profile_utils
from website.project.decorators import (must_have_permission, must_be_valid_project, must_not_be_registration,
must_be_contributor_or_public, must_be_contributor)
from website.project.model import has_anonymous_link
from website.project.signals import unreg_contributor_added, contributor_added
from website.util import web_url_for, is_json_request
from website.exceptions import NodeStateError
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_node_contributors_abbrev(auth, node, **kwargs):
anonymous = has_anonymous_link(node, auth)
formatter = 'surname'
max_count = kwargs.get('max_count', 3)
if 'user_ids' in kwargs:
users = [
OSFUser.load(user_id) for user_id in kwargs['user_ids']
if node.contributor_set.filter(user__guid__guid=user_id).exists()
]
else:
users = node.visible_contributors
if anonymous or not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
contributors = []
n_contributors = len(users)
others_count = ''
for index, user in enumerate(users[:max_count]):
if index == max_count - 1 and len(users) > max_count:
separator = ' &'
            others_count = str(n_contributors - max_count)
elif index == len(users) - 1:
separator = ''
elif index == len(users) - 2:
separator = ' &'
else:
separator = ','
contributor = user.get_summary(formatter)
contributor['user_id'] = user._primary_key
contributor['separator'] = separator
contributors.append(contributor)
return {
'contributors': contributors,
'others_count': others_count,
}
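# For illustration (ids are hypothetical, summary fields from get_summary are
# omitted): a node with five visible contributors and the default max_count of
# 3 produces a payload shaped like
#
#     {'contributors': [
#         {'user_id': 'abc12', 'separator': ','},
#         {'user_id': 'def34', 'separator': ','},
#         {'user_id': 'ghi56', 'separator': ' &'},
#      ],
#      'others_count': '2'}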
@collect_auth
@must_be_valid_project(retractions_valid=True)
def get_contributors(auth, node, **kwargs):
# Can set limit to only receive a specified number of contributors in a call to this route
if request.args.get('limit'):
try:
limit = int(request.args['limit'])
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "limit": {}'.format(request.args['limit'])
))
else:
limit = None
anonymous = has_anonymous_link(node, auth)
if anonymous or not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
# Limit is either an int or None:
# if int, contribs list is sliced to specified length
# if None, contribs list is not sliced
contribs = profile_utils.serialize_contributors(
node.visible_contributors[0:limit],
node=node,
)
# Will either return just contributor list or contributor list + 'more' element
if limit:
return {
'contributors': contribs,
'more': max(0, len(node.visible_contributors) - limit)
}
else:
return {'contributors': contribs}
@must_be_logged_in
@must_be_valid_project
def get_contributors_from_parent(auth, node, **kwargs):
parent = node.parent_node
if not parent:
raise HTTPError(http.BAD_REQUEST)
if not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
contribs = [
profile_utils.add_contributor_json(contrib, node=node)
for contrib in parent.contributors if contrib not in node.contributors
]
return {'contributors': contribs}
def deserialize_contributors(node, user_dicts, auth, validate=False):
"""View helper that returns a list of User objects from a list of
serialized users (dicts). The users in the list may be registered or
unregistered users.
    e.g. ``[{'id': 'abc123', 'registered': True, 'fullname': ...},
            {'id': None, 'registered': False, 'fullname': ...},
            {'id': '123ab', 'registered': False, 'fullname': ...}]``
If a dict represents an unregistered user without an ID, creates a new
unregistered User record.
:param Node node: The node to add contributors to
:param list(dict) user_dicts: List of serialized users in the format above.
:param Auth auth:
:param bool validate: Whether to validate and sanitize fields (if necessary)
"""
# Add the registered contributors
contribs = []
for contrib_dict in user_dicts:
fullname = contrib_dict['fullname']
visible = contrib_dict['visible']
email = contrib_dict.get('email')
if validate is True:
# Validate and sanitize inputs as needed. Email will raise error if invalid.
# TODO Edge case bug: validation and saving are performed in same loop, so all in list
# up to the invalid entry will be saved. (communicate to the user what needs to be retried)
fullname = sanitize.strip_html(fullname)
if not fullname:
raise ValidationError('Full name field cannot be empty')
if email:
validate_email(email) # Will raise a ValidationError if email invalid
if contrib_dict['id']:
contributor = OSFUser.load(contrib_dict['id'])
else:
try:
contributor = OSFUser.create_unregistered(
fullname=fullname,
email=email)
contributor.save()
except ValidationError:
## FIXME: This suppresses an exception if ID not found & new validation fails; get_user will return None
contributor = get_user(email=email)
# Add unclaimed record if necessary
if not contributor.is_registered:
contributor.add_unclaimed_record(node, referrer=auth.user,
given_name=fullname,
email=email)
contributor.save()
contribs.append({
'user': contributor,
'visible': visible,
'permissions': expand_permissions(contrib_dict.get('permission'))
})
return contribs
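# A minimal sketch of the input this helper expects (all values hypothetical),
# based on the keys read in the loop above:
#
#     user_dicts = [
#         {'id': None, 'fullname': 'Jane Doe', 'email': 'jane@example.com',
#          'visible': True, 'permission': 'write'},
#     ]
#     contribs = deserialize_contributors(node, user_dicts, auth=auth, validate=True)
#     node.add_contributors(contributors=contribs, auth=auth)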
@unreg_contributor_added.connect
def finalize_invitation(node, contributor, auth, email_template='default'):
try:
record = contributor.get_unclaimed_record(node._primary_key)
except ValueError:
pass
else:
if record['email']:
send_claim_email(record['email'], contributor, node, notify=True, email_template=email_template)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_contributors_post(auth, node, **kwargs):
""" Add contributors to a node. """
user_dicts = request.json.get('users')
node_ids = request.json.get('node_ids')
    if user_dicts is None or node_ids is None:
        raise HTTPError(http.BAD_REQUEST)
    if node._id in node_ids:
        node_ids.remove(node._id)
# Prepare input data for `Node::add_contributors`
try:
contribs = deserialize_contributors(node, user_dicts, auth=auth, validate=True)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
try:
node.add_contributors(contributors=contribs, auth=auth)
except NodeStateError as e:
return {'status': 400, 'message': e.args[0]}, 400
node.save()
# Disconnect listener to avoid multiple invite emails
unreg_contributor_added.disconnect(finalize_invitation)
for child_id in node_ids:
child = AbstractNode.load(child_id)
# Only email unreg users once
try:
child_contribs = deserialize_contributors(
child, user_dicts, auth=auth, validate=True
)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
child.add_contributors(contributors=child_contribs, auth=auth)
child.save()
# Reconnect listeners
unreg_contributor_added.connect(finalize_invitation)
return {
'status': 'success',
'contributors': profile_utils.serialize_contributors(
node.visible_contributors,
node=node,
)
}, 201
@no_auto_transaction
@must_be_valid_project # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_manage_contributors(auth, node, **kwargs):
"""Reorder and remove contributors.
:param Auth auth: Consolidated authorization
:param-json list contributors: Ordered list of contributors represented as
dictionaries of the form:
{'id': <id>, 'permission': <One of 'read', 'write', 'admin'>}
:raises: HTTPError(400) if contributors to be removed are not in list
or if no admin users would remain after changes were applied
"""
contributors = request.json.get('contributors')
# Update permissions and order
try:
node.manage_contributors(contributors, auth=auth, save=True)
except (ValueError, NodeStateError) as error:
raise HTTPError(http.BAD_REQUEST, data={'message_long': error.args[0]})
# If user has removed herself from project, alert; redirect to
# node summary if node is public, else to user's dashboard page
if not node.is_contributor(auth.user):
status.push_status_message(
'You have removed yourself as a contributor from this project',
kind='success',
trust=False
)
if node.is_public:
return {'redirectUrl': node.url}
return {'redirectUrl': web_url_for('dashboard')}
# Else if user has revoked her admin permissions, alert and stay on
# current page
if not node.has_permission(auth.user, ADMIN):
status.push_status_message(
'You have removed your administrative privileges for this project',
kind='success',
trust=False
)
# Else stay on current page
return {}
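# Hypothetical request body for this route (ids are made up); each entry
# follows the {'id': ..., 'permission': ...} form documented above:
#
#     {"contributors": [
#         {"id": "abc12", "permission": "admin"},
#         {"id": "def34", "permission": "write"}
#     ]}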
@must_be_valid_project # returns project
@must_be_contributor
@must_not_be_registration
def project_remove_contributor(auth, **kwargs):
"""Remove a contributor from a list of nodes.
:param Auth auth: Consolidated authorization
:raises: HTTPError(400) if contributors to be removed are not in list
or if no admin users would remain after changes were applied
"""
contributor_id = request.get_json()['contributorID']
node_ids = request.get_json()['nodeIDs']
contributor = OSFUser.load(contributor_id)
if contributor is None:
raise HTTPError(http.BAD_REQUEST, data={'message_long': 'Contributor not found.'})
redirect_url = {}
parent_id = node_ids[0]
for node_id in node_ids:
# Update permissions and order
node = AbstractNode.load(node_id)
# Forbidden unless user is removing herself
if not node.has_permission(auth.user, 'admin'):
if auth.user != contributor:
raise HTTPError(http.FORBIDDEN)
if node.visible_contributors.count() == 1 \
and node.visible_contributors[0] == contributor:
raise HTTPError(http.FORBIDDEN, data={
'message_long': 'Must have at least one bibliographic contributor'
})
nodes_removed = node.remove_contributor(contributor, auth=auth)
# remove_contributor returns false if there is not one admin or visible contributor left after the move.
if not nodes_removed:
raise HTTPError(http.BAD_REQUEST, data={
'message_long': 'Could not remove contributor.'})
# On parent node, if user has removed herself from project, alert; redirect to
# node summary if node is public, else to user's dashboard page
if not node.is_contributor(auth.user) and node_id == parent_id:
status.push_status_message(
'You have removed yourself as a contributor from this project',
kind='success',
trust=False,
id='remove_self_contrib'
)
if node.is_public:
redirect_url = {'redirectUrl': node.url}
else:
redirect_url = {'redirectUrl': web_url_for('dashboard')}
return redirect_url
# TODO: consider moving this into utils
def send_claim_registered_email(claimer, unclaimed_user, node, throttle=24 * 3600):
"""
    A registered user claiming the unclaimed user account as a contributor to a project.
Send an email for claiming the account to the referrer and notify the claimer.
:param claimer: the claimer
:param unclaimed_user: the user account to claim
:param node: the project node where the user account is claimed
:param throttle: the time period in seconds before another claim for the account can be made
:return:
:raise: http.BAD_REQUEST
"""
unclaimed_record = unclaimed_user.get_unclaimed_record(node._primary_key)
# check throttle
timestamp = unclaimed_record.get('last_sent')
if not throttle_period_expired(timestamp, throttle):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='User account can only be claimed with an existing user once every 24 hours'
))
# roll the valid token for each email, thus user cannot change email and approve a different email address
verification_key = generate_verification_key(verification_type='claim')
unclaimed_record['token'] = verification_key['token']
unclaimed_record['expires'] = verification_key['expires']
unclaimed_record['claimer_email'] = claimer.username
unclaimed_user.save()
referrer = OSFUser.load(unclaimed_record['referrer_id'])
claim_url = web_url_for(
'claim_user_registered',
uid=unclaimed_user._primary_key,
pid=node._primary_key,
token=unclaimed_record['token'],
_external=True,
)
# Send mail to referrer, telling them to forward verification link to claimer
mails.send_mail(
referrer.username,
mails.FORWARD_INVITE_REGISTERED,
user=unclaimed_user,
referrer=referrer,
node=node,
claim_url=claim_url,
fullname=unclaimed_record['name'],
can_change_preferences=False,
osf_contact_email=settings.OSF_CONTACT_EMAIL,
)
unclaimed_record['last_sent'] = get_timestamp()
unclaimed_user.save()
# Send mail to claimer, telling them to wait for referrer
mails.send_mail(
claimer.username,
mails.PENDING_VERIFICATION_REGISTERED,
fullname=claimer.fullname,
referrer=referrer,
node=node,
can_change_preferences=False,
osf_contact_email=settings.OSF_CONTACT_EMAIL,
)
# TODO: consider moving this into utils
def send_claim_email(email, unclaimed_user, node, notify=True, throttle=24 * 3600, email_template='default'):
"""
    Unregistered user claiming a user account as a contributor to a project. Send an email for claiming the account.
Either sends to the given email or the referrer's email, depending on the email address provided.
:param str email: The address given in the claim user form
:param User unclaimed_user: The User record to claim.
:param Node node: The node where the user claimed their account.
:param bool notify: If True and an email is sent to the referrer, an email
will also be sent to the invited user about their pending verification.
:param int throttle: Time period (in seconds) after the referrer is
emailed during which the referrer will not be emailed again.
:param str email_template: the email template to use
:return
:raise http.BAD_REQUEST
"""
claimer_email = email.lower().strip()
unclaimed_record = unclaimed_user.get_unclaimed_record(node._primary_key)
referrer = OSFUser.load(unclaimed_record['referrer_id'])
claim_url = unclaimed_user.get_claim_url(node._primary_key, external=True)
# Option 1:
# When adding the contributor, the referrer provides both name and email.
# The given email is the same provided by user, just send to that email.
preprint_provider = None
logo = None
if unclaimed_record.get('email') == claimer_email:
# check email template for branded preprints
if email_template == 'preprint':
email_template, preprint_provider = find_preprint_provider(node)
if not email_template or not preprint_provider:
return
mail_tpl = getattr(mails, 'INVITE_PREPRINT')(email_template, preprint_provider)
if preprint_provider._id == 'osf':
logo = settings.OSF_PREPRINTS_LOGO
else:
logo = preprint_provider._id
else:
            mail_tpl = mails.INVITE_DEFAULT
to_addr = claimer_email
unclaimed_record['claimer_email'] = claimer_email
unclaimed_user.save()
# Option 2:
# TODO: [new improvement ticket] this option is disabled from preprint but still available on the project page
    # When adding the contributor, the referrer only provides the name.
    # The account is later claimed by someone who provides the email.
    # Send an email to the referrer and ask them to forward it to the user.
else:
# check throttle
timestamp = unclaimed_record.get('last_sent')
if not throttle_period_expired(timestamp, throttle):
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='User account can only be claimed with an existing user once every 24 hours'
))
# roll the valid token for each email, thus user cannot change email and approve a different email address
verification_key = generate_verification_key(verification_type='claim')
unclaimed_record['last_sent'] = get_timestamp()
unclaimed_record['token'] = verification_key['token']
unclaimed_record['expires'] = verification_key['expires']
unclaimed_record['claimer_email'] = claimer_email
unclaimed_user.save()
claim_url = unclaimed_user.get_claim_url(node._primary_key, external=True)
# send an email to the invited user without `claim_url`
if notify:
pending_mail = mails.PENDING_VERIFICATION
mails.send_mail(
claimer_email,
pending_mail,
user=unclaimed_user,
referrer=referrer,
fullname=unclaimed_record['name'],
node=node,
can_change_preferences=False,
osf_contact_email=settings.OSF_CONTACT_EMAIL,
)
mail_tpl = mails.FORWARD_INVITE
to_addr = referrer.username
# Send an email to the claimer (Option 1) or to the referrer (Option 2) with `claim_url`
mails.send_mail(
to_addr,
mail_tpl,
user=unclaimed_user,
referrer=referrer,
node=node,
claim_url=claim_url,
email=claimer_email,
fullname=unclaimed_record['name'],
branded_service=preprint_provider,
can_change_preferences=False,
logo=logo if logo else settings.OSF_LOGO,
osf_contact_email=settings.OSF_CONTACT_EMAIL,
)
return to_addr
@contributor_added.connect
def notify_added_contributor(node, contributor, auth=None, throttle=None, email_template='default'):
if email_template == 'false':
return
throttle = throttle or settings.CONTRIBUTOR_ADDED_EMAIL_THROTTLE
# Email users for projects, or for components where they are not contributors on the parent node.
if contributor.is_registered and \
(not node.parent_node or (node.parent_node and not node.parent_node.is_contributor(contributor))):
mimetype = 'html'
preprint_provider = None
logo = None
if email_template == 'preprint':
email_template, preprint_provider = find_preprint_provider(node)
if not email_template or not preprint_provider:
return
email_template = getattr(mails, 'CONTRIBUTOR_ADDED_PREPRINT')(email_template, preprint_provider)
if preprint_provider._id == 'osf':
logo = settings.OSF_PREPRINTS_LOGO
else:
logo = preprint_provider._id
elif email_template == 'access_request':
mimetype = 'html'
            email_template = mails.CONTRIBUTOR_ADDED_ACCESS_REQUEST
elif node.is_preprint:
            email_template = mails.CONTRIBUTOR_ADDED_PREPRINT_NODE_FROM_OSF
logo = settings.OSF_PREPRINTS_LOGO
else:
            email_template = mails.CONTRIBUTOR_ADDED_DEFAULT
contributor_record = contributor.contributor_added_email_records.get(node._id, {})
if contributor_record:
timestamp = contributor_record.get('last_sent', None)
if timestamp:
if not throttle_period_expired(timestamp, throttle):
return
else:
contributor.contributor_added_email_records[node._id] = {}
mails.send_mail(
contributor.username,
email_template,
mimetype=mimetype,
user=contributor,
node=node,
referrer_name=auth.user.fullname if auth else '',
all_global_subscriptions_none=check_if_all_global_subscriptions_are_none(contributor),
branded_service=preprint_provider,
can_change_preferences=False,
logo=logo if logo else settings.OSF_LOGO,
osf_contact_email=settings.OSF_CONTACT_EMAIL
)
contributor.contributor_added_email_records[node._id]['last_sent'] = get_timestamp()
contributor.save()
elif not contributor.is_registered:
unreg_contributor_added.send(node, contributor=contributor, auth=auth, email_template=email_template)
def find_preprint_provider(node):
"""
Given a node, find the preprint and the service provider.
    :param node: the node to which a contributor or preprint author is added
:return: tuple containing the type of email template (osf or branded) and the preprint provider
"""
try:
preprint = PreprintService.objects.get(node=node)
provider = preprint.provider
email_template = 'osf' if provider._id == 'osf' else 'branded'
return email_template, provider
except PreprintService.DoesNotExist:
return None, None
def verify_claim_token(user, token, pid):
"""View helper that checks that a claim token for a given user and node ID
is valid. If not valid, throws an error with custom error messages.
"""
# if token is invalid, throw an error
if not user.verify_claim_token(token=token, project_id=pid):
if user.is_registered:
error_data = {
'message_short': 'User has already been claimed.',
'message_long': 'Please <a href="/login/">log in</a> to continue.'}
raise HTTPError(400, data=error_data)
else:
return False
return True
@block_bing_preview
@collect_auth
@must_be_valid_project
def claim_user_registered(auth, node, **kwargs):
"""
    View that prompts the user to enter their password in order to claim a contributorship on a project.
A user must be logged in.
"""
current_user = auth.user
sign_out_url = web_url_for('auth_register', logout=True, next=request.url)
if not current_user:
return redirect(sign_out_url)
    # Logged-in user should not be a contributor to the project
if node.is_contributor(current_user):
logout_url = web_url_for('auth_logout', redirect_url=request.url)
data = {
'message_short': 'Already a contributor',
'message_long': ('The logged-in user is already a contributor to this '
'project. Would you like to <a href="{}">log out</a>?').format(logout_url)
}
raise HTTPError(http.BAD_REQUEST, data=data)
uid, pid, token = kwargs['uid'], kwargs['pid'], kwargs['token']
unreg_user = OSFUser.load(uid)
if not verify_claim_token(unreg_user, token, pid=node._primary_key):
error_data = {
'message_short': 'Invalid url.',
'message_long': 'The token in the URL is invalid or has expired.'
}
raise HTTPError(http.BAD_REQUEST, data=error_data)
# Store the unreg_user data on the session in case the user registers
# a new account
session.data['unreg_user'] = {
'uid': uid, 'pid': pid, 'token': token
}
session.save()
form = PasswordForm(request.form)
if request.method == 'POST':
if form.validate():
if current_user.check_password(form.password.data):
node.replace_contributor(old=unreg_user, new=current_user)
node.save()
status.push_status_message(
'You are now a contributor to this project.',
kind='success',
trust=False
)
return redirect(node.url)
else:
status.push_status_message(language.LOGIN_FAILED, kind='warning', trust=False)
else:
forms.push_errors_to_status(form.errors)
if is_json_request():
form_ret = forms.utils.jsonify(form)
user_ret = profile_utils.serialize_user(current_user, full=False)
else:
form_ret = form
user_ret = current_user
return {
'form': form_ret,
'user': user_ret,
'signOutUrl': sign_out_url
}
@user_registered.connect
def replace_unclaimed_user_with_registered(user):
"""Listens for the user_registered signal. If unreg_user is stored in the
session, then the current user is trying to claim themselves as a contributor.
Replaces the old, unregistered contributor with the newly registered
account.
"""
unreg_user_info = session.data.get('unreg_user')
if unreg_user_info:
unreg_user = OSFUser.load(unreg_user_info['uid'])
pid = unreg_user_info['pid']
node = AbstractNode.load(pid)
node.replace_contributor(old=unreg_user, new=user)
node.save()
status.push_status_message(
'Successfully claimed contributor.', kind='success', trust=False)
@block_bing_preview
@collect_auth
def claim_user_form(auth, **kwargs):
"""
View for rendering the set password page for a claimed user.
Must have ``token`` as a querystring argument.
Renders the set password form, validates it, and sets the user's password.
HTTP Method: GET, POST
"""
uid, pid = kwargs['uid'], kwargs['pid']
token = request.form.get('token') or request.args.get('token')
user = OSFUser.load(uid)
# If unregistered user is not in database, or url bears an invalid token raise HTTP 400 error
if not user or not verify_claim_token(user, token, pid):
error_data = {
'message_short': 'Invalid url.',
            'message_long': 'Claim user does not exist, or the token in the URL is invalid or has expired.'
}
raise HTTPError(http.BAD_REQUEST, data=error_data)
# If user is logged in, redirect to 're-enter password' page
if auth.logged_in:
return redirect(web_url_for('claim_user_registered',
uid=uid, pid=pid, token=token))
unclaimed_record = user.unclaimed_records[pid]
user.fullname = unclaimed_record['name']
user.update_guessed_names()
# The email can be the original referrer email if no claimer email has been specified.
claimer_email = unclaimed_record.get('claimer_email') or unclaimed_record.get('email')
# If there is a registered user with this email, redirect to 're-enter password' page
try:
user_from_email = OSFUser.objects.get(emails__address=claimer_email.lower().strip()) if claimer_email else None
except OSFUser.DoesNotExist:
user_from_email = None
if user_from_email and user_from_email.is_registered:
return redirect(web_url_for('claim_user_registered', uid=uid, pid=pid, token=token))
form = SetEmailAndPasswordForm(request.form, token=token)
if request.method == 'POST':
if not form.validate():
forms.push_errors_to_status(form.errors)
elif settings.RECAPTCHA_SITE_KEY and not validate_recaptcha(request.form.get('g-recaptcha-response'), remote_ip=request.remote_addr):
status.push_status_message('Invalid captcha supplied.', kind='error')
else:
username, password = claimer_email, form.password.data
if not username:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='No email associated with this account. Please claim this '
'account on the project to which you were invited.'
))
user.register(username=username, password=password, accepted_terms_of_service=form.accepted_terms_of_service.data)
# Clear unclaimed records
user.unclaimed_records = {}
user.verification_key = generate_verification_key()
user.save()
# Authenticate user and redirect to project page
status.push_status_message(language.CLAIMED_CONTRIBUTOR, kind='success', trust=True)
# Redirect to CAS and authenticate the user with a verification key.
provider = PreprintProvider.load(pid)
redirect_url = None
if provider:
redirect_url = web_url_for('auth_login', next=provider.landing_url, _absolute=True)
else:
redirect_url = web_url_for('resolve_guid', guid=pid, _absolute=True)
return redirect(cas.get_login_url(
redirect_url,
username=user.username,
verification_key=user.verification_key
))
return {
'firstname': user.given_name,
'email': claimer_email if claimer_email else '',
'fullname': user.fullname,
'form': forms.utils.jsonify(form) if is_json_request() else form,
'osf_contact_email': settings.OSF_CONTACT_EMAIL,
}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def invite_contributor_post(node, **kwargs):
"""API view for inviting an unregistered user. Performs validation, but does not actually invite the user.
Expects JSON arguments with 'fullname' (required) and email (not required).
"""
fullname = request.json.get('fullname').strip()
email = request.json.get('email')
# Validate and sanitize inputs as needed. Email will raise error if invalid.
fullname = sanitize.strip_html(fullname)
if email:
email = email.lower().strip()
try:
validate_email(email)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
if not fullname:
return {'status': 400, 'message': 'Full name field cannot be empty'}, 400
# Check if email is in the database
user = get_user(email=email)
if user:
if node.is_contributor(user):
msg = 'User with this email address is already a contributor to this project.'
return {'status': 400, 'message': msg}, 400
elif not user.is_confirmed:
serialized = profile_utils.serialize_unregistered(fullname, email)
else:
serialized = profile_utils.add_contributor_json(user)
# use correct display name
serialized['fullname'] = fullname
serialized['email'] = email
else:
# Create a placeholder
serialized = profile_utils.serialize_unregistered(fullname, email)
return {'status': 'success', 'contributor': serialized}
@must_be_contributor_or_public
def claim_user_post(node, **kwargs):
"""
View for claiming a user from the X-editable form on a project page.
:param node: the project node
:return:
"""
request_data = request.json
# The unclaimed user
unclaimed_user = OSFUser.load(request_data['pk'])
unclaimed_data = unclaimed_user.get_unclaimed_record(node._primary_key)
    # Claimer is not logged in and submits their email through X-editable, stored in `request_data['value']`
if 'value' in request_data:
email = request_data['value'].lower().strip()
claimer = get_user(email=email)
# registered user
if claimer and claimer.is_registered:
send_claim_registered_email(claimer, unclaimed_user, node)
# unregistered user
else:
send_claim_email(email, unclaimed_user, node, notify=True)
# Claimer is logged in with confirmed identity stored in `request_data['claimerId']`
elif 'claimerId' in request_data:
claimer_id = request_data['claimerId']
claimer = OSFUser.load(claimer_id)
send_claim_registered_email(claimer, unclaimed_user, node)
email = claimer.username
else:
raise HTTPError(http.BAD_REQUEST)
return {
'status': 'success',
'email': email,
'fullname': unclaimed_data['name']
}
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import scale_vector
from compas.geometry import normalize_vector
from compas.geometry import add_vectors
from compas.geometry import subtract_vectors
from compas.geometry import cross_vectors
from compas.geometry import centroid_points
from compas.geometry import intersection_line_line
from compas.geometry import normal_polygon
from compas.geometry import is_colinear
from compas.utilities import iterable_like
from compas.utilities import pairwise
from compas.utilities import is_item_iterable
__all__ = [
'offset_line',
'offset_polyline',
'offset_polygon',
]
def intersect_lines(l1, l2, tol):
x1, x2 = intersection_line_line(l1, l2, tol)
if x1 and x2:
return centroid_points([x1, x2])
def intersect_lines_colinear(l1, l2, tol):
def are_segments_colinear(l1, l2, tol):
a, b = l1
d, c = l2
return is_colinear(a, b, c, tol)
if are_segments_colinear(l1, l2, tol):
return centroid_points([l1[1], l2[0]])
def intersect(l1, l2, tol):
supported_funcs = [intersect_lines, intersect_lines_colinear]
for func in supported_funcs:
point = func(l1, l2, tol)
if point:
return point
msg = "Intersection not found for line: {}, and line: {}".format(l1, l2)
raise ValueError(msg)
def offset_segments(point_list, distances, normal):
segments = []
for line, distance in zip(pairwise(point_list), distances):
segments.append(offset_line(line, distance, normal))
return segments
def offset_line(line, distance, normal=[0.0, 0.0, 1.0]):
"""Offset a line by a distance.
Parameters
----------
line : [point, point] | :class:`~compas.geometry.Line`
A line defined by two points.
    distance : float | list[float]
        The offset distance as float.
        A single value determines a constant offset.
        A list of two offset values can be used to create a variable offset at the start and end.
normal : [float, float, float] | :class:`~compas.geometry.Vector`, optional
The normal of the offset plane.
Returns
-------
tuple[[float, float, float], [float, float, float]]
Two points defining the offset line.
Notes
-----
    The offset direction is chosen such that if the line were along the positive
    X axis and the normal of the offset plane is along the positive Z axis, the
    offset line is in the direction of the positive Y axis.
Examples
--------
>>>
"""
a, b = line
ab = subtract_vectors(b, a)
direction = normalize_vector(cross_vectors(normal, ab))
if not is_item_iterable(distance):
distance = [distance]
distances = list(iterable_like(line, distance, distance[-1]))
u = scale_vector(direction, distances[0])
v = scale_vector(direction, distances[1])
c = add_vectors(a, u)
d = add_vectors(b, v)
return c, d
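# A small sketch of the convention in the Notes above: a unit line along +X
# with the default +Z normal is offset towards +Y.
#
#     >>> offset_line([(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)], 0.1)
#     ([0.0, 0.1, 0.0], [1.0, 0.1, 0.0])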
def offset_polygon(polygon, distance, tol=1e-6):
"""Offset a polygon (closed) by a distance.
Parameters
----------
polygon : sequence[point] | :class:`~compas.geometry.Polygon`
The XYZ coordinates of the corners of the polygon.
The first and last coordinates must not be identical.
distance : float | list[tuple[float, float]]
The offset distance as float.
A single value determines a constant offset globally.
A list of pairs of local offset values per line segment can be used to create variable offsets.
tol : float, optional
A tolerance value for intersection calculations.
Returns
-------
list[[float, float, float]]
The XYZ coordinates of the corners of the offset polygon.
        One offset corner is returned per input corner; the result is not explicitly closed.
Notes
-----
The offset direction is determined by the normal of the polygon.
If the polygon is in the XY plane and the normal is along the positive Z axis,
positive offset distances will result in an offset towards the inside of the
polygon.
The algorithm works also for spatial polygons that do not perfectly fit a plane.
Examples
--------
>>>
"""
normal = normal_polygon(polygon)
if not is_item_iterable(distance):
distance = [distance]
distances = iterable_like(polygon, distance, distance[-1])
polygon = polygon + polygon[:1]
segments = offset_segments(polygon, distances, normal)
offset = []
for s1, s2 in pairwise(segments[-1:] + segments):
point = intersect(s1, s2, tol)
offset.append(point)
return offset
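# Sketch (values worked out from the functions above): offsetting the
# counterclockwise unit square in the XY plane by a positive distance moves
# every edge towards the interior, e.g.
#
#     square = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0), (0.0, 1.0, 0.0)]
#     offset_polygon(square, 0.1)
#     # -> corners approximately (0.1, 0.1, 0), (0.9, 0.1, 0), (0.9, 0.9, 0), (0.1, 0.9, 0)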
def offset_polyline(polyline, distance, normal=[0.0, 0.0, 1.0], tol=1e-6):
"""Offset a polyline by a distance.
Parameters
----------
polyline : sequence[point] | :class:`~compas.geometry.Polyline`
The XYZ coordinates of the vertices of a polyline.
distance : float | list[tuple[float, float]]
The offset distance as float.
A single value determines a constant offset globally.
Alternatively, pairs of local offset values per line segment can be used to create variable offsets.
normal : [float, float, float] | :class:`~compas.geometry.Vector`, optional
The normal of the offset plane.
tol : float, optional
A tolerance value for intersection calculations.
Returns
-------
list[[float, float, float]]
The XYZ coordinates of the resulting polyline.
Notes
-----
The offset direction is determined by the provided normal vector.
If the polyline is in the XY plane and the normal is along the positive Z axis,
positive offset distances will result in counterclockwise offsets,
and negative values in clockwise direction.
Examples
--------
>>>
"""
if not is_item_iterable(distance):
distance = [distance]
distances = iterable_like(polyline, distance, distance[-1])
segments = offset_segments(polyline, distances, normal)
offset = [segments[0][0]]
for s1, s2 in pairwise(segments):
point = intersect(s1, s2, tol)
offset.append(point)
offset.append(segments[-1][1])
return offset
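# Hedged usage sketch (not part of the original module): offset an L-shaped
# polyline in the XY plane with the default normal [0.0, 0.0, 1.0]. Each segment
# is shifted to the left of its direction of travel and the shared corner is
# recomputed as the intersection of the two offset segments.
if __name__ == "__main__":
    pline = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]]
    print(offset_polyline(pline, 0.1))
    # expected (up to floating point noise):
    # [[0.0, 0.1, 0.0], [0.9, 0.1, 0.0], [0.9, 1.0, 0.0]]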
|
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Raspberry Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
import os
import tempfile
import picamera
import pytest
from PIL import Image
from collections import namedtuple
from verify import verify_image, RAW_FORMATS
CaptureCase = namedtuple('CaptureCase', ('format', 'ext', 'options'))
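# Each CaptureCase pairs an image format with the file extension used for
# on-disk captures and any extra keyword options forwarded to capture()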
CAPTURE_CASES = (
CaptureCase('jpeg', '.jpg', {'quality': 95}),
CaptureCase('jpeg', '.jpg', {}),
CaptureCase('jpeg', '.jpg', {'resize': (640, 480)}),
CaptureCase('jpeg', '.jpg', {'quality': 50}),
CaptureCase('gif', '.gif', {}),
#CaptureCase('png', '.png', {}),
CaptureCase('bmp', '.bmp', {}),
) + tuple(
CaptureCase(fmt, '.data', {})
for fmt in RAW_FORMATS
)
# Run tests with a variety of file suffixes and expected formats
@pytest.fixture(scope='module', params=CAPTURE_CASES)
def filename_format_options(request):
filename = tempfile.mkstemp(suffix=request.param.ext)[1]
def fin():
os.unlink(filename)
request.addfinalizer(fin)
return filename, request.param.format, request.param.options
# Run tests with a variety of file suffixes and expected formats
@pytest.fixture(params=CAPTURE_CASES)
def ext_format_options(request):
return request.param.ext, request.param.format, request.param.options
# Run tests with a variety of format specs
@pytest.fixture(params=CAPTURE_CASES)
def format_options(request):
return request.param.format, request.param.options
@pytest.fixture(params=(False, True))
def use_video_port(request):
return request.param
@pytest.fixture(params=(False, True))
def burst(request):
return request.param
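# Mark combinations of resolution, format, capture port, and burst mode that the
# firmware is known to mishandle as expected failures, so the parametrized tests
# below report them as xfail rather than as regressions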
def expected_failures(resolution, format, use_video_port, burst=False):
if resolution == (2592, 1944) and format in ('gif', 'bmp'):
pytest.xfail('Camera fails to produce output with max. res BMPs or GIFs')
if resolution == (2592, 1944) and format in ('rgba', 'bgra') and not use_video_port:
pytest.xfail('Camera runs out of memory with this combination')
if use_video_port and burst:
pytest.xfail('Burst captures not supported with use_video_port')
def test_capture_to_file(
camera, previewing, mode, filename_format_options, use_video_port):
filename, format, options = filename_format_options
resolution, framerate = mode
expected_failures(resolution, format, use_video_port)
camera.capture(
filename,
# Check that in the case of cooked formats, capture correctly
# derives the format from the extension
format=format if format in RAW_FORMATS else None,
use_video_port=use_video_port, **options)
if 'resize' in options:
resolution = options['resize']
verify_image(filename, format, resolution)
def test_capture_to_stream(
camera, previewing, mode, format_options, use_video_port):
stream = io.BytesIO()
format, options = format_options
resolution, framerate = mode
expected_failures(resolution, format, use_video_port)
if 'resize' in options:
resolution = options['resize']
camera.capture(stream, format, use_video_port=use_video_port, **options)
stream.seek(0)
verify_image(stream, format, resolution)
def test_capture_continuous_to_file(
camera, mode, ext_format_options, tempdir, use_video_port, burst):
ext, format, options = ext_format_options
resolution, framerate = mode
expected_failures(resolution, format, use_video_port, burst)
for i, filename in enumerate(
camera.capture_continuous(os.path.join(
tempdir, 'image{counter:02d}%s' % ext),
format=format if format in RAW_FORMATS else None,
use_video_port=use_video_port, burst=burst)):
verify_image(filename, format, resolution)
if i == 3:
break
def test_capture_continuous_to_stream(
camera, mode, format_options, use_video_port, burst):
format, options = format_options
resolution, framerate = mode
expected_failures(resolution, format, use_video_port, burst)
stream = io.BytesIO()
for i, foo in enumerate(
camera.capture_continuous(stream, format=format,
use_video_port=use_video_port, burst=burst)):
stream.truncate()
stream.seek(0)
verify_image(stream, format, resolution)
stream.seek(0)
if i == 3:
break
def test_capture_sequence_to_file(
camera, mode, ext_format_options, tempdir, use_video_port, burst):
ext, format, options = ext_format_options
resolution, framerate = mode
expected_failures(resolution, format, use_video_port, burst)
filenames = [
os.path.join(tempdir, 'image%d%s' % (i, ext))
for i in range(3)
]
camera.capture_sequence(
filenames, format=format,
use_video_port=use_video_port, burst=burst)
for filename in filenames:
verify_image(filename, format, resolution)
def test_capture_sequence_to_stream(
camera, mode, format_options, use_video_port, burst):
format, options = format_options
resolution, framerate = mode
expected_failures(resolution, format, use_video_port, burst)
streams = [io.BytesIO() for i in range(3)]
camera.capture_sequence(
streams, format=format,
use_video_port=use_video_port, burst=burst)
for stream in streams:
stream.seek(0)
verify_image(stream, format, resolution)
def test_capture_bayer(camera, mode):
stream = io.BytesIO()
camera.capture(stream, format='jpeg', bayer=True)
    # The raw Bayer data is appended to the end of the JPEG and starts with
    # 'BRCM'; its size depends on the sensor (6404096 bytes for the V1 OV5647
    # module, 10270208 bytes for the V2 IMX219 module)
if camera.exif_tags['IFD0.Model'].upper() == 'RP_OV5647':
stream.seek(-6404096, io.SEEK_END)
else:
stream.seek(-10270208, io.SEEK_END)
assert stream.read(4) == b'BRCM'
def test_capture_sequence_bayer(camera, mode):
streams = [io.BytesIO() for i in range(3)]
camera.capture_sequence(streams, format='jpeg', bayer=True)
for stream in streams:
if camera.exif_tags['IFD0.Model'].upper() == 'RP_OV5647':
stream.seek(-6404096, io.SEEK_END)
else:
stream.seek(-10270208, io.SEEK_END)
assert stream.read(4) == b'BRCM'
def test_exif_ascii(camera, mode):
camera.exif_tags['IFD0.Artist'] = 'Me!'
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2000 Foo'
# Exif is only supported with JPEGs...
stream = io.BytesIO()
camera.capture(stream, 'jpeg')
stream.seek(0)
img = Image.open(stream)
exif = img._getexif()
# IFD0.Artist = 315
# IFD0.Copyright = 33432
assert exif[315] == 'Me!'
assert exif[33432] == 'Copyright (c) 2000 Foo'
@pytest.mark.xfail(reason="Exif binary values don't work")
def test_exif_binary(camera, mode):
camera.exif_tags['IFD0.Copyright'] = b'Photographer copyright (c) 2000 Foo\x00Editor copyright (c) 2002 Bar\x00'
camera.exif_tags['IFD0.UserComment'] = b'UNICODE\x00\xff\xfeF\x00o\x00o\x00'
# Exif is only supported with JPEGs...
stream = io.BytesIO()
camera.capture(stream, 'jpeg')
stream.seek(0)
img = Image.open(stream)
exif = img._getexif()
# IFD0.Copyright = 33432
# IFD0.UserComment = 37510
assert exif[33432] == b'Photographer copyright (c) 2000 Foo\x00Editor copyright (c) 2002 Bar\x00'
assert exif[37510] == b'UNICODE\x00\xff\xfeF\x00o\x00o\x00'
def test_capture_bad_format(camera):
with pytest.raises(picamera.PiCameraValueError):
camera.capture('test.foo')
with pytest.raises(picamera.PiCameraValueError):
camera.capture('test.jpg', format='foo')
with pytest.raises(picamera.PiCameraValueError):
camera.capture('test.tiff')
with pytest.raises(picamera.PiCameraValueError):
camera.capture('test.jpg', format='tiff')
def test_capture_bad_burst(camera):
with pytest.raises(picamera.PiCameraValueError):
camera.capture_sequence(['test.jpg'], use_video_port=True, burst=True)
with pytest.raises(picamera.PiCameraValueError):
camera.capture('test.jpg', use_video_port=True, burst=True)
def test_capture_bytes_filename(camera, tmpdir):
camera.capture(str(tmpdir.join('test.jpg')).encode('utf-8'))
def test_capture_bytes_format(camera, tmpdir):
camera.capture(str(tmpdir.join('test.jpg')), b'jpeg')
def test_capture_continuous_repeat(camera):
stream = io.BytesIO()
images = []
for image in camera.capture_continuous(stream, format='yuv', burst=True):
images.append(stream.getvalue())
if len(images) == 2:
break
stream.seek(0)
stream.truncate()
assert images[0] != images[1]
|
|
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
import itertools
import ctc_loss
import os
n=2**19-3
def map_lambda():
return n+1
def rev_map_lambda():
return "<UNK>"
def load_text(n,num_samples=None):
# fname = 'Oxford_English_Dictionary.txt'
# txt = []
# with open(fname,'rb') as f:
# txt = f.readlines()
# txt = [x.decode('utf-8').strip() for x in txt]
# txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
# List of words
# word_list = [x.split(' ', 1)[0].strip() for x in txt]
# # List of definitions
# def_list = [x.split(' ', 1)[1].strip()for x in txt]
with open('./training_data/training_data.pkl','rb') as raw:
word_list,dl=pkl.load(raw)
def_list=[]
# def_list=[' '.join(defi) for defi in def_list]
i=0
# words={}
while i<len( dl):
defi=dl[i]
if len(defi)>0:
def_list+=[' '.join(defi)]
i+=1
else:
dl.pop(i)
word_list.pop(i)
# for w,d in zip(word_list,def_list):
# if w not in words:
# words[w]=[]
# words[w].append(d)
# word_list=[]
# def_list=[]
# for word in words:
# word_list.append(word)
# # def_list.append(random.choice(words[word]))
# def_list.append(words[word][0])
maxlen=0
minlen=100
for defi in def_list:
minlen=min(minlen,len(defi.split()))
maxlen=max(maxlen,len(defi.split()))
print(minlen)
print(maxlen)
maxlen=30
# # Initialize the "CountVectorizer" object, which is scikit-learn's
# # bag of words tool.
# vectorizer = CountVectorizer(analyzer = "word", \
# tokenizer = None, \
# preprocessor = None, \
# stop_words = None, \
# max_features = None, \
# token_pattern='\\b\\w+\\b') # Keep single character words
_map,rev_map=get_one_hot_map(word_list,def_list,n)
pkl.dump(_map,open('mapa.pkl','wb'))
pkl.dump(rev_map,open('rev_mapa.pkl','wb'))
# exit()
	if num_samples is None:
num_samples=len(word_list)
# X = (36665, 56210)
# X = map_one_hot(word_list[:num_samples],_map,1,n)
# # y = (36665, 56210)
# # print _map
# y,mask = map_one_hot(def_list[:num_samples],_map,maxlen,n)
# np.save('Xa',X)
# np.save('ya',y)
# np.save('maska',mask)
X=np.load('Xa.npy','r')
y=np.load('ya.npy','r')
mask=np.load('maska.npy','r')
print (np.max(y))
return X, y, mask,rev_map
def get_one_hot_map(to_def,corpus,n):
# words={}
# for line in to_def:
# if line:
# words[line.split()[0]]=1
# counts=defaultdict(int)
# uniq=defaultdict(int)
# for line in corpus:
# for word in line.split():
# if word not in words:
# counts[word]+=1
# words=list(words.keys())
words=[]
counts=defaultdict(int)
uniq=defaultdict(int)
for line in to_def+corpus:
for word in line.split():
if word not in words:
counts[word]+=1
_map=defaultdict(map_lambda)
rev_map=defaultdict(rev_map_lambda)
# words=words[:25000]
for i in counts.values():
uniq[i]+=1
print (len(words))
# random.shuffle(words)
words+=list(map(lambda z:z[0],reversed(sorted(counts.items(),key=lambda x:x[1]))))[:n-len(words)]
print (len(words))
i=0
# random.shuffle(words)
# for num_bits in range(binary_dim):
# for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
# bitmap=np.zeros(binary_dim)
# bitmap[np.array(bit_config)]=1
# num=bitmap*(2** np.arange(binary_dim ))
# num=np.sum(num)
# num=int(num)
# word=words[i]
# _map[word]=num
# rev_map[num]=word
# i+=1
# if i>=len(words):
# break
# if i>=len(words):
# break
i+=1
for word in words:
i+=1
_map[word]=i
rev_map[i]=word
rev_map[n+2]='<UNK>'
if zero_end_tok:
rev_map[1]='.'
else:
rev_map[1]='Start'
rev_map[n+3]='End'
print (list(reversed(sorted(uniq.items()))))
print (len(list(uniq.items())))
print (len(rev_map.keys()))
print(len(_map.keys()))
print ('heyo')
# print rev_map
return _map,rev_map
def map_word_emb(corpus,_map):
### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
rtn=[]
rtn2=[]
num_failed=0
num_counted=0
for word in corpus:
w=word.lower()
num_counted+=1
if w not in _map:
num_failed+=1
mapped=_map[w]
rtn.append(mapped)
if get_rand_vec:
mapped_rand=random.choice(list(_map.keys()))
while mapped_rand==word:
mapped_rand=random.choice(list(_map.keys()))
mapped_rand=_map[mapped_rand]
rtn2.append(mapped_rand)
	print 'fraction of words missing from the map:',num_failed/float(num_counted)
if get_rand_vec:
return np.array(rtn),np.array(rtn2)
return np.array(rtn)
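# Encode a corpus against the vocabulary map. With maxlen == 1 each entry becomes
# a single code (a dense one-hot row, or an LSB-first binary code depending on
# the onehot/form2 flags); otherwise each entry becomes a padded sequence of
# length maxlen + 2 (word ids, or binary codes when form2 is set) together with
# a float mask marking the valid positions.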
def map_one_hot(corpus,_map,maxlen,n):
if maxlen==1:
if not form2:
total_not=0
rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
for l,line in enumerate(corpus):
if len(line)==0:
rtn[l,-1]=1
else:
mapped=_map[line]
if mapped==75001:
total_not+=1
rtn[l,mapped]=1
print (total_not,len(corpus))
return rtn
else:
total_not=0
if not onehot:
rtn=np.zeros([len(corpus),binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),2**binary_dim],dtype=np.float32)
for l,line in enumerate(corpus):
# if len(line)==0:
# rtn[l]=n+2
# else:
# if line not in _map:
# total_not+=1
mapped=_map[line]
if mapped==75001:
total_not+=1
if onehot:
binrep=np.zeros(2**binary_dim)
print line
binrep[mapped]=1
else:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l]=binrep
print (total_not,len(corpus))
return rtn
else:
if form2:
rtn=np.zeros([len(corpus),maxlen+2,binary_dim],dtype=np.float32)
else:
rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
print (rtn.shape)
mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
print (mask.shape)
mask[:,1]=1.0
totes=0
nopes=0
wtf=0
for l,_line in enumerate(corpus):
x=0
line=_line.split()
for i in range(min(len(line),maxlen)):
# if line[i] not in _map:
# nopes+=1
mapped=_map[line[i]]
if form2:
binrep=(1&(mapped/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
rtn[l,i+1,:]=binrep
else:
rtn[l,i+1]=mapped
if mapped==75001:
wtf+=1
mask[l,i+1]=1.0
totes+=1
x=i+1
to_app=n+2
if zero_end_tok:
to_app=0
if form2:
rtn[l,x+1,:]=(1&(to_app/(2**np.arange(binary_dim))).astype(np.uint32)).astype(np.float32)
else:
rtn[l,x+1]=to_app
mask[l,x+1]=1.0
print (nopes,totes,wtf)
return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
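	# Glorot & Bengio (2010) uniform initializer: sample from U(-limit, limit)
	# with limit = constant * sqrt(6 / (fan_in + fan_out)). Note that the default
	# scale constant=1e-4 here is far smaller than the usual constant=1.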
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
print self.learning_rate
self.batch_size = batch_size
if global_step is None:
			global_step=tf.Variable(0,trainable=False)
self.global_step=global_step
# tf Graph input
self.n_words=network_architecture['n_input']
if not form2:
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
else:
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
self.intype=type(self.x)
if not form2:
self.caption_placeholder = tf.placeholder(tf.int32, [None,network_architecture["maxlen"]],name='caption_placeholder')
else:
self.caption_placeholder = tf.placeholder(tf.float32, [None, network_architecture["maxlen"],self.n_words],name='caption_placeholder')
print self.caption_placeholder.shape
self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
self.timestep=tf.placeholder(tf.float32,[],name='timestep')
# Create autoencoder network
to_restore=None
self.embw=tf.Variable(xavier_init(network_architecture['n_input'],network_architecture['n_z']),name='embw')
self.embb=tf.Variable(tf.zeros([network_architecture['n_z']]),name='embb')
if not generative:
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
to_restore=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self._create_loss_optimizer()
self.test=test
else:
self._build_gen()
# Initializing the tensor flow variables
init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.InteractiveSession()
if embeddings_trainable:
self.saver = tf.train.Saver(var_list=to_restore,max_to_keep=100)
saved_path=tf.train.latest_checkpoint(model_path)
else:
self.saver= tf.train.Saver(var_list=self.untrainable_variables,max_to_keep=100)
mod_path=model_path
if use_ctc:
mod_path=mod_path[:-3]
saved_path=tf.train.latest_checkpoint(mod_path.replace('defdef','embtransfer'))
self.sess.run(init)
if ctrain:
self.saver.restore(self.sess, saved_path)
self.saver=tf.train.Saver(max_to_keep=100)
def _create_network(self):
# Initialize autoencode network weights and biases
network_weights = self._initialize_weights(**self.network_architecture)
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
self.network_weights=network_weights
seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32)
embedded_input,embedded_input_KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],tf.reshape(self.caption_placeholder,[-1,self.network_architecture['n_input']]),logit=True)
embedded_input=tf.reshape(embedded_input,[-1,self.network_architecture['maxlen'],self.network_architecture['n_lstm_input']])
if not vanilla:
embedded_input_KLD_loss=tf.reshape(embedded_input_KLD_loss,[-1,self.network_architecture['maxlen']])[:,1:]
encoder_input=embedded_input[:,1:,:]
cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
cell=tf.contrib.rnn.MultiRNNCell([cell]*lstm_stack)
if not use_bdlstm:
encoder_outs,encoder_states=rnn.dynamic_rnn(cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
else:
backward_cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
backward_cell=tf.contrib.rnn.MultiRNNCell([backward_cell]*lstm_stack)
encoder_outs,encoder_states=rnn.bidirectional_dynamic_rnn(cell,backward_cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
ix_range=tf.range(0,self.batch_size,1)
ixs=tf.expand_dims(ix_range,-1)
to_cat=tf.expand_dims(seqlen-2,-1)
gather_inds=tf.concat([ixs,to_cat],axis=-1)
print encoder_outs
outs=tf.gather_nd(encoder_outs,gather_inds)
outs=tf.nn.dropout(outs,.75)
self.deb=tf.gather_nd(self.caption_placeholder[:,1:,:],gather_inds)
print outs.shape
input_embedding,input_embedding_KLD_loss=self._get_middle_embedding([network_weights['middle_encoding'],network_weights['biases_middle_encoding']],network_weights['middle_encoding'],outs,logit=True)
input_embedding=tf.nn.l2_normalize(input_embedding,dim=-1)
other_loss=tf.constant(0,dtype=tf.float32)
KLD_penalty=(tf.cast(self.timestep,tf.float32)/1.0)*1e-3
cos_penalty=tf.maximum(-0.1,(tf.cast(self.timestep,tf.float32)/(5.0)))*1e-3
input_KLD_loss=0
if form3:
_x,input_KLD_loss=self._get_input_embedding([network_weights['embmap'],network_weights['embmap_biases']],network_weights['embmap'])
input_KLD_loss=tf.reduce_mean(input_KLD_loss)*KLD_penalty#*tf.constant(0.0,dtype=tf.float32)
normed_embedding= tf.nn.l2_normalize(input_embedding, dim=-1)
normed_target=tf.nn.l2_normalize(_x,dim=-1)
cos_sim=(tf.reduce_sum(tf.multiply(normed_embedding,normed_target),axis=-1))
# # self.exp_loss=tf.reduce_mean((-cos_sim))
# # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
other_loss += tf.reduce_mean(1-(cos_sim))*cos_penalty
# other_loss+=tf.reduce_mean(tf.reduce_sum(tf.square(_x-input_embedding),axis=-1))*cos_penalty
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
# if not same_embedding:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
# else:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
if not embeddings_trainable:
input_embedding=tf.stop_gradient(input_embedding)
# embed2decoder=tf.Variable(xavier_init(self.network_architecture['n_z_m_2'],self.network_architecture['n_lstm_input']),name='decoder_embedding_weight')
# embed2decoder_bias=tf.Variable(tf.zeros(self.network_architecture['n_lstm_input']),name='decoder_embedding_bias')
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
# input_embedding=tf.matmul(input_embedding,embed2decoder)+embed2decoder_bias
loss = 0
self.debug=0
probs=[]
with tf.variable_scope("RNN"):
for i in range(self.network_architecture['maxlen']):
if i > 0:
# current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
if form4:
current_embedding,KLD_loss=input_embedding,0
elif form2:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1,:],logit=True)
else:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])*KLD_penalty
else:
current_embedding = input_embedding
if i > 0:
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
if i > 0:
if not form2:
labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
ix_range=tf.range(0, self.batch_size, 1)
ixs = tf.expand_dims(ix_range, 1)
concat = tf.concat([ixs, labels],1)
onehot = tf.sparse_to_dense(
concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
else:
onehot=self.caption_placeholder[:,i,:]
logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not use_ctc:
if form2:
# best_word=tf.nn.softmax(logit)
# best_word=tf.round(best_word)
# all_the_f_one_h.append(best_word)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy=tf.reduce_sum(xentropy,reduction_indices=-1)
else:
xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy = xentropy * self.mask[:,i]
xentropy=tf.reduce_sum(xentropy)
self.debug+=xentropy
loss += xentropy
else:
probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
self.debug=[input_KLD_loss,tf.reduce_mean(input_embedding_KLD_loss)/self.batch_size*KLD_penalty,other_loss,KLD_penalty]
if not use_ctc:
loss_ctc=0
# self.debug=other_loss
# self.debug=[input_KLD_loss,embedded_input_KLD_loss,input_embedding_KLD_loss]
else:
probs=tf.concat(probs,axis=1)
probs=ctc_loss.get_output_probabilities(probs,self.caption_placeholder[:,1:,:])
loss_ctc=ctc_loss.loss(probs,self.caption_placeholder[:,1:,:],self.network_architecture['maxlen']-2,self.batch_size,seqlen-1)
self.debug=loss_ctc
#
loss = (loss / tf.reduce_sum(self.mask[:,1:]))+tf.reduce_sum(input_embedding_KLD_loss)/self.batch_size*KLD_penalty+tf.reduce_sum(embedded_input_KLD_loss*self.mask[:,1:])/tf.reduce_sum(self.mask[:,1:])*KLD_penalty+loss_ctc+input_KLD_loss+other_loss
self.loss=loss
def _initialize_weights(self, n_lstm_input, maxlen,
n_input, n_z, n_z_m,n_z_m_2):
all_weights = dict()
if form3:
n_in=n_z
else:
n_in=n_input
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight',trainable=embeddings_trainable),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias',trainable=embeddings_trainable)}
if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab',trainable=embeddings_trainable)}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='out_log_sigma',trainable=embeddings_trainable)
}
else:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable)}
all_weights['variational_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable)}
self.untrainable_variables=all_weights['input_meaning'].values()+all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
if mid_vae:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb'),
'out_log_sigma': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_log_sigmab')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_log_sigma'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
all_weights['embmap']={
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_log_sigma'),
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')
}
all_weights['embmap_biases']={
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_log_sigmab',trainable=embeddings_trainable)
}
else:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
all_weights['embmap']={
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean'),
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')
}
all_weights['embmap_biases']={
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable)
}
self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
if lstm_stack>1:
self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
all_weights['LSTM'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
'lstm': self.lstm}
return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
if not form3:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x)
else:
x=tf.matmul(self.x,self.embw)+self.embb
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
return embedding,vae_loss
def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False):
if logit:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
print z.shape
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
if form3:
x=tf.matmul(x,self.embw)+self.embb
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if not vanilla:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if not vanilla:
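			# Reparameterization trick: draw epsilon ~ N(0, I) and form
			# z = mu + sigma * epsilon, so the sample remains differentiable
			# with respect to mu and logvar.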
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla:
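			# Closed-form KL divergence between N(mu, sigma^2) and the standard
			# normal prior: KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
			# summed over the latent dimensions.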
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def _vae_sample_mid(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if mid_vae:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
if mid_vae:
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
if mid_vae:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if mid_vae:
			print 'sampling in the middle encoder (mid_vae):',mid_vae
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def _create_loss_optimizer(self):
if clip_grad:
opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), .1)
self.optimizer = opt_func.apply_gradients(zip(grads, tvars))
else:
self.optimizer = \
tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def _create_loss_test(self):
self.test_op = \
tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})
def partial_fit(self, X,y,mask,testify=False,timestep=0):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
if self.test and testify:
print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask})
exit()
else:
			opt, cost, debug_vals = self.sess.run((self.optimizer, self.loss, self.debug),
								  feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask, self.timestep: timestep})
			# print debug_vals
			# print deb
			# exit()
			return cost, debug_vals
def _build_gen(self):
#same setup as `_create_network` function
network_weights = self._initialize_weights(**self.network_architecture)
if form2:
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
else:
start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
self.network_weights=network_weights
if not same_embedding:
input_embedding,_=self._get_input_embedding([network_weights['embmap'],network_weights['embmap_biases']],network_weights['embmap'])
else:
input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
print input_embedding.shape
# image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(input_embedding, state)
print state,output.shape
if form4:
previous_word,_=input_embedding,None
elif form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
print previous_word.shape
# previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(self.network_architecture['maxlen']):
tf.get_variable_scope().reuse_variables()
print i
out, state = self.lstm(previous_word, state)
# get a one-hot word encoding from the output of the LSTM
logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not form2:
best_word = tf.argmax(logit, 1)
else:
best_word=tf.nn.sigmoid(logit)
best_word=tf.round(best_word)
# with tf.device("/cpu:0"):
# # get the embedding of the best_word to use as input to the next iteration of our LSTM
# previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
# previous_word += self.embedding_bias
print logit.shape
if form4:
previous_word,_=input_embedding,None
elif form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
print previous_word.shape
all_words.append(best_word)
self.generated_words=all_words
def generate(self, _map, x):
""" Generate data by sampling from latent space.
If z_mu is not None, data for this point in latent space is
generated. Otherwise, z_mu is drawn from prior in latent
space.
# """
# if z_mu is None:
# z_mu = np.random.normal(size=self.network_architecture["n_z"])
# # Note: This maps to mean of distribution, we could alternatively
# # sample from Gaussian distribution
# return self.sess.run(self.x_reconstr_mean,
# feed_dict={self.z: z_mu})
# saver = tf.train.Saver()
# saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
generated_word_index,f_it= self.sess.run([self.generated_words,all_the_f_one_h], feed_dict={self.x:x})
print f_it
print generated_word_index
if form2:
generated_word_index=np.array(bin_to_int(generated_word_index))
generated_word_index=np.rollaxis(generated_word_index,1)
else:
generated_word_index=np.array(generated_word_index)
return generated_word_index
# generated_sentence = ixtoword(_map,generated_word_index)
# return generated_sentence
def ixtoword(_map,ixs):
return [[_map[x] for x in y] for y in ixs]
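# Decode LSB-first binary codes back into integer word ids: the trailing axis of
# length binary_dim holds the bits of one id, so a dot product with the powers
# of two recovers the id.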
def bin_to_int(a):
return [(x*(2** np.arange(x.shape[-1] ))).sum(axis=-1).astype(np.uint32) for x in a]
def train(network_architecture, learning_rate=0.001,
batch_size=100, training_epochs=10, display_step=2,gen=False,ctrain=False,test=False):
global_step=tf.Variable(0,trainable=False)
total_batch = int(n_samples / batch_size)
if should_decay and not gen:
learning_rate = tf.train.exponential_decay(learning_rate, global_step,
total_batch, 0.95, staircase=True)
vae = VariationalAutoencoder(network_architecture,
learning_rate=learning_rate,
batch_size=batch_size,generative=gen,ctrain=ctrain,test=test,global_step=global_step)
# Training cycle
# if test:
# maxlen=network_architecture['maxlen']
# return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[])
if gen:
return vae
costs=[]
indlist=np.arange(all_samps).astype(int)
# indlist=np.arange(10*batch_size).astype(int)
for epoch in range(training_epochs):
avg_cost = 0.
# Loop over all batches
np.random.shuffle(indlist)
testify=False
avg_loss=0
# for i in range(1):
for i in range(total_batch):
# break
ts=i
# i=0
inds=np.random.choice(indlist,batch_size)
# print indlist[i*batch_size:(i+1)*batch_size]
# batch_xs = X[indlist[i*batch_size:(i+1)*batch_size]]
batch_xs = X[inds]
# Fit training using batch data
# if epoch==2 and i ==0:
# testify=True
# cost,loss = vae.partial_fit(batch_xs,y[indlist[i*batch_size:(i+1)*batch_size]].astype(np.uint32),mask[indlist[i*batch_size:(i+1)*batch_size]],timestep=epoch*total_batch+ts,testify=testify)
cost,loss = vae.partial_fit(batch_xs,y[inds].astype(np.uint32),mask[inds],timestep=(epoch)+1,testify=testify)
# Compute average loss
avg_cost = avg_cost * i /(i+1) +cost/(i+1)
# avg_loss=avg_loss*i/(i+1)+loss/(i+1)
if i% display_step==0:
print avg_cost,loss,cost
if epoch == 0 and ts==0:
costs.append(avg_cost)
costs.append(avg_cost)
# Display logs per epoch step
if epoch % (display_step*10) == 0 or epoch==1:
if should_save:
print 'saving'
vae.saver.save(vae.sess, os.path.join(model_path,'model'))
pkl.dump(costs,open(loss_output_path,'wb'))
print("Epoch:", '%04d' % (epoch+1),
"cost=", avg_cost)
return vae
if __name__ == "__main__":
import sys
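	# Best-effort summary of the positional command-line arguments consumed below:
	#   1: 'vanilla' keeps the plain autoencoder flag (note vanilla is reset to
	#      True again a few lines further down)
	#   2: 'mid_vae' enables variational sampling in the middle encoder
	#   3: 'clip' enables gradient clipping
	#   4: batch size                      5: 'transfer' freezes the embeddings
	#   6: binary_dim (bits per word id)   7: LSTM hidden size
	#   8: model checkpoint directory      9: maximum definition length
	#  10: n_z                            11: n_z_m
	#  12: n_z_m_2                        13: number of training epochs
	#  14: 'ctc_loss' switches to the CTC loss
	#  15: number of stacked LSTM layers
	#  16: anything other than 'forward' uses a bidirectional LSTM encoder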
form2=True
vanilla=True
if sys.argv[1]!='vanilla':
vanilla=False
mid_vae=False
form3= True
form4=False
vanilla=True
if sys.argv[2]=='mid_vae':
mid_vae=True
print 'mid_vae'
same_embedding=False
clip_grad=True
if sys.argv[3]!='clip':
clip_grad=False
should_save=True
should_train=True
# should_train=not should_train
should_continue=False
# should_continue=True
should_decay=True
zero_end_tok=True
training_epochs=int(sys.argv[13])
batch_size=int(sys.argv[4])
onehot=False
embeddings_trainable=False
if sys.argv[5]!='transfer':
print 'true embs'
embeddings_trainable=True
transfertype2=True
binary_dim=int(sys.argv[6])
all_the_f_one_h=[]
if not zero_end_tok:
X, y, mask, _map = load_text(2**binary_dim-4)
else:
X, y, mask, _map = load_text(2**binary_dim-3)
n_input =binary_dim
n_samples = 30000
lstm_dim=int(sys.argv[7])
model_path = sys.argv[8]
vartype=''
transfertype=''
maxlen=int(sys.argv[9])+2
n_z=int(sys.argv[10])
n_z_m=int(sys.argv[11])
n_z_m_2=int(sys.argv[12])
if not vanilla:
vartype='var'
if not embeddings_trainable:
transfertype='transfer'
cliptype=''
if clip_grad:
cliptype='clip'
use_ctc=False
losstype=''
if sys.argv[14]=='ctc_loss':
use_ctc=True
losstype='ctc'
lstm_stack=int(sys.argv[15])
use_bdlstm=False
bdlstmtype=''
if sys.argv[16]!='forward':
use_bdlstm=True
bdlstmtype='bdlstm'
loss_output_path= 'losses/%s%ss_%sb_%sl_%sh_%sd_%sz_%szm_%s%s%sdefdef%s4.pkl'%(bdlstmtype,str(lstm_stack),str(batch_size),str(maxlen-2),str(lstm_dim),str(n_input),str(n_z),str(n_z_m),str(losstype),str(cliptype),str(vartype),str(transfertype))
all_samps=len(X)
n_samples=all_samps
# X, y = X[:n_samples, :], y[:n_samples, :]
network_architecture = \
dict(maxlen=maxlen, # 2nd layer decoder neurons
n_input=n_input, # One hot encoding input
n_lstm_input=lstm_dim, # LSTM cell size
n_z=n_z, # dimensionality of latent space
n_z_m=n_z_m,
n_z_m_2=n_z_m_2
)
# batch_size=1
if should_train:
# vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue)
# print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,test=True)
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,learning_rate=.005)
else:
vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
# # vae_2d._build_gen()
ind_list=np.arange(len(X)).astype(int)
# np.random.shuffle(ind_list)
x_sample = X[ind_list[:batch_size]]
print x_sample
y_sample = y[ind_list[:batch_size]]
print y_sample
y_hat = vae_2d.generate(_map,x_sample)
y_hat=y_hat[:10]
# print y_hat
y_hat_words=ixtoword(_map,y_hat)
print y_hat_words
if form2:
y_words=ixtoword(_map,np.array(bin_to_int(y_sample[:10])))
else:
y_words=ixtoword(_map,y_sample)
print(y_hat)
print(y_hat_words)
print(y_words)
print(ixtoword(_map,bin_to_int(np.expand_dims(x_sample[:10],axis=0))))
# # plt.figure(figsize=(8, 6))
# plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
# plt.colorbar()
# plt.grid()
# plt.show()
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# This utility assumes you already have a local Spark git folder and that you
# have added remotes corresponding to both (i) the GitHub Apache Spark
# mirror and (ii) the Apache git repo.
import json
import os
import re
import subprocess
import sys
import urllib2
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Spark git development area
SPARK_HOME = os.environ.get("SPARK_CSV_HOME", os.getcwd())
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "origin")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "origin")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# ASF JIRA server locations, used when resolving issues after a merge
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/databricks/spark-csv/pull"
GITHUB_API_BASE = "https://api.github.com/repos/databricks/spark-csv"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
print "Exceeded the GitHub API rate limit; see the instructions in " + \
"dev/merge_spark_pr.py to configure an OAuth token for making authenticated " + \
"GitHub requests."
else:
print "Unable to fetch URL, exiting: %s" % url
sys.exit(-1)
def fail(msg):
print msg
clean_up()
sys.exit(-1)
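# Run a shell command and return its output. String commands are split naively on
# spaces, so any command whose arguments contain spaces must be passed as a list.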
def run_cmd(cmd):
print cmd
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
print "Restoring head pointer to %s" % original_head
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print "Deleting local branch %s" % branch
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = raw_input(
"Enter primary author in the format of \"name <email>\" [%s]: " %
distinct_authors[0])
if primary_author == "":
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of Spark.
merge_message_flags += ["-m", body.replace("@", "")]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close the PR
merge_message_flags += ["-m", "Closes #%s from %s." % (pr_num, pr_repo_desc)]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
try:
run_cmd("git cherry-pick -sx %s" % merge_hash)
except Exception as e:
msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
continue_maybe(msg)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
if jira_id == "":
jira_id = default_jira_id
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
cur_status = issue.fields.status.name
cur_summary = issue.fields.summary
cur_assignee = issue.fields.assignee
if cur_assignee is None:
cur_assignee = "NOT ASSIGNED!!!"
else:
cur_assignee = cur_assignee.displayName
if cur_status == "Resolved" or cur_status == "Closed":
fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
print ("=== JIRA %s ===" % jira_id)
print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
versions = asf_jira.project_versions("SPARK")
versions = sorted(versions, key=lambda x: x.name, reverse=True)
versions = filter(lambda x: x.raw['released'] is False, versions)
# Consider only x.y.z versions
versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
for v in default_fix_versions:
# Handles the case where we have forked a release branch but not yet made the release.
# In this case, if the PR is committed to the master branch and the release branch, we
# only consider the release branch to be the fix version. E.g. it is not valid to have
# both 1.1.0 and 1.0.0 as fix versions.
(major, minor, patch) = v.split(".")
if patch == "0":
previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
if previous in default_fix_versions:
default_fix_versions = filter(lambda x: x != v, default_fix_versions)
default_fix_versions = ",".join(default_fix_versions)
fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions = jira_fix_versions,
comment = comment, resolution = {'id': resolution.raw['id']})
print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
jira_ids = re.findall("SPARK-[0-9]{4,5}", title)
if len(jira_ids) == 0:
resolve_jira_issue(merge_branches, comment)
for jira_id in jira_ids:
resolve_jira_issue(merge_branches, comment, jira_id)
def standardize_jira_ref(text):
"""
Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to "[SPARK-XXX] [MLLIB] Issue"
>>> standardize_jira_ref("[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref("[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123] [PROJECT INFRA] [WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954] [MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref("SPARK-1094 Support MiMa for reporting binary compatibility accross versions.")
'[SPARK-1094] Support MiMa for reporting binary compatibility accross versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146] [WIP] Vagrant support for Spark'
>>> standardize_jira_ref("SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref("[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250] [SPARK-6146] [SPARK-5911] [SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code'
"""
jira_refs = []
components = []
# If the string is compliant, no need to process any further
if (re.search(r'^\[SPARK-[0-9]{3,6}\] (\[[A-Z0-9_\s,]+\] )+\S+', text)):
return text
# Extract JIRA ref(s):
pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
text = text.replace(ref, '')
# Extract spark component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
clean_text = ' '.join(jira_refs).strip() + " " + ' '.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text
def main():
global original_head
os.chdir(SPARK_HOME)
original_head = run_cmd("git rev-parse HEAD")[:8]
branches = get_json("%s/branches" % GITHUB_API_BASE)
# branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
latest_branch = "master" # sorted(branch_names, reverse=True)[0]
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
# Decide whether to use the modified title or not
modified_title = standardize_jira_ref(pr["title"])
if modified_title != pr["title"]:
print "I've re-written the title as follows to match the standard format:"
print "Original: %s" % pr["title"]
print "Modified: %s" % modified_title
result = raw_input("Would you like to use the modified title? (y/n): ")
if result.lower() == "y":
title = modified_title
print "Using modified title:"
else:
title = pr["title"]
print "Using original title:"
print title
else:
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
[e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
merge_hash = merge_commits[0]["commit_id"]
message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
"%s^{commit}" % merge_hash]).strip() != ""
if not commit_is_downloaded:
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
print "Found commit %s:\n%s" % (merge_hash, message)
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
if JIRA_IMPORTED:
if JIRA_USERNAME and JIRA_PASSWORD:
continue_maybe("Would you like to update an associated JIRA?")
jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
resolve_jira_issues(title, merged_refs, jira_comment)
else:
print "JIRA_USERNAME and JIRA_PASSWORD not set"
print "Exiting without trying to close the associated JIRA."
else:
print "Could not find jira-python library. Run 'sudo pip install jira' to install."
print "Exiting without trying to close the associated JIRA."
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
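# A minimal sketch of how this reference implementation can be driven on its
# own (hypothetical values, not taken from the tests below). It applies the
# bias-corrected step size alpha_t = alpha * sqrt(1 - beta2**t) / (1 - beta1**t):
#
#   param, m, v = np.array([1.0, 2.0]), 0.0, 0.0
#   grad = np.array([0.1, 0.1])
#   for t in range(1, 4):
#     param, m, v = adam_update_numpy(param, grad, t, m, v)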
class AdamOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
self.doTestSparse(use_resource=False)
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = adam.AdamOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.AdamOptimizer().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.AdamOptimizer().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def doTestBasic(self, use_resource=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.test_session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
self.assertIn(opt._beta1_power, opt_variables)
self.assertIn(opt._beta2_power, opt_variables)
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
if context.in_graph_mode():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
if context.in_graph_mode():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta2_power))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasic(self):
with self.test_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTwoSessions(self):
optimizer = adam.AdamOptimizer()
g = ops.Graph()
with g.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state not keyed by graph the following line
# fails.
optimizer.apply_gradients([(grads0, var0)])
if __name__ == "__main__":
test.main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.neutron import neutron
from heat.engine.resources.openstack.neutron import subnet
from heat.engine import support
LOG = logging.getLogger(__name__)
class Port(neutron.NeutronResource):
PROPERTIES = (
NAME, NETWORK_ID, NETWORK, FIXED_IPS, SECURITY_GROUPS,
REPLACEMENT_POLICY, DEVICE_ID, DEVICE_OWNER
) = (
'name', 'network_id', 'network', 'fixed_ips', 'security_groups',
'replacement_policy', 'device_id', 'device_owner'
)
EXTRA_PROPERTIES = (
VALUE_SPECS, ADMIN_STATE_UP, MAC_ADDRESS,
ALLOWED_ADDRESS_PAIRS, VNIC_TYPE, QOS_POLICY,
PORT_SECURITY_ENABLED,
) = (
'value_specs', 'admin_state_up', 'mac_address',
'allowed_address_pairs', 'binding:vnic_type', 'qos_policy',
'port_security_enabled',
)
_FIXED_IP_KEYS = (
FIXED_IP_SUBNET_ID, FIXED_IP_SUBNET, FIXED_IP_IP_ADDRESS,
) = (
'subnet_id', 'subnet', 'ip_address',
)
_ALLOWED_ADDRESS_PAIR_KEYS = (
ALLOWED_ADDRESS_PAIR_MAC_ADDRESS, ALLOWED_ADDRESS_PAIR_IP_ADDRESS,
) = (
'mac_address', 'ip_address',
)
ATTRIBUTES = (
ADMIN_STATE_UP_ATTR, DEVICE_ID_ATTR, DEVICE_OWNER_ATTR, FIXED_IPS_ATTR,
MAC_ADDRESS_ATTR, NAME_ATTR, NETWORK_ID_ATTR, SECURITY_GROUPS_ATTR,
STATUS, TENANT_ID, ALLOWED_ADDRESS_PAIRS_ATTR, SUBNETS_ATTR,
PORT_SECURITY_ENABLED_ATTR, QOS_POLICY_ATTR,
) = (
'admin_state_up', 'device_id', 'device_owner', 'fixed_ips',
'mac_address', 'name', 'network_id', 'security_groups',
'status', 'tenant_id', 'allowed_address_pairs', 'subnets',
'port_security_enabled', 'qos_policy_id',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('A symbolic name for this port.'),
update_allowed=True
),
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use property %s.') % NETWORK,
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
],
),
NETWORK: properties.Schema(
properties.Schema.STRING,
            _('Network this port belongs to. If you plan to use the current '
              'port to assign a Floating IP, you should specify %(fixed_ips)s '
              'with %(subnet)s. Note that if this is changed to a different '
              'network on update, the port will be replaced.') %
{'fixed_ips': FIXED_IPS, 'subnet': FIXED_IP_SUBNET},
support_status=support.SupportStatus(version='2014.2'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.network')
],
update_allowed=True,
),
DEVICE_ID: properties.Schema(
properties.Schema.STRING,
_('Device ID of this port.'),
update_allowed=True
),
DEVICE_OWNER: properties.Schema(
properties.Schema.STRING,
_('Name of the network owning the port. '
'The value is typically network:floatingip '
'or network:router_interface or network:dhcp'),
update_allowed=True
),
FIXED_IPS: properties.Schema(
properties.Schema.LIST,
_('Desired IPs for this port.'),
default=[],
schema=properties.Schema(
properties.Schema.MAP,
schema={
FIXED_IP_SUBNET_ID: properties.Schema(
properties.Schema.STRING,
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use property %s.') % FIXED_IP_SUBNET,
previous_status=support.SupportStatus(
status=support.DEPRECATED,
                                version='2014.2'
)
),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
FIXED_IP_SUBNET: properties.Schema(
properties.Schema.STRING,
_('Subnet in which to allocate the IP address for '
'this port.'),
support_status=support.SupportStatus(version='2014.2'),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
),
FIXED_IP_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address desired in the subnet for this port.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
},
),
update_allowed=True
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('Security group IDs to associate with this port.'),
update_allowed=True
),
REPLACEMENT_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to respond to a stack-update for this resource. '
'REPLACE_ALWAYS will replace the port regardless of any '
'property changes. AUTO will update the existing port for any '
'changed update-allowed property.'),
default='AUTO',
constraints=[
constraints.AllowedValues(['REPLACE_ALWAYS', 'AUTO']),
],
update_allowed=True
),
}
    # NOTE(prazumovsky): properties_schema has been separated because some
    # of these properties are used by the server resource to create an
    # internal port.
extra_properties_schema = {
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the "port" object in the '
'creation request.'),
default={}
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('The administrative state of this port.'),
default=True,
update_allowed=True
),
MAC_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('MAC address to give to this port.'),
constraints=[
constraints.CustomConstraint('mac_addr')
]
),
ALLOWED_ADDRESS_PAIRS: properties.Schema(
properties.Schema.LIST,
_('Additional MAC/IP address pairs allowed to pass through the '
'port.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
ALLOWED_ADDRESS_PAIR_MAC_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('MAC address to allow through this port.'),
constraints=[
constraints.CustomConstraint('mac_addr')
]
),
ALLOWED_ADDRESS_PAIR_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address to allow through this port.'),
required=True,
constraints=[
constraints.CustomConstraint('net_cidr')
]
),
},
)
),
VNIC_TYPE: properties.Schema(
properties.Schema.STRING,
_('The vnic type to be bound on the neutron port. '
'To support SR-IOV PCI passthrough networking, you can request '
              'that the neutron port be realized as normal (virtual nic), '
'direct (pci passthrough), or macvtap '
'(virtual interface with a tap-like software interface). Note'
' that this only works for Neutron deployments that support '
'the bindings extension.'),
constraints=[
constraints.AllowedValues(['normal', 'direct', 'macvtap']),
],
support_status=support.SupportStatus(version='2015.1'),
update_allowed=True
),
PORT_SECURITY_ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
            _('Flag to enable/disable port security on the port. '
              'When this feature is disabled (set to False), there is no '
              'packet filtering, e.g. security groups and address pairs.'),
update_allowed=True,
support_status=support.SupportStatus(version='5.0.0')
),
QOS_POLICY: properties.Schema(
properties.Schema.STRING,
_('The name or ID of QoS policy to attach to this port.'),
constraints=[
constraints.CustomConstraint('neutron.qos_policy')
],
update_allowed=True,
support_status=support.SupportStatus(version='6.0.0')
),
}
attributes_schema = {
ADMIN_STATE_UP_ATTR: attributes.Schema(
_("The administrative state of this port."),
type=attributes.Schema.STRING
),
DEVICE_ID_ATTR: attributes.Schema(
_("Unique identifier for the device."),
type=attributes.Schema.STRING
),
DEVICE_OWNER: attributes.Schema(
_("Name of the network owning the port."),
type=attributes.Schema.STRING
),
FIXED_IPS_ATTR: attributes.Schema(
_("Fixed IP addresses."),
type=attributes.Schema.LIST
),
MAC_ADDRESS_ATTR: attributes.Schema(
_("MAC address of the port."),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_("Friendly name of the port."),
type=attributes.Schema.STRING
),
NETWORK_ID_ATTR: attributes.Schema(
_("Unique identifier for the network owning the port."),
type=attributes.Schema.STRING
),
SECURITY_GROUPS_ATTR: attributes.Schema(
_("A list of security groups for the port."),
type=attributes.Schema.LIST
),
STATUS: attributes.Schema(
_("The status of the port."),
type=attributes.Schema.STRING
),
TENANT_ID: attributes.Schema(
_("Tenant owning the port."),
type=attributes.Schema.STRING
),
ALLOWED_ADDRESS_PAIRS_ATTR: attributes.Schema(
_("Additional MAC/IP address pairs allowed to pass through "
"a port."),
type=attributes.Schema.LIST
),
SUBNETS_ATTR: attributes.Schema(
_("A list of all subnet attributes for the port."),
type=attributes.Schema.LIST
),
PORT_SECURITY_ENABLED_ATTR: attributes.Schema(
_("Port security enabled of the port."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.BOOLEAN
),
QOS_POLICY_ATTR: attributes.Schema(
_("The QoS policy ID attached to this port."),
type=attributes.Schema.STRING,
support_status=support.SupportStatus(version='6.0.0'),
),
}
# The network property can be updated, but only to switch between
# a name and ID for the same network, which is handled in _needs_update
update_exclude_properties = [NETWORK]
def __init__(self, name, definition, stack):
"""Overloaded init in case of merging two schemas to one."""
self.properties_schema.update(self.extra_properties_schema)
super(Port, self).__init__(name, definition, stack)
def translation_rules(self, props):
return [
properties.TranslationRule(
props,
properties.TranslationRule.REPLACE,
[self.NETWORK],
value_path=[self.NETWORK_ID]
),
properties.TranslationRule(
props,
properties.TranslationRule.REPLACE,
[self.FIXED_IPS, self.FIXED_IP_SUBNET],
value_name=self.FIXED_IP_SUBNET_ID
)
]
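    # A hedged illustration of these rules (the property values below are made
    # up): a template that still uses the deprecated keys, e.g.
    #
    #     network_id: my-net
    #     fixed_ips: [{subnet_id: my-subnet}]
    #
    # is translated so that 'my-net' is moved into the 'network' property and
    # 'my-subnet' into each fixed_ip's 'subnet' key before processing.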
def add_dependencies(self, deps):
super(Port, self).add_dependencies(deps)
# Depend on any Subnet in this template with the same
# network_id as this network_id.
# It is not known which subnet a port might be assigned
# to so all subnets in a network should be created before
# the ports in that network.
for res in six.itervalues(self.stack):
if res.has_interface('OS::Neutron::Subnet'):
dep_network = res.properties.get(subnet.Subnet.NETWORK)
network = self.properties[self.NETWORK]
if dep_network == network:
deps += (self, res)
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
self.client_plugin().resolve_network(props, self.NETWORK, 'network_id')
self._prepare_port_properties(props)
qos_policy = props.pop(self.QOS_POLICY, None)
if qos_policy:
props['qos_policy_id'] = self.client_plugin().get_qos_policy_id(
qos_policy)
port = self.client().create_port({'port': props})['port']
self.resource_id_set(port['id'])
def _prepare_port_properties(self, props, prepare_for_update=False):
for fixed_ip in props.get(self.FIXED_IPS, []):
for key, value in list(fixed_ip.items()):
if value is None:
fixed_ip.pop(key)
if fixed_ip.get(self.FIXED_IP_SUBNET):
self.client_plugin().resolve_subnet(
fixed_ip, self.FIXED_IP_SUBNET, 'subnet_id')
        # Delete empty MAC addresses so that Neutron validation code
        # doesn't fail, as it does not accept None values.
for pair in props.get(self.ALLOWED_ADDRESS_PAIRS, []):
if (self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS in pair and
pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS] is None):
del pair[self.ALLOWED_ADDRESS_PAIR_MAC_ADDRESS]
        # If 'security_groups' is not supplied, don't set the property when
        # creating; Neutron will then create the port with the 'default'
        # security group. If 'security_groups' is supplied as [], the port
        # is created without any security group.
if props.get(self.SECURITY_GROUPS) is not None:
props[self.SECURITY_GROUPS] = self.client_plugin(
).get_secgroup_uuids(props.get(self.SECURITY_GROUPS))
else:
            # Updates should behave the same way.
if prepare_for_update:
props[self.SECURITY_GROUPS] = self.client_plugin(
).get_secgroup_uuids(['default'])
if not props[self.FIXED_IPS]:
del(props[self.FIXED_IPS])
del(props[self.REPLACEMENT_POLICY])
def _show_resource(self):
return self.client().show_port(
self.resource_id)['port']
def check_create_complete(self, *args):
attributes = self._show_resource()
return self.is_built(attributes)
def handle_delete(self):
try:
self.client().delete_port(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def _resolve_attribute(self, name):
if name == self.SUBNETS_ATTR:
subnets = []
try:
fixed_ips = self._show_resource().get('fixed_ips', [])
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get('subnet_id')
if subnet_id:
subnets.append(self.client().show_subnet(
subnet_id)['subnet'])
except Exception as ex:
LOG.warn(_LW("Failed to fetch resource attributes: %s"), ex)
return
return subnets
return super(Port, self)._resolve_attribute(name)
def _needs_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=True):
if after_props.get(self.REPLACEMENT_POLICY) == 'REPLACE_ALWAYS':
raise exception.UpdateReplace(self.name)
# Switching between name and ID is OK, provided the value resolves
# to the same network. If the network changes, the port is replaced.
before_net = before_props.get(self.NETWORK)
after_net = after_props.get(self.NETWORK)
if None not in (before_net, after_net):
before_id = self.client_plugin().find_resourceid_by_name_or_id(
'network', before_net)
after_id = self.client_plugin().find_resourceid_by_name_or_id(
'network', after_net)
if before_id != after_id:
raise exception.UpdateReplace(self.name)
return super(Port, self)._needs_update(
after, before, after_props, before_props, prev_resource,
check_init_complete)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
props = self.prepare_update_properties(json_snippet)
self._prepare_port_properties(props, prepare_for_update=True)
qos_policy = props.pop(self.QOS_POLICY, None)
if self.QOS_POLICY in prop_diff:
props['qos_policy_id'] = self.client_plugin().get_qos_policy_id(
qos_policy) if qos_policy else None
LOG.debug('updating port with %s' % props)
self.client().update_port(self.resource_id, {'port': props})
def check_update_complete(self, *args):
attributes = self._show_resource()
return self.is_built(attributes)
def prepare_for_replace(self):
# if the port has not been created yet, return directly
if self.resource_id is None:
return
# store port fixed_ips for restoring after failed update
fixed_ips = self._show_resource().get('fixed_ips', [])
self.data_set('port_fip', jsonutils.dumps(fixed_ips))
# reset fixed_ips for this port by setting fixed_ips to []
props = {'fixed_ips': []}
self.client().update_port(self.resource_id, {'port': props})
def restore_prev_rsrc(self, convergence=False):
# In case of convergence, during rollback, the previous rsrc is
# already selected and is being acted upon.
prev_port = self if convergence else \
self.stack._backup_stack().resources.get(self.name)
fixed_ips = prev_port.data().get('port_fip', [])
props = {'fixed_ips': []}
if convergence:
existing_port, rsrc_owning_stack, stack = resource.Resource.load(
prev_port.context, prev_port.replaced_by, True,
prev_port.stack.cache_data
)
existing_port_id = existing_port.resource_id
else:
existing_port_id = self.resource_id
if existing_port_id:
# reset fixed_ips to [] for new resource
self.client().update_port(existing_port_id, {'port': props})
if fixed_ips and prev_port.resource_id:
# restore ip for old port
prev_port_props = {'fixed_ips': jsonutils.loads(fixed_ips)}
self.client().update_port(prev_port.resource_id,
{'port': prev_port_props})
def resource_mapping():
return {
'OS::Neutron::Port': Port,
}
|
|
""" Methods related to fussing with a catalog"""
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.cosmology import Planck15 as cosmo
from astropy.table import Table
from astropy import units
from frb.galaxies.defs import valid_filters
import warnings
from IPython import embed
def clean_heasarc(catalog):
"""
    Ensure RA/DEC are ra/dec in the Table
Table is modified in place
Args:
catalog (astropy.table.Table): Catalog generated by astroquery
"""
# RA/DEC
catalog.rename_column("RA", "ra")
catalog.rename_column("DEC", "dec")
for key in ['ra', 'dec']:
catalog[key].unit = units.deg
def clean_cat(catalog, pdict, fill_mask=None):
"""
    Convert the column names intrinsic to the slurped
    catalog into the names desired by the FRB surveys
Args:
catalog (astropy.table.Table): Catalog generated by astroquery
pdict (dict): Defines the original key and desired key
fill_mask (int or float, optional): Fill masked items with this value
Returns:
astropy.table.Table: modified catalog
"""
for key,value in pdict.items():
if value in catalog.keys():
catalog.rename_column(value, key)
# Mask
if fill_mask is not None:
if catalog.mask is not None:
catalog = catalog.filled(fill_mask)
return catalog
def sort_by_separation(catalog, coord, radec=('ra','dec'), add_sep=True):
"""
Sort an input catalog by separation from input coordinate
Args:
catalog (astropy.table.Table): Table of sources
coord (astropy.coordinates.SkyCoord): Reference coordinate for sorting
radec (tuple): Defines catalog columns holding RA, DEC (in deg)
add_sep (bool, optional): Add a 'separation' column with units of arcmin
Returns:
astropy.table.Table: Sorted catalog
"""
# Check
for key in radec:
if key not in catalog.keys():
print("RA/DEC key: {:s} not in your Table".format(key))
raise IOError("Try again..")
# Grab coords
cat_coords = SkyCoord(ra=catalog[radec[0]].data,
dec=catalog[radec[1]].data, unit='deg')
# Separations
seps = coord.separation(cat_coords)
isrt = np.argsort(seps)
# Add?
if add_sep:
catalog['separation'] = seps.to('arcmin')
# Sort
srt_catalog = catalog[isrt]
# Return
return srt_catalog
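# A minimal usage sketch (the coordinate below is illustrative):
#
#   frb_coord = SkyCoord(ra=10.0, dec=-30.0, unit='deg')
#   srt = sort_by_separation(catalog, frb_coord, radec=('ra', 'dec'))
#   # srt[0] is the nearest source; the added 'separation' column is in arcmin.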
def match_ids(IDs, match_IDs, require_in_match=True):
""" Match input IDs to another array of IDs (usually in a table)
Return the rows aligned with input IDs
Args:
IDs (ndarray): ID values to match
match_IDs (ndarray): ID values to match to
require_in_match (bool, optional): Require that each of the
input IDs occurs within the match_IDs
Returns:
        ndarray: Rows in match_IDs that match the input IDs, aligned; -1 where there is no match
"""
rows = -1 * np.ones_like(IDs).astype(int)
# Find which IDs are in match_IDs
in_match = np.in1d(IDs, match_IDs)
if require_in_match:
if np.sum(~in_match) > 0:
raise IOError("qcat.match_ids: One or more input IDs not in match_IDs")
rows[~in_match] = -1
#
IDs_inmatch = IDs[in_match]
# Find indices of input IDs in meta table -- first instance in meta only!
xsorted = np.argsort(match_IDs)
ypos = np.searchsorted(match_IDs, IDs_inmatch, sorter=xsorted)
indices = xsorted[ypos]
rows[in_match] = indices
return rows
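# A small worked example of the alignment behaviour (arrays are hypothetical):
#
#   IDs = np.array([3, 1])
#   match_IDs = np.array([1, 2, 3])
#   match_ids(IDs, match_IDs)   # -> array([2, 0]); match_IDs[[2, 0]] == IDs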
def summarize_catalog(frbc, catalog, summary_radius, photom_column, magnitude):
"""
Generate simple text describing the sources from
an input catalog within a given radius
Args:
frbc: FRB Candidate object
catalog (astropy.table.Table): Catalog table
summary_radius (Angle): Radius to summarize on
photom_column (str): Column specifying which flux to work on
magnitude (bool): Is the flux a magnitude?
Returns:
list: List of comments on the catalog
"""
# Init
summary_list = []
coords = SkyCoord(ra=catalog['ra'], dec=catalog['dec'], unit='deg')
# Find all within the summary radius
seps = frbc['coord'].separation(coords)
in_radius = seps < summary_radius
# Start summarizing
summary_list += ['{:s}: There are {:d} source(s) within {:0.1f} arcsec'.format(
catalog.meta['survey'], np.sum(in_radius), summary_radius.to('arcsec').value)]
# If any found
if np.any(in_radius):
# Brightest
if magnitude:
brightest = np.argmin(catalog[photom_column][in_radius])
else:
brightest = np.argmax(catalog[photom_column][in_radius])
summary_list += ['{:s}: The brightest source has {:s} of {:0.2f}'.format(
catalog.meta['survey'], photom_column,
catalog[photom_column][in_radius][brightest])]
# Closest
closest = np.argmin(seps[in_radius])
        summary_list += ['{:s}: The closest source is at separation {:0.2f} arcsec and has {:s} of {:0.2f}'.format(
            catalog.meta['survey'],
            seps[in_radius][closest].to('arcsec').value,
            photom_column, catalog[photom_column][in_radius][closest])]
# Return
return summary_list
def xmatch_catalogs(cat1, cat2, skydist = 5*units.arcsec,
RACol1 = "ra", DecCol1 = "dec",
RACol2 = "ra", DecCol2 = "dec"):
"""
Cross matches two astronomical catalogs and returns
the matched tables.
Args:
cat1, cat2: astropy Tables
Two tables with sky coordinates to be
matched.
skydist: astropy Quantity, optional
Maximum separation for a valid match.
5 arcsec by default.
RACol1, RACol2: str, optional
Names of columns in cat1 and cat2
respectively that contain RA in degrees.
DecCol1, DecCol2: str, optional
Names of columns in cat1 and cat2
respectively that contain Dec in degrees.
returns:
match1, match2: astropy Table
Tables of matched rows from cat1 and cat2.
"""
# TODO add assertion statements to test input validity.
    # Get coordinates
cat1_coord = SkyCoord(cat1[RACol1], cat1[DecCol1], unit = "deg")
cat2_coord = SkyCoord(cat2[RACol2], cat2[DecCol2], unit = "deg")
# Match 2D
idx, d2d, _ = cat1_coord.match_to_catalog_sky(cat2_coord)
# Get matched tables
match1 = cat1[d2d < skydist]
match2 = cat2[idx[d2d < skydist]]
return match1, match2
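# A minimal usage sketch (table contents and separations are made up):
#
#   cat1 = Table({'ra': [10.0], 'dec': [-30.0]})
#   cat2 = Table({'ra': [10.0002], 'dec': [-30.0001]})
#   m1, m2 = xmatch_catalogs(cat1, cat2, skydist=5 * units.arcsec)
#   # m1 and m2 are row-aligned subsets of cat1 and cat2 within 5 arcsec.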
def _detect_mag_cols(photometry_table):
"""
Searches the column names of a
photometry table for columns with mags.
Args:
photometry_table: astropy Table
A table containing photometric
            data from a catalog.
Returns:
mag_colnames: list
A list of column names with magnitudes
mag_err_colnames: list
A list of column names with errors
in the magnitudes.
"""
assert type(photometry_table)==Table, "Photometry table must be an astropy Table instance."
allcols = photometry_table.colnames
photom_cols = np.array(valid_filters)
photom_errcols = np.array([filt+"_err" for filt in photom_cols])
photom_cols = photom_cols[[elem in allcols for elem in photom_cols]]
photom_errcols = photom_errcols[[elem in allcols for elem in photom_errcols]]
return photom_cols.tolist(), photom_errcols.tolist()
def mag_from_flux(flux, flux_err=None):
"""
Get the AB magnitude from a flux
Parameters
----------
flux : Quantity
Flux
flux_err : Quantity
Error in flux (optional)
Returns
-------
mag, mag_err : float, float
AB magnitude and its error (if flux_err is given)
AB magnitude and `None` (if flux_err is `None`)
"""
# convert flux to Jansky
flux_Jy = flux.to('Jy').value
# get mag
mag_AB = -2.5*np.log10(flux_Jy) + 8.9
# get error
if flux_err is not None:
flux_Jy_err = flux_err.to('Jy').value
err_mag2 = (-2.5/np.log(10.) / flux_Jy)**2 * flux_Jy_err**2
err_mag = np.sqrt(err_mag2)
else:
err_mag = None
return mag_AB, err_mag
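# A quick worked example (values are illustrative, not from the codebase):
# a 1 mJy source has m_AB = -2.5 * log10(1e-3 Jy) + 8.9 = 16.4, and a flux at
# the AB zero point (~3631 Jy) gives m_AB ~ 0.
#
#   mag, mag_err = mag_from_flux(1.0 * units.mJy, 0.1 * units.mJy)  # ~16.4, ~0.11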
def _mags_to_flux(mag:float, zpt_flux:units.Quantity=3630.7805*units.Jy, mag_err:float=None)->float:
"""
Convert a magnitude to mJy
Args:
mag (column): magnitude
zpt_flux (Quantity, optional): Zero point flux for the magnitude.
Assumes AB mags by default (i.e. zpt_flux = 3630.7805 Jy).
mag_err (float, optional): uncertainty in magnitude
Returns:
flux (float): flux in mJy
flux_err (float): if mag_err is given, a corresponding
flux_err is returned.
"""
# Data validation
#assert np.isreal(mag), "Mags must be floats."
#assert (np.isreal(mag_err)) + (mag_err==None), "Mag errs must be floats"
assert (type(zpt_flux) == units.Quantity)*(zpt_flux.decompose().unit == units.kg/units.s**2), "zpt_flux units should be Jy or with dimensions kg/s^2."
flux = mag.copy()
    # Convert fluxes
badmags = mag<-10
flux[badmags] = -99.
flux[~badmags] = zpt_flux.value*10**(-mag[~badmags]/2.5)
if mag_err is not None:
flux_err = mag_err.copy()
baderrs = mag_err < 0
flux_err[baderrs] = -99.
flux_err[~baderrs] = flux[~baderrs]*(10**(mag_err[~baderrs]/2.5)-1)
return flux, flux_err
else:
return flux
def convert_mags_to_flux(photometry_table, fluxunits='mJy'):
"""
Takes a table of photometric measurements
in mags and converts it to flux units.
Args:
photometry_table (astropy.table.Table):
A table containing photometric
            data from a catalog.
fluxunits (str, optional):
Flux units to convert the magnitudes
to, as parsed by astropy.units. Default is mJy.
Returns:
fluxtable: astropy Table
`photometry_table` but the magnitudes
are converted to fluxes.
"""
fluxtable = photometry_table.copy()
mag_cols, mag_errcols = _detect_mag_cols(fluxtable)
convert = units.Jy.to(fluxunits)
#If there's a "W" in the column name, it's from WISE
# TODO -- We need to deal with this hack
wisecols = sorted([col for col in mag_cols if ("W" in col and 'WFC3' not in col)])
wise_errcols = sorted([col for col in mag_errcols if ("W" in col and 'WFC3' not in col)])
#Similarly define vista cols
vistacols = sorted([col for col in mag_cols if "VISTA" in col])
vista_errcols = sorted([col for col in mag_errcols if "VISTA" in col])
fnu0 = {'WISE_W1':309.54,
'WISE_W2':171.787,
'WISE_W3':31.674,
'WISE_W4':8.363,
'VISTA_Y':2087.32,
'VISTA_J':1554.03,
'VISTA_H':1030.40,
'VISTA_Ks':674.83} #http://wise2.ipac.caltech.edu/docs/release/allsky/expsup/sec4_4h.html#conv2flux
#http://svo2.cab.inta-csic.es/svo/theory/fps3/index.php?mode=browse&gname=Paranal&gname2=VISTA
for mag,err in zip(wisecols+vistacols,wise_errcols+vista_errcols):
flux, flux_err = _mags_to_flux(photometry_table[mag], fnu0[mag]*units.Jy, photometry_table[err])
badflux = flux == -99.
fluxtable[mag][badflux] = flux[badflux]
fluxtable[mag][~badflux] = flux[~badflux]*convert
#if flux != -99.:
# fluxtable[mag] = flux*convert
#else:
# fluxtable[mag] = flux
baderr = flux_err == -99.0
fluxtable[err][baderr] = flux_err[baderr]
fluxtable[err][~baderr] = flux_err[~baderr]*convert
#if flux_err != -99.:
# fluxtable[err] = flux_err*convert
#else:
# fluxtable[err] = flux_err
if "W" in mag and "WISE" not in mag and 'WFC3' not in mag:
fluxtable.rename_column(mag,mag.replace("W","WISE"))
fluxtable.rename_column(err,err.replace("W","WISE"))
#For all other photometry:
other_mags = np.setdiff1d(mag_cols, wisecols+vistacols)
other_errs = np.setdiff1d(mag_errcols, wise_errcols+vista_errcols)
for mag, err in zip(other_mags, other_errs):
flux, flux_err = _mags_to_flux(photometry_table[mag], mag_err = photometry_table[err])
badflux = flux == -99.
fluxtable[mag][badflux] = flux[badflux]
fluxtable[mag][~badflux] = flux[~badflux]*convert
#if flux != -99.:
# fluxtable[mag] = flux*convert
#else:
# fluxtable[mag] = flux
baderr = flux_err == -99.0
fluxtable[err][baderr] = flux_err[baderr]
fluxtable[err][~baderr] = flux_err[~baderr]*convert
# Upper limits -- Assume to have been recorded as 3 sigma
# Arbitrarily set the value to 1/3 of the error (could even set to 0)
uplimit = photometry_table[err] == 999.
fluxtable[err][uplimit] = fluxtable[mag][uplimit] / 3.
fluxtable[mag][uplimit] = fluxtable[mag][uplimit] / 9.
return fluxtable
'''
TODO: Write this function once CDS starts working again (through astroquery)
def xmatch_gaia(catalog,max_sep = 5*u.arcsec,racol='ra',deccol='dec'):
"""
Cross match against Gaia DR2
and return the cross matched table.
Args:
max_sep (Angle): maximum separation to be
considered a valid match.
Returns:
xmatch_tab (Table): a table with corss matched
entries.
"""
'''
|
|
from decimal import Decimal as D
import hmac
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db import transaction
from django.db.models import Sum
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from treebeard.mp_tree import MP_Node
from accounts import exceptions
class ActiveAccountManager(models.Manager):
def get_query_set(self):
now = timezone.now()
qs = super(ActiveAccountManager, self).get_query_set()
return qs.filter(
models.Q(start_date__lte=now) |
models.Q(start_date=None)).filter(
models.Q(end_date__gte=now) |
models.Q(end_date=None))
class ExpiredAccountManager(models.Manager):
def get_query_set(self):
now = timezone.now()
qs = super(ExpiredAccountManager, self).get_query_set()
return qs.filter(end_date__lt=now)
class AccountType(MP_Node):
code = models.CharField(max_length=128, unique=True, null=True, blank=True)
name = models.CharField(max_length=128)
class Meta:
abstract = True
def __unicode__(self):
return self.name
@property
def full_name(self):
names = [a.name for a in self.get_ancestors()]
names.append(self.name)
return " / ".join(names)
class Account(models.Model):
# Metadata
name = models.CharField(
max_length=128, unique=True, null=True, blank=True)
description = models.TextField(
null=True, blank=True, help_text=_(
"This text is shown to customers during checkout"))
account_type = models.ForeignKey(
'AccountType', related_name='accounts', null=True)
# Some accounts are not linked to a specific user but are activated by
# entering a code at checkout.
code = models.CharField(
max_length=128, unique=True, null=True, blank=True)
# Each account can have multiple users who can use it for transactions. In
# most cases, there will only be one user and so we use a 'primary'
    # user FK for this scenario for simplicity.
#
# In other circumstances, there will be a group of users who can access the
# account - and so we use 'secondary' users for this purpose.
#
# As a rule of thumb, you don't normally need to use both primary_user and
# secondary_users within the same project - just one or the other.
primary_user = models.ForeignKey('auth.User', related_name="accounts",
null=True, blank=True,
on_delete=models.SET_NULL)
secondary_users = models.ManyToManyField('auth.User', blank=True)
    # Track the status of an account - this is often used so that expired
    # accounts can have their money transferred back to some parent account
    # and then be closed.
OPEN, FROZEN, CLOSED = 'Open', 'Frozen', 'Closed'
status = models.CharField(max_length=32, default=OPEN)
# This is the limit to which the account can go into debt. The default is
# zero which means the account cannot run a negative balance. A 'source'
# account will have no credit limit meaning it can transfer funds to other
# accounts without limit.
credit_limit = models.DecimalField(decimal_places=2, max_digits=12,
default=D('0.00'), null=True,
blank=True)
# For performance, we keep a cached balance. This can always be
# recalculated from the account transactions.
balance = models.DecimalField(decimal_places=2, max_digits=12,
default=D('0.00'), null=True)
    # Accounts can have a date range to indicate when they are 'active'. Note
# that these dates are ignored when creating a transfer. It is up to your
# client code to use them to enforce business logic.
start_date = models.DateTimeField(null=True, blank=True)
end_date = models.DateTimeField(null=True, blank=True)
# Accounts are sometimes restricted to only work on a specific range of
# products. This is the only link with Oscar.
product_range = models.ForeignKey('offer.Range', null=True, blank=True)
# Allow accounts to be restricted for products only (ie can't be used to
# pay for shipping)
can_be_used_for_non_products = models.BooleanField(
default=True,
help_text=("Whether this account can be used to pay for "
"shipping and other charges"))
date_created = models.DateTimeField(auto_now_add=True)
objects = models.Manager()
active = ActiveAccountManager()
expired = ExpiredAccountManager()
class Meta:
abstract = True
def __unicode__(self):
if self.code:
return self.code
if self.name:
return self.name
return 'Anonymous'
def is_active(self):
if self.start_date is None and self.end_date is None:
return True
now = timezone.now()
if self.start_date and self.end_date is None:
return now >= self.start_date
if self.start_date is None and self.end_date:
return now < self.end_date
return self.start_date <= now < self.end_date
def save(self, *args, **kwargs):
if self.code:
self.code = self.code.upper()
# Ensure the balance is always correct when saving
self.balance = self._balance()
return super(Account, self).save(*args, **kwargs)
def _balance(self):
aggregates = self.transactions.aggregate(sum=Sum('amount'))
sum = aggregates['sum']
return D('0.00') if sum is None else sum
def num_transactions(self):
return self.transactions.all().count()
@property
def has_credit_limit(self):
return self.credit_limit is not None
def is_debit_permitted(self, amount):
"""
        Test if a debit for the passed amount is permitted
"""
if self.amount_available is None:
return True
return amount <= self.amount_available
@property
def amount_available(self):
if self.credit_limit is None:
return None
return self.balance + self.credit_limit
def permitted_allocation(self, basket, shipping_total, order_total):
"""
Return max permitted allocation from this account to pay for the passed
basket
:basket: The basket being paid for
:shipping_total: The cost of shipping
:order_total: The order total (which includes the shipping total)
"""
if not self.can_be_used_for_non_products:
total = order_total - shipping_total
else:
total = order_total
if not self.product_range:
return min(total, self.balance)
range_total = D('0.00')
for line in basket.all_lines():
if self.product_range.contains_product(line.product):
range_total += line.line_price_incl_tax_and_discounts
if self.can_be_used_for_non_products:
range_total += shipping_total
return min(range_total, self.balance)
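    # A hedged numeric illustration of the rules above (values are made up):
    # with order_total=50, shipping_total=10, balance=30 and
    # can_be_used_for_non_products=False, an account with no product_range may
    # allocate min(50 - 10, 30) = 30; with a product_range covering lines worth
    # 15, it may allocate min(15, 30) = 15.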
def is_open(self):
return self.status == self.__class__.OPEN
def is_closed(self):
return self.status == self.__class__.CLOSED
def is_frozen(self):
return self.status == self.__class__.FROZEN
@property
def is_editable(self):
"""
        Test whether this account can be edited within the dashboard
"""
return self.code is not None
def can_be_authorised_by(self, user=None):
"""
Test whether the passed user can authorise a transfer from this account
"""
if user is None:
return True
if self.primary_user:
return user == self.primary_user
secondary_users = self.secondary_users.all()
if secondary_users.count() > 0:
return user in secondary_users
return True
def days_remaining(self, from_date=None):
if self.end_date is None:
return None
if from_date is None:
from_date = timezone.now()
if from_date > self.end_date:
return 0
return (self.end_date - from_date).days
def close(self):
# Only account with zero balance can be closed
if self.balance > 0:
raise exceptions.AccountNotEmpty()
self.status = self.__class__.CLOSED
self.save()
def as_dict(self):
data = {
'code': self.code,
'start_date': '',
'end_date': '',
'status': self.status,
'balance': "%.2f" % self.balance,
'redemptions_url': reverse('account-redemptions',
kwargs={'code': self.code}),
'refunds_url': reverse('account-refunds',
kwargs={'code': self.code})}
if self.start_date:
data['start_date'] = self.start_date.isoformat()
if self.end_date:
data['end_date'] = self.end_date.isoformat()
return data
class PostingManager(models.Manager):
"""
Custom manager to provide a new 'create' method to create a new transfer.
    Apparently, finance people refer to "posting a transaction", hence the
    name of this manager.
"""
def create(self, source, destination, amount, parent=None,
user=None, merchant_reference=None, description=None):
# Write out transfer (which involves multiple writes). We use a
# database transaction to ensure that all get written out correctly.
self.verify_transfer(source, destination, amount, user)
with transaction.commit_on_success():
transfer = self.get_query_set().create(
source=source,
destination=destination,
amount=amount,
parent=parent,
user=user,
merchant_reference=merchant_reference,
description=description)
# Create transaction records for audit trail
transfer.transactions.create(
account=source, amount=-amount)
transfer.transactions.create(
account=destination, amount=amount)
# Update the cached balances on the accounts
source.save()
destination.save()
return self._wrap(transfer)
def _wrap(self, obj):
# Dumb method that is here only so that it can be mocked to test the
# transaction behaviour.
return obj
def verify_transfer(self, source, destination, amount, user=None):
"""
Test whether the proposed transaction is permitted. Raise an exception
if not.
"""
if amount <= 0:
raise exceptions.InvalidAmount("Debits must use a positive amount")
if not source.is_open():
raise exceptions.ClosedAccount("Source account has been closed")
if not source.can_be_authorised_by(user):
raise exceptions.AccountException(
"This user is not authorised to make transfers from "
"this account")
if not destination.is_open():
raise exceptions.ClosedAccount(
"Destination account has been closed")
if not source.is_debit_permitted(amount):
msg = "Unable to debit %.2f from account #%d:"
raise exceptions.InsufficientFunds(
msg % (amount, source.id))
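    # A minimal usage sketch, assuming concrete (non-abstract) subclasses of
    # the Account and Transfer models defined here (names are illustrative):
    #
    #   transfer = Transfer.objects.create(
    #       source=source_account, destination=destination_account,
    #       amount=D('10.00'), user=request.user)
    #
    # which debits the source, credits the destination and refreshes both
    # cached balances inside a single database transaction.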
class Transfer(models.Model):
"""
A transfer of funds between two accounts.
This object records the meta-data about the transfer such as a reference
    number for it and who was the authorisor. The financial details are held
    within the transactions. Each transfer links to TWO account transactions.
"""
# We generate a reference for each transaction to avoid passing around
# primary keys
reference = models.CharField(max_length=64, unique=True, null=True)
source = models.ForeignKey('accounts.Account',
related_name='source_transfers')
destination = models.ForeignKey('accounts.Account',
related_name='destination_transfers')
amount = models.DecimalField(decimal_places=2, max_digits=12)
# We keep track of related transfers (eg multiple refunds of the same
# redemption) using a parent system
parent = models.ForeignKey('self', null=True,
related_name='related_transfers')
# Optional meta-data about transfer
merchant_reference = models.CharField(max_length=128, null=True)
description = models.CharField(max_length=256, null=True)
# We record who the user was who authorised this transaction. As
# transactions should never be deleted, we allow this field to be null and
# also record some audit information.
user = models.ForeignKey('auth.User', related_name="transfers",
null=True, on_delete=models.SET_NULL)
username = models.CharField(max_length=128)
date_created = models.DateTimeField(auto_now_add=True)
# Use a custom manager that extends the create method to also create the
# account transactions.
objects = PostingManager()
def __unicode__(self):
return self.reference
class Meta:
abstract = True
ordering = ('-date_created',)
def delete(self, *args, **kwargs):
raise RuntimeError("Transfers cannot be deleted")
def save(self, *args, **kwargs):
# Store audit information about authorising user (if one is set)
if self.user:
self.username = self.user.username
# We generate a transaction reference using the PK of the transfer so
# we save the transfer first
super(Transfer, self).save(*args, **kwargs)
if not self.reference:
self.reference = self._generate_reference()
super(Transfer, self).save()
def _generate_reference(self):
obj = hmac.new(key=settings.SECRET_KEY,
msg=unicode(self.id))
return obj.hexdigest().upper()
@property
def authorisor_username(self):
if self.user:
return self.user.username
return self.username
def max_refund(self):
"""
Return the maximum amount that can be refunded against this transfer
"""
aggregates = self.related_transfers.filter(
source=self.destination).aggregate(sum=Sum('amount'))
already_refunded = aggregates['sum']
if already_refunded is None:
return self.amount
return self.amount - already_refunded
def as_dict(self):
return {
'reference': self.reference,
'source_code': self.source.code,
'source_name': self.source.name,
'destination_code': self.destination.code,
'destination_name': self.destination.name,
'amount': "%.2f" % self.amount,
'available_to_refund': "%.2f" % self.max_refund(),
'datetime': self.date_created.isoformat(),
'merchant_reference': self.merchant_reference,
'description': self.description,
'reverse_url': reverse(
'transfer-reverse',
kwargs={'reference': self.reference}),
'refunds_url': reverse(
'transfer-refunds',
kwargs={'reference': self.reference})}
class Transaction(models.Model):
# Every transfer of money should create two rows in this table.
# (a) the debit from the source account
# (b) the credit to the destination account
transfer = models.ForeignKey('accounts.Transfer',
related_name="transactions")
account = models.ForeignKey('accounts.Account',
related_name='transactions')
# The sum of this field over the whole table should always be 0.
# Credits should be positive while debits should be negative
amount = models.DecimalField(decimal_places=2, max_digits=12)
date_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u"Ref: %s, amount: %.2f" % (
self.transfer.reference, self.amount)
class Meta:
unique_together = ('transfer', 'account')
abstract = True
def delete(self, *args, **kwargs):
raise RuntimeError("Transactions cannot be deleted")
class IPAddressRecord(models.Model):
ip_address = models.IPAddressField(_("IP address"), unique=True)
total_failures = models.PositiveIntegerField(default=0)
consecutive_failures = models.PositiveIntegerField(default=0)
date_created = models.DateTimeField(auto_now_add=True)
date_last_failure = models.DateTimeField(null=True)
# Above this threshold, you have to wait for the cooling off period
# between attempts
FREEZE_THRESHOLD = 3
# Above this threshold, you are blocked indefinitely
BLOCK_THRESHOLD = 10
# Blocking period (in seconds)
COOLING_OFF_PERIOD = 5 * 60
class Meta:
abstract = True
verbose_name = _("IP address record")
verbose_name_plural = _("IP address records")
def increment_failures(self):
self.total_failures += 1
self.consecutive_failures += 1
self.date_last_failure = timezone.now()
self.save()
def increment_blocks(self):
self.total_blocks += 1
self.save()
def reset(self):
self.consecutive_failures = 0
self.save()
def is_blocked(self):
return (self.is_temporarily_blocked() or
self.is_permanently_blocked())
def is_temporarily_blocked(self):
if self.consecutive_failures < self.FREEZE_THRESHOLD:
return False
        # If you've had several consecutive failures, we impose a minimum
# period between each allowed request.
now = timezone.now()
time_since_last_failure = now - self.date_last_failure
return time_since_last_failure.seconds < self.COOLING_OFF_PERIOD
def is_permanently_blocked(self):
return self.total_failures > self.BLOCK_THRESHOLD
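    # A sketch of the intended flow (the caller code is hypothetical, not part
    # of this module):
    #
    #   record.increment_failures()    # on each failed redemption attempt
    #   if record.is_blocked():
    #       ...reject the request...   # frozen during COOLING_OFF_PERIOD, or
    #                                  # blocked for good past BLOCK_THRESHOLD
    #   record.reset()                 # on a successful attempt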
|
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulates all partitioning logic used by VCF to BigQuery pipeline.
VariantSharding class basically returns an index for a given
(reference_name, pos) pair. The main utilization of this class is in
partition_for() function used by DataFlow pipeline.
This class has 2 main operating modes:
1) Automatic: it will partition variants based on their reference_name
2) Based on user provided config file: Users can parition output tables as
they wish by providing a partition config file, example config files are
available at gcp_variant_transforms/testing/data/misc/*.yaml
"""
from collections import defaultdict
import re
from typing import List, Optional # pylint: disable=unused-import
from apache_beam.io.filesystems import FileSystems
import intervaltree
import yaml
from gcp_variant_transforms.libs import genomic_region_parser
# At most 100 shards (distinct tables) can be set as output of VariantTransform.
_MAX_NUM_SHARDS = 100
# Each shard can contain at most 64 regions.
_MAX_NUM_REGIONS = 64
# A special literal for identifying residual partition's region name.
_RESIDUAL_REGION_LITERAL = 'residual'
_UNDEFINED_SHARD_INDEX = -1
_TABLE_NAME_REGEXP = re.compile(r'^[a-zA-Z0-9_]*$')
# yaml config file constants
_OUTPUT_TABLE = 'output_table'
_TABLE_NAME_SUFFIX = 'table_name_suffix'
_REGIONS = 'regions'
_PARTITION_RANGE_END = 'partition_range_end'
class _ChromosomeSharder():
"""Assigns shard indices to multiple regions inside a chromosome.
This class logic is implemented using an interval tree, each region is
considered as an interval and will be added to the interval tree. Note all
regions must be pairwise disjoint, i.e. no overlapping interval is accepted.
"""
def __init__(self):
# Each instance contains multiple regions of one chromosome.
self._interval_tree = intervaltree.IntervalTree()
def add_region(self, start, end, shard_index):
if start < 0:
raise ValueError(
'Start position on a region cannot be negative: {}'.format(start))
if end <= start:
raise ValueError('End position must be larger than start position: {} '
'vs {}'.format(end, start))
if shard_index < 0:
raise ValueError(
'Index of a region cannot be negative {}'.format(shard_index))
if self._interval_tree.overlaps_range(start, end):
raise ValueError('Wrong sharding config file, regions must be unique in '
'config file: {}-{}'.format(start, end))
# If everything goes well we add the new region to the interval tree.
self._interval_tree.addi(start, end, shard_index)
def get_shard_index(self, pos=0):
"""Finds an interval that pos falls into and return its index.
If no interval is found returns _UNDEFINED_SHARD_INDEX.
"""
matched_regions = self._interval_tree.search(pos)
    # Ensure that at most one region matches the given position.
assert len(matched_regions) <= 1
if len(matched_regions) == 1:
return next(iter(matched_regions)).data
else:
return _UNDEFINED_SHARD_INDEX
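# Illustrative sketch (not part of the original module): minimal standalone use
# of _ChromosomeSharder, mirroring how VariantSharding (below) populates it for
# interval-based configs. The region boundaries here are made-up values.
def _example_chromosome_sharder():
  sharder = _ChromosomeSharder()
  # e.g. "chr1:0-1,000,000" -> shard 0 and "chr1:1,000,000-2,000,000" -> shard 1
  sharder.add_region(0, 1000000, 0)
  sharder.add_region(1000000, 2000000, 1)
  assert sharder.get_shard_index(500) == 0
  assert sharder.get_shard_index(1500000) == 1
  # Positions outside every registered region map to _UNDEFINED_SHARD_INDEX.
  assert sharder.get_shard_index(5000000) == _UNDEFINED_SHARD_INDEX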
class VariantSharding():
"""Sharding variants based on their reference_name [and position]."""
def __init__(self, config_file_path=None):
if not config_file_path or not config_file_path.strip():
      raise ValueError('You must provide a path to a yaml config file.')
self._use_interval_tree = self._validate_config_and_check_intervals(
config_file_path)
    # The residual partition will contain all remaining variants that do not
    # match any other partition.
self._num_shards = 0
self._residual_index = _UNDEFINED_SHARD_INDEX
self._should_keep_residual = False
# If none of the regions contain interval (such as "chr1:2000-3000") then we
# don't need interval trees and shard index only depends on CHROM value.
if self._use_interval_tree:
self._region_to_shard = defaultdict(_ChromosomeSharder)
else:
self._region_to_shard = {}
self._table_name_suffixes = []
self._partition_range_end = []
self._parse_config(config_file_path)
assert len(self._table_name_suffixes) == len(self._partition_range_end)
def _is_residual_shard(self, regions):
# type: (List[str]) -> bool
return (len(regions) == 1 and
regions[0].strip() == _RESIDUAL_REGION_LITERAL)
def _validate_config_and_check_intervals(self, config_file_path):
# type: (str) -> bool
"""Validates the config file and finds if any region contains interval.
Args:
config_file_path: name of the input partition_config file.
Raises:
A ValueError if any of the expected config formats are violated.
Returns:
True if any region is interval, for example "chr1:1000-2000" is interval.
False if all regions are simple CHROM value, for example "chr1".
"""
has_any_interval = False
with FileSystems.open(config_file_path, 'r') as f:
try:
shards = yaml.load(f, Loader=yaml.FullLoader)
except yaml.YAMLError as e:
raise ValueError('Invalid yaml file: {}'.format(str(e))) from e
if len(shards) > _MAX_NUM_SHARDS:
raise ValueError(
'There can be at most {} output tables but given config file '
'contains {}'.format(_MAX_NUM_SHARDS, len(shards)))
if not shards:
raise ValueError('At least one output table is needed in config file.')
existing_suffixes = set()
existing_ref_names = set()
residual_partition_index = _UNDEFINED_SHARD_INDEX
for item in shards:
output_table = item.get(_OUTPUT_TABLE, None)
if output_table is None:
        raise ValueError('Wrong sharding config file, {} field missing.'.format(
_OUTPUT_TABLE))
# Validate table_name_suffix
table_name_suffix = output_table.get(_TABLE_NAME_SUFFIX)
if not table_name_suffix:
raise ValueError('Wrong sharding config file, {} field missing.'.format(
_TABLE_NAME_SUFFIX))
table_name_suffix = table_name_suffix.strip()
if not table_name_suffix:
raise ValueError(
'Wrong sharding config file, table_name_suffix can not be empty.')
if not _TABLE_NAME_REGEXP.match(table_name_suffix):
raise ValueError(
'Wrong sharding config file, BigQuery table name can only contain '
'letters (upper or lower case), numbers, and underscores.')
if table_name_suffix in existing_suffixes:
raise ValueError('Wrong sharding config file, table name suffixes must '
'be unique, "{}" is not.'.format(table_name_suffix))
existing_suffixes.add(table_name_suffix)
# Validate regions
regions = output_table.get(_REGIONS, None)
if regions is None:
raise ValueError('Wrong sharding config file, {} field missing.'.format(
_REGIONS))
if len(regions) > _MAX_NUM_REGIONS:
raise ValueError('Wrong sharding config file, at most {} CHROM '
                         'values per output table are allowed: {}'.format(
_MAX_NUM_REGIONS, regions))
if self._is_residual_shard(regions):
if residual_partition_index != _UNDEFINED_SHARD_INDEX:
raise ValueError('Wrong sharding config file, there can be only '
'one residual output table.')
residual_partition_index += 1
else:
for r in regions:
ref_name, start, end = genomic_region_parser.parse_genomic_region(r)
if (start != genomic_region_parser._DEFAULT_START_POSITION or
end != genomic_region_parser._DEFAULT_END_POSITION):
has_any_interval = True
else:
if not ref_name:
raise ValueError('Wrong sharding config file, reference_name can '
'not be empty string: {}'.format(r))
if ref_name in existing_ref_names:
raise ValueError('Wrong sharding config file, regions must be '
'unique in config file: {}'.format(ref_name))
existing_ref_names.add(ref_name)
# Validate partition_range_end
partition_range_end = output_table.get(_PARTITION_RANGE_END, None)
if not partition_range_end:
raise ValueError('Wrong sharding config file, {} field missing.'.format(
_PARTITION_RANGE_END))
if not isinstance(partition_range_end, int):
try:
partition_range_end = genomic_region_parser.parse_comma_sep_int(
partition_range_end)
except Exception as e:
raise ValueError(
'Wrong sharding config file, each output table '
'needs an integer for partition_range_end > 0.') from e
if partition_range_end <= 0:
raise ValueError('Wrong sharding config file, each output table '
'needs an integer for partition_range_end > 0.')
return has_any_interval
def _parse_config(self, config_file_path):
# type: (str) -> None
"""Parses the given partitioning config file.
Args:
config_file_path: name of the input partition_config file.
"""
with FileSystems.open(config_file_path, 'r') as f:
try:
shards = yaml.load(f, Loader=yaml.FullLoader)
except yaml.YAMLError as e:
raise ValueError('Invalid yaml file: {}'.format(str(e))) from e
self._num_shards = len(shards)
for shard_index in range(self._num_shards):
output_table = shards[shard_index].get(_OUTPUT_TABLE)
# Store table_name_suffix
self._table_name_suffixes.insert(
shard_index, output_table.get(_TABLE_NAME_SUFFIX).strip())
# Store regions
regions = output_table.get(_REGIONS, None)
if self._is_residual_shard(regions):
self._residual_index = shard_index
self._should_keep_residual = True
else:
for r in regions:
ref_name, start, end = genomic_region_parser.parse_genomic_region(r)
if self._use_interval_tree:
self._region_to_shard[ref_name].add_region(start, end, shard_index)
else:
self._region_to_shard[ref_name] = shard_index
# Store partition_range_end
partition_range_end = output_table.get(_PARTITION_RANGE_END)
if not isinstance(partition_range_end, int):
partition_range_end = genomic_region_parser.parse_comma_sep_int(
partition_range_end)
self._partition_range_end.insert(shard_index, partition_range_end)
if self._residual_index == _UNDEFINED_SHARD_INDEX:
# We add an extra dummy partition for residuals.
# Note, here self._should_keep_residual is False.
self._residual_index = self._num_shards
self._num_shards += 1
def get_shard_index(self, chrom, pos=None):
    # type: (str, Optional[int]) -> int
    """Returns the output table index for the given chrom and position."""
    # pos may be omitted when sharding is based on CHROM values only.
    if not chrom or (pos is not None and pos < 0):
raise ValueError('Cannot shard given {}:{}'.format(chrom, pos))
shard_index = _UNDEFINED_SHARD_INDEX
if self._use_interval_tree:
sharder = self._region_to_shard.get(chrom, None)
if sharder:
shard_index = sharder.get_shard_index(pos)
else:
shard_index = self._region_to_shard.get(chrom, _UNDEFINED_SHARD_INDEX)
if shard_index == _UNDEFINED_SHARD_INDEX:
return self._residual_index
else:
return shard_index
def get_residual_index(self):
return self._residual_index
def should_keep_shard(self, shard_index):
# type: (int) -> bool
"""Returns False only for dummy extra residual partition (if was added)."""
if shard_index == self._residual_index:
return self._should_keep_residual
elif self._is_index_in_the_range(shard_index):
return True
else:
raise ValueError(
'Given shard index {} is outside of expected range: '
'[0, {})'.format(shard_index, self._num_shards))
def _is_index_in_the_range(self, shard_index):
if shard_index < 0:
return False
if self._should_keep_residual:
if shard_index >= self._num_shards:
return False
else:
if shard_index >= self._num_shards - 1:
return False
return True
def get_output_table_suffix(self, shard_index):
# type: (int) -> Optional[str]
if not self._is_index_in_the_range(shard_index):
raise ValueError(
'Given shard index {} is outside of expected range: '
'[0, {})'.format(shard_index, self._num_shards))
return self._table_name_suffixes[shard_index]
def get_output_table_partition_range_end(self, shard_index):
# type: (int) -> Optional[int]
if not self._is_index_in_the_range(shard_index):
raise ValueError(
'Given shard index {} is outside of expected range: '
'[0, {})'.format(shard_index, self._num_shards))
return self._partition_range_end[shard_index]
def get_num_shards(self):
    # type: () -> int
return self._num_shards
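# Illustrative sketch (not part of the original module): a minimal sharding
# config driving VariantSharding end to end. The YAML mirrors the keys
# validated above; suffixes, regions and range ends are made-up values, and a
# temp file is used only so FileSystems.open() has a path to read.
def _example_variant_sharding():
  import tempfile
  config = (
      '- output_table:\n'
      '    table_name_suffix: "chr1"\n'
      '    regions:\n'
      '      - "chr1"\n'
      '    partition_range_end: 250000000\n'
      '- output_table:\n'
      '    table_name_suffix: "remaining"\n'
      '    regions:\n'
      '      - "residual"\n'
      '    partition_range_end: 250000000\n')
  with tempfile.NamedTemporaryFile(
      mode='w', suffix='.yaml', delete=False) as config_file:
    config_file.write(config)
  sharding = VariantSharding(config_file.name)
  # "chr1" is the first listed shard, so it maps to index 0; any other
  # chromosome falls through to the residual shard, which is kept because the
  # config names it explicitly.
  assert sharding.get_shard_index('chr1', 12345) == 0
  residual = sharding.get_shard_index('chrX', 12345)
  assert residual == sharding.get_residual_index()
  assert sharding.should_keep_shard(residual)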
|
|
import pickle
from copy import deepcopy
from indra.statements import *
from indra.tools import assemble_corpus as ac
a = Agent('a', db_refs={'HGNC': '1234', 'TEXT': 'a'})
b = Agent('b', db_refs={'UP': 'P15056', 'TEXT': 'b'})
c = Agent('c', db_refs={'FPLX': 'XXX', 'TEXT': 'c'})
d = Agent('d', db_refs={'TEXT': 'd'})
e = Agent('e', db_refs={'CHEBI': 'CHEBI:1234', 'TEXT': 'e'})
f = Agent('b', db_refs={'UP': 'P28028', 'TEXT': 'b'})
g = Agent('g', db_refs={'FPLX': 'ERK'})
h = Agent('g', mods=['x', 'y'], mutations=['x', 'y'], activity='x',
location='nucleus', bound_conditions=['x', 'y', 'z'])
i = Agent('a', db_refs={'HGNC': '1234', 'TEXT': 'a'},
bound_conditions=[BoundCondition(d)])
j = Agent('a', db_refs={'HGNC': '1234', 'TEXT': 'a'},
bound_conditions=[BoundCondition(b)])
k = Agent('a', db_refs={'HGNC': '1234', 'TEXT': 'a'},
bound_conditions=[BoundCondition(f)])
l = Agent('a', db_refs={'HGNC': '1234', 'TEXT': 'a'},
bound_conditions=[BoundCondition(a)])
mapk1 = Agent('MAPK1', db_refs={'HGNC':'6871', 'UP':'P28482'})
erk = Agent('ERK', db_refs={'FPLX': 'ERK'})
st1 = Phosphorylation(a, b, evidence=[Evidence(text='a->b', source_api='assertion')])
st2 = Phosphorylation(a, d, evidence=[Evidence(text='a->d', source_api='assertion')])
st3 = Phosphorylation(c, d, evidence=[Evidence(text='c->d', source_api='assertion')])
st4 = Phosphorylation(b, e, evidence=[Evidence(text='b->e', source_api='assertion')])
st5 = Phosphorylation(None, b, evidence=[Evidence(text='->b', source_api='assertion')])
st6 = Phosphorylation(None, d, evidence=[Evidence(text='->d', source_api='assertion')])
st7 = Phosphorylation(None, e, evidence=[Evidence(text='->e', source_api='assertion')])
st8 = Phosphorylation(b, f, evidence=[Evidence(text='b->f', source_api='assertion')])
st9 = Phosphorylation(None, f, evidence=[Evidence(text='->f', source_api='assertion')])
st10 = Phosphorylation(None, g, evidence=[Evidence(text='->g', source_api='assertion')])
st11 = Phosphorylation(None, h, evidence=[Evidence(text='->h', source_api='assertion')])
st12 = Phosphorylation(a, b, evidence=[Evidence(epistemics={'direct': True})])
st13 = Phosphorylation(a, b, evidence=[Evidence(epistemics={'direct': False})])
st14 = Activation(a, b, 'activity')
st15 = Activation(a, b, 'kinase')
st14.supports = [st15]
st15.supported_by = [st14]
st16 = Phosphorylation(a, mapk1)
st17 = Phosphorylation(a, erk)
st18 = Phosphorylation(a, i)
st19 = Phosphorylation(a, j)
st20 = Phosphorylation(a, k)
st21 = Phosphorylation(a, l)
st1.belief = 0.9
st2.belief = 0.8
st3.belief = 0.7
def test_load_stmts():
with open('_test.pkl', 'wb') as fh:
pickle.dump([st1], fh)
st_loaded = ac.load_statements('_test.pkl')
assert len(st_loaded) == 1
assert st_loaded[0].equals(st1)
def test_dump_stmts():
ac.dump_statements([st1], '_test.pkl')
st_loaded = ac.load_statements('_test.pkl')
assert len(st_loaded) == 1
assert st_loaded[0].equals(st1)
def test_filter_grounded_only():
    # st18 has a and i, where i has an ungrounded bound condition
st_out = ac.filter_grounded_only([st1, st4])
assert len(st_out) == 2
st_out = ac.filter_grounded_only([st3])
assert len(st_out) == 0
# Do we filter out a statement with an ungrounded bound condition?
st_out = ac.filter_grounded_only([st18])
assert len(st_out) == 0
# When we request to remove ungrounded bound conditions, do we?
st18_copy = deepcopy(st18)
assert len(st18_copy.sub.bound_conditions) == 1
st_out = ac.filter_grounded_only([st18_copy], remove_bound=True)
assert len(st_out[0].sub.bound_conditions) == 0
# When we request to remove ungrounded bound conditions, do we leave
# grounded bound conditions in place?
st19_copy = deepcopy(st19)
assert len(st19_copy.sub.bound_conditions) == 1
st_out = ac.filter_grounded_only([st19_copy], remove_bound=True)
assert len(st_out[0].sub.bound_conditions) == 1
    # Do we keep a statement with a grounded bound condition?
st_out = ac.filter_grounded_only([st19])
assert len(st_out) == 1
def test_filter_grounded_only_score():
c1 = Event(Concept('x', db_refs={'a': [('x', 0.5), ('y', 0.8)]}))
c2 = Event(Concept('x', db_refs={'a': [('x', 0.7), ('y', 0.9)]}))
st1 = Influence(c1, c2)
assert len(ac.filter_grounded_only([st1])) == 1
assert len(ac.filter_grounded_only([st1], score_threshold=0.4)) == 1
assert len(ac.filter_grounded_only([st1], score_threshold=0.6)) == 1
assert len(ac.filter_grounded_only([st1], score_threshold=0.85)) == 0
assert len(ac.filter_grounded_only([st1], score_threshold=0.95)) == 0
c3 = Event(Concept('x', db_refs={'a': []}))
st2 = Influence(c1, c3)
assert len(ac.filter_grounded_only([st2])) == 0
def test_filter_uuid_list():
st_out = ac.filter_uuid_list([st1, st4], [st1.uuid])
assert len(st_out) == 1
def test_filter_genes_only():
st_out = ac.filter_genes_only([st1, st5])
assert len(st_out) == 2
st_out = ac.filter_genes_only([st6, st7])
assert len(st_out) == 0
st_out = ac.filter_genes_only([st4])
assert len(st_out) == 0
st_out = ac.filter_genes_only([st3], specific_only=True)
assert len(st_out) == 0
# Can we remove statements with non-gene bound conditions?
st_out = ac.filter_genes_only([st18]) # remove_bound defaults to False
assert len(st_out) == 0
st_out = ac.filter_genes_only([st18], remove_bound=False)
assert len(st_out) == 0
# Can we remove non-gene bound conditions?
st18_copy = deepcopy(st18)
assert len(st18_copy.sub.bound_conditions) == 1
st_out = ac.filter_genes_only([st18_copy], remove_bound=True)
assert len(st_out[0].sub.bound_conditions) == 0
def test_filter_human_only():
st_out = ac.filter_human_only([st1, st5])
assert len(st_out) == 2
st_out = ac.filter_human_only([st8, st9])
assert len(st_out) == 0
# Can we filter out statements with bound conditions grounded to non-human
# genes?
st_out = ac.filter_human_only([st20], remove_bound=False)
assert len(st_out) == 0
    # When we do such filtering, do we keep statements bound to human genes?
st_out = ac.filter_human_only([st21], remove_bound=False)
assert len(st_out) == 1
# Can we remove bound conditions grounded to non-human genes?
st_out = ac.filter_human_only([st20], remove_bound=True)
assert len(st_out) == 1
assert len(st_out[0].sub.bound_conditions) == 0
    # When we do so, do we keep bound conditions grounded to human genes?
st_out = ac.filter_human_only([st21], remove_bound=True)
assert len(st_out) == 1
assert len(st_out[0].sub.bound_conditions) == 1
def test_filter_gene_list_one():
st_out = ac.filter_gene_list([st1, st2], ['a'], 'one')
assert len(st_out) == 2
st_out = ac.filter_gene_list([st1, st2], ['a'], 'all')
assert len(st_out) == 0
st_out = ac.filter_gene_list([st1, st2], ['a', 'b'], 'all')
assert len(st_out) == 1
st_out = ac.filter_gene_list([st1, st2], ['a', 'b'], 'invalid')
assert len(st_out) == 2
# Can we exclude a statement with a bound condition agent not on the filter
# list?
st_out = ac.filter_gene_list([st18], ['a', 'b', 'd'], 'all')
# All genes in the list
assert len(st_out) == 1
st_out = ac.filter_gene_list([st18], ['a', 'b'], 'all')
# Bound condition for sub not in list
assert len(st_out) == 0
st_out = ac.filter_gene_list([st18], ['a', 'b'], 'one')
# Bound condition for sub not in list but we only need to match one
assert len(st_out) == 1
st_out = ac.filter_gene_list([st18], ['d'], 'one')
# Only the bound condition is in filter list
assert len(st_out) == 1
# Can we remove bound conditions that are not in the filter list?
st_out = ac.filter_gene_list([st18], ['a', 'b', 'd'], 'all',
remove_bound=True)
assert len(st_out[0].sub.bound_conditions) == 1
st_out = ac.filter_gene_list([st18], ['a', 'b'], 'all',
remove_bound=True)
assert len(st_out[0].sub.bound_conditions) == 0
def test_filter_gene_list_invert():
st_out = ac.filter_gene_list([st1, st2], ['a'], 'one', invert=True)
assert len(st_out) == 0
st_out = ac.filter_gene_list([st1, st2], ['d'], 'one', invert=True)
assert len(st_out) == 1
assert st_out[0].sub.name == 'b'
st_out = ac.filter_gene_list([st1, st2], ['a', 'd'], 'all', invert=True)
assert len(st_out) == 1
assert st_out[0].sub.name == 'b'
st_out = ac.filter_gene_list([st1, st2], ['a', 'b', 'd'], 'all',
invert=True)
assert len(st_out) == 0
def test_filter_gene_list_families():
stmts_out = ac.filter_gene_list([st16, st17], ['MAPK1'], 'one',
allow_families=False)
assert len(stmts_out) == 1
assert stmts_out[0] == st16
stmts_out = ac.filter_gene_list([st16, st17], ['MAPK1'], 'one',
allow_families=True)
assert len(stmts_out) == 2
assert st16 in stmts_out
assert st17 in stmts_out
def test_run_preassembly():
st_out = ac.run_preassembly([st1, st3, st5, st6])
assert len(st_out) == 2
def test_run_preassembly_all_stmts():
st_out = ac.run_preassembly([st1, st3, st5, st6], return_toplevel=False)
assert len(st_out) == 4
def test_expand_families():
st_out = ac.expand_families([st10])
assert len(st_out) == 2
def test_strip_agent_context():
st_out = ac.strip_agent_context([st11])
assert len(st_out) == 1
assert not st_out[0].sub.mods
assert not st_out[0].sub.mutations
assert not st_out[0].sub.bound_conditions
assert not st_out[0].sub.activity
assert not st_out[0].sub.location
def test_filter_direct():
st_out = ac.filter_direct([st12])
assert len(st_out) == 1
st_out = ac.filter_direct([st13])
assert len(st_out) == 0
def test_filter_belief():
st_out = ac.filter_belief([st1, st2, st3], 0.75)
assert len(st_out) == 2
def test_reduce_activities():
st_out = ac.reduce_activities([st14, st15])
assert st_out[0].obj_activity == 'kinase'
assert st_out[1].obj_activity == 'kinase'
def test_filter_source():
ev1 = Evidence(source_api='bel')
ev2 = Evidence(source_api='biopax')
ev3 = Evidence(source_api='reach')
st1 = Activation(Agent('a'), Agent('b'), evidence=[ev3])
st2 = Activation(Agent('a'), Agent('b'), evidence=[ev1, ev2])
st3 = Activation(Agent('a'), Agent('b'), evidence=[ev1, ev3])
st_out = ac.filter_evidence_source([st1, st2], ['reach'], 'one')
assert len(st_out) == 1
st_out = ac.filter_evidence_source([st1, st2, st3], ['reach'], 'all')
assert (len(st_out) == 2)
st_out = ac.filter_evidence_source([st1, st2, st3], ['bel', 'biopax'],
'one')
assert (len(st_out) == 2)
st_out = ac.filter_evidence_source([st1, st2, st3], ['bel', 'biopax'],
'all')
assert (len(st_out) == 1)
st_out = ac.filter_evidence_source([st1, st2, st3], ['bel', 'biopax'],
'none')
assert (len(st_out) == 1)
def test_map_grounding():
a = Agent('MEK', db_refs={'TEXT': 'MEK'})
b = Agent('X', db_refs={'TEXT': 'ERK'})
st = Activation(a, b)
st_out = ac.map_grounding([st], do_rename=False)
assert len(st_out) == 1
assert st_out[0].subj.db_refs.get('FPLX')
assert st_out[0].obj.db_refs.get('FPLX')
assert st_out[0].obj.name == 'X'
st_out = ac.map_grounding([st], do_rename=True)
assert len(st_out) == 1
assert st_out[0].subj.db_refs.get('FPLX')
assert st_out[0].obj.db_refs.get('FPLX')
assert st_out[0].obj.name == 'ERK'
def test_map_grounding_user_map():
gm = {'MEK': {'XXX': 'YYY'}, 'ERK': {'FPLX': 'ERK'}}
a = Agent('MEK', db_refs={'TEXT': 'MEK'})
b = Agent('X', db_refs={'TEXT': 'ERK'})
st = Activation(a, b)
st_out = ac.map_grounding([st], grounding_map=gm, do_rename=True)
assert len(st_out) == 1
assert st_out[0].subj.db_refs.get('XXX') == 'YYY'
assert st_out[0].obj.db_refs.get('FPLX') == 'ERK'
assert st_out[0].obj.name == 'ERK'
gm = {'ERK': {'FPLX': 'ERK_TEST'}}
st_out = ac.map_grounding([st], grounding_map=gm,
grounding_map_policy='extend')
assert len(st_out) == 1
assert st_out[0].subj.db_refs.get('FPLX') == 'MEK'
assert st_out[0].obj.db_refs.get('FPLX') == 'ERK_TEST'
st_out = ac.map_grounding([st])
# Make sure the extension to the default grounding map doesn't persist
assert len(st_out) == 1
assert st_out[0].subj.db_refs.get('FPLX') == 'MEK'
assert st_out[0].obj.db_refs.get('FPLX') == 'ERK'
assert st_out[0].obj.name == 'ERK'
def test_map_sequence():
a = Agent('MAPK1', db_refs={'UP': 'P28482', 'HGNC': '6871'})
st1 = Phosphorylation(None, a, 'T', '182')
st2 = Phosphorylation(None, a, 'T', '185')
st3 = Phosphorylation(None, a, 'Y', '999')
st_out = ac.map_sequence([st1])
assert len(st_out) == 1, st_out
assert st_out[0].position == '185'
st_out = ac.map_sequence([st2])
assert len(st_out) == 1, st_out
assert st_out[0].position == '185'
st_out = ac.map_sequence([st3])
assert len(st_out) == 0, st_out
def test_map_sequence_blank_entries():
"""Make sure sites curated as erroneous with no mappings don't
get treated as valid mappings."""
mapk1 = Agent('MAPK1', db_refs={'UP': 'P28482'})
rps6 = Agent('RPS6', db_refs={'UP': 'P62753'})
phos_rps6 = Agent('RPS6',
mods=[ModCondition('phosphorylation', 'T', '389')],
db_refs={'UP': 'P62753'})
st1 = Phosphorylation(mapk1, rps6, 'T', '389')
st2 = Phosphorylation(phos_rps6, mapk1, 'T', '185')
mapped = ac.map_sequence([st1, st2])
assert len(mapped) == 0
def test_filter_by_type():
st_out = ac.filter_by_type([st1, st14], Phosphorylation)
assert len(st_out) == 1
st_out = ac.filter_by_type([st1, st14], "Phosphorylation")
assert len(st_out) == 1
def test_filter_top_level():
st_out = ac.filter_top_level([st14, st15])
assert len(st_out) == 1
def test_filter_no_hypothesis():
a = Agent('MAPK1')
ev1 = Evidence(epistemics={'hypothesis': True})
ev2 = Evidence(epistemics={'hypothesis': False})
st1 = Phosphorylation(None, a, evidence=[ev1, ev2])
st2 = Phosphorylation(None, a, evidence=[ev1, ev1])
st_out = ac.filter_no_hypothesis([st1, st2])
assert len(st_out) == 1
def test_filter_no_negated():
a = Agent('MAPK1')
ev1 = Evidence(epistemics={'negated': True})
ev2 = Evidence(epistemics={'negated': False})
st1 = Phosphorylation(None, a, evidence=[ev1, ev2])
st2 = Phosphorylation(None, a, evidence=[ev1, ev1])
st_out = ac.filter_no_negated([st1, st2])
assert len(st_out) == 1
def test_belief_cut_plus_filter_top():
st1 = Phosphorylation(None, Agent('a'))
st2 = Phosphorylation(Agent('b'), Agent('a'))
st1.supports = [st2]
st2.supported_by = [st1]
st1.belief = 0.9
st2.belief = 0.1
st_high_belief = ac.filter_belief([st1, st2], 0.5)
st_top_level = ac.filter_top_level(st_high_belief)
assert len(st_top_level) == 1
def test_filter_inconsequential_mods():
mc = ModCondition('phosphorylation', None, None, True)
st1 = Phosphorylation(None, Agent('a'))
st2 = Phosphorylation(Agent('a', mods=[mc]), Agent('b'))
st_out = ac.filter_inconsequential_mods([st1, st2])
assert len(st_out) == 1
whitelist = {'b': [('phosphorylation', None, None)]}
st_out = ac.filter_inconsequential_mods([st1, st2], whitelist=whitelist)
assert len(st_out) == 2
def test_filter_inconsequential_mods2():
st1 = Phosphorylation(Agent('a'), Agent('b'), 'S', '315')
whitelist = {'b': [('phosphorylation', 'S', '315')]}
st_out = ac.filter_inconsequential_mods([st1, st2], whitelist=whitelist)
assert len(st_out) == 1
def test_filter_inconsequential_activities():
st1 = Activation(Agent('a', activity=ActivityCondition('kinase', True)),
Agent('b'), 'activity')
st2 = Activation(Agent('c'), Agent('a'), 'kinase')
st_out = ac.filter_inconsequential_acts([st1, st2])
assert len(st_out) == 1
st_out = ac.filter_inconsequential_acts(st_out)
assert len(st_out) == 0
def test_filter_mutation_status():
braf_mut = Agent('BRAF', mutations=MutCondition('600', 'V', 'E'))
braf_other_mut = Agent('BRAF', mutations=MutCondition('555', 'K', 'G'))
st1 = Phosphorylation(braf_mut, Agent('a'))
st2 = Phosphorylation(braf_other_mut, Agent('a'))
mutations = {'BRAF': [('V', '600', 'E')]}
deletions = []
st_out = ac.filter_mutation_status([st1, st2], mutations, deletions)
assert len(st_out) == 1
mutations = {}
deletions = ['a']
st_out = ac.filter_mutation_status([st1, st2], mutations, deletions)
assert len(st_out) == 0
# Can we filter statements out based on bound conditions?
mutations = {'BRAF': [('V', '600', 'E')]}
deletions = []
braf_good_bound = deepcopy(braf_mut)
braf_good_bound.bound_conditions = [BoundCondition(braf_mut)]
#
braf_bad_bound = deepcopy(braf_mut)
braf_bad_bound.bound_conditions = [BoundCondition(braf_other_mut)]
#
st3 = Phosphorylation(braf_good_bound, Agent('a'))
st4 = Phosphorylation(braf_bad_bound, Agent('a'))
#
st_out = ac.filter_mutation_status([st3], mutations, deletions)
assert len(st_out) == 1
#
st_out = ac.filter_mutation_status([st4], mutations, deletions)
assert len(st_out) == 0
# Can we remove bound conditions based on our filter?
st_out = ac.filter_mutation_status([st3], mutations, deletions,
remove_bound=True)
assert len(st_out[0].enz.bound_conditions) == 1
#
st_out = ac.filter_mutation_status([st4], mutations, deletions,
remove_bound=True)
assert len(st_out[0].enz.bound_conditions) == 0
def test_get_unreachable_mods():
st1 = Phosphorylation(Agent('X'), Agent('Y'), 'S', '222')
mcs = [ModCondition('phosphorylation', 'S', '218', True),
ModCondition('phosphorylation', 'S', '222', True)]
st2 = ActiveForm(Agent('Y', mods=mcs), 'activity', True)
res = ac.get_unreachable_mods([st1, st2])
assert 'Y' in res, res
assert res['Y'] == set([('phosphorylation', 'S', '218')])
def test_rename_db_ref():
x = Agent('X', db_refs={'BE': 'X'})
y = Agent('Y', db_refs={'FPLX': 'Y'})
st1 = Phosphorylation(x, y)
stmts = ac.rename_db_ref([st1], 'BE', 'FPLX')
assert len(stmts) == 1
assert stmts[0].enz.db_refs.get('FPLX') == 'X'
assert 'BE' not in stmts[0].enz.db_refs
assert stmts[0].sub.db_refs.get('FPLX') == 'Y'
def test_filter_concept_names():
stmts = [
Influence(Event(Concept('a')), Event(Concept('b'))),
Influence(Event(Concept('a')), Event(Concept('c'))),
Influence(Event(Concept('a')), Event(Concept('d'))),
Influence(Event(Concept('c')), Event(Concept('d')))
]
stmts_out = ac.filter_concept_names(stmts, ['a'], 'one')
assert len(stmts_out) == 3, stmts_out
stmts_out = ac.filter_concept_names(stmts, ['a', 'b', 'c'], 'all')
assert len(stmts_out) == 2, stmts_out
stmts_out = ac.filter_concept_names(stmts, ['a', 'd'], 'one')
assert len(stmts_out) == 4, stmts_out
stmts_out = ac.filter_concept_names(stmts, ['a', 'b'], 'one', invert=True)
assert len(stmts_out) == 1, stmts_out
def test_filter_namespace_concepts_simple():
def make_statement(a, b):
return Influence(Event(Concept(a, db_refs={'TEXT': a})),
Event(Concept(b, db_refs={'TEXT': b})))
stmts = [make_statement('education', 'thinking'),
make_statement('doubt', 'government')]
fs = ac.filter_by_db_refs(stmts, 'TEXT', ['education'], 'one')
assert [stmts[0]] == fs, fs
fs = ac.filter_by_db_refs(stmts, 'TEXT', ['education'], 'one',
invert=True)
assert stmts == fs, fs
fs = ac.filter_by_db_refs(stmts, 'TEXT', ['education'], 'all',
invert=True)
assert [stmts[1]] == fs, fs
fs = ac.filter_by_db_refs(stmts, 'TEXT', ['education'], 'all')
assert not fs, fs
def test_filter_namespace_concepts_list():
def make_statement(a, b):
return Influence(Event(Concept(a, db_refs={'UN': [(a, 1.0)]})),
Event(Concept(b, db_refs={'UN': [(b, 1.0)]})))
stmts = [make_statement('UN/entities/human/education',
'UN/entities/human/food/food_security'),
make_statement('UN/entities/human/fishery',
'UN/entities/human/government')]
fs = ac.filter_by_db_refs(stmts, 'UN', ['education'], 'one',
match_suffix=True)
assert [stmts[0]] == fs, fs
fs = ac.filter_by_db_refs(stmts, 'UN', ['education', 'fishery'],
'one', match_suffix=True)
assert stmts == fs, fs
fs = ac.filter_by_db_refs(stmts, 'UN',
['fishery', 'government'], 'all',
match_suffix=True)
assert [stmts[1]] == fs, fs
def test_merge_groundings():
refs1 = {'UN': [('x', 0.8), ('y', 0.7)],
'B': 'x',
'C': 'y'}
refs2 = {'UN': [('x', 0.9), ('y', 0.6), ('z', 0.5)],
'B': 'x',
'D': 'z'}
stmts = [Influence(Event(Concept('a', db_refs=refs1)),
Event(Concept('b', db_refs=refs2)),
evidence=[Evidence(source_api='eidos', text='1')]),
Influence(Event(Concept('a', db_refs=refs2)),
Event(Concept('b', db_refs=refs1)),
evidence=[Evidence(source_api='eidos', text='2')])]
stmts = ac.run_preassembly(stmts)
assert len(stmts) == 1
stmts = ac.merge_groundings(stmts)
assert stmts[0].subj.concept.db_refs == \
{'UN': [('x', 0.9), ('y', 0.7), ('z', 0.5)],
'B': 'x', 'C': 'y', 'D': 'z'}, \
stmts[0].subj.db_refs
assert stmts[0].obj.concept.db_refs == stmts[0].subj.concept.db_refs
def test_preassemble_flatten():
st_out = ac.run_preassembly([st1, st3, st5, st6], flatten_evidence=False)
assert len(st_out[0].evidence) == 1
assert len(st_out[1].evidence) == 1
st_out = ac.run_preassembly([st1, st3, st5, st6], flatten_evidence=True,
flatten_evidence_collect_from='supported_by')
assert len(st_out[0].evidence) == 2
assert len(st_out[1].evidence) == 2
st_out = ac.run_preassembly([st1, st3, st5, st6], flatten_evidence=True,
flatten_evidence_collect_from='supports')
assert len(st_out[0].evidence) == 1
assert len(st_out[1].evidence) == 1
def test_filter_by_curation():
new_st1 = deepcopy(st1)
new_ev = Evidence(text='a -> b', source_api='new')
new_st1.evidence.append(new_ev)
stmts_in = [new_st1, st2, st3]
assert len(new_st1.evidence) == 2
assert all(st.belief != 1 for st in stmts_in)
cur1 = {'pa_hash': new_st1.get_hash(),
'source_hash': new_st1.evidence[0].get_source_hash(),
'tag': 'grounding'}
cur2 = {'pa_hash': new_st1.get_hash(),
'source_hash': new_st1.evidence[1].get_source_hash(),
'tag': 'wrong_relation'}
cur3 = {'pa_hash': new_st1.get_hash(),
'source_hash': new_st1.evidence[0].get_source_hash(),
'tag': 'correct'}
cur4 = {'pa_hash': st2.get_hash(),
'source_hash': st2.evidence[0].get_source_hash(),
'tag': 'correct'}
# With 'any' policy it is enough to have one incorrect curation
any_incorrect_one_cur = ac.filter_by_curation(stmts_in, [cur1], 'any')
assert len(any_incorrect_one_cur) == 2
assert new_st1 not in any_incorrect_one_cur
    # With 'all' policy, all evidences have to be curated as incorrect for a
    # statement to be filtered out
all_incorrect_one_cur = ac.filter_by_curation(stmts_in, [cur1], 'all')
assert len(all_incorrect_one_cur) == 3, len(all_incorrect_one_cur)
assert new_st1 in all_incorrect_one_cur
all_incorrect_two_cur = ac.filter_by_curation(stmts_in, [cur1, cur2], 'all')
assert len(all_incorrect_two_cur) == 2
assert new_st1 not in all_incorrect_two_cur
# Correct curation cancels out incorrect
assert len(new_st1.evidence) == 2
correct_incorrect = ac.filter_by_curation(
stmts_in, [cur1, cur2, cur3, cur4], 'all', update_belief=False)
assert len(correct_incorrect) == 3, len(correct_incorrect)
assert new_st1 in correct_incorrect
# new_st1.evidence[1] should be filtered out because there's only incorrect
# curation(cur2), new_st1.evidence[0] stays because correct cancels out
# incorrect (cur1, cur3)
assert len(new_st1.evidence) == 1
assert new_st1.evidence[0].source_api == 'assertion'
assert all(st.belief != 1 for st in correct_incorrect)
# Optionally update belief to 1 for correct curation
new_belief = ac.filter_by_curation(
stmts_in, [cur1, cur2, cur3, cur4], 'all', update_belief=True)
assert new_belief[0].belief == 1
assert new_belief[1].belief == 1
assert new_belief[2].belief == 0.7
def test_eidos_ungrounded():
a = Agent('x', db_refs={'TEXT': 'x', 'TEXT_NORM': 'y'})
b = Agent('x', db_refs={'TEXT': 'x', })
c = Agent('x', db_refs={'TEXT': 'x', 'GO': 'GO:1234'})
stmts = [Activation(a, b),
Activation(a, c),
Activation(b, c),
             Activation(c, c)]
stmts_out = ac.filter_grounded_only(stmts)
assert len(stmts_out) == 1
def test_filter_large_complexes():
stmt1 = Complex([Agent('x'), Agent('y'), Agent('z')])
stmt2 = Complex([Agent('x'), Agent('y')])
stmt3 = Phosphorylation(None, Agent('x'))
stmts = ac.filter_complexes_by_size([stmt1, stmt2, stmt3])
assert len(stmts) == 3
stmts = ac.filter_complexes_by_size([stmt1, stmt2, stmt3],
members_allowed=2)
assert len(stmts) == 2
def test_map_db_refs():
a = Agent('a', db_refs={'db1': 'a1', 'db2': 'a2'})
b = Agent('b', db_refs={'db1': 'b1', 'db2': 'b2'})
stmt = Activation(a, b)
assert stmt.subj.db_refs['db1'] == 'a1'
assert stmt.subj.db_refs['db2'] == 'a2'
assert stmt.obj.db_refs['db1'] == 'b1'
assert stmt.obj.db_refs['db2'] == 'b2'
# Only provided IDs change, keep the rest unchanged
db_refs_map = {('db1', 'a1'): 'A1',
('db2', 'b2'): 'B2'}
regr_stmt = ac.map_db_refs([stmt], db_refs_map=db_refs_map)[0]
assert regr_stmt.subj.db_refs['db1'] == 'A1'
assert regr_stmt.subj.db_refs['db2'] == 'a2'
assert regr_stmt.obj.db_refs['db1'] == 'b1'
assert regr_stmt.obj.db_refs['db2'] == 'B2'
def test_strip_supports():
stmts = [deepcopy(st14), deepcopy(st15)]
assert stmts[0].supports
assert stmts[1].supported_by
no_support = ac.strip_supports(stmts)
assert not no_support[0].supports
assert not no_support[0].supported_by
assert not no_support[1].supports
assert not no_support[1].supported_by
def test_normalize_active_forms():
af1 = ActiveForm(Agent('a',
mods=[ModCondition('phosphorylation', 'T', 93),
ModCondition('phosphorylation', 'T', 63)]),
'activity', True)
af2 = ActiveForm(Agent('a',
mods=[ModCondition('phosphorylation', 'T', 63),
ModCondition('phosphorylation', 'T', 93)]),
'activity', True)
af3 = ActiveForm(Agent('a',
mods=[ModCondition('phosphorylation')]),
'activity', True)
ph = Phosphorylation(Agent('a'), Agent('b'))
act = Activation(Agent('b'), Agent('a'))
stmts = [af1, af2, af3, ph, act]
norm_stmts = ac.normalize_active_forms(stmts)
assert len(norm_stmts) == 3
norm_stmt_hashes = [stmt.get_hash() for stmt in norm_stmts]
assert af1.get_hash() in norm_stmt_hashes
assert af3.get_hash() not in norm_stmt_hashes
assert ph.get_hash() in norm_stmt_hashes
assert act.get_hash() in norm_stmt_hashes
def test_run_mechlinker():
# Reduce activities
a1 = Agent('a', location='cytoplasm')
a2 = Agent('a', location='nucleus')
af1 = ActiveForm(a1, 'activity', True)
af2 = ActiveForm(a2, 'kinase', True)
    af3 = ActiveForm(a1, 'catalytic', True)
stmts = [af1, af2, af3]
stmts_out = ac.run_mechlinker(stmts, reduce_activities=True)
for st in stmts_out:
assert st.activity == 'kinase'
# Reduce modifications
phos1 = Phosphorylation(Agent('b'), Agent('a'))
phos2 = Phosphorylation(Agent('c'), Agent('a'), 'T')
phos3 = Phosphorylation(Agent('d'), Agent('a'), 'T', '143')
stmts = [phos1, phos2, phos3]
stmts_out = ac.run_mechlinker(stmts, reduce_modifications=True)
assert len(stmts_out) == 3
for st in stmts_out:
assert st.residue == 'T'
assert st.position == '143'
# Replace activations
af = ActiveForm(Agent('a', mods=[ModCondition('phosphorylation')]),
'activity', True)
phos = Phosphorylation(Agent('b'), Agent('a'))
act = Activation(Agent('b'), Agent('a'))
stmts = [af, phos, act]
stmts_out = ac.run_mechlinker(stmts, replace_activations=True)
assert len(stmts_out) == 2
# Require active forms
af = ActiveForm(Agent('a', mods=[ModCondition('phosphorylation')]),
'activity', True)
ph = Phosphorylation(Agent('a'), Agent('b'))
stmts = [af, ph]
stmts_out = ac.run_mechlinker(stmts, require_active_forms=True)
assert len(stmts_out) == 2
assert stmts_out[1].enz.mods, stmts_out
def test_filter_inconsequential():
mc = ModCondition('phosphorylation', None, None, True)
phos1 = Phosphorylation(None, Agent('a'))
phos2 = Phosphorylation(Agent('a', mods=[mc]), Agent('b'))
act1 = Activation(Agent('a', activity=ActivityCondition('kinase', True)),
Agent('b'), 'activity')
act2 = Activation(Agent('c'), Agent('a'), 'kinase')
stmts = [phos1, phos2, act1, act2]
stmts_out = ac.filter_inconsequential(stmts)
assert len(stmts_out) == 0, stmts_out
mod_whitelist = {'b': [('phosphorylation', 'S', '315')]}
act_whitelist = {'a': ['kinase']}
stmts_out = ac.filter_inconsequential(
stmts, acts=False, mod_whitelist=mod_whitelist)
assert len(stmts_out) == 3, stmts_out
stmts_out = ac.filter_inconsequential(stmts, mod_whitelist=mod_whitelist)
assert len(stmts_out) == 1, stmts_out
stmts_out = ac.filter_inconsequential(
stmts, mods=False, act_whitelist=act_whitelist)
assert len(stmts_out) == 3, stmts_out
stmts_out = ac.filter_inconsequential(stmts, act_whitelist=act_whitelist)
assert len(stmts_out) == 1, stmts_out
stmts_out = ac.filter_inconsequential(
stmts, mod_whitelist=mod_whitelist, act_whitelist=act_whitelist)
assert len(stmts_out) == 2, stmts_out
|
|
# Copyright 2013 Lars Butler & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import StringIO
except ImportError:
import io
StringIO = io
import geomet
import unittest
from geomet import wkt
WKT = {}
WKT['point'] = {
'2d': 'POINT (0.0000000000000000 1.0000000000000000)',
'3d': 'POINT (0.0000000000000000 -1.0000000000000000 2.0000000000000000)',
'4d': ('POINT (-0.0000000000000000 -1.0000000000000000 '
'-2.0000000000000000 -4.0000000000000000)'),
}
WKT['linestring'] = {
'2d': ('LINESTRING (-100.0000000000000000 0.0000000000000000, '
'-101.0000000000000000 -1.0000000000000000)'),
'3d': ('LINESTRING ('
'100.0000000000000000 0.0000000000000000 -60.0000000000000000, '
'101.0000000000000000 1.0000000000000000 -65.2500000000000000)'),
'4d': ('LINESTRING ('
'100.0000000000000000 0.0000000000000000 -60.0000000000000000 '
'0.1000000000000000, '
'101.0000000000000000 1.0000000000000000 -65.2500000000000000 '
'0.2000000000000000)'),
}
WKT['polygon'] = {
'2d': ('POLYGON ((100.0010 0.0010, 101.1235 0.0010, 101.0010 1.0010, '
'100.0010 0.0010), '
'(100.2010 0.2010, 100.8010 0.2010, 100.8010 0.8010, '
'100.2010 0.2010))'),
'3d': ('POLYGON ((100.0 0.0 3.1, 101.0 0.0 2.1, 101.0 1.0 1.1, '
'100.0 0.0 3.1), '
'(100.2 0.2 3.1, 100.8 0.2 2.1, 100.8 0.8 1.1, 100.2 0.2 3.1))'),
'4d': 'POLYGON ((1 2 3 4, 5 6 7 8, 9 10 11 12, 1 2 3 4))',
}
WKT['multipoint'] = {
'2d': 'MULTIPOINT ((100.000 3.101), (101.000 2.100), (3.140 2.180))',
'3d': ('MULTIPOINT ((100.00 3.10 1.00), (101.00 2.10 2.00), '
'(3.14 2.18 3.00))'),
'4d': ('MULTIPOINT ((100.00 3.10 1.00 0.00), (101.00 2.10 2.00 0.00), '
'(3.14 2.18 3.00 0.00))'),
}
WKT['multilinestring'] = (
'MULTILINESTRING ((0 -1, -2 -3, -4 -5), '
'(1.66 -31023.5 1.1, 10000.9999 3.0 2.2, 100.9 1.1 3.3, 0 0 4.4))'
)
WKT['multipolygon'] = (
'MULTIPOLYGON (((100.001 0.001, 101.001 0.001, 101.001 1.001, '
'100.001 0.001), '
'(100.201 0.201, 100.801 0.201, 100.801 0.801, '
'100.201 0.201)), ((1 2 3 4, 5 6 7 8, 9 10 11 12, 1 2 3 4)))'
)
class WKTTestCase(unittest.TestCase):
def test_dumps_unsupported_geom_type(self):
geom = dict(type='Tetrahedron', coordinates=[])
with self.assertRaises(ValueError) as ar:
wkt.dumps(geom)
self.assertEqual("Unsupported geometry type 'Tetrahedron'",
str(ar.exception))
def test_loads_unsupported_geom_type(self):
geom = 'TETRAHEDRON (0 0)' # This obviously isn't a valid tetrahedron
with self.assertRaises(ValueError) as ar:
wkt.loads(geom)
self.assertEqual("Unsupported geometry type 'TETRAHEDRON'",
str(ar.exception))
def test_dumps_empty_geoms(self):
types = [
'Point',
'LineString',
'Polygon',
'MultiPoint',
'MultiLineString',
'MultiPolygon',
]
expected = ['%s EMPTY' % x.upper() for x in types]
for i, t in enumerate(types):
geom = dict(type=t, coordinates=[])
self.assertEqual(expected[i], wkt.dumps(geom))
def test_loads_empty_geoms(self):
types = [
'Point',
'LineString',
'Polygon',
'MultiPoint',
'MultiLineString',
'MultiPolygon',
]
wkts = ['%s EMPTY' % x.upper() for x in types]
for i, each_wkt in enumerate(wkts):
expected = dict(type=types[i], coordinates=[])
self.assertEqual(expected, wkt.loads(each_wkt))
self.assertEqual(dict(type='GeometryCollection', geometries=[]),
wkt.loads('GEOMETRYCOLLECTION EMPTY'))
def test_dumps_empty_geometrycollection(self):
geom = dict(type='GeometryCollection', geometries=[])
self.assertEqual('GEOMETRYCOLLECTION EMPTY', wkt.dumps(geom))
def test_malformed_geojson(self):
bad_geojson = [
# GEOMETRYCOLLECTIONs have 'geometries', not coordinates
dict(type='GeometryCollection', coordinates=[]),
# All other geometry types must have coordinates
dict(type='Point'),
# and a type
dict(coordinates=[]),
]
for each in bad_geojson:
with self.assertRaises(geomet.InvalidGeoJSONException):
wkt.dumps(each)
class TestFileInteractions(unittest.TestCase):
def test_load(self):
fobj = StringIO.StringIO()
geom = 'POINT (0 0)'
fobj.write(geom)
fobj.seek(0)
loaded = wkt.load(fobj)
expected = dict(type='Point', coordinates=[0, 0])
self.assertEqual(expected, loaded)
def test_dump(self):
fobj = StringIO.StringIO()
geom = dict(type='Point', coordinates=[0, 0])
wkt.dump(geom, fobj)
fobj.seek(0)
written = fobj.read()
expected = 'POINT (0.0000000000000000 0.0000000000000000)'
self.assertEqual(expected, written)
class PointDumpsTestCase(unittest.TestCase):
def test_2d(self):
# Tests a typical 2D Point case:
pt = dict(type='Point', coordinates=[0.0, 1.0])
expected = WKT['point']['2d']
self.assertEqual(expected, wkt.dumps(pt))
def test_3d(self):
# Test for an XYZ/XYM Point:
pt = dict(type='Point', coordinates=[0.0, -1.0, 2.0])
expected = WKT['point']['3d']
self.assertEqual(expected, wkt.dumps(pt))
def test_4d(self):
# Test for an XYZM Point:
pt = dict(type='Point', coordinates=[-0.0, -1.0, -2.0, -4.0])
expected = WKT['point']['4d']
self.assertEqual(expected, wkt.dumps(pt))
def test_2d_6_decimals(self):
pt = dict(type='Point', coordinates=[-10, -77])
expected = 'POINT (-10.000000 -77.000000)'
self.assertEqual(expected, wkt.dumps(pt, decimals=6))
def test_2d_srid4326(self):
# SRID just from meta:
pt = dict(type='Point', coordinates=[0.0, 1.0], meta=dict(srid=4326))
expected = 'SRID=4326;' + WKT['point']['2d']
self.assertEqual(expected, wkt.dumps(pt))
# SRID from both meta and crs:
pt = dict(
type='Point', coordinates=[0.0, 1.0], meta=dict(srid=4326),
crs={'type': 'name', 'properties': {'name': 'EPSG4326'}},
)
expected = 'SRID=4326;' + WKT['point']['2d']
self.assertEqual(expected, wkt.dumps(pt))
# SRID just from crs:
pt = dict(
type='Point', coordinates=[0.0, 1.0],
crs={'type': 'name', 'properties': {'name': 'EPSG4326'}},
)
expected = 'SRID=4326;' + WKT['point']['2d']
self.assertEqual(expected, wkt.dumps(pt))
# Conflicting SRID from meta and crs:
pt = dict(
type='Point', coordinates=[0.0, 1.0], meta=dict(srid=4326),
crs={'type': 'name', 'properties': {'name': 'EPSG4327'}},
)
expected = 'SRID=4326;' + WKT['point']['2d']
with self.assertRaises(ValueError) as ar:
wkt.dumps(pt)
self.assertEqual('Ambiguous CRS/SRID values: 4326 and 4327',
str(ar.exception))
class PointLoadsTestCase(unittest.TestCase):
def test_2d(self):
pt = 'POINT (-0.0000000000000000 1.0000000000000000)'
expected = dict(type='Point', coordinates=[0.0, 1.0])
self.assertEqual(expected, wkt.loads(pt))
def test_3d(self):
pt = 'POINT (-0.0 -1.0 -2.0)'
expected = dict(type='Point', coordinates=[0.0, -1.0, -2.0])
self.assertEqual(expected, wkt.loads(pt))
def test_4d(self):
pt = 'POINT (0.0 1.0 2.0 -4.0)'
expected = dict(type='Point', coordinates=[0.0, 1.0, 2.0, -4.0])
self.assertEqual(expected, wkt.loads(pt))
def test_raises_unmatched_paren(self):
pt = 'POINT (0.0 1.0'
with self.assertRaises(ValueError) as ar:
wkt.loads(pt)
self.assertEqual('Invalid WKT: `POINT (0.0 1.0`',
str(ar.exception))
def test_raises_invalid_wkt(self):
pt = 'POINT 0.0 1.0'
with self.assertRaises(ValueError) as ar:
wkt.loads(pt)
self.assertEqual('Invalid WKT: `POINT 0.0 1.0`', str(ar.exception))
def test_2d_srid664(self):
pt = 'SRID=664;POINT (-0.0000000000000000 1.0000000000000000)'
expected = dict(
type='Point', coordinates=[0.0, 1.0], meta=dict(srid=664)
)
self.assertEqual(expected, wkt.loads(pt))
class LineStringDumpsTestCase(unittest.TestCase):
def test_2d(self):
# Test a typical 2D LineString case:
ls = dict(type='LineString', coordinates=[[-100.0, 0.0],
[-101.0, -1.0]])
expected = WKT['linestring']['2d']
self.assertEqual(expected, wkt.dumps(ls))
def test_3d(self):
ls = dict(type='LineString', coordinates=[[100.0, 0.0, -60.0],
[101.0, 1.0, -65.25]])
expected = WKT['linestring']['3d']
self.assertEqual(expected, wkt.dumps(ls))
def test_4d(self):
ls = dict(type='LineString', coordinates=[[100.0, 0.0, -60.0, 0.1],
[101.0, 1.0, -65.25, 0.2]])
expected = WKT['linestring']['4d']
self.assertEqual(expected, wkt.dumps(ls))
def test_2d_3_decimals(self):
ls = dict(type='LineString', coordinates=[[100.0, 0.0], [101.0, 1.0]])
expected = 'LINESTRING (100.000 0.000, 101.000 1.000)'
self.assertEqual(expected, wkt.dumps(ls, decimals=3))
def test_2d_srid4326(self):
# Test a typical 2D LineString case:
ls = dict(
type='LineString',
coordinates=[[-100.0, 0.0], [-101.0, -1.0]],
meta=dict(srid=4326),
)
expected = 'SRID=4326;' + WKT['linestring']['2d']
self.assertEqual(expected, wkt.dumps(ls))
class LineStringLoadsTestCase(unittest.TestCase):
def test_2d(self):
ls = 'LINESTRING (0 -1, -2 -3, -4 5)'
expected = dict(type='LineString', coordinates=[[0.0, -1.0],
[-2.0, -3.0],
[-4.0, 5.0]])
self.assertEqual(expected, wkt.loads(ls))
def test_3d(self):
ls = 'LINESTRING (0 1 2, 3 4 5)'
expected = dict(type='LineString', coordinates=[[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0]])
self.assertEqual(expected, wkt.loads(ls))
def test_4d(self):
ls = 'LINESTRING (0 1 2 3, 4 5 6 7)'
expected = dict(type='LineString', coordinates=[[0.0, 1.0, 2.0, 3.0],
[4.0, 5.0, 6.0, 7.0]])
self.assertEqual(expected, wkt.loads(ls))
def test_raises_unmatched_paren(self):
ls = 'LINESTRING (0.0 1.0'
with self.assertRaises(ValueError) as ar:
wkt.loads(ls)
self.assertEqual('Invalid WKT: `LINESTRING (0.0 1.0`',
str(ar.exception))
def test_raises_invalid_wkt(self):
ls = 'LINESTRING 0.0 1.0'
with self.assertRaises(ValueError) as ar:
wkt.loads(ls)
self.assertEqual('Invalid WKT: `LINESTRING 0.0 1.0`',
str(ar.exception))
def test_2d_srid1234(self):
ls = 'SRID=1234;LINESTRING (0 -1, -2 -3, -4 5)'
expected = dict(
type='LineString',
coordinates=[[0.0, -1.0], [-2.0, -3.0], [-4.0, 5.0]],
meta=dict(srid=1234),
)
self.assertEqual(expected, wkt.loads(ls))
class PolygonDumpsTestCase(unittest.TestCase):
def test_2d(self):
poly = dict(type='Polygon', coordinates=[
[[100.001, 0.001], [101.12345, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]],
])
expected = WKT['polygon']['2d']
self.assertEqual(expected, wkt.dumps(poly, decimals=4))
def test_3d(self):
poly = dict(type='Polygon', coordinates=[
[[100.0, 0.0, 3.1], [101.0, 0.0, 2.1], [101.0, 1.0, 1.1],
[100.0, 0.0, 3.1]],
[[100.2, 0.2, 3.1], [100.8, 0.2, 2.1], [100.8, 0.8, 1.1],
[100.2, 0.2, 3.1]],
])
expected = WKT['polygon']['3d']
self.assertEqual(expected, wkt.dumps(poly, decimals=1))
def test_4d(self):
poly = dict(type='Polygon', coordinates=[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [1, 2, 3, 4]]
])
expected = WKT['polygon']['4d']
self.assertEqual(expected, wkt.dumps(poly, decimals=0))
def test_2d_srid2666(self):
poly = dict(
type='Polygon',
coordinates=[
[[100.001, 0.001], [101.12345, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]],
],
meta=dict(srid=2666),
)
expected = 'SRID=2666;' + WKT['polygon']['2d']
self.assertEqual(expected, wkt.dumps(poly, decimals=4))
class PolygonLoadsTestCase(unittest.TestCase):
def test_2d(self):
poly = (
'POLYGON ((100.001 0.001, 101.001 0.001, 101.001 1.001, '
'100.001 0.001), '
'(100.201 0.201, 100.801 0.201, 100.801 0.801, '
'100.201 0.201))'
)
expected = dict(type='Polygon', coordinates=[
[[100.001, 0.001], [101.001, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]],
])
self.assertEqual(expected, wkt.loads(poly))
def test_3d(self):
poly = (
'POLYGON ((100.0 0.0 3.1, 101.0 0.0 2.1, 101.0 1.0 1.1, '
'100.0 0.0 3.1), '
'(100.2 0.2 3.1, 100.8 0.2 2.1, 100.8 0.8 1.1, 100.2 0.2 3.1))'
)
expected = dict(type='Polygon', coordinates=[
[[100.0, 0.0, 3.1], [101.0, 0.0, 2.1], [101.0, 1.0, 1.1],
[100.0, 0.0, 3.1]],
[[100.2, 0.2, 3.1], [100.8, 0.2, 2.1], [100.8, 0.8, 1.1],
[100.2, 0.2, 3.1]],
])
self.assertEqual(expected, wkt.loads(poly))
def test_4d(self):
poly = 'POLYGON ((1 2 3 4, 5 6 7 8, 9 10 11 12, 1 2 3 4))'
expected = dict(type='Polygon', coordinates=[
[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0], [1.0, 2.0, 3.0, 4.0]]
])
self.assertEqual(expected, wkt.loads(poly))
def test_raises_unmatched_paren(self):
poly = 'POLYGON ((0.0 0.0, 1.0 4.0, 4.0 1.0, 0.0 0.0)'
with self.assertRaises(ValueError) as ar:
wkt.loads(poly)
self.assertEqual(
'Invalid WKT: `POLYGON ((0.0 0.0, 1.0 4.0, 4.0 1.0, 0.0 0.0)`',
str(ar.exception)
)
def test_raises_invalid_wkt(self):
poly = 'POLYGON 0.0 0.0, 1.0 4.0, 4.0 1.0, 0.0 0.0))'
with self.assertRaises(ValueError) as ar:
wkt.loads(poly)
self.assertEqual(
'Invalid WKT: `POLYGON 0.0 0.0, 1.0 4.0, 4.0 1.0, 0.0 0.0))`',
str(ar.exception)
)
def test_2d_srid2666(self):
poly = (
'SRID=2666;POLYGON ((100.001 0.001, 101.001 0.001, 101.001 1.001, '
'100.001 0.001), '
'(100.201 0.201, 100.801 0.201, 100.801 0.801, '
'100.201 0.201))'
)
expected = dict(
type='Polygon',
coordinates=[
[[100.001, 0.001], [101.001, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]],
],
meta=dict(srid=2666),
)
self.assertEqual(expected, wkt.loads(poly))
class MultiPointLoadsTestCase(unittest.TestCase):
def test_2d(self):
mp = WKT['multipoint']['2d']
expected = dict(type='MultiPoint', coordinates=[
[100.0, 3.101], [101.0, 2.1], [3.14, 2.18],
])
self.assertEqual(expected, wkt.loads(mp))
def test_2d_alternate(self):
# alternative style for representing a multipoint in WKT
mp = 'MULTIPOINT (100.000 3.101, 101.000 2.100, 3.140 2.180)'
expected = dict(type='MultiPoint', coordinates=[
[100.0, 3.101], [101.0, 2.1], [3.14, 2.18],
])
self.assertEqual(expected, wkt.loads(mp))
def test_3d(self):
mp = WKT['multipoint']['3d']
expected = dict(type='MultiPoint', coordinates=[
[100.0, 3.1, 1.0], [101.0, 2.1, 2.0], [3.14, 2.18, 3.0],
])
self.assertEqual(expected, wkt.loads(mp))
def test_4d(self):
mp = WKT['multipoint']['4d']
expected = dict(type='MultiPoint', coordinates=[
[100.0, 3.1, 1.0, 0.0],
[101.0, 2.1, 2.0, 0.0],
[3.14, 2.18, 3.0, 0.0],
])
self.assertEqual(expected, wkt.loads(mp))
def test_2d_srid4326(self):
mp = 'SRID=4326;' + WKT['multipoint']['2d']
expected = dict(
type='MultiPoint',
coordinates=[[100.0, 3.101], [101.0, 2.1], [3.14, 2.18]],
meta=dict(srid=4326),
)
self.assertEqual(expected, wkt.loads(mp))
def test_malformed_wkt(self):
mp = 'MULTIPOINT 0 1, 0 0'
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = 'Invalid WKT: `MULTIPOINT 0 1, 0 0`'
self.assertEqual(expected, str(ar.exception))
def test_malformed_wkt_misbalanced_parens(self):
mp = 'MULTIPOINT ((0 0), (0 1)'
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = 'Invalid WKT: `MULTIPOINT ((0 0), (0 1)`'
self.assertEqual(expected, str(ar.exception))
class MultiPointDumpsTestCase(unittest.TestCase):
def test_2d(self):
mp = dict(type='MultiPoint', coordinates=[
[100.0, 3.101], [101.0, 2.1], [3.14, 2.18],
])
expected = WKT['multipoint']['2d']
self.assertEqual(expected, wkt.dumps(mp, decimals=3))
def test_3d(self):
mp = dict(type='MultiPoint', coordinates=[
[100.0, 3.1, 1], [101.0, 2.1, 2], [3.14, 2.18, 3],
])
expected = WKT['multipoint']['3d']
self.assertEqual(expected, wkt.dumps(mp, decimals=2))
def test_4d(self):
mp = dict(type='MultiPoint', coordinates=[
[100.0, 3.1, 1, 0], [101.0, 2.1, 2, 0], [3.14, 2.18, 3, 0],
])
expected = WKT['multipoint']['4d']
self.assertEqual(expected, wkt.dumps(mp, decimals=2))
def test_2d_srid4326(self):
mp = dict(
type='MultiPoint',
coordinates=[[100.0, 3.101], [101.0, 2.1], [3.14, 2.18]],
meta=dict(srid=4326),
)
expected = 'SRID=4326;' + WKT['multipoint']['2d']
self.assertEqual(expected, wkt.dumps(mp, decimals=3))
class MultiPolygonDumpsTestCase(unittest.TestCase):
def test(self):
mpoly = dict(type='MultiPolygon', coordinates=[
[[[100.001, 0.001], [101.001, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0], [1.0, 2.0, 3.0, 4.0]]],
])
expected = (
'MULTIPOLYGON (((100.001 0.001, 101.001 0.001, 101.001 1.001, '
'100.001 0.001), '
'(100.201 0.201, 100.801 0.201, 100.801 0.801, '
'100.201 0.201)), '
'((1.000 2.000 3.000 4.000, 5.000 6.000 7.000 8.000, '
'9.000 10.000 11.000 12.000, 1.000 2.000 3.000 4.000)))'
)
self.assertEqual(expected, wkt.dumps(mpoly, decimals=3))
def test_srid4326(self):
mpoly = dict(
type='MultiPolygon',
coordinates=[
[[[100.001, 0.001], [101.001, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0], [1.0, 2.0, 3.0, 4.0]]],
],
meta=dict(srid=4326),
)
expected = (
'SRID=4326;MULTIPOLYGON (('
'(100.001 0.001, 101.001 0.001, 101.001 1.001, 100.001 0.001), '
'(100.201 0.201, 100.801 0.201, 100.801 0.801, 100.201 0.201)), '
'((1.000 2.000 3.000 4.000, 5.000 6.000 7.000 8.000, '
'9.000 10.000 11.000 12.000, 1.000 2.000 3.000 4.000)))'
)
self.assertEqual(expected, wkt.dumps(mpoly, decimals=3))
class MultiPolygonLoadsTestCase(unittest.TestCase):
def test(self):
mpoly = WKT['multipolygon']
expected = dict(type='MultiPolygon', coordinates=[
[[[100.001, 0.001], [101.001, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0], [1.0, 2.0, 3.0, 4.0]]],
])
self.assertEqual(expected, wkt.loads(mpoly))
def test_srid667(self):
mpoly = 'SRID=667;' + WKT['multipolygon']
expected = dict(
type='MultiPolygon',
coordinates=[
[[[100.001, 0.001], [101.001, 0.001], [101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201], [100.801, 0.201], [100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0], [1.0, 2.0, 3.0, 4.0]]],
],
meta=dict(srid=667),
)
self.assertEqual(expected, wkt.loads(mpoly))
def test_malformed_wkt(self):
mp = 'MULTIPOLYGON 0 1, 0 0'
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = 'Invalid WKT: `MULTIPOLYGON 0 1, 0 0`'
self.assertEqual(expected, str(ar.exception))
def test_malformed_wkt_misbalanced_parens(self):
mp = (
'MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), '
'((0 0, 0 1, 1 1, 1 0, 0 0))'
)
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = (
'Invalid WKT: `MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), '
'((0 0, 0 1, 1 1, 1 0, 0 0))`'
)
self.assertEqual(expected, str(ar.exception))
class MultiLineStringDumpsTestCase(unittest.TestCase):
def test_2d(self):
mlls = dict(type='MultiLineString', coordinates=[
[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5], [10000.9999, 3.0], [100.9, 1.1],
[0.0, 0.0]],
])
expected = (
'MULTILINESTRING ((0.000 -1.000, -2.000 -3.000, -4.000 -5.000), '
'(1.660 -31023.500, 10001.000 3.000, 100.900 1.100, 0.000 0.000))'
)
self.assertEqual(expected, wkt.dumps(mlls, decimals=3))
def test_3d(self):
mlls = dict(type='MultiLineString', coordinates=[
[[0.0, -1.0, 1.0], [-2.0, -3.0, 1.0], [-4.0, -5.0, 1.0]],
[[1.66, -31023.5, 1.1], [10000.9999, 3.0, 2.2], [100.9, 1.1, 3.3],
[0.0, 0.0, 4.4]],
])
expected = (
'MULTILINESTRING ((0.000 -1.000 1.000, -2.000 -3.000 1.000, '
'-4.000 -5.000 1.000), '
'(1.660 -31023.500 1.100, 10001.000 3.000 2.200, '
'100.900 1.100 3.300, 0.000 0.000 4.400))'
)
self.assertEqual(expected, wkt.dumps(mlls, decimals=3))
def test_4d(self):
mlls = dict(type='MultiLineString', coordinates=[
[[0.0, -1.0, 1.0, 0.0], [-2.0, -3.0, 1.0, 0.0],
[-4.0, -5.0, 1.0, 0.0]],
[[1.66, -31023.5, 1.1, 0.0], [10000.9999, 3.0, 2.2, 0.0],
[100.9, 1.1, 3.3, 0.0], [0.0, 0.0, 4.4, 0.0]],
])
expected = (
'MULTILINESTRING ((0.00 -1.00 1.00 0.00, '
'-2.00 -3.00 1.00 0.00, -4.00 -5.00 1.00 0.00), '
'(1.66 -31023.50 1.10 0.00, 10001.00 3.00 2.20 0.00, '
'100.90 1.10 3.30 0.00, 0.00 0.00 4.40 0.00))'
)
self.assertEqual(expected, wkt.dumps(mlls, decimals=2))
def test_2d_srid4326(self):
mlls = dict(
type='MultiLineString',
coordinates=[
[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5], [10000.9999, 3.0], [100.9, 1.1],
[0.0, 0.0]],
],
meta=dict(srid=4326),
)
expected = (
'SRID=4326;MULTILINESTRING ('
'(0.000 -1.000, -2.000 -3.000, -4.000 -5.000), '
'(1.660 -31023.500, 10001.000 3.000, 100.900 1.100, 0.000 0.000))'
)
self.assertEqual(expected, wkt.dumps(mlls, decimals=3))
class MultiLineStringLoadsTestCase(unittest.TestCase):
def test(self):
mlls = WKT['multilinestring']
expected = dict(
type='MultiLineString',
coordinates=[
[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5, 1.1], [10000.9999, 3.0, 2.2],
[100.9, 1.1, 3.3], [0.0, 0.0, 4.4]],
]
)
self.assertEqual(expected, wkt.loads(mlls))
def test_srid1234(self):
mlls = 'SRID=1234;' + WKT['multilinestring']
expected = dict(
type='MultiLineString',
coordinates=[
[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5, 1.1], [10000.9999, 3.0, 2.2],
[100.9, 1.1, 3.3], [0.0, 0.0, 4.4]],
],
meta=dict(srid=1234),
)
self.assertEqual(expected, wkt.loads(mlls))
def test_malformed_wkt(self):
mp = 'MULTILINESTRING 0 1, 0 0'
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = 'Invalid WKT: `MULTILINESTRING 0 1, 0 0`'
self.assertEqual(expected, str(ar.exception))
def test_malformed_wkt_misbalanced_parens(self):
mp = 'MULTILINESTRING ((0 0, 0 1), (0 2, 2 2)'
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = 'Invalid WKT: `MULTILINESTRING ((0 0, 0 1), (0 2, 2 2)`'
self.assertEqual(expected, str(ar.exception))
class GeometryCollectionDumpsTestCase(unittest.TestCase):
def test_basic(self):
gc = {
'geometries': [
{'coordinates': [0.0, 1.0], 'type': 'Point'},
{'coordinates': [[-100.0, 0.0], [-101.0, -1.0]],
'type': 'LineString'},
{'coordinates': [[[100.001, 0.001],
[101.1235, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
'type': 'Polygon'},
{'coordinates': [[100.0, 3.101], [101.0, 2.1], [3.14, 2.18]],
'type': 'MultiPoint'},
{'coordinates': [[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5, 1.1],
[10000.9999, 3.0, 2.2],
[100.9, 1.1, 3.3],
[0.0, 0.0, 4.4]]],
'type': 'MultiLineString'},
{'coordinates': [[[[100.001, 0.001],
[101.001, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[1.0, 2.0, 3.0, 4.0]]]],
'type': 'MultiPolygon'},
],
'type': 'GeometryCollection',
}
expected = (
'GEOMETRYCOLLECTION '
'(POINT (0.000 1.000),'
'LINESTRING (-100.000 0.000, -101.000 -1.000),'
'POLYGON ((100.001 0.001, 101.124 0.001, 101.001 1.001, '
'100.001 0.001), (100.201 0.201, 100.801 0.201, 100.801 0.801, '
'100.201 0.201)),'
'MULTIPOINT ((100.000 3.101), (101.000 2.100), (3.140 2.180)),'
'MULTILINESTRING ((0.000 -1.000, -2.000 -3.000, -4.000 -5.000), '
'(1.660 -31023.500 1.100, 10001.000 3.000 2.200, '
'100.900 1.100 3.300, 0.000 0.000 4.400)),'
'MULTIPOLYGON (((100.001 0.001, 101.001 0.001, 101.001 1.001, '
'100.001 0.001), '
'(100.201 0.201, 100.801 0.201, 100.801 0.801, 100.201 0.201)), '
'((1.000 2.000 3.000 4.000, 5.000 6.000 7.000 8.000, '
'9.000 10.000 11.000 12.000, 1.000 2.000 3.000 4.000))))'
)
self.assertEqual(expected, wkt.dumps(gc, decimals=3))
def test_nested_gc(self):
gc = {
"type": "GeometryCollection",
"geometries": [
{
"type": "GeometryCollection",
"geometries": [
{
"type": "Point",
"coordinates": [
1.0,
2.0
]
},
{
"type": "Point",
"coordinates": [
3.0,
4.0
]
},
],
},
{
"type": "Point",
"coordinates": [
5.0,
6.0
],
},
],
}
expected = (
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 2),POINT (3 4)),"
"POINT (5 6))"
)
self.assertEqual(expected, wkt.dumps(gc, decimals=0))
def test_srid26618(self):
gc = {
'geometries': [
{'coordinates': [0.0, 1.0], 'type': 'Point'},
{'coordinates': [[-100.0, 0.0], [-101.0, -1.0]],
'type': 'LineString'},
{'coordinates': [[[100.001, 0.001],
[101.1235, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
'type': 'Polygon'},
{'coordinates': [[100.0, 3.101], [101.0, 2.1], [3.14, 2.18]],
'type': 'MultiPoint'},
{'coordinates': [[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5, 1.1],
[10000.9999, 3.0, 2.2],
[100.9, 1.1, 3.3],
[0.0, 0.0, 4.4]]],
'type': 'MultiLineString'},
{'coordinates': [[[[100.001, 0.001],
[101.001, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[1.0, 2.0, 3.0, 4.0]]]],
'type': 'MultiPolygon'},
],
'type': 'GeometryCollection',
'meta': dict(srid=26618),
}
expected = (
'SRID=26618;GEOMETRYCOLLECTION '
'(POINT (0.000 1.000),'
'LINESTRING (-100.000 0.000, -101.000 -1.000),'
'POLYGON ((100.001 0.001, 101.124 0.001, 101.001 1.001, '
'100.001 0.001), (100.201 0.201, 100.801 0.201, 100.801 0.801, '
'100.201 0.201)),'
'MULTIPOINT ((100.000 3.101), (101.000 2.100), (3.140 2.180)),'
'MULTILINESTRING ((0.000 -1.000, -2.000 -3.000, -4.000 -5.000), '
'(1.660 -31023.500 1.100, 10001.000 3.000 2.200, '
'100.900 1.100 3.300, 0.000 0.000 4.400)),'
'MULTIPOLYGON (((100.001 0.001, 101.001 0.001, 101.001 1.001, '
'100.001 0.001), '
'(100.201 0.201, 100.801 0.201, 100.801 0.801, 100.201 0.201)), '
'((1.000 2.000 3.000 4.000, 5.000 6.000 7.000 8.000, '
'9.000 10.000 11.000 12.000, 1.000 2.000 3.000 4.000))))'
)
self.assertEqual(expected, wkt.dumps(gc, decimals=3))
def test_with_empty_component_simple(self):
gc = {
'type': 'GeometryCollection',
'geometries': [
{'type': 'Point', 'coordinates': [0, 0]},
{'type': 'Point', 'coordinates': []}
]
}
expected = 'GEOMETRYCOLLECTION (POINT (0 0),POINT EMPTY)'
self.assertEqual(expected, wkt.dumps(gc, decimals=0))
def test_with_empty_component(self):
# Example from https://github.com/geomet/geomet/issues/49
gc = {
'type': 'GeometryCollection',
'geometries': [
{
'type': 'Polygon',
'coordinates': [
[
[27.0, 25.0],
[102.0, 36.0],
[102.0, 46.0],
[92.0, 61.0],
[13.0, 41.0],
[16.0, 30.0],
[27.0, 25.0]
]
]
},
{'type': 'LineString', 'coordinates': []}
]}
expected = (
'GEOMETRYCOLLECTION ('
'POLYGON ((27 25, 102 36, 102 46, 92 61, 13 41, 16 30, 27 25)),'
'LINESTRING EMPTY)'
)
self.assertEqual(expected, wkt.dumps(gc, decimals=0))
def test_empty_component_with_srid(self):
gc = {
'type': 'GeometryCollection',
'meta': {'srid': 4326},
'geometries': [
{'type': 'Point', 'coordinates': []}
]
}
expected = 'SRID=4326;GEOMETRYCOLLECTION (POINT EMPTY)'
self.assertEqual(expected, wkt.dumps(gc))
def test_all_types_empty(self):
gc = {
'type': 'GeometryCollection',
'geometries': [
{'geometries': [], 'type': 'GeometryCollection'},
{'coordinates': [], 'type': 'LineString'},
{'coordinates': [], 'type': 'MultiLineString'},
{'coordinates': [], 'type': 'MultiPoint'},
{'coordinates': [], 'type': 'MultiPolygon'},
{'coordinates': [], 'type': 'Point'},
{'coordinates': [], 'type': 'Polygon'}
]
}
expected = 'GEOMETRYCOLLECTION (%s)' % ','.join(
'%s EMPTY' % typ for typ in sorted(
wkt._type_map_caps_to_mixed.keys()))
self.assertEqual(expected, wkt.dumps(gc))
class GeometryCollectionLoadsTestCase(unittest.TestCase):
def test_basic_gc(self):
gc = 'GEOMETRYCOLLECTION (%s,%s,%s,%s,%s,%s)' % (
WKT['point']['2d'],
WKT['linestring']['2d'],
WKT['polygon']['2d'],
WKT['multipoint']['2d'],
WKT['multilinestring'],
WKT['multipolygon'],
)
expected = {
'geometries': [
{'coordinates': [0.0, 1.0], 'type': 'Point'},
{'coordinates': [[-100.0, 0.0], [-101.0, -1.0]],
'type': 'LineString'},
{'coordinates': [[[100.001, 0.001],
[101.1235, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
'type': 'Polygon'},
{'coordinates': [[100.0, 3.101], [101.0, 2.1], [3.14, 2.18]],
'type': 'MultiPoint'},
{'coordinates': [[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5, 1.1],
[10000.9999, 3.0, 2.2],
[100.9, 1.1, 3.3],
[0.0, 0.0, 4.4]]],
'type': 'MultiLineString'},
{'coordinates': [[[[100.001, 0.001],
[101.001, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[1.0, 2.0, 3.0, 4.0]]]],
'type': 'MultiPolygon'},
],
'type': 'GeometryCollection',
}
self.assertEqual(expected, wkt.loads(gc))
def test_nested_gc(self):
# Test the parsing of a nested geometry collection.
gc = (
"GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(POINT(1 2), POINT(3 4)), "
"POINT(5 6))"
)
expected = {
"type": "GeometryCollection",
"geometries": [
{
"type": "GeometryCollection",
"geometries": [
{
"type": "Point",
"coordinates": [
1.0,
2.0
]
},
{
"type": "Point",
"coordinates": [
3.0,
4.0
]
},
],
},
{
"type": "Point",
"coordinates": [
5.0,
6.0
],
},
],
}
self.assertEqual(expected, wkt.loads(gc))
def test_srid662(self):
gc = 'SRID=662;GEOMETRYCOLLECTION (%s,%s,%s,%s,%s,%s)' % (
WKT['point']['2d'],
WKT['linestring']['2d'],
WKT['polygon']['2d'],
WKT['multipoint']['2d'],
WKT['multilinestring'],
WKT['multipolygon'],
)
expected = {
'geometries': [
{'coordinates': [0.0, 1.0], 'type': 'Point'},
{'coordinates': [[-100.0, 0.0], [-101.0, -1.0]],
'type': 'LineString'},
{'coordinates': [[[100.001, 0.001],
[101.1235, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
'type': 'Polygon'},
{'coordinates': [[100.0, 3.101], [101.0, 2.1], [3.14, 2.18]],
'type': 'MultiPoint'},
{'coordinates': [[[0.0, -1.0], [-2.0, -3.0], [-4.0, -5.0]],
[[1.66, -31023.5, 1.1],
[10000.9999, 3.0, 2.2],
[100.9, 1.1, 3.3],
[0.0, 0.0, 4.4]]],
'type': 'MultiLineString'},
{'coordinates': [[[[100.001, 0.001],
[101.001, 0.001],
[101.001, 1.001],
[100.001, 0.001]],
[[100.201, 0.201],
[100.801, 0.201],
[100.801, 0.801],
[100.201, 0.201]]],
[[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[1.0, 2.0, 3.0, 4.0]]]],
'type': 'MultiPolygon'},
],
'type': 'GeometryCollection',
'meta': dict(srid=662),
}
self.assertEqual(expected, wkt.loads(gc))
def test_with_empty_component_simple(self):
gc = 'GEOMETRYCOLLECTION (POINT (0 0), POINT EMPTY)'
expected = {
'type': 'GeometryCollection',
'geometries': [
{'type': 'Point', 'coordinates': [0, 0]},
{'type': 'Point', 'coordinates': []}
]
}
self.assertEqual(expected, wkt.loads(gc))
def test_with_empty_component(self):
# Example from https://github.com/geomet/geomet/issues/49
gc = (
'GEOMETRYCOLLECTION ('
'POLYGON((27 25,102 36,102 46,92 61,13 41,16 30,27 25)),'
'LINESTRING EMPTY)'
)
expected = {
'type': 'GeometryCollection',
'geometries': [
{
'type': 'Polygon',
'coordinates': [
[
[27.0, 25.0],
[102.0, 36.0],
[102.0, 46.0],
[92.0, 61.0],
[13.0, 41.0],
[16.0, 30.0],
[27.0, 25.0]
]
]
},
{'type': 'LineString', 'coordinates': []}
]}
self.assertEqual(expected, wkt.loads(gc))
def test_empty_component_with_srid(self):
gc = 'SRID=4326;GEOMETRYCOLLECTION (POINT EMPTY)'
expected = {
'type': 'GeometryCollection',
'meta': {'srid': 4326},
'geometries': [
{'type': 'Point', 'coordinates': []}
]
}
self.assertEqual(expected, wkt.loads(gc))
def test_all_types_empty(self):
gc = 'GEOMETRYCOLLECTION (%s)' % ','.join(
'%s EMPTY' % typ for typ in sorted(
wkt._type_map_caps_to_mixed.keys()))
expected = {
'type': 'GeometryCollection',
'geometries': [
{'geometries': [], 'type': 'GeometryCollection'},
{'coordinates': [], 'type': 'LineString'},
{'coordinates': [], 'type': 'MultiLineString'},
{'coordinates': [], 'type': 'MultiPoint'},
{'coordinates': [], 'type': 'MultiPolygon'},
{'coordinates': [], 'type': 'Point'},
{'coordinates': [], 'type': 'Polygon'}
]
}
self.assertEqual(expected, wkt.loads(gc))
def test_malformed_wkt(self):
mp = 'GEOMETRYCOLLECTION 0 1, 0 0'
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = 'Invalid WKT: `GEOMETRYCOLLECTION 0 1, 0 0`'
self.assertEqual(expected, str(ar.exception))
def test_malformed_wkt_no_ending_paren(self):
mp = 'GEOMETRYCOLLECTION (POINT EMPTY'
with self.assertRaises(ValueError) as ar:
wkt.loads(mp)
expected = 'Invalid WKT: `GEOMETRYCOLLECTION (POINT EMPTY`'
self.assertEqual(expected, str(ar.exception))
class TestRoundAndPad(unittest.TestCase):
def test(self):
test_cases = [
[(-1.000000000000000, 16), '-1.' + '0' * 16],
[(-83.2496395, 16), '-83.2496395000000000'],
[(35.917330500000006, 16), '35.9173305000000060']
]
for args, expected in test_cases:
self.assertEqual(expected, wkt._round_and_pad(*args))
class TestMisc(unittest.TestCase):
def test_assert_next_token(self):
gen = (letter for letter in 'abcd')
next(gen)
wkt._assert_next_token(gen, 'b')
def test_assert_next_token_raises(self):
gen = (letter for letter in 'abcd')
with self.assertRaises(ValueError) as ar:
wkt._assert_next_token(gen, 'b')
expected = 'Expected "b" but found "a"'
self.assertEqual(expected, str(ar.exception))
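# A minimal round-trip sketch of the API exercised above (values illustrative,
# assuming this module's usual `from geomet import wkt` import):
#
#     geom = wkt.loads('SRID=4326;POINT (0.0 1.0)')
#     # -> {'type': 'Point', 'coordinates': [0.0, 1.0], 'meta': {'srid': 4326}}
#     wkt.dumps(geom, decimals=1)
#     # -> 'SRID=4326;POINT (0.0 1.0)'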
|
|
from __future__ import absolute_import, print_function, division
import copy
import traceback as tb
import warnings
import numpy
from six import integer_types
from six.moves import xrange
import theano
from theano.compat import PY3
from theano.scalar import ComplexError, IntegerDivisionError
from theano.gof import Constant, Variable
from theano.gof.utils import hashtype
from theano.tensor.utils import hash_from_ndarray
from theano.tensor.type import TensorType
from theano.configparser import config
def equal_slices(s1, s2):
return (s1.start == s2.start and
s1.stop == s2.stop and
s1.step == s2.step)
class AsTensorError(TypeError):
"""
Raised when as_tensor_variable isn't able to create a TensorVariable.
"""
pass
class _tensor_py_operators(object):
# UNARY
def __abs__(self):
return theano.tensor.basic.abs_(self)
def __neg__(self):
return theano.tensor.basic.neg(self)
# CASTS
# REMOVED THESE BECAUSE PYTHON appears to require __int__ to return
# an int. -JB 20081112
# def __int__(self): return convert_to_int32(self)
# def __float__(self): return convert_to_float64(self)
# def __complex__(self): return convert_to_complex128(self)
# COMPARISONS
_is_nonzero = True
def __lt__(self, other):
rval = theano.tensor.basic.lt(self, other)
rval._is_nonzero = False
return rval
def __le__(self, other):
rval = theano.tensor.basic.le(self, other)
rval._is_nonzero = False
return rval
def __gt__(self, other):
rval = theano.tensor.basic.gt(self, other)
rval._is_nonzero = False
return rval
def __ge__(self, other):
rval = theano.tensor.basic.ge(self, other)
rval._is_nonzero = False
return rval
def __nonzero__(self):
# Python 2.x
return self.__bool__()
def __bool__(self):
# This is meant to prohibit stuff like a < b < c, which is internally
# implemented as (a < b) and (b < c). The trouble with this is the
# side-effect that checking for a non-NULL a by typing "if a: ..."
# uses the same __nonzero__ method. We want these both to work, but
# it seems impossible. Currently, all vars evaluate to nonzero except
# the return values of comparison operators, which raise this
# exception. If you can think of a better solution, go for it!
#
# __bool__ is Python 3.x data model. __nonzero__ is Python 2.x.
if self._is_nonzero:
return True
else:
raise TypeError(
"Variables do not support boolean operations. This "
"can happen if you do a logical operation (<, <=, >, <=, "
"==, !=) between a numpy.ndarray and a Theano tensor"
"variable. Due to NumPy implementation before NumPy 1.8, "
"we cannot make the Python syntax work when the ndarray "
"is on the left, and this results in this error. To work "
"around that, either call "
"theano.tensor.{lt,le,eq,ne,gt,ge}(ndarray, tensor), or "
"use the Python syntax with the Theano tensor on the "
"left. Or update to NumPy 1.8 or above."
)
# BITWISE
def __invert__(self):
return theano.tensor.basic.invert(self)
def __and__(self, other):
return theano.tensor.basic.and_(self, other)
def __or__(self, other):
return theano.tensor.basic.or_(self, other)
def __xor__(self, other):
return theano.tensor.basic.xor(self, other)
def __rand__(self, other):
return theano.tensor.basic.and_(other, self)
def __ror__(self, other):
return theano.tensor.basic.or_(other, self)
def __rxor__(self, other):
return theano.tensor.basic.xor(other, self)
# def __iand__(self, other):
# return _and_inplace(self, other)
#
# def __ior__(self, other):
# return _or_inplace(self, other)
#
# def __ixor__(self, other):
# return _xor_inplace(self, other)
# ARITHMETIC - NORMAL
def __add__(self, other):
try:
return theano.tensor.basic.add(self, other)
        # We should catch the minimum number of exceptions here.
        # Otherwise this will mask real errors when the Theano flag
        # compute_test_value is used.
        # Evidently, we need to catch NotImplementedError.
        # TypeError from as_tensor_variable is caught in Elemwise.make_node;
        # otherwise TensorVariable * SparseVariable won't work!
except (NotImplementedError, AsTensorError):
            # We must return NotImplemented and not a
            # NotImplementedError, nor raise a NotImplementedError.
# That way python will give a good error message like this
# `TypeError: unsupported operand type(s) for +:
# 'TensorVariable' and 'TensorVariable'`
return NotImplemented
def __sub__(self, other):
        # See explanation in __add__ for the errors caught
        # and the return value in that case
try:
return theano.tensor.basic.sub(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __mul__(self, other):
        # See explanation in __add__ for the errors caught
        # and the return value in that case
try:
return theano.tensor.mul(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __div__(self, other):
        # See explanation in __add__ for the errors caught
        # and the return value in that case
try:
return theano.tensor.basic.div_proxy(self, other)
except IntegerDivisionError:
# This is to raise the exception that occurs when trying to divide
# two integer arrays (currently forbidden).
raise
except (NotImplementedError, AsTensorError):
return NotImplemented
if PY3:
__truediv__ = __div__
def __pow__(self, other):
        # See explanation in __add__ for the errors caught
        # and the return value in that case
try:
return theano.tensor.basic.pow(self, other)
except (NotImplementedError, AsTensorError):
return NotImplemented
def __mod__(self, other):
        # See explanation in __add__ for the errors caught
        # and the return value in that case
try:
return theano.tensor.basic.mod_check(self, other)
except ComplexError:
# This is to raise the exception that occurs when trying to compute
# x % y with either x or y a complex number.
raise
except (NotImplementedError, AsTensorError):
return NotImplemented
def __divmod__(self, other):
return theano.tensor.basic.divmod(self, other)
def __truediv__(self, other):
return theano.tensor.basic.true_div(self, other)
def __floordiv__(self, other):
return theano.tensor.basic.floor_div(self, other)
def __rtruediv__(self, other):
return theano.tensor.basic.true_div(other, self)
def __rfloordiv__(self, other):
return theano.tensor.basic.floor_div(other, self)
# DO NOT USE THESE BECAUSE INPLACE OPS SHOULD BE INSERTED
# BY OPTIMIZATIONS ONLY
# ARITHMETIC - INPLACE
# def __iadd__(self, other):
# return _add_inplace(self, other)
# def __isub__(self, other):
# return _sub_inplace(self, other)
#
# def __imul__(self, other):
# return _mul_inplace(self, other)
#
# def __idiv__(self, other):
# return _div_inplace(self, other)
#
# def __ipow__(self, other):
# return _pow_inplace(self, other)
# ARITHMETIC - RIGHT-OPERAND
def __radd__(self, other):
return theano.tensor.basic.add(other, self)
def __rsub__(self, other):
return theano.tensor.basic.sub(other, self)
def __rmul__(self, other):
return theano.tensor.basic.mul(other, self)
def __rdiv__(self, other):
return theano.tensor.basic.div_proxy(other, self)
def __rmod__(self, other):
return theano.tensor.basic.mod(other, self)
def __rdivmod__(self, other):
return theano.tensor.basic.divmod(other, self)
def __rpow__(self, other):
return theano.tensor.basic.pow(other, self)
# TRANSPOSE
T = property(lambda self: theano.tensor.basic.transpose(self))
def transpose(self, *axes):
"""
Returns
-------
object
`tensor.transpose(self, axes)` or `tensor.transpose(self, axes[0])`.
If only one `axes` argument is provided and it is iterable, then it is
assumed to be the entire axes tuple, and passed intact to
tensor.transpose.
"""
if len(axes) == 0:
return theano.tensor.basic.transpose(self)
try:
iter(axes[0])
iterable = True
except TypeError:
iterable = False
if len(axes) == 1 and iterable:
return theano.tensor.basic.transpose(self, axes[0])
else:
return theano.tensor.basic.transpose(self, axes)
shape = property(lambda self: theano.tensor.basic.shape(self))
size = property(lambda self: self.shape[0] if self.ndim == 1 else
theano.tensor.basic.prod(self.shape))
# We can't implement __len__ to provide a better error message.
def any(self, axis=None, keepdims=False):
return theano.tensor.basic.any(self, axis=axis, keepdims=keepdims)
def all(self, axis=None, keepdims=False):
return theano.tensor.basic.all(self, axis=axis, keepdims=keepdims)
# Otherwise TensorVariable[:-1] does not work as Python 2.5.1 calls
# __len__ before calling __getitem__. It also does not catch the raised
# Exception!
# def __len__(self):
# # We can't implement __len__ as Python requests that this
# # function returns an integer >=0
# raise Exception("Theano Variables can't work with len(Theano "
# "Variable) due to Python restriction. You can use "
# "TheanoVariable.shape[0] instead.")
def reshape(self, shape, ndim=None):
"""Return a reshaped view/copy of this variable.
Parameters
----------
shape
Something that can be converted to a symbolic vector of integers.
ndim
The length of the shape. Passing None here means for
Theano to try and guess the length of `shape`.
.. warning:: This has a different signature than numpy's
ndarray.reshape!
In numpy you do not need to wrap the shape arguments
in a tuple, in theano you do need to.
"""
if ndim is not None:
if not isinstance(ndim, integer_types):
raise ValueError("Expected ndim to be an integer, is " +
str(type(ndim)))
return theano.tensor.basic.reshape(self, shape, ndim=ndim)
def dimshuffle(self, *pattern):
"""
Reorder the dimensions of this variable, optionally inserting
broadcasted dimensions.
Parameters
----------
pattern
List/tuple of int mixed with 'x' for broadcastable dimensions.
Examples
--------
For example, to create a 3D view of a [2D] matrix, call
``dimshuffle([0,'x',1])``. This will create a 3D view such that the
middle dimension is an implicit broadcasted dimension. To do the same
thing on the transpose of that matrix, call ``dimshuffle([1, 'x', 0])``.
Notes
-----
This function supports the pattern passed as a tuple, or as a
variable-length argument (e.g. ``a.dimshuffle(pattern)`` is equivalent
to ``a.dimshuffle(*pattern)`` where ``pattern`` is a list/tuple of ints
mixed with 'x' characters).
See Also
--------
DimShuffle
"""
if (len(pattern) == 1) and (isinstance(pattern[0], (list, tuple))):
pattern = pattern[0]
op = theano.tensor.basic.DimShuffle(list(self.type.broadcastable),
pattern)
return op(self)
def flatten(self, ndim=1):
return theano.tensor.basic.flatten(self, ndim)
def ravel(self):
return theano.tensor.basic.flatten(self)
def diagonal(self, offset=0, axis1=0, axis2=1):
return theano.tensor.basic.diagonal(self, offset, axis1, axis2)
# Transfer the data to another device
def transfer(self, target):
"""
If `target` is `'cpu'` this will transfer to a TensorType (if
not already one). Other types may define additional targets.
Parameters
----------
target : str
The desired location of the output variable
"""
return theano.tensor.transfer(self, target)
# Elemwise
def arccos(self):
return theano.tensor.arccos(self)
def arccosh(self):
return theano.tensor.arccosh(self)
def arcsin(self):
return theano.tensor.arcsin(self)
def arcsinh(self):
return theano.tensor.arcsinh(self)
def arctan(self):
return theano.tensor.arctan(self)
def arctanh(self):
return theano.tensor.arctanh(self)
def ceil(self):
return theano.tensor.ceil(self)
def cos(self):
return theano.tensor.cos(self)
def cosh(self):
return theano.tensor.cosh(self)
def deg2rad(self):
return theano.tensor.deg2rad(self)
def exp(self):
return theano.tensor.exp(self)
def exp2(self):
return theano.tensor.exp2(self)
def expm1(self):
return theano.tensor.expm1(self)
def floor(self):
return theano.tensor.floor(self)
def log(self):
return theano.tensor.log(self)
def log10(self):
return theano.tensor.log10(self)
def log1p(self):
return theano.tensor.log1p(self)
def log2(self):
return theano.tensor.log2(self)
def rad2deg(self):
return theano.tensor.rad2deg(self)
def sin(self):
return theano.tensor.sin(self)
def sinh(self):
return theano.tensor.sinh(self)
def sqrt(self):
return theano.tensor.sqrt(self)
def tan(self):
return theano.tensor.tan(self)
def tanh(self):
return theano.tensor.tanh(self)
def trunc(self):
return theano.tensor.trunc(self)
# CASTING
def astype(self, dtype):
return theano.tensor.cast(self, dtype)
# SLICING/INDEXING
def __getitem__(self, args):
if (isinstance(args, list) and
any([isinstance(a, slice) for a in args])):
pass
elif not isinstance(args, tuple):
args = args,
# Convert python literals to theano constants
args = theano.tensor.subtensor.make_constant(args)
# Determine if advanced indexing is needed or not
# The logic is already in Subtensor.convert: if it succeeds,
# standard indexing is used; if it fails with
        # AdvancedIndexingError, advanced indexing is used.
advanced = False
axis = None
for i, arg in enumerate(args):
try:
if arg is not numpy.newaxis:
theano.tensor.subtensor.Subtensor.convert(arg)
except theano.tensor.subtensor.AdvancedIndexingError:
if advanced:
axis = None
break
else:
advanced = True
axis = i
if advanced:
if (axis is not None and
all(isinstance(a, slice) and
equal_slices(a, slice(None)) for a in args[:axis]) and
all(isinstance(a, slice) and
equal_slices(a, slice(None)) for a in args[axis + 1:]) and
isinstance(args[axis],
(numpy.ndarray, list,
TensorVariable, TensorConstant,
theano.tensor.sharedvar.TensorSharedVariable))):
return self.take(args[axis], axis)
else:
return theano.tensor.subtensor.advanced_subtensor(self, *args)
else:
if numpy.newaxis in args:
# None (aka np.newaxis) in numpy indexing means to add a
# broadcastable dimension, which theano traditionally did with
# the dimshuffle op. The following code converts numpy-style
# indexing on self to traditional [read: implemented] theano
# indexing on a dimshuffled view of self.
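                # Illustrative example (not executed): indexing a vector v as
                # v[numpy.newaxis, :, numpy.newaxis] produces
                # pattern = ['x', 0, 'x'] and new_args of three full slices,
                # so the dimshuffled view is returned directly below.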
counter = 0
pattern = []
new_args = []
for arg in args:
if arg == numpy.newaxis:
pattern.append('x')
new_args.append(slice(None, None, None))
else:
pattern.append(counter)
counter += 1
new_args.append(arg)
view = self.dimshuffle(pattern)
full_slices = True
for arg in new_args:
# We can't do arg == slice(None, None, None) as in
                    # Python 2.7, this calls __lt__ if we have a slice
# with some symbolic variable.
if not (isinstance(arg, slice) and
arg.start is None and
arg.stop is None and
arg.step is None):
full_slices = False
if full_slices:
return view
else:
return view.__getitem__(tuple(new_args))
else:
return theano.tensor.subtensor.Subtensor(args)(
self, *theano.tensor.subtensor.Subtensor.collapse(
args,
lambda entry: isinstance(entry, Variable)))
def take(self, indices, axis=None, mode='raise'):
return theano.tensor.subtensor.take(self, indices, axis, mode)
# COPYING
def copy(self, name=None):
"""Return a symbolic copy and optionally assign a name.
Does not copy the tags.
"""
copied_variable = theano.tensor.basic.tensor_copy(self)
copied_variable.name = name
return copied_variable
def __iter__(self):
try:
for i in xrange(theano.tensor.basic.get_vector_length(self)):
yield self[i]
except TypeError:
# This prevents accidental iteration via builtin.sum(self)
raise TypeError(('TensorType does not support iteration. '
'Maybe you are using builtin.sum instead of '
'theano.tensor.sum? (Maybe .max?)'))
# CONVENIENT ACCESS TO TYPE PROPERTIES
ndim = property(lambda self: self.type.ndim)
"""The rank of this tensor."""
broadcastable = property(lambda self: self.type.broadcastable)
"""
The broadcastable signature of this tensor.
See Also
--------
broadcasting
"""
dtype = property(lambda self: self.type.dtype)
"""The dtype of this tensor."""
# extra pseudo-operator symbols
def __dot__(left, right):
return theano.tensor.basic.dot(left, right)
def __rdot__(right, left):
return theano.tensor.basic.dot(left, right)
dot = __dot__
def sum(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
"""See `theano.tensor.sum`."""
return theano.tensor.basic.sum(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def prod(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
"""See `theano.tensor.prod`."""
return theano.tensor.basic.prod(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def norm(self, L, axis=None, keepdims=False):
if L == 0:
raise NotImplementedError()
if numpy.isinf(L):
raise NotImplementedError()
# optimizations will/should catch cases like L=1, L=2
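        # Computes the vector L-norm by elementwise power then reduction:
        # norm_L(x) = (sum_i |x_i|**L) ** (1.0/L) along the given axis.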
y = theano.tensor.basic.pow(
theano.tensor.basic.pow(
theano.tensor.basic.abs_(self), L).sum(axis=axis), 1.0 / L)
if keepdims:
return theano.tensor.basic.makeKeepDims(self, y, axis)
else:
return y
def mean(self, axis=None, dtype=None, keepdims=False, acc_dtype=None):
"""See `theano.tensor.mean`."""
return theano.tensor.basic.mean(self, axis=axis,
dtype=dtype, keepdims=keepdims,
acc_dtype=acc_dtype)
def var(self, axis=None, keepdims=False):
"""See `theano.tensor.var`."""
return theano.tensor.basic.var(self, axis, keepdims=keepdims)
def std(self, axis=None, keepdims=False):
"""See `theano.tensor.std`."""
return theano.tensor.basic.std(self, axis, keepdims=keepdims)
def min(self, axis=None, keepdims=False):
"""See `theano.tensor.min`."""
return theano.tensor.basic.min(self, axis, keepdims=keepdims)
def max(self, axis=None, keepdims=False):
"""See `theano.tensor.max`."""
return theano.tensor.basic.max(self, axis, keepdims=keepdims)
def argmin(self, axis=None, keepdims=False):
"""See `theano.tensor.argmin`."""
return theano.tensor.basic.argmin(self, axis, keepdims=keepdims)
def argmax(self, axis=None, keepdims=False):
"""See `theano.tensor.argmax`."""
return theano.tensor.basic.argmax(self, axis, keepdims=keepdims)
def nonzero(self, return_matrix=False):
"""See `theano.tensor.nonzero`."""
return theano.tensor.basic.nonzero(self, return_matrix=return_matrix)
def nonzero_values(self):
"""See `theano.tensor.nonzero_values`."""
return theano.tensor.basic.nonzero_values(self)
def sort(self, axis=-1, kind='quicksort', order=None):
"""See `theano.tensor.sort`."""
return theano.tensor.sort(self, axis, kind, order)
def argsort(self, axis=-1, kind='quicksort', order=None):
"""See `theano.tensor.argsort`."""
return theano.tensor.argsort(self, axis, kind, order)
def clip(self, a_min, a_max):
"Clip (limit) the values in an array."
return theano.tensor.basic.clip(self, a_min, a_max)
def conj(self):
"""See `theano.tensor.conj`."""
return theano.tensor.basic.conj(self)
conjugate = conj
def repeat(self, repeats, axis=None):
"""See `theano.tensor.repeat`."""
return theano.tensor.extra_ops.repeat(self, repeats, axis)
def round(self, mode="half_away_from_zero"):
"""See `theano.tensor.round`."""
return theano.tensor.basic.round(self, mode)
def trace(self):
return theano.tensor.nlinalg.trace(self)
# TO TRUMP NUMPY OPERATORS
__array_priority__ = 1000
def get_scalar_constant_value(self):
return theano.tensor.basic.get_scalar_constant_value(self)
def zeros_like(model, dtype=None):
return theano.tensor.basic.zeros_like(model, dtype=dtype)
def cumsum(self, axis=None):
return theano.tensor.extra_ops.cumsum(self, axis)
def cumprod(self, axis=None):
return theano.tensor.extra_ops.cumprod(self, axis)
def searchsorted(self, v, side='left', sorter=None):
return theano.tensor.extra_ops.searchsorted(self, v, side, sorter)
def ptp(self, axis=None):
"""See 'theano.tensor.ptp'."""
return theano.tensor.ptp(self, axis)
def swapaxes(self, axis1, axis2):
"""
        Return `tensor.swapaxes(self, axis1, axis2)`.
If a matrix is provided with the right axes, its transpose
will be returned.
"""
return theano.tensor.basic.swapaxes(self, axis1, axis2)
def fill(self, value):
"""Fill inputted tensor with the assigned value."""
return theano.tensor.basic.fill(self, value)
def choose(self, a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose
from.
"""
        return theano.tensor.basic.choose(self, a, choices, out=out,
                                          mode=mode)
def squeeze(self):
"""
Remove broadcastable dimensions from the shape of an array.
It returns the input array, but with the broadcastable dimensions
removed. This is always `x` itself or a view into `x`.
"""
return theano.tensor.extra_ops.squeeze(self)
def compress(self, a, axis=None):
"""Return selected slices only."""
return theano.tensor.extra_ops.compress(self, a, axis=axis)
class TensorVariable(_tensor_py_operators, Variable):
"""
Subclass to add the tensor operators to the basic `Variable` class.
"""
def __init__(self, type, owner=None, index=None, name=None):
super(TensorVariable, self).__init__(type, owner=owner,
index=index, name=name)
if (config.warn_float64 != 'ignore' and type.dtype == 'float64'):
msg = ('You are creating a TensorVariable '
'with float64 dtype. You requested an action via '
'the Theano flag warn_float64={ignore,warn,raise,pdb}.')
if config.warn_float64 == "warn":
# Get the user stack. We don't want function inside the
# tensor and gof directory to be shown to the user.
x = tb.extract_stack()
nb_rm = 0
while x:
file_path = x[-1][0]
rm = False
for p in ["theano/tensor/", "theano\\tensor\\",
"theano/gof/", "theano\\tensor\\"]:
if p in file_path:
x = x[:-1]
nb_rm += 1
rm = True
break
if not rm:
break
warnings.warn(msg, stacklevel=1 + nb_rm)
elif config.warn_float64 == "raise":
raise Exception(msg)
elif config.warn_float64 == 'pdb':
import pdb
pdb.set_trace()
TensorType.Variable = TensorVariable
class TensorConstantSignature(tuple):
"""
A Signature object for comparing TensorConstant instances.
An instance is a pair: (Type instance, ndarray).
"""
def __eq__(self, other):
if type(self) != type(other):
return False
try:
(t0, d0), (t1, d1) = self, other
except Exception:
return False
# N.B. compare shape to ensure no broadcasting in ==
if t0 != t1 or d0.shape != d1.shape:
return False
self.no_nan # Ensure has_nan is computed.
# Note that in the comparisons below, the elementwise comparisons
# come last because they are the most expensive checks.
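        # Rationale (informal): NaN != NaN elementwise, so a plain `==` over
        # arrays containing NaNs would report two otherwise identical
        # constants as unequal; the masked-array path below compares the NaN
        # positions and the remaining values separately.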
if self.has_nan:
other.no_nan # Ensure has_nan is computed.
return (other.has_nan and
self.sum == other.sum and
(self.no_nan.mask == other.no_nan.mask).all() and
# Note that the second test below (==) may crash e.g. for
# a single scalar NaN value, so we do not run it when all
# values are missing.
(self.no_nan.mask.all() or
(self.no_nan == other.no_nan).all()))
else:
# Simple case where we do not need to worry about NaN values.
# (note that if there are NaN values in d1, this will return
# False, which is why we do not bother with testing `other.has_nan`
# here).
return (self.sum == other.sum) and numpy.all(d0 == d1)
def __hash__(self):
t, d = self
return hashtype(self) ^ hash(t) ^ hash(d.shape) ^ hash(self.sum)
def theano_hash(self):
_, d = self
return hash_from_ndarray(d)
def _get_sum(self):
"""Compute sum of non NaN / Inf values in the array."""
try:
return self._sum
except AttributeError:
self._sum = self.no_nan.sum()
            # The following 2 lines are needed as, in Python 3.3 with NumPy
            # 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.
if type(self._sum) is numpy.memmap:
self._sum = numpy.asarray(self._sum).item()
if self.has_nan and self.no_nan.mask.all():
# In this case the sum is not properly computed by numpy.
self._sum = 0
if numpy.isinf(self._sum) or numpy.isnan(self._sum):
# NaN may happen when there are both -inf and +inf values.
if self.has_nan:
# Filter both NaN and Inf values.
mask = self.no_nan.mask + numpy.isinf(self[1])
else:
# Filter only Inf values.
mask = numpy.isinf(self[1])
if mask.all():
self._sum = 0
else:
self._sum = numpy.ma.masked_array(self[1], mask).sum()
# At this point there should be no more NaN.
assert not numpy.isnan(self._sum)
return self._sum
sum = property(_get_sum)
def _get_no_nan(self):
try:
return self._no_nan
except AttributeError:
nan_mask = numpy.isnan(self[1])
if nan_mask.any():
self._no_nan = numpy.ma.masked_array(self[1], nan_mask)
self.has_nan = True
else:
self._no_nan = self[1]
self.has_nan = False
return self._no_nan
no_nan = property(_get_no_nan)
class TensorConstant(_tensor_py_operators, Constant):
"""Subclass to add the tensor operators to the basic `Constant` class.
To create a TensorConstant, use the `constant` function in this module.
"""
def __init__(self, type, data, name=None):
Constant.__init__(self, type, data, name)
self.tag.unique_value = None
if isinstance(data, numpy.ndarray) and data.ndim > 0:
flat_data = data.ravel()
if flat_data.shape[0]:
if (flat_data == flat_data[0]).all():
self.tag.unique_value = flat_data[0]
def __str__(self):
if self.tag.unique_value is not None:
name = "%s of %s" % (str(self.data.shape),
str(self.tag.unique_value))
else:
name = "%s" % self.data
if len(name) > 20:
name = name[:10] + ".." + name[-10:]
return "TensorConstant{%s}" % name
def signature(self):
return TensorConstantSignature((self.type, self.data))
def equals(self, other):
        # Override Constant.equals to allow comparison with
        # numpy.ndarray and python types.
if isinstance(other, (numpy.ndarray, int, float)):
# Make a TensorConstant to be able to compare
other = theano.tensor.basic.constant(other)
return (isinstance(other, TensorConstant) and
self.signature() == other.signature())
def __copy__(self):
# We need to do this to remove the cached attribute
return type(self)(self.type, self.data, self.name)
def __deepcopy__(self, memo):
# We need to do this to remove the cached attribute
return type(self)(copy.deepcopy(self.type, memo),
copy.deepcopy(self.data, memo),
copy.deepcopy(self.name, memo))
TensorType.Constant = TensorConstant
|
|
'''
MAP Client Plugin Step
'''
import os
from PySide import QtGui
from PySide import QtCore
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
from mapclientplugins.pelvislandmarkshjcpredictionstep.configuredialog import ConfigureDialog
from mapclientplugins.pelvislandmarkshjcpredictionstep.hjcpredictionviewerwidget import MayaviHJCPredictionViewerWidget
from gias.musculoskeletal import pelvis_hjc_estimation as hjc
from gias.musculoskeletal import model_alignment as ma
import numpy as np
METHODS = ('Seidel', 'Bell', 'Tylkowski')
POP_CLASS = ('adults', 'men', 'women')
HIPLANDMARKS = ('LASIS', 'RASIS', 'LPSIS', 'RPSIS', 'PS')
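# The prediction methods above correspond to the landmark-based hip joint
# centre (HJC) estimators exposed by gias.musculoskeletal.pelvis_hjc_estimation
# (imported as `hjc` above); predict() maps each method to the subset of
# HIPLANDMARKS it requires, expressed in the pelvis anatomic coordinate system.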
class PelvisLandmarksHJCPredictionStep(WorkflowStepMountPoint):
    '''
    Workflow step that predicts hip joint centre (HJC) locations from a set
    of pelvis landmarks.
    '''
def __init__(self, location):
super(PelvisLandmarksHJCPredictionStep, self).__init__('Pelvis Landmark HJC Prediction', location)
self._configured = False # A step cannot be executed until it has been configured.
self._category = 'Anthropometry'
# Add any other initialisation code here:
# Ports:
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'python#dict'))
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'python#dict'))
self._config = {}
self._config['identifier'] = ''
self._config['Prediction Method'] = METHODS[0]
self._config['Population Class'] = POP_CLASS[0]
self._config['GUI'] = 'True'
for l in HIPLANDMARKS:
self._config[l] = l
self._landmarks = None
self._hipLandmarks = None
self._hipLandmarksAligned = None
def execute(self):
'''
Add your code here that will kick off the execution of the step.
Make sure you call the _doneExecution() method when finished. This method
may be connected up to a button in a widget for example.
'''
self._landmarks['HJC_left'] = np.array([0,0,0], dtype=float)
self._landmarks['HJC_right'] = np.array([0,0,0], dtype=float)
self._getHipLandmarks()
self._alignHipCS()
self._hipLandmarks['HJC_left'] = np.array([0,0,0], dtype=float)
self._hipLandmarks['HJC_right'] = np.array([0,0,0], dtype=float)
self._hipLandmarksAligned['HJC_left'] = np.array([0,0,0], dtype=float)
self._hipLandmarksAligned['HJC_right'] = np.array([0,0,0], dtype=float)
if self._config['GUI']:
print 'launching prediction gui'
self._widget = MayaviHJCPredictionViewerWidget(self._landmarks,
self._config,
self.predict,
METHODS,
POP_CLASS)
self._widget._ui.acceptButton.clicked.connect(self._doneExecution)
self._widget._ui.abortButton.clicked.connect(self._abort)
self._widget.setModal(True)
self._setCurrentWidget(self._widget)
else:
self.predict()
self._doneExecution()
def _abort(self):
raise RuntimeError('HJC Prediction Aborted')
def _getHipLandmarks(self):
self._hipLandmarks = {}
for l in HIPLANDMARKS:
lname = self._config[l]
try:
self._hipLandmarks[l] = self._landmarks[lname]
except KeyError:
                raise RuntimeError('HJC prediction failed, missing landmark: ' + lname)
def _alignHipCS(self):
# align landmarks to hip CS
hipLandmarks = self._hipLandmarks.items()
landmarkNames = [l[0] for l in hipLandmarks]
landmarkCoords = np.array([l[1] for l in hipLandmarks])
landmarkCoordsAligned, alignT = ma.alignAnatomicPelvis(landmarkCoords,
self._hipLandmarks['LASIS'],
self._hipLandmarks['RASIS'],
self._hipLandmarks['LPSIS'],
self._hipLandmarks['RPSIS'],
returnT=True )
self._hipLandmarksAligned = dict(zip(landmarkNames,landmarkCoordsAligned))
self._inverseT = np.linalg.inv(np.vstack([alignT, [0,0,0,1]]))
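        # alignT (returned because returnT=True) is taken here to be the 3x4
        # affine mapping the original landmark coordinates into the pelvis
        # anatomic CS; appending the homogeneous row [0, 0, 0, 1] and
        # inverting yields the transform used in _predict to map predicted
        # HJCs back to the original coordinate system.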
def predict(self):
# run predictions methods
print 'predicting using %s (%s)'%(self._config['Prediction Method'],
self._config['Population Class'],
)
if self._config['Prediction Method']=='Seidel':
self._predict(('LASIS', 'RASIS', 'LPSIS', 'RPSIS', 'PS'), hjc.HJCSeidel)
elif self._config['Prediction Method']=='Tylkowski':
self._predict(('LASIS', 'RASIS'), hjc.HJCTylkowski)
elif self._config['Prediction Method']=='Bell':
self._predict(('LASIS', 'RASIS', 'PS'), hjc.HJCBell)
def _predict(self, reqLandmarks, predictor):
L = []
for l in reqLandmarks:
try:
L.append(self._hipLandmarksAligned[l])
except KeyError:
                raise RuntimeError('HJC prediction failed, missing landmark: ' + l)
L.append(self._config['Population Class'])
predictions = np.array(predictor(*L)[:2])
self._hipLandmarksAligned['HJC_left'] = predictions[0]
self._hipLandmarksAligned['HJC_right'] = predictions[1]
self._hipLandmarks['HJC_left'],\
self._hipLandmarks['HJC_right'] = ma.transform3D.transformAffine(predictions, self._inverseT)
self._landmarks['HJC_left'],\
self._landmarks['HJC_right'] = ma.transform3D.transformAffine(predictions, self._inverseT)
# self._hipLandmarks['HJC_left'] = ma.transform3D.transformAffine( [predictions[0],], self._inverseT )
# self._hipLandmarks['HJC_right'] = ma.transform3D.transformAffine( [predictions[1],], self._inverseT )
# self._landmarks['HJC_left'] = ma.transform3D.transformAffine( [predictions[0],], self._inverseT )
# self._landmarks['HJC_right'] = ma.transform3D.transformAffine( [predictions[1],], self._inverseT )
def setPortData(self, index, dataIn):
'''
Add your code here that will set the appropriate objects for this step.
The index is the index of the port in the port list. If there is only one
uses port for this step then the index can be ignored.
'''
        self._landmarks = dataIn  # landmarks dictionary
def getPortData(self, index):
'''
Add your code here that will return the appropriate objects for this step.
The index is the index of the port in the port list. If there is only one
provides port for this step then the index can be ignored.
'''
        return self._landmarks  # landmarks dictionary
def configure(self):
'''
This function will be called when the configure icon on the step is
clicked. It is appropriate to display a configuration dialog at this
time. If the conditions for the configuration of this step are complete
then set:
self._configured = True
'''
dlg = ConfigureDialog(METHODS, POP_CLASS)
dlg.identifierOccursCount = self._identifierOccursCount
dlg.setConfig(self._config)
dlg.validate()
dlg.setModal(True)
if dlg.exec_():
self._config = dlg.getConfig()
self._configured = dlg.validate()
self._configuredObserver()
def getIdentifier(self):
'''
The identifier is a string that must be unique within a workflow.
'''
return self._config['identifier']
def setIdentifier(self, identifier):
'''
The framework will set the identifier for this step when it is loaded.
'''
self._config['identifier'] = identifier
def serialize(self, location):
'''
Add code to serialize this step to disk. The filename should
use the step identifier (received from getIdentifier()) to keep it
unique within the workflow. The suggested name for the file on
disk is:
filename = getIdentifier() + '.conf'
'''
configuration_file = os.path.join(location, self.getIdentifier() + '.conf')
conf = QtCore.QSettings(configuration_file, QtCore.QSettings.IniFormat)
conf.beginGroup('config')
conf.setValue('identifier', self._config['identifier'])
conf.setValue('Prediction Method', self._config['Prediction Method'])
conf.setValue('Population Class', self._config['Population Class'])
conf.setValue('LASIS', self._config['LASIS'])
conf.setValue('RASIS', self._config['RASIS'])
conf.setValue('LPSIS', self._config['LPSIS'])
conf.setValue('RPSIS', self._config['RPSIS'])
conf.setValue('PS', self._config['PS'])
if self._config['GUI']:
conf.setValue('GUI', 'True')
else:
conf.setValue('GUI', 'False')
conf.endGroup()
def deserialize(self, location):
'''
Add code to deserialize this step from disk. As with the serialize
method the filename should use the step identifier. Obviously the
filename used here should be the same as the one used by the
serialize method.
'''
configuration_file = os.path.join(location, self.getIdentifier() + '.conf')
conf = QtCore.QSettings(configuration_file, QtCore.QSettings.IniFormat)
conf.beginGroup('config')
self._config['identifier'] = conf.value('identifier', '')
self._config['Prediction Method'] = conf.value('Prediction Method', 'Seidel')
self._config['Population Class'] = conf.value('Population Class', 'adults')
self._config['LASIS'] = conf.value('LASIS', 'LASIS')
self._config['RASIS'] = conf.value('RASIS', 'RASIS')
self._config['LPSIS'] = conf.value('LPSIS', 'LPSIS')
self._config['RPSIS'] = conf.value('RPSIS', 'RPSIS')
self._config['PS'] = conf.value('PS', 'PS')
if conf.value('GUI')=='True':
self._config['GUI'] = True
elif conf.value('GUI')=='False':
self._config['GUI'] = False
conf.endGroup()
d = ConfigureDialog(METHODS, POP_CLASS)
d.identifierOccursCount = self._identifierOccursCount
d.setConfig(self._config)
self._configured = d.validate()
|
|
"""
Interactive phase plane plot for Euler equations with ideal gas,
Euler equations with Tammann equations of state and acoustic equations.
"""
import sys, os
import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from ipywidgets import widgets
from ipywidgets import interact
from IPython.display import display
def euler_phase_plane_plot():
"Return phase plane function ready to use with interact."
    # Define Hugoniot locus and integral curves independently (needed for interact version)
def hugoniot_locus_1(p,ql,gamma):
rhol, ul, pl = ql
cl = np.sqrt(gamma*pl/rhol)
beta = (gamma+1.)/(gamma-1.)
return ul + 2*cl/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/pl)/np.sqrt(1+beta*p/pl))
def hugoniot_locus_3(p,qr,gamma):
rhor, ur, pr = qr
cr = np.sqrt(gamma*pr/rhor)
beta = (gamma+1.)/(gamma-1.)
return ur - 2*cr/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/pr)/np.sqrt(1+beta*p/pr))
def integral_curve_1(p,ql,gamma):
rhol, ul, pl = ql
cl = np.sqrt(gamma*pl/rhol)
return ul + 2*cl/(gamma-1.)*(1.-(p/pl)**((gamma-1.)/(2.*gamma)))
def integral_curve_3(p,qr,gamma):
rhor, ur, pr = qr
cr = np.sqrt(gamma*pr/rhor)
return ur - 2*cr/(gamma-1.)*(1.-(p/pr)**((gamma-1.)/(2.*gamma)))
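    # The four functions above are the standard p-u phase plane curves for
    # the ideal-gas Euler equations: Hugoniot loci for shocks and integral
    # curves for rarefactions. plot_function below intersects the 1-family
    # curve through the left state with the 3-family curve through the right
    # state to obtain the middle state (p_star, u_star).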
def plot_function(rhol,ul,pl,rhor,ur,pr,gamma,
xmin,xmax,ymin,ymax,show_phys,show_unphys):
"Subfunction required for interactive (function of only interactive parameters)."
ql = [rhol, ul, pl]
qr = [rhor, ur, pr]
hugoloc1 = lambda p: hugoniot_locus_1(p,ql,gamma)
hugoloc3 = lambda p: hugoniot_locus_3(p,qr,gamma)
intcurv1 = lambda p: integral_curve_1(p,ql,gamma)
intcurv3 = lambda p: integral_curve_3(p,qr,gamma)
def phi_l(p):
"Check whether the 1-wave is a shock or rarefaction."
if p >= pl:
return hugoloc1(p)
else:
return intcurv1(p)
# Check whether the 3-wave is a shock or rarefaction
def phi_r(p):
if p >= pr:
return hugoloc3(p)
else:
return intcurv3(p)
phi = lambda p: phi_l(p)-phi_r(p)
# Use fsolve to find p_star such that Phi(p_star)=0
p0 = (ql[2] + qr[2])/2.0 # initial guess is the average of initial pressures
p_star, info, ier, msg = fsolve(phi, p0, full_output=True, xtol=1.e-14)
# For strong rarefactions, sometimes fsolve needs help
if ier != 1:
p_star, info, ier, msg = fsolve(phi, p0, full_output=True, factor=0.1, xtol=1.e-10)
# This should not happen:
if ier != 1:
print('Warning: fsolve did not converge.')
u_star = 0.5*(phi_l(p_star) + phi_r(p_star))
# Set plot bounds
fig, ax = plt.subplots(figsize=(12,4))
x = (ql[2], qr[2], p_star)
y = (ql[1], qr[1], u_star)
dx, dy = xmax - xmin, ymax - ymin
ax.set_xlim(min(0.00000001,xmin),xmax)
ax.set_ylim(ymin,ymax)
ax.set_xlabel('Pressure (p)', fontsize=15)
ax.set_ylabel('Velocity (u)', fontsize=15)
p = np.linspace(xmin,xmax,500)
p1_shk = p[p>=pl]
p1_rar = p[p<pl]
p3_shk = p[p>=pr]
p3_rar = p[p<pr]
if show_unphys:
# Plot unphysical solutions
ax.plot(p1_rar,hugoloc1(p1_rar),'--r')
ax.plot(p3_rar,hugoloc3(p3_rar),'--r')
ax.plot(p1_shk,intcurv1(p1_shk),'--b')
ax.plot(p3_shk,intcurv3(p3_shk),'--b')
if show_phys:
# Plot physical solutions
ax.plot(p1_shk,hugoloc1(p1_shk),'-r')
ax.plot(p3_shk,hugoloc3(p3_shk),'-r')
ax.plot(p1_rar,intcurv1(p1_rar),'-b')
ax.plot(p3_rar,intcurv3(p3_rar),'-b')
if (p_star <= xmax and u_star >ymin and u_star < ymax):
ax.plot(p_star, u_star, '-ok', markersize=10)
ax.text(x[2] + 0.025*dx,y[2] + 0.025*dy, '$q_m$', fontsize=15)
# Plot initial states and markers
ax.plot(ql[2], ql[1], '-ok', markersize=10)
ax.plot(qr[2], qr[1], '-ok', markersize=10)
for i,label in enumerate(('$q_l$', '$q_r$')):
ax.text(x[i] + 0.025*dx,y[i] + 0.025*dy,label, fontsize=15)
plt.show()
return plot_function
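# Non-interactive usage sketch (values illustrative; they mirror the defaults
# of euler_interactive_phase_plane below):
#
#     plot_fn = euler_phase_plane_plot()
#     plot_fn(rhol=1.0, ul=-3.0, pl=100.0,
#             rhor=1.0, ur=3.0, pr=100.0,
#             gamma=1.4,
#             xmin=1e-7, xmax=200.0, ymin=-15.0, ymax=15.0,
#             show_phys=True, show_unphys=True)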
def euler_interactive_phase_plane(ql=(1.0, -3.0, 100.0),
qr=(1.0, 3.0, 100.0),
gamma=1.4):
"Create the GUI and output the interact app."
# Create plot function for interact
pp_plot = euler_phase_plane_plot()
# Declare all widget sliders
ql1_widget = widgets.FloatSlider(value=ql[0],min=0.01,max=100.0, description=r'$\rho_l$')
ql2_widget = widgets.FloatSlider(value=ql[1],min=-15,max=15.0, description='$u_l$')
ql3_widget = widgets.FloatSlider(value=ql[2],min=1,max=200.0, description='$p_l$')
qr1_widget = widgets.FloatSlider(value=qr[0],min=0.01,max=100.0, description=r'$\rho_r$')
qr2_widget = widgets.FloatSlider(value=qr[1],min=-15,max=15.0, description='$u_r$')
qr3_widget = widgets.FloatSlider(value=qr[2],min=1,max=200.0, description='$p_r$')
gamm_widget = widgets.FloatSlider(value=gamma,min=0.01,max=10.0, description='$\gamma$')
xmin_widget = widgets.BoundedFloatText(value=0.0000001, description='$p_{min}:$')
xmax_widget = widgets.FloatText(value=200, description='$p_{max}:$')
ymin_widget = widgets.FloatText(value=-15, description='$u_{min}:$')
ymax_widget = widgets.FloatText(value=15, description='$u_{max}:$')
show_physical = widgets.Checkbox(value=True, description='Physical solution')
show_unphysical = widgets.Checkbox(value=True, description='Unphysical solution')
# Additional control widgets not called by function
rhomax_widget = widgets.FloatText(value=100, description=r'$\rho_{max}$')
gammax_widget = widgets.FloatText(value=10, description='$\gamma_{max}$')
# Allow for dependent widgets to update
def update_xmin(*args):
ql3_widget.min = xmin_widget.value
qr3_widget.min = xmin_widget.value
def update_xmax(*args):
ql3_widget.max = xmax_widget.value
qr3_widget.max = xmax_widget.value
def update_ymin(*args):
ql2_widget.min = ymin_widget.value
qr2_widget.min = ymin_widget.value
def update_ymax(*args):
ql2_widget.max = ymax_widget.value
qr2_widget.max = ymax_widget.value
def update_rhomax(*args):
ql1_widget.max = rhomax_widget.value
qr1_widget.max = rhomax_widget.value
def update_gammax(*args):
gamm_widget.max = gammax_widget.value
xmin_widget.observe(update_xmin, 'value')
xmax_widget.observe(update_xmax, 'value')
ymin_widget.observe(update_ymin, 'value')
ymax_widget.observe(update_ymax, 'value')
rhomax_widget.observe(update_rhomax, 'value')
gammax_widget.observe(update_gammax, 'value')
# Organize slider widgets into boxes
qleftright = widgets.VBox([widgets.HBox([ql1_widget, ql2_widget, ql3_widget]),
widgets.HBox([qr1_widget, qr2_widget, qr3_widget]),
widgets.HBox([gamm_widget])])
plot_opts = widgets.VBox([widgets.HBox([show_physical, show_unphysical]),
widgets.HBox([xmin_widget, xmax_widget, rhomax_widget]),
widgets.HBox([ymin_widget, ymax_widget, gammax_widget])])
# Set up interactive GUI (tab style)
interact_gui = widgets.Tab(children=[qleftright, plot_opts])
interact_gui.set_title(0, 'Left and right states')
interact_gui.set_title(1, 'Plot options')
# Define interactive widget and run GUI
ppwidget = interact(pp_plot, rhol=ql1_widget, ul=ql2_widget, pl=ql3_widget,
rhor=qr1_widget, ur=qr2_widget, pr=qr3_widget,
gamma=gamm_widget,
xmin=xmin_widget, xmax=xmax_widget,
ymin=ymin_widget, ymax=ymax_widget,
show_phys=show_physical, show_unphys=show_unphysical)
try:
ppwidget.widget.close()
display(interact_gui)
display(ppwidget.widget.out)
except:
pass
def euler_tammann_phase_plane_plot():
"Return phase plane function ready to use with interact."
# Define hugoniot locus and integral curves independently (needed for interact version)
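    # Each side now follows a Tammann (stiffened gas) equation of state,
    # assumed here to take the usual form p = (gamma - 1)*rho*e - gamma*pinf,
    # so setting pinf = 0 recovers the ideal-gas curves defined in
    # euler_phase_plane_plot above.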
def hugoniot_locus_1(p,ql,params):
gammal, pinfl = params
rhol, ul, pl = ql
betal = (pl + pinfl)*(gammal - 1.0)/(gammal + 1.0)
alphal = 2.0/((gammal + 1.0)*rhol)
return ul - (p - pl)*np.sqrt(alphal/(p + pinfl + betal))
def hugoniot_locus_3(p,qr,params):
gammar, pinfr = params
rhor, ur, pr = qr
betar = (pr + pinfr)*(gammar - 1.0)/(gammar + 1.0)
alphar = 2.0/((gammar + 1.0)*rhor)
return ur + (p - pr)*np.sqrt(alphar/(p + pinfr + betar))
def integral_curve_1(p,ql,params):
gammal, pinfl = params
rhol, ul, pl = ql
cl = np.sqrt(gammal*(pl + pinfl)/rhol)
gl1 = gammal - 1.0
return ul + 2*cl/gl1*(1 - ((p + pinfl)/(pl+pinfl))**(gl1/(2.0*gammal)))
def integral_curve_3(p,qr,params):
gammar, pinfr = params
rhor, ur, pr = qr
cr = np.sqrt(gammar*(pr + pinfr)/rhor)
gr1 = gammar - 1.0
return ur - 2*cr/gr1*(1 - ((p + pinfr)/(pr + pinfr))**(gr1/(2.0*gammar)))
def plot_function(rhol,ul,pl,rhor,ur,pr,gammal,pinfl,gammar,pinfr,
xmin,xmax,ymin,ymax,show_phys,show_unphys):
"Subfunction required for interactive (function of only interactive parameters)."
ql = [rhol, ul, pl]
qr = [rhor, ur, pr]
paramsl = [gammal, pinfl]
paramsr = [gammar, pinfr]
hugoloc1 = lambda p: hugoniot_locus_1(p,ql,paramsl)
hugoloc3 = lambda p: hugoniot_locus_3(p,qr,paramsr)
intcurv1 = lambda p: integral_curve_1(p,ql,paramsl)
intcurv3 = lambda p: integral_curve_3(p,qr,paramsr)
def phi_l(p):
"Check whether the 1-wave is a shock or rarefaction."
if p >= pl:
return hugoloc1(p)
else:
return intcurv1(p)
def phi_r(p):
"Check whether the 3-wave is a shock or rarefaction."
if p >= pr:
return hugoloc3(p)
else:
return intcurv3(p)
phi = lambda p: phi_l(p)-phi_r(p)
        # Use fsolve to find p_star such that phi(p_star) = 0
        # (a standalone sketch of this step follows euler_tammann_phase_plane_plot below)
p0 = (ql[2] + qr[2])/2.0 # initial guess is the average of initial pressures
p_star, info, ier, msg = fsolve(phi, p0, full_output=True, xtol=1.e-14)
# For strong rarefactions, sometimes fsolve needs help
if ier != 1:
p_star, info, ier, msg = fsolve(phi, p0, full_output=True, factor=0.1, xtol=1.e-10)
# This should not happen:
if ier != 1:
print('Warning: fsolve did not converge.')
u_star = 0.5*(phi_l(p_star) + phi_r(p_star))
# Set plot bounds
fig, ax = plt.subplots(figsize=(12,4))
x = (ql[2], qr[2], p_star)
y = (ql[1], qr[1], u_star)
dx, dy = xmax - xmin, ymax - ymin
ax.set_xlim(min(0.00000001,xmin),xmax)
ax.set_ylim(ymin,ymax)
ax.set_xlabel('Pressure (p)', fontsize=15)
ax.set_ylabel('Velocity (u)', fontsize=15)
p = np.linspace(xmin,xmax,500)
p1_shk = p[p>=pl]
p1_rar = p[p<pl]
p3_shk = p[p>=pr]
p3_rar = p[p<pr]
# Plot unphysical solutions
if show_unphys:
ax.plot(p1_rar,hugoloc1(p1_rar),'--r')
ax.plot(p3_rar,hugoloc3(p3_rar),'--r')
ax.plot(p1_shk,intcurv1(p1_shk),'--b')
ax.plot(p3_shk,intcurv3(p3_shk),'--b')
# Plot physical solutions
if show_phys:
ax.plot(p1_shk,hugoloc1(p1_shk),'-r')
ax.plot(p3_shk,hugoloc3(p3_shk),'-r')
ax.plot(p1_rar,intcurv1(p1_rar),'-b')
ax.plot(p3_rar,intcurv3(p3_rar),'-b')
if (p_star <= xmax and u_star > ymin and u_star < ymax):
ax.plot(p_star, u_star, '-ok', markersize=10)
ax.text(x[2] + 0.025*dx,y[2] + 0.025*dy, '$q_m$', fontsize=15)
# Plot initial states and markers
ax.plot(ql[2], ql[1], '-ok', markersize=10)
ax.plot(qr[2], qr[1], '-ok', markersize=10)
for i,label in enumerate(('$q_l$', '$q_r$')):
ax.text(x[i] + 0.025*dx,y[i] + 0.025*dy,label, fontsize=15)
plt.show()
return plot_function
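# --- Illustrative sketch (not part of the original module) ------------------
# Standalone version of the root-finding step performed inside plot_function
# above: the middle state (p*, u*) is the intersection of the 1-wave curve
# through the left state and the 3-wave curve through the right state.  The
# states and Tammann EOS parameters are the same defaults used by
# euler_tammann_interactive_phase_plane below.
def _example_find_middle_state():
    import numpy as np
    from scipy.optimize import fsolve
    rhol, ul, pl, gammal, pinfl = 600.0, 10.0, 50000.0, 1.4, 0.0
    rhor, ur, pr, gammar, pinfr = 50.0, -10.0, 25000.0, 7.0, 100.0
    def phi_l(p):
        # Hugoniot locus (shock) for p >= pl, integral curve (rarefaction) otherwise
        if p >= pl:
            betal = (pl + pinfl)*(gammal - 1.0)/(gammal + 1.0)
            alphal = 2.0/((gammal + 1.0)*rhol)
            return ul - (p - pl)*np.sqrt(alphal/(p + pinfl + betal))
        cl = np.sqrt(gammal*(pl + pinfl)/rhol)
        return ul + 2*cl/(gammal - 1.0)*(1 - ((p + pinfl)/(pl + pinfl))**((gammal - 1.0)/(2.0*gammal)))
    def phi_r(p):
        # Same structure for the 3-wave through the right state
        if p >= pr:
            betar = (pr + pinfr)*(gammar - 1.0)/(gammar + 1.0)
            alphar = 2.0/((gammar + 1.0)*rhor)
            return ur + (p - pr)*np.sqrt(alphar/(p + pinfr + betar))
        cr = np.sqrt(gammar*(pr + pinfr)/rhor)
        return ur - 2*cr/(gammar - 1.0)*(1 - ((p + pinfr)/(pr + pinfr))**((gammar - 1.0)/(2.0*gammar)))
    # fsolve passes a length-1 array; start from the average of the two pressures
    p_star = float(fsolve(lambda p: phi_l(p[0]) - phi_r(p[0]), 0.5*(pl + pr))[0])
    u_star = 0.5*(phi_l(p_star) + phi_r(p_star))
    return p_star, u_star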
def euler_tammann_interactive_phase_plane(ql=(600.0, 10.0, 50000.0),
qr=(50.0, -10.0, 25000.0),
paramsl=(1.4, 0.0),
paramsr=(7.0, 100.0)):
"Create the GUI and output the interact app."
# Create plot function for interact
pp_plot = euler_tammann_phase_plane_plot()
# Declare all widget sliders
ql1_widget = widgets.FloatSlider(value=ql[0],min=0.01,max=1000.0, description=r'$\rho_l$')
ql2_widget = widgets.FloatSlider(value=ql[1],min=-15,max=15.0, description='$u_l$')
ql3_widget = widgets.FloatSlider(value=ql[2],min=1,max=200000.0, description='$p_l$')
qr1_widget = widgets.FloatSlider(value=qr[0],min=0.01,max=1000.0, description=r'$\rho_r$')
qr2_widget = widgets.FloatSlider(value=qr[1],min=-15,max=15.0, description='$u_r$')
qr3_widget = widgets.FloatSlider(value=qr[2],min=1,max=200000.0, description='$p_r$')
    gamml_widget = widgets.FloatSlider(value=paramsl[0],min=0.01,max=10.0, description=r'$\gamma_l$')
    gammr_widget = widgets.FloatSlider(value=paramsr[0],min=0.01,max=10.0, description=r'$\gamma_r$')
    pinfl_widget = widgets.FloatSlider(value=paramsl[1],min=0.0,max=300000.0, description=r'$p_{\infty l}$')
    pinfr_widget = widgets.FloatSlider(value=paramsr[1],min=0.0,max=300000.0, description=r'$p_{\infty r}$')
xmin_widget = widgets.BoundedFloatText(value=0.0000001, description='$p_{min}:$')
xmax_widget = widgets.FloatText(value=200000, description='$p_{max}:$')
ymin_widget = widgets.FloatText(value=-15, description='$u_{min}:$')
ymax_widget = widgets.FloatText(value=15, description='$u_{max}:$')
show_physical = widgets.Checkbox(value=True, description='Physical solution')
show_unphysical = widgets.Checkbox(value=True, description='Unphysical solution')
# Additional control widgets not called by function
rhomax_widget = widgets.FloatText(value=1000, description=r'$\rho_{max}$')
    gammax_widget = widgets.FloatText(value=10, description=r'$\gamma_{max}$')
    pinfmax_widget = widgets.FloatText(value=300000, description=r'$p_{\infty max}$')
# Allow for dependent widgets to update
def update_xmin(*args):
ql3_widget.min = xmin_widget.value
qr3_widget.min = xmin_widget.value
def update_xmax(*args):
ql3_widget.max = xmax_widget.value
qr3_widget.max = xmax_widget.value
def update_ymin(*args):
ql2_widget.min = ymin_widget.value
qr2_widget.min = ymin_widget.value
def update_ymax(*args):
ql2_widget.max = ymax_widget.value
qr2_widget.max = ymax_widget.value
def update_rhomax(*args):
ql1_widget.max = rhomax_widget.value
qr1_widget.max = rhomax_widget.value
def update_gammax(*args):
gamml_widget.max = gammax_widget.value
gammr_widget.max = gammax_widget.value
def update_pinfmax(*args):
pinfl_widget.max = pinfmax_widget.value
pinfr_widget.max = pinfmax_widget.value
xmin_widget.observe(update_xmin, 'value')
xmax_widget.observe(update_xmax, 'value')
ymin_widget.observe(update_ymin, 'value')
ymax_widget.observe(update_ymax, 'value')
rhomax_widget.observe(update_rhomax, 'value')
gammax_widget.observe(update_gammax, 'value')
pinfmax_widget.observe(update_pinfmax, 'value')
# Organize slider widgets into boxes
qleftright = widgets.VBox([widgets.HBox([ql1_widget, ql2_widget, ql3_widget]),
widgets.HBox([qr1_widget, qr2_widget, qr3_widget])])
params = widgets.HBox([widgets.VBox([gamml_widget, gammr_widget]),
widgets.VBox([pinfl_widget, pinfr_widget])])
plot_opts = widgets.HBox([widgets.VBox([show_physical, xmin_widget, ymin_widget]),
widgets.VBox([show_unphysical, xmax_widget, ymax_widget]),
widgets.VBox([rhomax_widget, gammax_widget, pinfmax_widget])])
# Set up interactive GUI (tab style)
interact_gui = widgets.Tab(children=[qleftright, params, plot_opts])
interact_gui.set_title(0, 'Left and right states')
interact_gui.set_title(1, 'Tammann EOS')
interact_gui.set_title(2, 'Plot options')
# Define interactive widget and run GUI
ppwidget = interact(pp_plot, rhol=ql1_widget, ul=ql2_widget, pl=ql3_widget,
rhor=qr1_widget, ur=qr2_widget, pr=qr3_widget,
gammal=gamml_widget, pinfl=pinfl_widget,
gammar=gammr_widget, pinfr=pinfr_widget,
xmin=xmin_widget, xmax=xmax_widget,
ymin=ymin_widget, ymax=ymax_widget,
show_phys=show_physical, show_unphys=show_unphysical)
ppwidget.widget.close()
display(interact_gui)
display(ppwidget.widget.out)
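# --- Illustrative usage (not part of the original module) -------------------
# In a Jupyter notebook with ipywidgets enabled, the tabbed GUI above can be
# launched directly; the tuples below simply repeat the defaults from the
# function signature.
def _example_launch_default_gui():
    return euler_tammann_interactive_phase_plane(ql=(600.0, 10.0, 50000.0),
                                                 qr=(50.0, -10.0, 25000.0),
                                                 paramsl=(1.4, 0.0),
                                                 paramsr=(7.0, 100.0))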
|
|
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Peer management.'''
import asyncio
import logging
import random
import socket
import ssl
import time
from collections import defaultdict, Counter
from functools import partial
from aiorpcx import ClientSession, RPCError, SOCKSProxy
from lib.peer import Peer
from lib.util import ConnectionLogger
PEER_GOOD, PEER_STALE, PEER_NEVER, PEER_BAD = range(4)
STALE_SECS = 24 * 3600
WAKEUP_SECS = 300
class PeerSession(ClientSession):
'''An outgoing session to a peer.'''
sessions = set()
def __init__(self, peer, peer_mgr, kind, host, port, **kwargs):
super().__init__(host, port, **kwargs)
self.peer = peer
self.peer_mgr = peer_mgr
self.kind = kind
self.timeout = 20 if self.peer.is_tor else 10
context = {'conn_id': f'{host}'}
self.logger = ConnectionLogger(self.logger, context)
def connection_made(self, transport):
super().connection_made(transport)
self.sessions.add(self)
# Update IP address if not Tor
if not self.peer.is_tor:
address = self.peer_address()
if address:
self.peer.ip_addr = address[0]
# Send server.version first
controller = self.peer_mgr.controller
self.send_request('server.version', controller.server_version_args(),
self.on_version, timeout=self.timeout)
def connection_lost(self, exc):
        '''Handle loss of the connection to the peer.'''
super().connection_lost(exc)
self.sessions.remove(self)
def _header_notification(self, header):
pass
def notification_handler(self, method):
# We subscribe so might be unlucky enough to get a notification...
if method == 'blockchain.headers.subscribe':
return self._header_notification
return None
def is_good(self, request, instance):
try:
result = request.result()
except asyncio.CancelledError:
return False
except asyncio.TimeoutError as e:
self.fail(request, str(e))
return False
except RPCError as error:
self.fail(request, f'{error.message} ({error.code})')
return False
if isinstance(result, instance):
return True
self.fail(request, f'{request} returned bad result type '
f'{type(result).__name__}')
return False
def fail(self, request, reason):
self.logger.error(f'{request} failed: {reason}')
self.peer_mgr.set_verification_status(self.peer, self.kind, False)
self.close()
def bad(self, reason):
self.logger.error(f'marking bad: {reason}')
self.peer.mark_bad()
self.peer_mgr.set_verification_status(self.peer, self.kind, False)
self.close()
def on_version(self, request):
'''Handle the response to the version message.'''
if not self.is_good(request, (list, str)):
return
result = request.result()
if isinstance(result, str):
version = result
else:
# Protocol version 1.1 returns a pair with the version first
if len(result) < 2 or not isinstance(result[0], str):
self.fail(request, 'result array bad format')
return
version = result[0]
self.peer.server_version = version
self.peer.features['server_version'] = version
for method, on_done in [
('blockchain.headers.subscribe', self.on_height),
('server.features', self.on_features),
('server.peers.subscribe', self.on_peers_subscribe),
]:
self.send_request(method, on_done=on_done, timeout=self.timeout)
def on_features(self, request):
if not self.is_good(request, dict):
return
features = request.result()
hosts = [host.lower() for host in features.get('hosts', {})]
our_hash = self.peer_mgr.env.coin.GENESIS_HASH
if our_hash != features.get('genesis_hash'):
self.bad('incorrect genesis hash')
elif self.peer.host.lower() in hosts:
self.peer.update_features(features)
self.maybe_close()
else:
self.bad('ignoring - not listed in host list {}'.format(hosts))
def on_height(self, request):
'''Handle the response to blockchain.headers.subscribe message.'''
if not self.is_good(request, dict):
return
result = request.result()
controller = self.peer_mgr.controller
our_height = controller.bp.db_height
their_height = result.get('block_height')
if not isinstance(their_height, int):
self.bad('invalid height {}'.format(their_height))
return
if abs(our_height - their_height) > 5:
self.bad('bad height {:,d} (ours: {:,d})'
.format(their_height, our_height))
return
# Check prior header too in case of hard fork.
check_height = min(our_height, their_height)
expected_header = controller.electrum_header(check_height)
self.send_request('blockchain.block.get_header', [check_height],
partial(self.on_header, expected_header),
timeout=self.timeout)
def on_header(self, expected_header, request):
'''Handle the response to blockchain.block.get_header message.
Compare hashes of prior header in attempt to determine if forked.'''
if not self.is_good(request, dict):
return
result = request.result()
theirs = result.get('prev_block_hash')
ours = expected_header.get('prev_block_hash')
if ours == theirs:
self.maybe_close()
else:
self.bad('our header hash {} and theirs {} differ'
.format(ours, theirs))
def on_peers_subscribe(self, request):
        '''Handle the response to the server.peers.subscribe message.'''
if not self.is_good(request, list):
return
# Check the peers list we got from a remote peer.
# Each is expected to be of the form:
# [ip_addr, hostname, ['v1.0', 't51001', 's51002']]
# Call add_peer if the remote doesn't appear to know about us.
raw_peers = request.result()
try:
real_names = [' '.join([u[1]] + u[2]) for u in raw_peers]
peers = [Peer.from_real_name(real_name, str(self.peer))
for real_name in real_names]
except Exception:
self.bad('bad server.peers.subscribe response')
return
features = self.peer_mgr.features_to_register(self.peer, peers)
if features:
self.logger.info(f'registering ourself with "server.add_peer"')
self.send_request('server.add_peer', [features],
self.on_add_peer, timeout=self.timeout)
else:
self.maybe_close()
def on_add_peer(self, request):
        '''We got a response to the add_peer message. Don't care about its
form.'''
self.maybe_close()
def maybe_close(self):
'''Close the connection if no requests are outstanding, and mark peer
as good.
'''
if not self.all_requests():
self.close()
self.peer_mgr.set_verification_status(self.peer, self.kind, True)
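# --- Illustrative note (not part of the original module) --------------------
# Each entry of a server.peers.subscribe reply has the form
# [ip_addr, hostname, ['v1.0', 't51001', 's51002']].  on_peers_subscribe above
# joins the hostname with its feature list into a space-separated "real name"
# before handing it to Peer.from_real_name.  A sketch with a made-up entry:
def _example_real_name():
    raw_peer = ['192.0.2.1', 'electrum.example.org', ['v1.0', 't51001', 's51002']]
    return ' '.join([raw_peer[1]] + raw_peer[2])
    # -> 'electrum.example.org v1.0 t51001 s51002'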
class PeerManager(object):
'''Looks after the DB of peer network servers.
Attempts to maintain a connection with up to 8 peers.
Issues a 'peers.subscribe' RPC to them and tells them our data.
'''
def __init__(self, env, controller):
self.logger = logging.getLogger(self.__class__.__name__)
# Initialise the Peer class
Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
self.env = env
self.controller = controller
self.loop = controller.loop
# Our clearnet and Tor Peers, if any
self.myselves = [Peer(ident.host, controller.server_features(), 'env')
for ident in env.identities]
self.retry_event = asyncio.Event()
# Peers have one entry per hostname. Once connected, the
# ip_addr property is either None, an onion peer, or the
# IP address that was connected to. Adding a peer will evict
# any other peers with the same host name or IP address.
self.peers = set()
self.permit_onion_peer_time = time.time()
self.proxy = None
self.last_proxy_try = 0
def my_clearnet_peer(self):
'''Returns the clearnet peer representing this server, if any.'''
clearnet = [peer for peer in self.myselves if not peer.is_tor]
return clearnet[0] if clearnet else None
def info(self):
'''The number of peers.'''
self.set_peer_statuses()
counter = Counter(peer.status for peer in self.peers)
return {
'bad': counter[PEER_BAD],
'good': counter[PEER_GOOD],
'never': counter[PEER_NEVER],
'stale': counter[PEER_STALE],
'total': len(self.peers),
}
def set_peer_statuses(self):
'''Set peer statuses.'''
cutoff = time.time() - STALE_SECS
for peer in self.peers:
if peer.bad:
peer.status = PEER_BAD
elif peer.last_good > cutoff:
peer.status = PEER_GOOD
elif peer.last_good:
peer.status = PEER_STALE
else:
peer.status = PEER_NEVER
def rpc_data(self):
'''Peer data for the peers RPC method.'''
self.set_peer_statuses()
descs = ['good', 'stale', 'never', 'bad']
def peer_data(peer):
data = peer.serialize()
data['status'] = descs[peer.status]
return data
def peer_key(peer):
return (peer.bad, -peer.last_good)
return [peer_data(peer) for peer in sorted(self.peers, key=peer_key)]
def features_to_register(self, peer, remote_peers):
'''If we should register ourselves to the remote peer, which has
reported the given list of known peers, return the clearnet
identity features to register, otherwise None.
'''
self.add_peers(remote_peers)
# Announce ourself if not present. Don't if disabled, we
# are a non-public IP address, or to ourselves.
if not self.env.peer_announce or peer in self.myselves:
return None
my = self.my_clearnet_peer()
if not my or not my.is_public:
return None
# Register if no matches, or ports have changed
for peer in my.matches(remote_peers):
if peer.tcp_port == my.tcp_port and peer.ssl_port == my.ssl_port:
return None
return my.features
def add_peers(self, peers, limit=2, check_ports=False, source=None):
'''Add a limited number of peers that are not already present.'''
retry = False
new_peers = []
for peer in peers:
if not peer.is_public:
continue
matches = peer.matches(self.peers)
if not matches:
new_peers.append(peer)
elif check_ports:
for match in matches:
if match.check_ports(peer):
self.logger.info('ports changed for {}'.format(peer))
retry = True
if new_peers:
retry = True
source = source or new_peers[0].source
if limit:
random.shuffle(new_peers)
use_peers = new_peers[:limit]
else:
use_peers = new_peers
for n, peer in enumerate(use_peers):
self.logger.info('accepted new peer {:d}/{:d} {} from {} '
.format(n + 1, len(use_peers), peer, source))
self.peers.update(use_peers)
if retry:
self.retry_event.set()
def permit_new_onion_peer(self):
'''Accept a new onion peer only once per random time interval.'''
now = time.time()
if now < self.permit_onion_peer_time:
return False
self.permit_onion_peer_time = now + random.randrange(0, 1200)
return True
async def on_add_peer(self, features, source_info):
'''Add a peer (but only if the peer resolves to the source).'''
if not source_info:
self.logger.info('ignored add_peer request: no source info')
return False
source = source_info[0]
peers = Peer.peers_from_features(features, source)
if not peers:
self.logger.info('ignored add_peer request: no peers given')
return False
# Just look at the first peer, require it
peer = peers[0]
host = peer.host
if peer.is_tor:
permit = self.permit_new_onion_peer()
reason = 'rate limiting'
else:
try:
infos = await self.loop.getaddrinfo(host, 80,
type=socket.SOCK_STREAM)
except socket.gaierror:
permit = False
reason = 'address resolution failure'
else:
permit = any(source == info[-1][0] for info in infos)
reason = 'source-destination mismatch'
if permit:
self.logger.info('accepted add_peer request from {} for {}'
.format(source, host))
self.add_peers([peer], check_ports=True)
else:
self.logger.warning('rejected add_peer request from {} for {} ({})'
.format(source, host, reason))
return permit
def on_peers_subscribe(self, is_tor):
'''Returns the server peers as a list of (ip, host, details) tuples.
We return all peers we've connected to in the last day.
Additionally, if we don't have onion routing, we return a few
hard-coded onion servers.
'''
cutoff = time.time() - STALE_SECS
recent = [peer for peer in self.peers
if peer.last_good > cutoff and
not peer.bad and peer.is_public]
onion_peers = []
# Always report ourselves if valid (even if not public)
peers = set(myself for myself in self.myselves
if myself.last_good > cutoff)
# Bucket the clearnet peers and select up to two from each
buckets = defaultdict(list)
for peer in recent:
if peer.is_tor:
onion_peers.append(peer)
else:
buckets[peer.bucket()].append(peer)
for bucket_peers in buckets.values():
random.shuffle(bucket_peers)
peers.update(bucket_peers[:2])
# Add up to 20% onion peers (but up to 10 is OK anyway)
random.shuffle(onion_peers)
max_onion = 50 if is_tor else max(10, len(peers) // 4)
peers.update(onion_peers[:max_onion])
return [peer.to_tuple() for peer in peers]
def import_peers(self):
'''Import hard-coded peers from a file or the coin defaults.'''
self.add_peers(self.myselves)
# Add the hard-coded ones unless only returning self
if self.env.peer_discovery != self.env.PD_SELF:
coin_peers = self.env.coin.PEERS
peers = [Peer.from_real_name(real_name, 'coins.py')
for real_name in coin_peers]
self.add_peers(peers, limit=None)
async def maybe_detect_proxy(self):
'''Detect a proxy if we don't have one and some time has passed since
the last attempt.
If found self.proxy is set to a SOCKSProxy instance, otherwise
None.
'''
if self.proxy or time.time() - self.last_proxy_try < 900:
return
self.last_proxy_try = time.time()
host = self.env.tor_proxy_host
if self.env.tor_proxy_port is None:
ports = [9050, 9150, 1080]
else:
ports = [self.env.tor_proxy_port]
self.logger.info(f'trying to detect proxy on "{host}" ports {ports}')
cls = SOCKSProxy
result = await cls.auto_detect_host(host, ports, None, loop=self.loop)
if isinstance(result, cls):
self.proxy = result
self.logger.info(f'detected {self.proxy}')
def proxy_peername(self):
'''Return the peername of the proxy, if there is a proxy, otherwise
None.'''
return self.proxy.peername if self.proxy else None
async def main_loop(self):
'''Main loop performing peer maintenance. This includes
1) Forgetting unreachable peers.
2) Verifying connectivity of new peers.
3) Retrying old peers at regular intervals.
'''
if self.env.peer_discovery != self.env.PD_ON:
self.logger.info('peer discovery is disabled')
return
self.logger.info('beginning peer discovery. Force use of proxy: {}'
.format(self.env.force_proxy))
self.import_peers()
await self.maybe_detect_proxy()
try:
while True:
timeout = self.loop.call_later(WAKEUP_SECS,
self.retry_event.set)
await self.retry_event.wait()
self.retry_event.clear()
timeout.cancel()
await self.retry_peers()
finally:
for session in list(PeerSession.sessions):
session.abort()
await session.wait_closed()
def is_coin_onion_peer(self, peer):
'''Return true if this peer is a hard-coded onion peer.'''
return peer.is_tor and any(peer.host in real_name
for real_name in self.env.coin.PEERS)
async def retry_peers(self):
'''Retry peers that are close to getting stale.'''
        # Exponential backoff of retries (a worked example of this schedule
        # appears at the end of this module)
now = time.time()
nearly_stale_time = (now - STALE_SECS) + WAKEUP_SECS * 2
def should_retry(peer):
# Retry a peer whose ports might have updated
if peer.other_port_pairs:
return True
# Retry a good connection if it is about to turn stale
if peer.try_count == 0:
return peer.last_good < nearly_stale_time
# Retry a failed connection if enough time has passed
return peer.last_try < now - WAKEUP_SECS * 2 ** peer.try_count
peers = [peer for peer in self.peers if should_retry(peer)]
if self.env.force_proxy or any(peer.is_tor for peer in peers):
await self.maybe_detect_proxy()
for peer in peers:
peer.try_count += 1
pairs = peer.connection_port_pairs()
if peer.bad or not pairs:
self.maybe_forget_peer(peer)
else:
self.retry_peer(peer, pairs)
def retry_peer(self, peer, port_pairs):
peer.last_try = time.time()
kwargs = {'loop': self.loop}
kind, port = port_pairs[0]
if kind == 'SSL':
kwargs['ssl'] = ssl.SSLContext(ssl.PROTOCOL_TLS)
host = self.env.cs_host(for_rpc=False)
if isinstance(host, list):
host = host[0]
if self.env.force_proxy or peer.is_tor:
if not self.proxy:
return
kwargs['proxy'] = self.proxy
kwargs['resolve'] = not peer.is_tor
elif host:
# Use our listening Host/IP for outgoing non-proxy
# connections so our peers see the correct source.
kwargs['local_addr'] = (host, None)
session = PeerSession(peer, self, kind, peer.host, port, **kwargs)
callback = partial(self.on_connected, peer, port_pairs)
self.controller.create_task(session.create_connection(), callback)
def on_connected(self, peer, port_pairs, task):
'''Called when a connection attempt succeeds or fails.
If failed, close the session, log it and try remaining port pairs.
'''
if not task.cancelled() and task.exception():
kind, port = port_pairs.pop(0)
elapsed = time.time() - peer.last_try
self.logger.info(f'failed connecting to {peer} at {kind} port '
f'{port} in {elapsed:.1f}s: {task.exception()}')
if port_pairs:
self.retry_peer(peer, port_pairs)
else:
self.maybe_forget_peer(peer)
def set_verification_status(self, peer, kind, good):
'''Called when a verification succeeded or failed.'''
now = time.time()
if self.env.force_proxy or peer.is_tor:
how = 'via {} over Tor'.format(kind)
else:
how = 'via {} at {}'.format(kind, peer.ip_addr)
status = 'verified' if good else 'failed to verify'
elapsed = now - peer.last_try
self.logger.info(f'{status} {peer} {how} in {elapsed:.1f}s')
if good:
peer.try_count = 0
peer.last_good = now
peer.source = 'peer'
# At most 2 matches if we're a host name, potentially several if
# we're an IP address (several instances can share a NAT).
matches = peer.matches(self.peers)
for match in matches:
if match.ip_address:
if len(matches) > 1:
self.peers.remove(match)
elif peer.host in match.features['hosts']:
match.update_features_from_peer(peer)
else:
self.maybe_forget_peer(peer)
def maybe_forget_peer(self, peer):
'''Forget the peer if appropriate, e.g. long-term unreachable.'''
if peer.last_good and not peer.bad:
try_limit = 10
else:
try_limit = 3
forget = peer.try_count >= try_limit
if forget:
desc = 'bad' if peer.bad else 'unreachable'
self.logger.info('forgetting {} peer: {}'.format(desc, peer))
self.peers.discard(peer)
return forget
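# --- Illustrative note (not part of the original module) --------------------
# Worked example of the retry schedule used in PeerManager.retry_peers above:
# with WAKEUP_SECS = 300, a failed peer is retried only once its last attempt
# is older than WAKEUP_SECS * 2 ** try_count seconds, so the gap doubles with
# every consecutive failure.
def _example_retry_delays(max_tries=5):
    return [WAKEUP_SECS * 2 ** try_count for try_count in range(1, max_tries + 1)]
    # -> [600, 1200, 2400, 4800, 9600]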
|
|
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
import os
import sys
from django.conf import settings
from django.core.management.color import color_style
import json
def add_category(category,categories):
categories.add(category.pk)
if category.parent:
add_category(category.parent,categories)
def ensure_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def choice(choices,question="Your choice: "):
choice = None
while 1:
if not choice:
input_msg = ""
for i in range(0,len(choices)):
input_msg += "["+str(i)+"] "+str(choices[i])+"\n"
choice_number = input(input_msg + question)
try:
choice = choices[int(choice_number)]
return choice
            except (ValueError, IndexError):
sys.stderr.write("Error: Wrong choice.\n")
def serialize_widget(aw):
data = json.loads(serializers.serialize("json",[aw,]))[0]
if 'pk' in data:
data.pop('pk')
if 'user' in data['fields']:
data['fields'].pop('user')
if not data['fields']['category'] is None:
data['fields']['category'] = aw.category.uid
input_data = json.loads(serializers.serialize("json",aw.inputs.all().order_by('uid')))
for i in input_data:
if 'pk' in i:
i.pop('pk')
i['fields']['widget']=aw.uid
output_data = json.loads(serializers.serialize("json",aw.outputs.all().order_by('uid')))
for i in output_data:
if 'pk' in i:
i.pop('pk')
i['fields']['widget']=aw.uid
options_data = json.loads(serializers.serialize("json",AbstractOption.objects.filter(abstract_input__widget=aw).order_by('uid')))
for o in options_data:
if 'pk' in o:
o.pop('pk')
o['fields']['abstract_input']=AbstractInput.objects.get(id=o['fields']['abstract_input']).uid
return [data,]+input_data+output_data+options_data
def serialize_category(c):
data = json.loads(serializers.serialize("json",[c,]))[0]
if 'pk' in data:
data.pop('pk')
if not data['fields']['parent'] is None:
c2 = Category.objects.get(id=data['fields']['parent'])
data['fields']['parent'] = c2.uid
if 'workflow' in data['fields']:
data['fields'].pop('workflow')
if 'user' in data['fields']:
data['fields'].pop('user')
return data
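# --- Illustrative note (not part of the original module) --------------------
# serialize_category above drops the database pk and rewrites the parent
# foreign key as the parent's uid, so the exported JSON is installation
# independent.  A sketch of the resulting shape; the field names other than
# 'parent' and the uid value are purely illustrative:
def _example_serialized_category():
    return {
        'model': 'workflows.category',
        'fields': {
            'name': 'Example category',
            'parent': '6a1f0c2e-0000-0000-0000-000000000000',
        },
    }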
def export_package(package_name,writer,dest_folder=None):
style = color_style()
external = package_name in settings.INSTALLED_APPS_EXTERNAL_PACKAGES
if external and not dest_folder:
raise CommandError("You must provide a destination folder when exporting external packages.")
if not external and dest_folder:
raise CommandError("You can't use a custom destination folder when exporting local packages.")
if 'workflows.'+package_name not in settings.INSTALLED_APPS and not external:
raise CommandError("Package not found in INSTALLED_APPS.")
#here we check the integrity of the package
aws = AbstractWidget.objects.filter(package=package_name)
for aw in aws:
if aw.uid:
for bw in aws:
if bw.uid == aw.uid and bw.id != aw.id:
writer.write("Found two widgets with the same UID. Please select a widget to assign new UID to.\n")
selected_widget = choice([aw,bw],"Select a widget: ")
selected_widget.set_uid(commit=True)
#first we check if package_data directory exists and make it if it doesn't
if external:
package_directory = os.path.join(dest_folder,'package_data')
else:
package_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../../'+package_name+"/package_data/")
ensure_dir(package_directory)
widgets_directory = os.path.join(package_directory,"widgets")
deprecated_widgets_directory = os.path.join(package_directory,"deprecated_widgets")
ensure_dir(widgets_directory)
categories_directory = os.path.join(package_directory,"categories")
ensure_dir(categories_directory)
writer.write(" > Ensuring package directory for "+package_name+".\n")
categories = set()
writer.write(" > Exporting widgets\n")
global_change = False
for aw in aws:
aw.update_uid()
if os.path.isfile(os.path.join(deprecated_widgets_directory,aw.uid+'.json')):
writer.write(style.ERROR(" - Deprecated widget "+str(aw)+" found! Please import package to remove it. This widget has NOT been exported.\n"))
continue
add_category(aw.category,categories)
serialized_widget = serialize_widget(aw)
created = True
change = True
try:
widget_file = open(os.path.join(widgets_directory,aw.uid+'.json'),'r')
created = False
w_data = json.loads(widget_file.read())
widget_file.close()
if w_data == serialized_widget:
change = False
        except (IOError, ValueError):
created = True
change = True
if change:
global_change = True
if created:
writer.write(" + Exporting widget "+str(aw)+"\n")
else:
writer.write(" + Updating widget "+str(aw)+"\n")
widget_data = json.dumps(serialized_widget,indent=2)
widget_file = open(os.path.join(widgets_directory,aw.uid+'.json'),'w')
widget_file.write(widget_data)
widget_file.close()
if not global_change:
writer.write(" No changes in the widgets detected!\n")
writer.write(" > Exporting categories\n")
global_change = False
for category in categories:
c = Category.objects.get(id=category)
c.update_uid()
data = serialize_category(c)
created = True
change = True
try:
category_file = open(os.path.join(categories_directory,c.uid+'.json'),'r')
created = False
c_data = json.loads(category_file.read())
category_file.close()
if c_data == data:
change = False
        except (IOError, ValueError):
created = True
change = True
if change:
global_change = True
if created:
writer.write(" + Exporting category "+str(c)+"\n")
else:
writer.write(" + Updating category "+str(c)+"\n")
category_data = json.dumps(data,indent=2)
category_file = open(os.path.join(categories_directory,c.uid+'.json'),'w')
category_file.write(category_data)
category_file.close()
if not global_change:
writer.write(" No changes in the categories detected!\n")
class Command(BaseCommand):
help = 'Exports the package "package_name".'
def add_arguments(self, parser):
parser.add_argument('package_name', type=str)
        parser.add_argument('external_destination_folder', type=str, nargs='?', default=None)
def handle(self, *args, **options):
package_name = options.get('package_name')
if not package_name:
raise CommandError('Argument "package_name" is required.')
dest_folder = options.get('external_destination_folder')
writer = self.stdout
export_package(package_name,writer,dest_folder=dest_folder)
writer.write('Thanks for using the new export command. You rock.\n')
|
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright (c) 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Hyper-V Nova Compute driver.
"""
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import rdpconsoleops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
def __init__(self, virtapi):
super(HyperVDriver, self).__init__(virtapi)
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._snapshotops = snapshotops.SnapshotOps()
self._livemigrationops = livemigrationops.LiveMigrationOps()
self._migrationops = migrationops.MigrationOps()
self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
def init_host(self, host):
self._vmops.restart_vm_log_writers()
def list_instance_uuids(self):
return self._vmops.list_instance_uuids()
def list_instances(self):
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def get_info(self, instance):
return self._vmops.get_info(instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self._volumeops.attach_volume(connection_info,
instance['name'])
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self._volumeops.detach_volume(connection_info,
instance['name'])
def get_volume_connector(self, instance):
return self._volumeops.get_volume_connector(instance)
def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_host_stats(self, refresh=False):
return self._hostops.get_host_stats(refresh)
def host_power_action(self, host, action):
return self._hostops.host_power_action(host, action)
def snapshot(self, context, instance, image_id, update_task_state):
self._snapshotops.snapshot(context, instance, image_id,
update_task_state)
def pause(self, instance):
self._vmops.pause(instance)
def unpause(self, instance):
self._vmops.unpause(instance)
def suspend(self, context, instance):
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
self._vmops.power_off(instance, timeout, retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None):
self._vmops.power_on(instance, block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self._vmops.resume_state_on_host_boot(context, instance, network_info,
block_device_info)
def live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
self._livemigrationops.live_migration(context, instance, dest,
post_method, recover_method,
block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
self.destroy(context, instance, network_info, block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
self._livemigrationops.pre_live_migration(context, instance,
block_device_info,
network_info)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
self._livemigrationops.post_live_migration_at_destination(
context,
instance,
network_info,
block_migration)
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return self._livemigrationops.check_can_live_migrate_destination(
context, instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
self._livemigrationops.check_can_live_migrate_destination_cleanup(
context, dest_check_data)
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
return self._livemigrationops.check_can_live_migrate_source(
context, instance, dest_check_data)
def get_instance_disk_info(self, instance_name, block_device_info=None):
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
msg = _("VIF plugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
msg = _("VIF unplugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def ensure_filtering_rules_for_instance(self, instance, network_info):
LOG.debug("ensure_filtering_rules_for_instance called",
instance=instance)
def unfilter_instance(self, instance, network_info):
LOG.debug("unfilter_instance called", instance=instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
return self._migrationops.migrate_disk_and_power_off(context,
instance, dest,
flavor,
network_info,
block_device_info,
timeout,
retry_interval)
def confirm_migration(self, migration, instance, network_info):
self._migrationops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
self._migrationops.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
self._migrationops.finish_migration(context, migration, instance,
disk_info, network_info,
image_meta, resize_instance,
block_device_info, power_on)
def get_host_ip_addr(self):
return self._hostops.get_host_ip_addr()
def get_host_uptime(self, host):
return self._hostops.get_host_uptime()
def get_rdp_console(self, context, instance):
return self._rdpconsoleops.get_rdp_console(instance)
def get_console_output(self, context, instance):
return self._vmops.get_console_output(instance)
|
|
"""Unit test for monitor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import io
import json
import os
import shutil
import tempfile
import unittest
import mock
import treadmill
from treadmill import fs
from treadmill import monitor
from treadmill import supervisor
from treadmill import utils
class MonitorTest(unittest.TestCase):
"""Mock test for treadmill.monitor.MonitorContainerDown.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.dirwatch.DirWatcher', mock.Mock())
@mock.patch('treadmill.plugin_manager.load', mock.Mock())
def test_configure(self):
"""Test monitor run loop.
"""
# Disable W0212(protected-access)
# pylint: disable=W0212
config_dir = os.path.join(self.root, 'config')
watch_dir1 = os.path.join(self.root, 'watch', '1')
watch_dir2 = os.path.join(self.root, 'watch', '2')
fs.mkdir_safe(config_dir)
fs.mkdir_safe(watch_dir1)
fs.mkdir_safe(watch_dir2)
with io.open(os.path.join(config_dir, 'default'), 'w') as f:
f.writelines([
'{};plugin1\n'.format(watch_dir1),
'{};plugin2;{{"key": "value"}}\n'.format(watch_dir2)
])
impl1 = mock.Mock()
# W0613(unused-argument)
def _handler1(tm_env, params): # pylint: disable=W0613
return impl1
impl2 = mock.Mock()
# W0613(unused-argument)
def _handler2(tm_env, params): # pylint: disable=W0613
return impl2
treadmill.plugin_manager.load.side_effect = [_handler1, _handler2]
mock_dirwatch = mock.Mock()
treadmill.dirwatch.DirWatcher.return_value = mock_dirwatch
mock_dirwatch.wait_for_events.side_effect = [
StopIteration()
]
mon = monitor.Monitor(
tm_env={},
config_dir=config_dir
)
self.assertRaises(StopIteration, mon.run)
treadmill.plugin_manager.load.assert_has_calls([
mock.call('treadmill.tombstones', 'plugin1'),
mock.call('treadmill.tombstones', 'plugin2'),
], any_order=True)
mock_dirwatch.add_dir.assert_has_calls([
mock.call(watch_dir1),
mock.call(watch_dir2),
], any_order=True)
@mock.patch('treadmill.dirwatch.DirWatcher', mock.Mock())
@mock.patch('treadmill.plugin_manager.load', mock.Mock())
def test_configure_restart(self):
"""Test monitor run loop.
"""
# Disable W0212(protected-access)
# pylint: disable=W0212
config_dir = os.path.join(self.root, 'config')
watch_dir1 = os.path.join(self.root, 'watch', '1')
fs.mkdir_safe(config_dir)
fs.mkdir_safe(watch_dir1)
event_file = os.path.join(watch_dir1, 'test,12345.123,1,0')
utils.touch(event_file)
with io.open(os.path.join(config_dir, 'default'), 'w') as f:
f.writelines([
'{};plugin1\n'.format(watch_dir1)
])
impl1 = mock.Mock()
impl1.execute.return_value = True
# W0613(unused-argument)
def _handler1(tm_env, params): # pylint: disable=W0613
return impl1
treadmill.plugin_manager.load.side_effect = [_handler1]
mock_dirwatch = mock.Mock()
treadmill.dirwatch.DirWatcher.return_value = mock_dirwatch
mock_dirwatch.wait_for_events.side_effect = [
False, StopIteration()
]
mon = monitor.Monitor(
tm_env={},
config_dir=config_dir
)
self.assertRaises(StopIteration, mon.run)
impl1.execute.assert_called_with({
'return_code': 1,
'id': 'test',
'signal': 0,
'timestamp': 12345.123,
})
self.assertFalse(os.path.exists(event_file))
@mock.patch('treadmill.dirwatch.DirWatcher', mock.Mock())
@mock.patch('treadmill.plugin_manager.load', mock.Mock())
def test_run(self):
"""Test monitor run loop.
"""
# Disable W0212(protected-access)
# pylint: disable=W0212
config_dir = os.path.join(self.root, 'config')
watch_dir1 = os.path.join(self.root, 'watch', '1')
fs.mkdir_safe(config_dir)
fs.mkdir_safe(watch_dir1)
event_file = os.path.join(watch_dir1, 'test2,12345.123,256,9')
with io.open(os.path.join(config_dir, 'default'), 'w') as f:
f.writelines([
'{};plugin1\n'.format(watch_dir1)
])
impl1 = mock.Mock()
impl1.execute.return_value = False
# W0613(unused-argument)
def _handler1(tm_env, params): # pylint: disable=W0613
return impl1
treadmill.plugin_manager.load.side_effect = [_handler1]
def _side_effect():
utils.touch(event_file)
return True
mock_dirwatch = mock.Mock()
treadmill.dirwatch.DirWatcher.return_value = mock_dirwatch
mock_dirwatch.wait_for_events.side_effect = [
_side_effect(), StopIteration()
]
mon = monitor.Monitor(
tm_env={},
config_dir=config_dir
)
self.assertRaises(StopIteration, mon.run)
impl1.execute.assert_called_with({
'return_code': 256,
'id': 'test2',
'signal': 9,
'timestamp': 12345.123,
})
self.assertTrue(os.path.exists(event_file))
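# --- Illustrative note (not part of the original tests) ---------------------
# The tests above create tombstone files whose *names* encode the event as
# '<id>,<timestamp>,<return_code>,<signal>' (e.g. 'test2,12345.123,256,9').
# The real parsing lives in treadmill.monitor; this sketch only shows how the
# name maps onto the dict asserted against execute() above.
def _example_parse_event_name(name='test2,12345.123,256,9'):
    event_id, timestamp, return_code, signal = name.split(',')
    return {
        'id': event_id,
        'timestamp': float(timestamp),
        'return_code': int(return_code),
        'signal': int(signal),
    }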
class MonitorContainerCleanupTest(unittest.TestCase):
"""Mock test for treadmill.monitor.MonitorContainerCleanup.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('os.replace', mock.Mock())
@mock.patch('treadmill.supervisor.control_svscan', mock.Mock())
@mock.patch('treadmill.appcfg.abort.flag_aborted', mock.Mock())
def test_execute(self):
"""Test shutting down of the node.
"""
mock_tm_env_class = collections.namedtuple(
'MockTMEnv', ['running_dir', 'cleanup_dir']
)
mock_tm_env = mock_tm_env_class(os.path.join(self.root, 'running'),
os.path.join(self.root, 'cleanup'))
service_dir = os.path.join(mock_tm_env.running_dir, 'mock_service')
fs.mkdir_safe(service_dir)
with io.open(os.path.join(service_dir, 'type'), 'w') as f:
f.write('longrun')
mock_container_cleanup_action =\
monitor.MonitorContainerCleanup(mock_tm_env, {})
res = mock_container_cleanup_action.execute(
{
'signal': 0,
'id': 'mock_service',
}
)
# This MonitorContainerCleanup stops the monitor.
self.assertEqual(res, True)
treadmill.appcfg.abort.flag_aborted.assert_not_called()
os.replace.assert_called()
supervisor.control_svscan.assert_called_with(
os.path.join(self.root, 'running'), [
supervisor.SvscanControlAction.alarm,
supervisor.SvscanControlAction.nuke
]
)
@mock.patch('os.replace', mock.Mock())
@mock.patch('treadmill.supervisor.control_svscan', mock.Mock())
@mock.patch('treadmill.appcfg.abort.flag_aborted', mock.Mock())
def test_execute_pid1_aborted(self):
"""Test shutting down of the node.
"""
mock_tm_env_class = collections.namedtuple(
'MockTMEnv', ['running_dir', 'cleanup_dir']
)
mock_tm_env = mock_tm_env_class(os.path.join(self.root, 'running'),
os.path.join(self.root, 'cleanup'))
service_dir = os.path.join(mock_tm_env.running_dir, 'mock_service')
fs.mkdir_safe(service_dir)
with io.open(os.path.join(service_dir, 'type'), 'w') as f:
f.write('longrun')
mock_container_cleanup_action =\
monitor.MonitorContainerCleanup(mock_tm_env, {})
res = mock_container_cleanup_action.execute(
{
'signal': 6,
'id': 'mock_service',
}
)
# This MonitorContainerCleanup stops the monitor.
self.assertEqual(res, True)
treadmill.appcfg.abort.flag_aborted.assert_called_with(
os.path.join(service_dir, 'data'),
why=treadmill.appcfg.abort.AbortedReason.PID1
)
os.replace.assert_called()
supervisor.control_svscan.assert_called_with(
os.path.join(self.root, 'running'), [
supervisor.SvscanControlAction.alarm,
supervisor.SvscanControlAction.nuke
]
)
class MonitorContainerDownTest(unittest.TestCase):
"""Mock test for treadmill.monitor.MonitorContainerDown.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.supervisor.open_service',
mock.Mock(spec_set=True))
@mock.patch('treadmill.supervisor.control_service',
mock.Mock(spec_set=True))
def test_execute(self):
"""Test shutting down of the container services.
"""
apps_dir = os.path.join(self.root, 'apps')
data_dir = os.path.join(apps_dir, 'proid.app-0000000001-abcde', 'data')
os.makedirs(data_dir)
mock_tm_env = mock.Mock(apps_dir=apps_dir)
treadmill.supervisor.open_service.return_value = mock.Mock(
data_dir=data_dir
)
monitor_container_down = monitor.MonitorContainerDown(mock_tm_env, {})
res1 = monitor_container_down.execute(
{
'id': 'proid.app-0000000001-abcde,service1',
'return_code': 1,
'signal': 0,
'timestamp': 12345.123
}
)
res2 = monitor_container_down.execute(
{
'id': 'proid.app-0000000001-abcde,service2',
'return_code': 256,
'signal': 15,
'timestamp': 12345.456
}
)
self.assertEqual(res1, True)
self.assertEqual(res2, True)
exitinfo_file = os.path.join(data_dir, 'exitinfo')
with io.open(exitinfo_file, 'r') as f:
exitinfo = json.load(f)
self.assertEqual(
exitinfo,
{
'service': 'service1',
'return_code': 1,
'signal': 0,
'timestamp': 12345.123
}
)
self.assertTrue(treadmill.supervisor.control_service.called)
class MonitorNodeDownTest(unittest.TestCase):
"""Mock test for treadmill.monitor.MonitorNodeDown.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
def test_execute(self):
"""Test shutting down of the node.
"""
mock_tm_env_class = collections.namedtuple(
'MockTMEnv', ['watchdog_dir']
)
mock_tm_env = mock_tm_env_class(self.root)
mock_down_action = monitor.MonitorNodeDown(mock_tm_env, {})
res = mock_down_action.execute(
{
'id': 'mock_service',
'return_code': 42,
'signal': 9,
}
)
# This MonitorDownAction stops the monitor.
self.assertEqual(res, False)
self.assertTrue(
os.path.exists(os.path.join(self.root, 'Monitor-mock_service'))
)
if __name__ == '__main__':
unittest.main()
|
|
import json
from base64 import b64encode
from datetime import date, time, datetime
from .._compat import PY2, integer_types, to_unicode, to_bytes, basestring
from ..adapters.base import SQLAdapter, NoSQLAdapter
from ..helpers.classes import Reference, SQLCustomType
from ..helpers.methods import bar_encode
from ..helpers.serializers import serializers
from ..objects import Row, Expression, Field
from . import (
Representer, representers, for_type, before_type, for_instance, pre)
long = integer_types[-1]
NoneType = type(None)
class BaseRepresenter(Representer):
@for_type('boolean', adapt=False)
def _boolean(self, value):
if value and not str(value)[:1].upper() in '0F':
return self.adapter.smart_adapt(self.dialect.true)
return self.adapter.smart_adapt(self.dialect.false)
@for_type('id', adapt=False)
def _id(self, value):
return str(long(value))
@for_type('integer', adapt=False)
def _integer(self, value):
return str(long(value))
@for_type('decimal', adapt=False)
def _decimal(self, value):
return str(value)
@for_type('double', adapt=False)
def _double(self, value):
return repr(float(value))
@for_type('date', encode=True)
def _date(self, value):
if isinstance(value, (date, datetime)):
return value.isoformat()[:10]
return str(value)
@for_type('time', encode=True)
def _time(self, value):
if isinstance(value, time):
return value.isoformat()[:10]
return str(value)
@for_type('datetime', encode=True)
def _datetime(self, value):
if isinstance(value, datetime):
value = value.isoformat(self.dialect.dt_sep)[:19]
elif isinstance(value, date):
value = value.isoformat()[:10]+self.dialect.dt_sep+'00:00:00'
else:
value = str(value)
return value
def _ensure_list(self, value):
if not value:
value = []
elif not isinstance(value, (list, tuple)):
value = [value]
return value
@for_type('list:integer')
def _list_integer(self, value):
values = self._ensure_list(value)
values = list(map(int, [val for val in values if val != '']))
        return bar_encode(values)
@for_type('list:string')
def _list_string(self, value):
value = self._ensure_list(value)
if PY2:
try:
value = map(str, value)
except:
value = map(
lambda x: unicode(x).encode(self.adapter.db_codec), value)
else:
value = list(map(str, value))
return bar_encode(value)
@for_type('list:reference', adapt=False)
def _list_reference(self, value):
return self.registered_t['list:integer'](value, 'list:reference')
class JSONRepresenter(Representer):
@for_type('json', encode=True)
def _json(self, value):
return serializers.json(value)
@representers.register_for(SQLAdapter)
class SQLRepresenter(BaseRepresenter):
def _custom_type(self, value, field_type):
value = field_type.encoder(value)
if value and field_type.type in ('string', 'text', 'json'):
return self.adapter.adapt(value)
return value or 'NULL'
@pre()
def _before_all(self, obj, field_type):
if isinstance(field_type, SQLCustomType):
return self._custom_type(obj, field_type)
if obj == '' and not field_type[:2] in ('st', 'te', 'js', 'pa', 'up'):
return 'NULL'
r = self.exceptions(obj, field_type)
return r
def exceptions(self, obj, field_type):
return None
@for_instance(NoneType)
def _none(self, value, field_type):
return 'NULL'
@for_instance(Expression)
def _expression(self, value, field_type):
return str(value)
@for_instance(Field)
def _fieldexpr(self, value, field_type):
return str(value)
@before_type('reference')
def reference_extras(self, field_type):
return {'referenced': field_type[9:].strip()}
@for_type('reference', adapt=False)
def _reference(self, value, referenced):
if referenced in self.adapter.db.tables:
return str(long(value))
p = referenced.partition('.')
if p[2] != '':
try:
ftype = self.adapter.db[p[0]][p[2]].type
return self.adapter.represent(value, ftype)
except (ValueError, KeyError):
return repr(value)
elif isinstance(value, (Row, Reference)):
return str(value['id'])
return str(long(value))
@for_type('blob', encode=True)
def _blob(self, value):
return b64encode(to_bytes(value))
@representers.register_for(NoSQLAdapter)
class NoSQLRepresenter(BaseRepresenter):
def adapt(self, value):
return value
@pre(is_breaking=True)
def _before_all(self, obj, field_type):
if isinstance(field_type, SQLCustomType):
return True, field_type.encoder(obj)
return False, obj
@pre(is_breaking=True)
def _nullify_empty_string(self, obj, field_type):
if obj == '' and not (isinstance(field_type, str) and
field_type[:2] in ('st', 'te', 'pa', 'up')):
return True, None
return False, obj
@for_instance(NoneType)
def _none(self, value, field_type):
return None
@for_instance(list, repr_type=True)
def _repr_list(self, value, field_type):
if isinstance(field_type, str) and not field_type.startswith('list:'):
return [self.adapter.represent(v, field_type) for v in value]
return value
@for_type('id')
def _id(self, value):
return long(value)
@for_type('integer')
def _integer(self, value):
return long(value)
@for_type('bigint')
def _bigint(self, value):
return long(value)
@for_type('double')
def _double(self, value):
return float(value)
@for_type('reference')
def _reference(self, value):
if isinstance(value, (Row, Reference)):
value = value['id']
return long(value)
@for_type('boolean')
def _boolean(self, value):
if not isinstance(value, bool):
if value and not str(value)[:1].upper() in '0F':
return True
return False
return value
@for_type('string')
def _string(self, value):
return to_unicode(value)
@for_type('text')
def _text(self, value):
return to_unicode(value)
@for_type('blob')
def _blob(self, value):
return value
@for_type('json')
def _json(self, value):
if isinstance(value, basestring):
value = to_unicode(value)
value = json.loads(value)
return value
def _represent_list(self, value):
items = self._ensure_list(value)
return [item for item in items if item is not None]
@for_type('date')
def _date(self, value):
if not isinstance(value, date):
(y, m, d) = map(int, str(value).strip().split('-'))
value = date(y, m, d)
elif isinstance(value, datetime):
(y, m, d) = (value.year, value.month, value.day)
value = date(y, m, d)
return value
@for_type('time')
def _time(self, value):
if not isinstance(value, time):
time_items = list(map(int, str(value).strip().split(':')[:3]))
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
value = time(h, mi, s)
return value
@for_type('datetime')
def _datetime(self, value):
if not isinstance(value, datetime):
(y, m, d) = map(int, str(value)[:10].strip().split('-'))
time_items = list(map(int, str(value)[11:].strip().split(':')[:3]))
while len(time_items) < 3:
time_items.append(0)
(h, mi, s) = time_items
value = datetime(y, m, d, h, mi, s)
return value
@for_type('list:integer')
def _list_integer(self, value):
values = self._represent_list(value)
return list(map(int, values))
@for_type('list:string')
def _list_string(self, value):
values = self._represent_list(value)
return list(map(to_unicode, values))
@for_type('list:reference')
def _list_reference(self, value):
values = self._represent_list(value)
return list(map(long, values))
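# --- Illustrative note (not part of the original module) --------------------
# NoSQLRepresenter._datetime above accepts loosely formatted strings and pads
# missing time components with zeros.  A self-contained sketch of that
# normalization, independent of the adapter machinery:
def _example_parse_datetime(value='2012-01-14 03:21'):
    from datetime import datetime as _datetime
    (y, m, d) = map(int, str(value)[:10].strip().split('-'))
    time_items = list(map(int, str(value)[11:].strip().split(':')[:3]))
    while len(time_items) < 3:
        time_items.append(0)             # '03:21' -> [3, 21, 0]
    (h, mi, s) = time_items
    return _datetime(y, m, d, h, mi, s)  # -> datetime(2012, 1, 14, 3, 21)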
|
|
#!/usr/bin/env python
#
# Generated Tue Feb 1 14:34:02 2011 by generateDS.py version 2.5a.
#
import sys
import peoplesup as supermod
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
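# --- Illustrative note (not part of the generated module) -------------------
# parsexml_ simply forwards its arguments to etree_.parse, adding lxml's
# comment-ignoring ETCompatXMLParser when lxml was imported above.  A minimal
# sketch parsing an in-memory document (the XML content is made up):
def _example_parsexml():
    import io
    doc = parsexml_(io.BytesIO(b'<people><comments/></people>'))
    return doc.getroot().tag   # 'people'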
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class peopleSub(supermod.people):
def __init__(self, comments=None, person=None, specialperson=None, programmer=None, python_programmer=None, java_programmer=None):
super(peopleSub, self).__init__(comments, person, specialperson, programmer, python_programmer, java_programmer, )
supermod.people.subclass = peopleSub
# end class peopleSub
class commentsSub(supermod.comments):
def __init__(self, emp=None, bold=None, valueOf_=None, mixedclass_=None, content_=None):
super(commentsSub, self).__init__(emp, bold, valueOf_, mixedclass_, content_, )
supermod.comments.subclass = commentsSub
# end class commentsSub
class personSub(supermod.person):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None):
super(personSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, )
supermod.person.subclass = personSub
# end class personSub
class specialpersonSub(supermod.specialperson):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None):
super(specialpersonSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, )
supermod.specialperson.subclass = specialpersonSub
# end class specialpersonSub
class programmerSub(supermod.programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None):
super(programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, )
supermod.programmer.subclass = programmerSub
# end class programmerSub
class paramSub(supermod.param):
def __init__(self, semantic=None, name=None, flow=None, sid=None, type_=None, id=None, valueOf_=None):
super(paramSub, self).__init__(semantic, name, flow, sid, type_, id, valueOf_, )
supermod.param.subclass = paramSub
# end class paramSub
class python_programmerSub(supermod.python_programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, nick_name=None, favorite_editor=None):
super(python_programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, nick_name, favorite_editor, )
supermod.python_programmer.subclass = python_programmerSub
# end class python_programmerSub
class java_programmerSub(supermod.java_programmer):
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, status=None, nick_name=None, favorite_editor=None):
super(java_programmerSub, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eltoken, elshort, ellong, elparam, elarraytypes, status, nick_name, favorite_editor, )
supermod.java_programmer.subclass = java_programmerSub
# end class java_programmerSub
class agentSub(supermod.agent):
def __init__(self, firstname=None, lastname=None, priority=None, info=None, vehicle=None):
super(agentSub, self).__init__(firstname, lastname, priority, info, vehicle, )
supermod.agent.subclass = agentSub
# end class agentSub
class special_agentSub(supermod.special_agent):
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
super(special_agentSub, self).__init__(firstname, lastname, priority, info, )
supermod.special_agent.subclass = special_agentSub
# end class special_agentSub
class boosterSub(supermod.booster):
def __init__(self, member_id=None, firstname=None, lastname=None, other_name=None, classxx=None, other_value=None, type_=None, client_handler=None):
super(boosterSub, self).__init__(member_id, firstname, lastname, other_name, classxx, other_value, type_, client_handler, )
supermod.booster.subclass = boosterSub
# end class boosterSub
class client_handlerSub(supermod.client_handler):
def __init__(self, fullname=None, refid=None):
super(client_handlerSub, self).__init__(fullname, refid, )
supermod.client_handler.subclass = client_handlerSub
# end class client_handlerSub
class infoSub(supermod.info):
def __init__(self, rating=None, type_=None, name=None, valueOf_=None):
super(infoSub, self).__init__(rating, type_, name, valueOf_, )
supermod.info.subclass = infoSub
# end class infoSub
class vehicleSub(supermod.vehicle):
def __init__(self, wheelcount=None):
super(vehicleSub, self).__init__(wheelcount, )
supermod.vehicle.subclass = vehicleSub
# end class vehicleSub
class automobileSub(supermod.automobile):
def __init__(self, wheelcount=None, drivername=None):
super(automobileSub, self).__init__(wheelcount, drivername, )
supermod.automobile.subclass = automobileSub
# end class automobileSub
class airplaneSub(supermod.airplane):
def __init__(self, wheelcount=None, pilotname=None):
super(airplaneSub, self).__init__(wheelcount, pilotname, )
supermod.airplane.subclass = airplaneSub
# end class airplaneSub
def get_root_tag(node):
tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
rootClass = None
if hasattr(supermod, tag):
rootClass = getattr(supermod, tag)
return tag, rootClass
def parse(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:pl="http://kuhlman.com/people.xsd"')
doc = None
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:pl="http://kuhlman.com/people.xsd"')
return rootObj
def parseLiteral(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = supermod.people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from peoplesup import *\n\n')
sys.stdout.write('import peoplesup as model_\n\n')
sys.stdout.write('rootObj = model_.people(\n')
rootObj.exportLiteral(sys.stdout, 0, name_="people")
sys.stdout.write(')\n')
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
    print(USAGE_TEXT)
sys.exit(1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
root = parse(infilename)
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
|
|
"""Working day tools
"""
import warnings
import ephem
import pytz
from calendar import monthrange
from datetime import date, timedelta, datetime
from math import pi
from dateutil import easter
from lunardate import LunarDate
from calverter import Calverter
MON, TUE, WED, THU, FRI, SAT, SUN = range(7)
class Calendar(object):
FIXED_HOLIDAYS = ()
def __init__(self):
self._holidays = {}
def get_fixed_holidays(self, year):
"""Return the fixed days according to the FIXED_HOLIDAYS class property
"""
days = []
for month, day, label in self.FIXED_HOLIDAYS:
days.append((date(year, month, day), label))
return days
def get_variable_days(self, year):
return []
def get_calendar_holidays(self, year):
"""Get calendar holidays.
        If you override this, it **must** return a list of
        tuples (date, holiday_name)."""
return self.get_fixed_holidays(year) + self.get_variable_days(year)
    def holidays(self, year=None):
        """Compute holidays (non-working days) for a given year.
        Return a sorted list of 2-item tuples, each composed of a date and a
        label."""
if not year:
year = date.today().year
if year in self._holidays:
return self._holidays[year]
# Here we process the holiday specific calendar
temp_calendar = tuple(self.get_calendar_holidays(year))
# it is sorted
self._holidays[year] = sorted(temp_calendar)
return self._holidays[year]
def holidays_set(self, year=None):
"Return a quick date index (set)"
return set([day for day, label in self.holidays(year)])
def get_weekend_days(self):
"""Return a list (or a tuple) of weekdays that are *not* working days.
e.g: return (SAT, SUN,)
"""
raise NotImplementedError("Your Calendar class must implement the"
" `get_weekend_days` method")
def is_working_day(self, day,
extra_working_days=None, extra_holidays=None):
"""Return True if it's a working day.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_working_days``, you'll state that these dates
**are** working days.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
Please note that the ``extra_working_days`` list has priority over the
``extra_holidays`` list.
"""
# a little exception: chop the datetime type
if type(day) is datetime:
day = day.date()
# Extra lists exceptions
if extra_working_days and day in extra_working_days:
return True
# Regular rules
if day.weekday() in self.get_weekend_days():
return False
return not self.is_holiday(day, extra_holidays=extra_holidays)
    def is_holiday(self, day, extra_holidays=None):
        """Return True if it's a holiday.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
"""
if extra_holidays and day in extra_holidays:
return True
return day in self.holidays_set(day.year)
def add_working_days(self, day, delta,
extra_working_days=None, extra_holidays=None):
"""Add `delta` working days to the date.
the ``delta`` parameter might be positive or negative. If it's
negative, you may want to use the ``sub_working_days()`` method with
a positive ``delta`` argument.
By providing ``extra_working_days``, you'll state that these dates
**are** working days.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
Please note that the ``extra_working_days`` list has priority over the
``extra_holidays`` list.
"""
days = 0
temp_day = day
day_added = 1 if delta >= 0 else -1
delta = abs(delta)
while days < delta:
temp_day = temp_day + timedelta(days=day_added)
if self.is_working_day(temp_day,
extra_working_days=extra_working_days,
extra_holidays=extra_holidays):
days += 1
return temp_day
def sub_working_days(self, day, delta,
extra_working_days=None, extra_holidays=None):
"""
        Subtract ``delta`` working days from the date.
This method is a shortcut / helper. Users may want to use either::
cal.add_working_days(my_date, -7)
cal.sub_working_days(my_date, 7)
The other parameters are to be used exactly as in the
``add_working_days`` method.
A negative ``delta`` argument will be converted into its absolute
value. Hence, the two following calls are equivalent::
cal.sub_working_days(my_date, -7)
cal.sub_working_days(my_date, 7)
"""
delta = abs(delta)
return self.add_working_days(
day, -delta, extra_working_days, extra_holidays)
    def find_following_working_day(self, day):
        "Return the next day that is not a week-end day (holidays are ignored)"
while day.weekday() in self.get_weekend_days():
day = day + timedelta(days=1)
return day
@staticmethod
def get_nth_weekday_in_month(year, month, weekday, n=1, start=None):
"""Get the nth weekday in a given month. e.g:
>>> # the 1st monday in Jan 2013
>>> Calendar.get_nth_weekday_in_month(2013, 1, MON)
datetime.date(2013, 1, 7)
>>> # The 2nd monday in Jan 2013
>>> Calendar.get_nth_weekday_in_month(2013, 1, MON, 2)
datetime.date(2013, 1, 14)
"""
day = date(year, month, 1)
if start:
day = start
counter = 0
while True:
if day.month != month:
# Don't forget to break if "n" is too big
return None
if day.weekday() == weekday:
counter += 1
if counter == n:
break
day = day + timedelta(days=1)
return day
@staticmethod
def get_last_weekday_in_month(year, month, weekday):
"""Get the last weekday in a given month. e.g:
>>> # the last monday in Jan 2013
>>> Calendar.get_last_weekday_in_month(2013, 1, MON)
datetime.date(2013, 1, 28)
"""
day = date(year, month, monthrange(year, month)[1])
while True:
if day.weekday() == weekday:
break
day = day - timedelta(days=1)
return day
@staticmethod
    def get_first_weekday_after(day, weekday):
        """Get the first occurrence of ``weekday`` on or after the given day.
        If ``day`` already falls on that weekday, the same day is returned.
>>> # the first monday after Apr 1 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 1), 0)
datetime.date(2015, 4, 6)
>>> # the first tuesday after Apr 14 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 14), 1)
datetime.date(2015, 4, 14)
"""
day_delta = (weekday - day.weekday()) % 7
day = day + timedelta(days=day_delta)
return day
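# Illustrative sketch (not part of the original module): the smallest concrete
# Calendar only has to implement get_weekend_days(); everything else below is
# inherited behaviour. The sample dates in the comment are assumptions.
class ExampleWeekendOnlyCalendar(Calendar):
    "Hypothetical calendar whose only non-working days are week-ends."
    def get_weekend_days(self):
        return (SAT, SUN)
# e.g. ExampleWeekendOnlyCalendar().add_working_days(date(2013, 1, 4), 1)
# skips the week-end and returns date(2013, 1, 7).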
class ChristianMixin(Calendar):
EASTER_METHOD = None # to be assigned in the inherited mixin
include_epiphany = False
include_clean_monday = False
include_annunciation = False
include_ash_wednesday = False
include_palm_sunday = False
include_holy_thursday = False
include_good_friday = False
include_easter_monday = False
include_easter_saturday = False
include_easter_sunday = False
include_all_saints = False
include_immaculate_conception = False
include_christmas = True
include_christmas_eve = False
include_ascension = False
include_assumption = False
include_whit_sunday = False
whit_sunday_label = 'Whit Sunday'
include_whit_monday = False
whit_monday_label = 'Whit Monday'
include_corpus_christi = False
include_boxing_day = False
boxing_day_label = "Boxing Day"
def get_ash_wednesday(self, year):
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=46)
def get_palm_sunday(self, year):
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=7)
def get_holy_thursday(self, year):
"Return the date of the last thursday before easter"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=3)
def get_good_friday(self, year):
"Return the date of the last friday before easter"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=2)
def get_clean_monday(self, year):
"Return the clean monday date"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=48)
def get_easter_saturday(self, year):
"Return the Easter Saturday date"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=1)
    def get_easter_sunday(self, year):
        "Return the date of Easter Sunday, computed with the EASTER_METHOD"
return easter.easter(year, self.EASTER_METHOD)
def get_easter_monday(self, year):
"Return the date of the monday after easter"
sunday = self.get_easter_sunday(year)
return sunday + timedelta(days=1)
def get_ascension_thursday(self, year):
easter = self.get_easter_sunday(year)
return easter + timedelta(days=39)
def get_whit_monday(self, year):
easter = self.get_easter_sunday(year)
return easter + timedelta(days=50)
def get_whit_sunday(self, year):
easter = self.get_easter_sunday(year)
return easter + timedelta(days=49)
def get_corpus_christi(self, year):
return self.get_easter_sunday(year) + timedelta(days=60)
def get_variable_days(self, year): # noqa
"Return the christian holidays list according to the mixin"
days = super(ChristianMixin, self).get_variable_days(year)
if self.include_epiphany:
days.append((date(year, 1, 6), "Epiphany"))
if self.include_clean_monday:
days.append((self.get_clean_monday(year), "Clean Monday"))
if self.include_annunciation:
days.append((date(year, 3, 25), "Annunciation"))
if self.include_ash_wednesday:
days.append((self.get_ash_wednesday(year), "Ash Wednesday"))
if self.include_palm_sunday:
days.append((self.get_palm_sunday(year), "Palm Sunday"))
if self.include_holy_thursday:
days.append((self.get_holy_thursday(year), "Holy Thursday"))
if self.include_good_friday:
days.append((self.get_good_friday(year), "Good Friday"))
if self.include_easter_saturday:
days.append((self.get_easter_saturday(year), "Easter Saturday"))
if self.include_easter_sunday:
days.append((self.get_easter_sunday(year), "Easter Sunday"))
if self.include_easter_monday:
days.append((self.get_easter_monday(year), "Easter Monday"))
if self.include_assumption:
days.append((date(year, 8, 15), "Assumption of Mary to Heaven"))
if self.include_all_saints:
days.append((date(year, 11, 1), "All Saints Day"))
if self.include_immaculate_conception:
days.append((date(year, 12, 8), "Immaculate Conception"))
if self.include_christmas:
days.append((date(year, 12, 25), "Christmas Day"))
if self.include_christmas_eve:
days.append((date(year, 12, 24), "Christmas Eve"))
if self.include_boxing_day:
days.append((date(year, 12, 26), self.boxing_day_label))
if self.include_ascension:
days.append((
self.get_ascension_thursday(year), "Ascension Thursday"))
if self.include_whit_monday:
days.append((self.get_whit_monday(year), self.whit_monday_label))
if self.include_whit_sunday:
days.append((self.get_whit_sunday(year), self.whit_sunday_label))
if self.include_corpus_christi:
days.append((self.get_corpus_christi(year), "Corpus Christi"))
return days
class WesternCalendar(Calendar):
"""
General usage calendar for Western countries.
(chiefly Europe and Northern America)
"""
EASTER_METHOD = easter.EASTER_WESTERN
WEEKEND_DAYS = (SAT, SUN)
shift_new_years_day = False
FIXED_HOLIDAYS = (
(1, 1, 'New year'),
)
def get_weekend_days(self):
"Week-end days are SATurday and SUNday."
return self.WEEKEND_DAYS
def get_variable_days(self, year):
days = super(WesternCalendar, self).get_variable_days(year)
new_year = date(year, 1, 1)
if self.shift_new_years_day:
if new_year.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(new_year),
"New Year shift"))
return days
class OrthodoxMixin(ChristianMixin):
EASTER_METHOD = easter.EASTER_ORTHODOX
class LunarCalendar(Calendar):
"""Calendar including lunar days
"""
FIXED_HOLIDAYS = (
(1, 1, 'Lunar new year'),
)
@staticmethod
def lunar(year, month, day):
return LunarDate(year, month, day).toSolarDate()
class EphemMixin(LunarCalendar):
    def calculate_equinoxes(self, year, timezone='UTC'):
        """Calculate the two equinoxes of the year in the given time zone."""
tz = pytz.timezone(timezone)
d1 = ephem.next_equinox(str(year))
d = ephem.Date(str(d1))
equinox1 = d.datetime() + tz.utcoffset(d.datetime())
d2 = ephem.next_equinox(d1)
d = ephem.Date(str(d2))
equinox2 = d.datetime() + tz.utcoffset(d.datetime())
return (equinox1.date(), equinox2.date())
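    # For reference (assumed example): calculate_equinoxes(2014) returns the
    # March and September equinox dates, roughly (2014-03-20, 2014-09-23) in UTC.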
def solar_term(self, year, degrees, timezone='UTC'):
"""
Returns the date of the solar term for the given longitude
and the given year.
Solar terms are used for Chinese and Taiwanese holidays
(e.g. Qingming Festival in Taiwan).
More information:
- https://en.wikipedia.org/wiki/Solar_term
- https://en.wikipedia.org/wiki/Qingming
This function is adapted from the following topic:
https://answers.launchpad.net/pyephem/+question/110832
"""
twopi = 2 * pi
tz = pytz.timezone(timezone)
# Find out the sun's current longitude.
sun = ephem.Sun(ephem.Date(str(year)))
current_longitude = sun.hlong - pi
# Find approximately the right time of year.
target_longitude = degrees * ephem.degree
difference = (target_longitude - current_longitude) % twopi
t0 = ephem.Date(str(year)) + 365.25 * difference / twopi
# Zero in on the exact moment.
def f(t):
sun.compute(t)
longitude = sun.hlong - pi
return ephem.degrees(target_longitude - longitude).znorm
d = ephem.Date(ephem.newton(f, t0, t0 + ephem.minute))
solar_term = d.datetime() + tz.utcoffset(d.datetime())
return solar_term.date()
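# Illustrative sketch (an assumption, not part of the original module): the
# Qingming festival falls on the solar term at 15 degrees of solar longitude,
# so solar_term() above could be used to locate it. The timezone choice is
# also an assumption.
def example_qingming(year):
    return EphemMixin().solar_term(year, 15, 'Asia/Taipei')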
class CalverterMixin(Calendar):
conversion_method = None
ISLAMIC_HOLIDAYS = ()
def __init__(self, *args, **kwargs):
super(CalverterMixin, self).__init__(*args, **kwargs)
self.calverter = Calverter()
if self.conversion_method is None:
raise NotImplementedError
def converted(self, year):
conversion_method = getattr(
self.calverter, 'jd_to_%s' % self.conversion_method)
current = date(year, 1, 1)
days = []
while current.year == year:
julian_day = self.calverter.gregorian_to_jd(
current.year,
current.month,
current.day)
days.append(conversion_method(julian_day))
current = current + timedelta(days=1)
return days
def calverted_years(self, year):
converted = self.converted(year)
generator = (y for y, m, d in converted)
return sorted(list(set(generator)))
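    # e.g. (assumed example) the Gregorian year 2014 overlaps the Islamic
    # years 1435 and 1436, so calverted_years(2014) would return [1435, 1436]
    # with an 'islamic' conversion_method.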
def get_islamic_holidays(self):
return self.ISLAMIC_HOLIDAYS
def get_variable_days(self, year):
        warnings.warn('Please take note that, due to arbitrary decisions, '
                      'this Islamic calendar computation may be wrong.')
days = super(CalverterMixin, self).get_variable_days(year)
years = self.calverted_years(year)
conversion_method = getattr(
self.calverter, '%s_to_jd' % self.conversion_method)
for month, day, label in self.get_islamic_holidays():
for y in years:
jd = conversion_method(y, month, day)
g_year, g_month, g_day = self.calverter.jd_to_gregorian(jd)
if g_year == year:
holiday = date(g_year, g_month, g_day)
days.append((holiday, label))
return days
class IslamicMixin(CalverterMixin):
conversion_method = 'islamic'
include_prophet_birthday = False
include_day_after_prophet_birthday = False
include_start_ramadan = False
include_eid_al_fitr = False
length_eid_al_fitr = 1
include_eid_al_adha = False
length_eid_al_adha = 1
include_day_of_sacrifice = False
include_day_of_sacrifice_label = "Eid al-Adha"
include_islamic_new_year = False
include_laylat_al_qadr = False
    def get_islamic_holidays(self):
        """Return a list of (month, day, label) tuples for Islamic holidays.
        Please take note that these dates must be expressed using the Islamic
        calendar."""
days = list(super(IslamicMixin, self).get_islamic_holidays())
if self.include_islamic_new_year:
days.append((1, 1, "Islamic New Year"))
if self.include_prophet_birthday:
days.append((3, 12, "Prophet's Birthday"))
if self.include_day_after_prophet_birthday:
days.append((3, 13, "Day after Prophet's Birthday"))
if self.include_start_ramadan:
days.append((9, 1, "Start of ramadan"))
if self.include_eid_al_fitr:
for x in range(self.length_eid_al_fitr):
days.append((10, x + 1, "Eid al-Fitr"))
if self.include_eid_al_adha:
for x in range(self.length_eid_al_adha):
days.append((12, x + 10, "Eid al-Adha"))
if self.include_day_of_sacrifice:
days.append((12, 10, self.include_day_of_sacrifice_label))
if self.include_laylat_al_qadr:
warnings.warn("The Islamic holiday named Laylat al-Qadr is decided"
" by the religious authorities. It is not possible"
" to compute it. You'll have to add it manually.")
return tuple(days)
class JalaliMixin(CalverterMixin):
conversion_method = 'jalali'
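# Illustrative sketch (not part of the original module): country calendars are
# meant to be defined by combining the base classes and mixins above. The
# holiday selection here is an arbitrary assumption made for the example.
class ExampleChristianWesternCalendar(WesternCalendar, ChristianMixin):
    "Hypothetical Western calendar with a few Christian holidays enabled."
    include_good_friday = True
    include_easter_monday = True
    include_whit_monday = True
    include_boxing_day = True
    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (5, 1, 'Labour Day'),  # assumed extra fixed holiday
    )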
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
android_standard_include_paths = set([
# JNI_H_INCLUDE in build/core/binary.mk
'dalvik/libnativehelper/include/nativehelper',
# from SRC_HEADERS in build/core/config.mk
'system/core/include',
'hardware/libhardware/include',
'hardware/libhardware_legacy/include',
'hardware/ril/include',
'dalvik/libnativehelper/include',
'frameworks/native/include',
'frameworks/native/opengl/include',
'frameworks/base/include',
'frameworks/base/opengl/include',
'frameworks/base/native/include',
'external/skia/include',
# TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
'bionic/libc/arch-arm/include',
'bionic/libc/include',
'bionic/libstdc++/include',
'bionic/libc/kernel/common',
'bionic/libc/kernel/arch-arm',
'bionic/libm/include',
'bionic/libm/include/arm',
'bionic/libthread_db/include',
])
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
make.ensure_directory_exists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'NONE')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
self.WriteLn('LOCAL_MODULE_TAGS := optional')
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
# Grab output directories; needed for Actions and Rules.
self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared)')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)' %
main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)' %
main_output)
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
rule_trigger = '%s_rule_trigger' % self.android_module
did_write_rule = False
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
did_write_rule = True
name = make.StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_intermediate_dir)'
% main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(GYP_ABS_ANDROID_TOP_DIR)/$(gyp_shared_intermediate_dir)'
% main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
self.WriteLn('%s: %s' % (output, main_output))
self.WriteLn('.PHONY: %s' % (rule_trigger))
self.WriteLn('%s: %s' % (rule_trigger, main_output))
self.WriteLn('')
if did_write_rule:
extra_sources.append(rule_trigger) # Force all rules to run.
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -r $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
config = configs[spec['default_configuration']]
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags'))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS')
cflags_c, includes_from_cflags_c = self.ExtractIncludesFromCFlags(
config.get('cflags_c'))
extracted_includes.extend(includes_from_cflags_c)
self.WriteList(cflags_c, 'MY_CFLAGS_C')
self.WriteList(config.get('defines'), 'MY_DEFS', prefix='-D',
quoter=make.EscapeCppDefine)
self.WriteLn('LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host or
# target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES)')
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as a header search path, because GCC searches for headers in
    # the directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_host_gyp'
else:
suffix = '_gyp'
if self.path:
name = '%s%s_%s%s' % (prefix, self.path, self.target, suffix)
else:
name = '%s%s%s' % (prefix, self.target, suffix)
return make.StringToMakefileVariable(name)
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable' and self.toolset == 'host':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = '$(call intermediates-dir-for,%s,%s,true)' % (self.android_class,
self.android_module)
else:
path = '$(call intermediates-dir-for,%s,%s)' % (self.android_class,
self.android_module)
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeLdFlags(self, ld_flags):
""" Clean up ldflags from gyp file.
Remove any ldflags that contain android_top_dir.
Args:
ld_flags: ldflags from gyp files.
Returns:
clean ldflags
"""
clean_ldflags = []
for flag in ld_flags:
if self.android_top_dir in flag:
continue
clean_ldflags.append(flag)
return clean_ldflags
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory;
filter out include paths that are already brought in by the Android build
system.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
# Filter out the Android standard search path.
if path not in android_standard_include_paths:
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
      A tuple of lists: (clean_cflags, include_paths). The "-I" prefix is trimmed.
"""
clean_cflags = []
include_paths = []
if cflags:
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
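  # For reference (assumed example):
  #   ExtractIncludesFromCFlags(['-Ifoo/bar', '-Wall'])
  # returns (['-Wall'], ['foo/bar']).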
  def ComputeAndroidLibraryModuleNames(self, libraries):
    """Compute the Android module names from libraries, i.e. spec.get('libraries')
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules)
"""
static_lib_modules = []
dynamic_lib_modules = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
# "-lstlport" -> libstlport
if lib.startswith('-l'):
if lib.endswith('_static'):
static_lib_modules.append('lib' + lib[2:])
else:
dynamic_lib_modules.append('lib' + lib[2:])
return (static_lib_modules, dynamic_lib_modules)
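  # For reference (assumed example): a libraries entry of
  #   '-lm -lstlport libfoo.a out/libbaz.so'
  # yields (['libfoo'], ['libstlport', 'libbaz']); '-lm' is dropped as a
  # system library.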
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
config = configs[spec['default_configuration']]
# LDFLAGS
ldflags = list(config.get('ldflags', []))
static_flags, dynamic_flags = self.ComputeAndroidLibraryModuleNames(
ldflags)
self.WriteLn('')
self.WriteList(self.NormalizeLdFlags(ldflags), 'LOCAL_LDFLAGS')
# Libraries (i.e. -lfoo)
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
libraries)
# Link dependencies (i.e. libfoo.a, libfoo.so)
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
self.WriteLn('')
self.WriteList(static_flags + static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
self.WriteLn('')
self.WriteList(dynamic_flags + dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
if self.toolset == 'host':
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
else:
# Don't install target executables for now, as it results in them being
# included in ROM. This can be revisited if there's a reason to install
# them later.
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
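# Illustrative sketch (not part of the generator): the path helpers above can
# be exercised without writing any .mk file. The android_top_dir and the
# relative path below are assumptions for the example.
def _example_path_helpers():
  writer = AndroidMkWriter('/hypothetical/android/top')
  writer.path = 'foo/bar'  # normally assigned inside Write()
  return (writer.LocalPathify('baz/../qux.c'),  # -> $(LOCAL_PATH)/foo/bar/qux.c
          writer.ExpandInputRoot('%(INPUT_ROOT)s.cc', 'msg', 'proto'))  # -> msg.cc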
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
# Sort to avoid non-functional changes to makefile.
build_files = sorted([os.path.join('$(LOCAL_PATH)', f) for f in build_files])
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
build_files_args = [os.path.join('$(PRIVATE_LOCAL_PATH)', f)
for f in build_files_args]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
makefile_path = os.path.join('$(LOCAL_PATH)', makefile_name)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write('GYP_FILES := \\\n %s\n\n' %
'\\\n '.join(map(Sourceify, build_files)))
root_makefile.write('%s: PRIVATE_LOCAL_PATH := $(LOCAL_PATH)\n' %
makefile_path)
root_makefile.write('%s: $(GYP_FILES)\n' % makefile_path)
root_makefile.write('\techo ACTION Regenerating $@\n\t%s\n\n' %
gyp.common.EncodePOSIXShellList([gyp_binary, '-fandroid'] +
gyp.RegenerateFlags(options) +
build_files_args))
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid.mk' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
make.ensure_directory_exists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = (qualified_target in needed_targets and
not int(spec.get('suppress_wildcard', False)))
if limit_to_target_all and not part_of_all:
continue
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, base_path, output_file,
spec, configs, part_of_all=part_of_all)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Some tools need to know the absolute path of the top directory.
root_makefile.write('GYP_ABS_ANDROID_TOP_DIR := $(shell pwd)\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if generator_flags.get('auto_regeneration', True):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
|
|
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, pickle, os, shutil, subprocess, errno
import shlex
from glob import glob
from .scripts import depfixer
from .scripts import destdir_join
from .mesonlib import is_windows, Popen_safe
from .mtest import rebuild_all
try:
from __main__ import __file__ as main_file
except ImportError:
# Happens when running as meson.exe which is native Windows.
# This is only used for pkexec which is not, so this is fine.
main_file = None
symlink_warning = '''Warning: trying to copy a symlink that points to a file. This will copy the file,
but this will be changed in a future version of Meson to copy the symlink as is. Please update your
build definitions so that it will not break when the change happens.'''
selinux_updates = []
def add_arguments(parser):
parser.add_argument('-C', default='.', dest='wd',
help='directory to cd into before running')
parser.add_argument('--no-rebuild', default=False, action='store_true',
help='Do not rebuild before installing.')
parser.add_argument('--only-changed', default=False, action='store_true',
help='Only overwrite files that are older than the copied file.')
class DirMaker:
def __init__(self, lf):
self.lf = lf
self.dirs = []
def makedirs(self, path, exist_ok=False):
dirname = os.path.normpath(path)
dirs = []
while dirname != os.path.dirname(dirname):
if not os.path.exists(dirname):
dirs.append(dirname)
dirname = os.path.dirname(dirname)
os.makedirs(path, exist_ok=exist_ok)
# store the directories in creation order, with the parent directory
        # before the child directories. Future calls of makedirs() will not
# create the parent directories, so the last element in the list is
# the last one to be created. That is the first one to be removed on
# __exit__
dirs.reverse()
self.dirs += dirs
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.dirs.reverse()
for d in self.dirs:
append_to_log(self.lf, d)
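# A minimal, hedged sketch (not called by Meson itself) of how DirMaker is
# meant to be used: every directory it had to create is appended to the given
# install log on exit, deepest directory first. The temporary paths and the
# helper name are made up purely for illustration.
def _dirmaker_demo():
    import tempfile
    root = tempfile.mkdtemp()
    log_path = os.path.join(root, 'install-log.txt')
    with open(log_path, 'w') as lf, DirMaker(lf) as dm:
        dm.makedirs(os.path.join(root, 'a', 'b', 'c'))
    with open(log_path) as logf:
        # Expect .../a/b/c, then .../a/b, then .../a
        return logf.read().splitlines()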
def is_executable(path, follow_symlinks=False):
'''Checks whether any of the "x" bits are set in the source file mode.'''
return bool(os.stat(path, follow_symlinks=follow_symlinks).st_mode & 0o111)
def append_to_log(lf, line):
lf.write(line)
if not line.endswith('\n'):
lf.write('\n')
lf.flush()
def set_chown(path, user=None, group=None, dir_fd=None, follow_symlinks=True):
# shutil.chown will call os.chown without passing all the parameters
    # and in particular follow_symlinks, so we temporarily replace it with a
    # lambda that forwards all the parameters, ensuring follow_symlinks is
    # actually passed through.
# Not nice, but better than actually rewriting shutil.chown until
# this python bug is fixed: https://bugs.python.org/issue18108
real_os_chown = os.chown
try:
os.chown = lambda p, u, g: real_os_chown(p, u, g,
dir_fd=dir_fd,
follow_symlinks=follow_symlinks)
shutil.chown(path, user, group)
except:
raise
finally:
os.chown = real_os_chown
def set_chmod(path, mode, dir_fd=None, follow_symlinks=True):
try:
os.chmod(path, mode, dir_fd=dir_fd, follow_symlinks=follow_symlinks)
except (NotImplementedError, OSError, SystemError) as e:
if not os.path.islink(path):
os.chmod(path, mode, dir_fd=dir_fd)
def sanitize_permissions(path, umask):
if umask is None:
return
new_perms = 0o777 if is_executable(path, follow_symlinks=False) else 0o666
new_perms &= ~umask
try:
set_chmod(path, new_perms, follow_symlinks=False)
except PermissionError as e:
msg = '{!r}: Unable to set permissions {!r}: {}, ignoring...'
print(msg.format(path, new_perms, e.strerror))
def set_mode(path, mode, default_umask):
if mode is None or (mode.perms_s or mode.owner or mode.group) is None:
# Just sanitize permissions with the default umask
sanitize_permissions(path, default_umask)
return
# No chown() on Windows, and must set one of owner/group
if not is_windows() and (mode.owner or mode.group) is not None:
try:
set_chown(path, mode.owner, mode.group, follow_symlinks=False)
except PermissionError as e:
msg = '{!r}: Unable to set owner {!r} and group {!r}: {}, ignoring...'
print(msg.format(path, mode.owner, mode.group, e.strerror))
except LookupError:
msg = '{!r}: Non-existent owner {!r} or group {!r}: ignoring...'
print(msg.format(path, mode.owner, mode.group))
except OSError as e:
if e.errno == errno.EINVAL:
msg = '{!r}: Non-existent numeric owner {!r} or group {!r}: ignoring...'
print(msg.format(path, mode.owner, mode.group))
else:
raise
# Must set permissions *after* setting owner/group otherwise the
# setuid/setgid bits will get wiped by chmod
# NOTE: On Windows you can set read/write perms; the rest are ignored
if mode.perms_s is not None:
try:
set_chmod(path, mode.perms, follow_symlinks=False)
except PermissionError as e:
msg = '{!r}: Unable to set permissions {!r}: {}, ignoring...'
print(msg.format(path, mode.perms_s, e.strerror))
else:
sanitize_permissions(path, default_umask)
def restore_selinux_contexts():
'''
Restores the SELinux context for files in @selinux_updates
If $DESTDIR is set, do not warn if the call fails.
'''
try:
subprocess.check_call(['selinuxenabled'])
except (FileNotFoundError, PermissionError, subprocess.CalledProcessError) as e:
# If we don't have selinux or selinuxenabled returned 1, failure
# is ignored quietly.
return
if not shutil.which('restorecon'):
# If we don't have restorecon, failure is ignored quietly.
return
with subprocess.Popen(['restorecon', '-F', '-f-', '-0'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
out, err = proc.communicate(input=b'\0'.join(os.fsencode(f) for f in selinux_updates) + b'\0')
if proc.returncode != 0 and not os.environ.get('DESTDIR'):
print('Failed to restore SELinux context of installed files...',
'Standard output:', out.decode(),
'Standard error:', err.decode(), sep='\n')
def get_destdir_path(d, path):
if os.path.isabs(path):
output = destdir_join(d.destdir, path)
else:
output = os.path.join(d.fullprefix, path)
return output
def check_for_stampfile(fname):
'''Some languages e.g. Rust have output files
whose names are not known at configure time.
Check if this is the case and return the real
file instead.'''
if fname.endswith('.so') or fname.endswith('.dll'):
if os.stat(fname).st_size == 0:
(base, suffix) = os.path.splitext(fname)
files = glob(base + '-*' + suffix)
if len(files) > 1:
print("Stale dynamic library files in build dir. Can't install.")
sys.exit(1)
if len(files) == 1:
return files[0]
elif fname.endswith('.a') or fname.endswith('.lib'):
if os.stat(fname).st_size == 0:
(base, suffix) = os.path.splitext(fname)
files = glob(base + '-*' + '.rlib')
if len(files) > 1:
print("Stale static library files in build dir. Can't install.")
sys.exit(1)
if len(files) == 1:
return files[0]
return fname
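# Hedged sketch of the stamp-file case handled above (Rust-style outputs whose
# real names are only known at build time): a zero-size 'libfoo.so' next to
# the real 'libfoo-<hash>.so' resolves to the real file. The temporary files
# and names here are made up for illustration only.
def _stampfile_demo():
    import tempfile
    d = tempfile.mkdtemp()
    stamp = os.path.join(d, 'libfoo.so')
    real = os.path.join(d, 'libfoo-0123abcd.so')
    open(stamp, 'w').close()
    with open(real, 'w') as f:
        f.write('placeholder contents')
    # The empty .so triggers the glob for 'libfoo-*.so'
    return check_for_stampfile(stamp) == real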
class Installer:
def __init__(self, options, lf):
self.options = options
self.lf = lf
def should_preserve_existing_file(self, from_file, to_file):
if not self.options.only_changed:
return False
        # Always replace dangling symlinks
if os.path.islink(from_file) and not os.path.isfile(from_file):
return False
from_time = os.stat(from_file).st_mtime
to_time = os.stat(to_file).st_mtime
return from_time <= to_time
def do_copyfile(self, from_file, to_file):
outdir = os.path.split(to_file)[0]
if not os.path.isfile(from_file) and not os.path.islink(from_file):
            raise RuntimeError('Tried to install something that isn\'t a file: '
                               '{!r}'.format(from_file))
# copyfile fails if the target file already exists, so remove it to
# allow overwriting a previous install. If the target is not a file, we
# want to give a readable error.
if os.path.exists(to_file):
if not os.path.isfile(to_file):
raise RuntimeError('Destination {!r} already exists and is not '
'a file'.format(to_file))
if self.should_preserve_existing_file(from_file, to_file):
append_to_log(self.lf, '# Preserving old file %s\n' % to_file)
print('Preserving existing file %s.' % to_file)
return False
os.remove(to_file)
print('Installing %s to %s' % (from_file, outdir))
if os.path.islink(from_file):
if not os.path.exists(from_file):
# Dangling symlink. Replicate as is.
shutil.copy(from_file, outdir, follow_symlinks=False)
else:
# Remove this entire branch when changing the behaviour to duplicate
# symlinks rather than copying what they point to.
print(symlink_warning)
shutil.copyfile(from_file, to_file)
shutil.copystat(from_file, to_file)
else:
shutil.copyfile(from_file, to_file)
shutil.copystat(from_file, to_file)
selinux_updates.append(to_file)
append_to_log(self.lf, to_file)
return True
def do_copydir(self, data, src_dir, dst_dir, exclude, install_mode):
'''
Copies the contents of directory @src_dir into @dst_dir.
For directory
/foo/
bar/
excluded
foobar
file
do_copydir(..., '/foo', '/dst/dir', {'bar/excluded'}) creates
/dst/
dir/
bar/
foobar
file
Args:
src_dir: str, absolute path to the source directory
dst_dir: str, absolute path to the destination directory
exclude: (set(str), set(str)), tuple of (exclude_files, exclude_dirs),
each element of the set is a path relative to src_dir.
'''
if not os.path.isabs(src_dir):
raise ValueError('src_dir must be absolute, got %s' % src_dir)
if not os.path.isabs(dst_dir):
raise ValueError('dst_dir must be absolute, got %s' % dst_dir)
if exclude is not None:
exclude_files, exclude_dirs = exclude
else:
exclude_files = exclude_dirs = set()
for root, dirs, files in os.walk(src_dir):
assert os.path.isabs(root)
for d in dirs[:]:
abs_src = os.path.join(root, d)
filepart = os.path.relpath(abs_src, start=src_dir)
abs_dst = os.path.join(dst_dir, filepart)
# Remove these so they aren't visited by os.walk at all.
if filepart in exclude_dirs:
dirs.remove(d)
continue
if os.path.isdir(abs_dst):
continue
if os.path.exists(abs_dst):
print('Tried to copy directory %s but a file of that name already exists.' % abs_dst)
sys.exit(1)
data.dirmaker.makedirs(abs_dst)
shutil.copystat(abs_src, abs_dst)
sanitize_permissions(abs_dst, data.install_umask)
for f in files:
abs_src = os.path.join(root, f)
filepart = os.path.relpath(abs_src, start=src_dir)
if filepart in exclude_files:
continue
abs_dst = os.path.join(dst_dir, filepart)
if os.path.isdir(abs_dst):
print('Tried to copy file %s but a directory of that name already exists.' % abs_dst)
if os.path.exists(abs_dst):
os.remove(abs_dst)
parent_dir = os.path.dirname(abs_dst)
if not os.path.isdir(parent_dir):
os.mkdir(parent_dir)
shutil.copystat(os.path.dirname(abs_src), parent_dir)
# FIXME: what about symlinks?
self.do_copyfile(abs_src, abs_dst)
set_mode(abs_dst, install_mode, data.install_umask)
append_to_log(self.lf, abs_dst)
def do_install(self, datafilename):
with open(datafilename, 'rb') as ifile:
d = pickle.load(ifile)
d.destdir = os.environ.get('DESTDIR', '')
d.fullprefix = destdir_join(d.destdir, d.prefix)
if d.install_umask is not None:
os.umask(d.install_umask)
try:
d.dirmaker = DirMaker(self.lf)
with d.dirmaker:
self.install_subdirs(d) # Must be first, because it needs to delete the old subtree.
self.install_targets(d)
self.install_headers(d)
self.install_man(d)
self.install_data(d)
restore_selinux_contexts()
self.run_install_script(d)
except PermissionError:
if shutil.which('pkexec') is not None and 'PKEXEC_UID' not in os.environ:
print('Installation failed due to insufficient permissions.')
print('Attempting to use polkit to gain elevated privileges...')
os.execlp('pkexec', 'pkexec', sys.executable, main_file, *sys.argv[1:],
'-C', os.getcwd())
else:
raise
def install_subdirs(self, d):
for (src_dir, dst_dir, mode, exclude) in d.install_subdirs:
full_dst_dir = get_destdir_path(d, dst_dir)
print('Installing subdir %s to %s' % (src_dir, full_dst_dir))
d.dirmaker.makedirs(full_dst_dir, exist_ok=True)
self.do_copydir(d, src_dir, full_dst_dir, exclude, mode)
def install_data(self, d):
for i in d.data:
fullfilename = i[0]
outfilename = get_destdir_path(d, i[1])
mode = i[2]
outdir = os.path.dirname(outfilename)
d.dirmaker.makedirs(outdir, exist_ok=True)
self.do_copyfile(fullfilename, outfilename)
set_mode(outfilename, mode, d.install_umask)
def install_man(self, d):
for m in d.man:
full_source_filename = m[0]
outfilename = get_destdir_path(d, m[1])
outdir = os.path.dirname(outfilename)
d.dirmaker.makedirs(outdir, exist_ok=True)
install_mode = m[2]
self.do_copyfile(full_source_filename, outfilename)
set_mode(outfilename, install_mode, d.install_umask)
def install_headers(self, d):
for t in d.headers:
fullfilename = t[0]
fname = os.path.basename(fullfilename)
outdir = get_destdir_path(d, t[1])
outfilename = os.path.join(outdir, fname)
install_mode = t[2]
d.dirmaker.makedirs(outdir, exist_ok=True)
self.do_copyfile(fullfilename, outfilename)
set_mode(outfilename, install_mode, d.install_umask)
def run_install_script(self, d):
env = {'MESON_SOURCE_ROOT': d.source_dir,
'MESON_BUILD_ROOT': d.build_dir,
'MESON_INSTALL_PREFIX': d.prefix,
'MESON_INSTALL_DESTDIR_PREFIX': d.fullprefix,
'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in d.mesonintrospect]),
}
child_env = os.environ.copy()
child_env.update(env)
for i in d.install_scripts:
script = i['exe']
args = i['args']
name = ' '.join(script + args)
print('Running custom install script {!r}'.format(name))
try:
rc = subprocess.call(script + args, env=child_env)
if rc != 0:
sys.exit(rc)
except OSError:
print('Failed to run install script {!r}'.format(name))
sys.exit(1)
def install_targets(self, d):
for t in d.targets:
if not os.path.exists(t.fname):
# For example, import libraries of shared modules are optional
if t.optional:
print('File {!r} not found, skipping'.format(t.fname))
continue
else:
raise RuntimeError('File {!r} could not be found'.format(t.fname))
fname = check_for_stampfile(t.fname)
outdir = get_destdir_path(d, t.outdir)
outname = os.path.join(outdir, os.path.basename(fname))
final_path = os.path.join(d.prefix, t.outdir, os.path.basename(fname))
aliases = t.aliases
should_strip = t.strip
install_rpath = t.install_rpath
install_name_mappings = t.install_name_mappings
install_mode = t.install_mode
d.dirmaker.makedirs(outdir, exist_ok=True)
if not os.path.exists(fname):
raise RuntimeError('File {!r} could not be found'.format(fname))
elif os.path.isfile(fname):
self.do_copyfile(fname, outname)
set_mode(outname, install_mode, d.install_umask)
if should_strip and d.strip_bin is not None:
if fname.endswith('.jar'):
print('Not stripping jar target:', os.path.basename(fname))
continue
print('Stripping target {!r}'.format(fname))
ps, stdo, stde = Popen_safe(d.strip_bin + [outname])
if ps.returncode != 0:
print('Could not strip file.\n')
print('Stdout:\n%s\n' % stdo)
print('Stderr:\n%s\n' % stde)
sys.exit(1)
pdb_filename = os.path.splitext(fname)[0] + '.pdb'
if not should_strip and os.path.exists(pdb_filename):
pdb_outname = os.path.splitext(outname)[0] + '.pdb'
self.do_copyfile(pdb_filename, pdb_outname)
set_mode(pdb_outname, install_mode, d.install_umask)
elif os.path.isdir(fname):
fname = os.path.join(d.build_dir, fname.rstrip('/'))
outname = os.path.join(outdir, os.path.basename(fname))
self.do_copydir(d, fname, outname, None, install_mode)
else:
raise RuntimeError('Unknown file type for {!r}'.format(fname))
printed_symlink_error = False
for alias, to in aliases.items():
try:
symlinkfilename = os.path.join(outdir, alias)
try:
os.remove(symlinkfilename)
except FileNotFoundError:
pass
os.symlink(to, symlinkfilename)
append_to_log(self.lf, symlinkfilename)
except (NotImplementedError, OSError):
if not printed_symlink_error:
print("Symlink creation does not work on this platform. "
"Skipping all symlinking.")
printed_symlink_error = True
if os.path.isfile(outname):
try:
depfixer.fix_rpath(outname, install_rpath, final_path,
install_name_mappings, verbose=False)
except SystemExit as e:
if isinstance(e.code, int) and e.code == 0:
pass
else:
raise
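# A hedged, self-contained sketch (never used during real installs) of the
# exclude semantics documented in Installer.do_copydir: 'bar/excluded' is an
# exclude_files entry relative to the source dir, so only 'bar/foobar' and
# 'file' reach the destination. SimpleNamespace stands in for the pickled
# install data and POSIX path separators are assumed.
def _copydir_exclude_demo():
    import tempfile
    from types import SimpleNamespace
    src = tempfile.mkdtemp()
    dst = tempfile.mkdtemp()
    os.makedirs(os.path.join(src, 'bar'))
    for relpath in ('bar/excluded', 'bar/foobar', 'file'):
        open(os.path.join(src, *relpath.split('/')), 'w').close()
    with open(os.devnull, 'w') as lf:
        data = SimpleNamespace(dirmaker=DirMaker(lf), install_umask=None)
        installer = Installer(SimpleNamespace(only_changed=False), lf)
        installer.do_copydir(data, src, dst, ({'bar/excluded'}, set()), None)
    # Expect ['bar/foobar', 'file']
    return sorted(os.path.relpath(os.path.join(r, f), dst)
                  for r, _, fs in os.walk(dst) for f in fs)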
def run(opts):
datafilename = 'meson-private/install.dat'
private_dir = os.path.dirname(datafilename)
log_dir = os.path.join(private_dir, '../meson-logs')
if not os.path.exists(os.path.join(opts.wd, datafilename)):
sys.exit('Install data not found. Run this command in build directory root.')
if not opts.no_rebuild:
if not rebuild_all(opts.wd):
sys.exit(-1)
os.chdir(opts.wd)
with open(os.path.join(log_dir, 'install-log.txt'), 'w') as lf:
installer = Installer(opts, lf)
append_to_log(lf, '# List of files installed by Meson')
append_to_log(lf, '# Does not contain files installed by custom scripts.')
installer.do_install(datafilename)
return 0
|
|
"""
Hubs and authorities analysis of graph structure.
"""
#!/usr/bin/env python
# Copyright (C) 2008-2010 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
# NetworkX:http://networkx.lanl.gov/
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['hits','hits_numpy','hits_scipy','authority_matrix','hub_matrix']
import networkx as nx
from networkx.exception import NetworkXError
def hits(G,max_iter=100,tol=1.0e-8,nstart=None):
"""Return HITS hubs and authorities values for nodes.
The HITS algorithm computes two numbers for a node.
Authorities estimates the node value based on the incoming links.
Hubs estimates the node value based on outgoing links.
Parameters
----------
G : graph
A NetworkX graph
    max_iter : integer, optional
Maximum number of iterations in power method.
tol : float, optional
Error tolerance used to check convergence in power method iteration.
nstart : dictionary, optional
Starting value of each node for power method iteration.
Returns
-------
(hubs,authorities) : two-tuple of dictionaries
Two dictionaries keyed by node containing the hub and authority
values.
Examples
--------
>>> G=nx.path_graph(4)
>>> h,a=nx.hits(G)
Notes
-----
The eigenvector calculation is done by the power iteration method
and has no guarantee of convergence. The iteration will stop
after max_iter iterations or an error tolerance of
number_of_nodes(G)*tol has been reached.
The HITS algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Jon Kleinberg,
Authoritative sources in a hyperlinked environment
Journal of the ACM 46 (5): 604-32, 1999.
doi:10.1145/324133.324140.
http://www.cs.cornell.edu/home/kleinber/auth.pdf.
"""
if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
raise Exception("hits() not defined for graphs with multiedges.")
# choose fixed starting vector if not given
if nstart is None:
h=dict.fromkeys(G,1.0/G.number_of_nodes())
else:
h=nstart
# normalize starting vector
s=1.0/sum(h.values())
    for k in h: h[k]*=s
nnodes=G.number_of_nodes()
i=0
while True: # power iteration: make up to max_iter iterations
hlast=h
h=dict.fromkeys(hlast.keys(),0)
a=dict.fromkeys(hlast.keys(),0)
# this "matrix multiply" looks odd because it is
# doing a left multiply a^T=hlast^T*G
for n in h:
for nbr in G[n]:
a[nbr]+=hlast[n]*G[n][nbr].get('weight',1)
# now multiply h=Ga
for n in h:
for nbr in G[n]:
h[n]+=a[nbr]*G[n][nbr].get('weight',1)
# normalize vector
s=1.0/sum(h.values())
for n in h: h[n]*=s
# normalize vector
s=1.0/sum(a.values())
for n in a: a[n]*=s
# check convergence, l1 norm
err=sum([abs(h[n]-hlast[n]) for n in h])
if err < tol:
break
if i>max_iter:
raise NetworkXError(\
"HITS: power iteration failed to converge in %d iterations."%(i+1))
i+=1
return h,a
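# Hedged toy example (not part of the public NetworkX API) of the intuition
# behind hits() above: node 0 only points at other nodes (a pure hub) while
# node 3 is only pointed at (a pure authority). The graph is made up.
def _hits_demo():
    G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 3), (2, 3)])
    h, a = hits(G)
    # Node 0 gets the largest hub score, node 3 the largest authority score.
    return max(h, key=h.get), max(a, key=a.get)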
def authority_matrix(G,nodelist=None):
"""Return the HITS authority matrix."""
M=nx.to_numpy_matrix(G,nodelist=nodelist)
return M.T*M
def hub_matrix(G,nodelist=None):
"""Return the HITS hub matrix."""
M=nx.to_numpy_matrix(G,nodelist=nodelist)
return M*M.T
def hits_numpy(G):
"""Return HITS hubs and authorities values for nodes.
The HITS algorithm computes two numbers for a node.
Authorities estimates the node value based on the incoming links.
Hubs estimates the node value based on outgoing links.
Parameters
    ----------
G : graph
A NetworkX graph
Returns
-------
(hubs,authorities) : two-tuple of dictionaries
Two dictionaries keyed by node containing the hub and authority
values.
Examples
--------
>>> G=nx.path_graph(4)
>>> h,a=nx.hits(G)
Notes
-----
The eigenvector calculation uses NumPy's interface to LAPACK.
The HITS algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Jon Kleinberg,
Authoritative sources in a hyperlinked environment
Journal of the ACM 46 (5): 604-32, 1999.
doi:10.1145/324133.324140.
http://www.cs.cornell.edu/home/kleinber/auth.pdf.
"""
try:
import numpy as np
except ImportError:
raise ImportError(\
"hits_numpy() requires NumPy: http://scipy.org/")
H=nx.hub_matrix(G,G.nodes())
e,ev=np.linalg.eig(H)
m=e.argsort()[-1] # index of maximum eigenvalue
h=np.array(ev[:,m]).flatten()
A=nx.authority_matrix(G,G.nodes())
e,ev=np.linalg.eig(A)
m=e.argsort()[-1] # index of maximum eigenvalue
a=np.array(ev[:,m]).flatten()
hubs=dict(zip(G.nodes(),h/h.sum()))
authorities=dict(zip(G.nodes(),a/a.sum()))
return hubs,authorities
def hits_scipy(G,max_iter=100,tol=1.0e-6):
"""Return HITS hubs and authorities values for nodes.
The HITS algorithm computes two numbers for a node.
Authorities estimates the node value based on the incoming links.
Hubs estimates the node value based on outgoing links.
Parameters
    ----------
G : graph
A NetworkX graph
    max_iter : integer, optional
Maximum number of iterations in power method.
tol : float, optional
Error tolerance used to check convergence in power method iteration.
Returns
-------
(hubs,authorities) : two-tuple of dictionaries
Two dictionaries keyed by node containing the hub and authority
values.
Examples
--------
>>> G=nx.path_graph(4)
>>> h,a=nx.hits(G)
Notes
-----
This implementation uses SciPy sparse matrices.
The eigenvector calculation is done by the power iteration method
and has no guarantee of convergence. The iteration will stop
after max_iter iterations or an error tolerance of
number_of_nodes(G)*tol has been reached.
The HITS algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Jon Kleinberg,
Authoritative sources in a hyperlinked environment
Journal of the ACM 46 (5): 604-632, 1999.
doi:10.1145/324133.324140.
http://www.cs.cornell.edu/home/kleinber/auth.pdf.
"""
try:
import scipy.sparse
import numpy as np
except ImportError:
raise ImportError(\
"hits_scipy() requires SciPy: http://scipy.org/")
M=nx.to_scipy_sparse_matrix(G,nodelist=G.nodes())
(n,m)=M.shape # should be square
A=M.T*M # authority matrix
    x=np.ones((n,1))/n # initial guess
# power iteration on authority matrix
i=0
while True:
xlast=x
x=A*x
x=x/x.sum()
# check convergence, l1 norm
        err=np.absolute(x-xlast).sum()
if err < tol:
break
if i>max_iter:
raise NetworkXError(\
"HITS: power iteration failed to converge in %d iterations."%(i+1))
i+=1
a=np.asarray(x).flatten()
# h=M*a
h=np.asarray(M*a).flatten()
hubs=dict(zip(G.nodes(),h/h.sum()))
authorities=dict(zip(G.nodes(),a/a.sum()))
return hubs,authorities
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
    except ImportError:
raise SkipTest("NumPy not available")
try:
import scipy
    except ImportError:
raise SkipTest("SciPy not available")
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Help include git hash in tensorflow bazel build.
This creates symlinks from the internal git repository directory so
that the build system can see changes in the version state. We also
remember what branch git was on so when the branch changes we can
detect that the ref file is no longer correct (so we can suggest users
run ./configure again).
NOTE: this script is only used in opensource.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import subprocess
import shutil
def parse_branch_ref(filename):
"""Given a filename of a .git/HEAD file return ref path.
In particular, if git is in detached head state, this will
return None. If git is in attached head, it will return
the branch reference. E.g. if on 'master', the HEAD will
contain 'ref: refs/heads/master' so 'refs/heads/master'
will be returned.
Example: parse_branch_ref(".git/HEAD")
Args:
filename: file to treat as a git HEAD file
Returns:
None if detached head, otherwise ref subpath
Raises:
RuntimeError: if the HEAD file is unparseable.
"""
data = open(filename).read().strip()
items = data.split(" ")
if len(items) == 1:
return None
elif len(items) == 2 and items[0] == "ref:":
return items[1].strip()
else:
raise RuntimeError("Git directory has unparseable HEAD")
def configure(src_base_path, gen_path, debug=False):
"""Configure `src_base_path` to embed git hashes if available."""
# TODO(aselle): No files generated or symlinked here are deleted by
# the build system. I don't know of a way to do it in bazel. It
# should only be a problem if somebody moves a sandbox directory
# without running ./configure again.
git_path = os.path.join(src_base_path, ".git")
# Remove and recreate the path
if os.path.exists(gen_path):
if os.path.isdir(gen_path):
try:
shutil.rmtree(gen_path)
except OSError:
raise RuntimeError("Cannot delete directory %s due to permission "
"error, inspect and remove manually" % gen_path)
else:
raise RuntimeError("Cannot delete non-directory %s, inspect ",
"and remove manually" % gen_path)
os.makedirs(gen_path)
if not os.path.isdir(gen_path):
raise RuntimeError("gen_git_source.py: Failed to create dir")
# file that specifies what the state of the git repo is
spec = {}
# value file names will be mapped to the keys
link_map = {"head": None, "branch_ref": None}
if not os.path.isdir(git_path):
# No git directory
spec["git"] = False
open(os.path.join(gen_path, "head"), "w").write("")
open(os.path.join(gen_path, "branch_ref"), "w").write("")
else:
# Git directory, possibly detached or attached
spec["git"] = True
spec["path"] = src_base_path
git_head_path = os.path.join(git_path, "HEAD")
spec["branch"] = parse_branch_ref(git_head_path)
link_map["head"] = git_head_path
if spec["branch"] is not None:
# attached method
link_map["branch_ref"] = os.path.join(git_path, *
os.path.split(spec["branch"]))
# Create symlinks or dummy files
for target, src in link_map.items():
if src is None:
open(os.path.join(gen_path, target), "w").write("")
elif not os.path.exists(src):
      # The git repo is configured in a way we don't support, such as having
      # packed refs. Even though we are in a git repo, tf.__git_version__ will
      # not be accurate.
# TODO(mikecase): Support grabbing git info when using packed refs.
open(os.path.join(gen_path, target), "w").write("")
spec["git"] = False
else:
try:
# In python 3.5, symlink function exists even on Windows. But requires
# Windows Admin privileges, otherwise an OSError will be thrown.
if hasattr(os, "symlink"):
os.symlink(src, os.path.join(gen_path, target))
else:
shutil.copy2(src, os.path.join(gen_path, target))
except OSError:
shutil.copy2(src, os.path.join(gen_path, target))
json.dump(spec, open(os.path.join(gen_path, "spec.json"), "w"), indent=2)
if debug:
print("gen_git_source.py: list %s" % gen_path)
print("gen_git_source.py: %s" + repr(os.listdir(gen_path)))
print("gen_git_source.py: spec is %r" % spec)
def get_git_version(git_base_path, git_tag_override):
"""Get the git version from the repository.
This function runs `git describe ...` in the path given as `git_base_path`.
This will return a string of the form:
<base-tag>-<number of commits since tag>-<shortened sha hash>
  For example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when
  compiled, 1585 commits were made after that tag, and the abbreviated hash of
  the current commit is bb717a6 (so `git checkout bb717a6` returns to this
  version).
Args:
git_base_path: where the .git directory is located
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
Returns:
A bytestring representing the git version
"""
unknown_label = b"unknown"
try:
# Force to bytes so this works on python 2 and python 3
val = bytes(subprocess.check_output([
"git", str("--git-dir=%s/.git" % git_base_path),
str("--work-tree=" + git_base_path), "describe", "--long", "--tags"
]).strip())
version_separator = b"-"
if git_tag_override and val:
split_val = val.split(version_separator)
if len(split_val) < 3:
raise Exception(
("Expected git version in format 'TAG-COMMITS AFTER TAG-HASH' "
"but got '%s'") % val)
# There might be "-" in the tag name. But we can be sure that the final
# two "-" are those inserted by the git describe command.
abbrev_commit = split_val[-1]
      val = bytes(version_separator.join(
          [git_tag_override.encode("utf-8"), b"0", abbrev_commit]))
return val if val else unknown_label
except (subprocess.CalledProcessError, OSError):
return unknown_label
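# Hedged sketch of the string handling above: the final "-"-separated field of
# a `git describe --long --tags` result is the abbreviated commit, and an
# override tag replaces everything before it. The values are made up.
def _describe_override_demo():
  val = b"v0.10.0-1585-gbb717a6"
  abbrev_commit = val.split(b"-")[-1]
  # Returns b'v9.9.9-0-gbb717a6'
  return b"-".join([b"v9.9.9", b"0", abbrev_commit])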
def write_version_info(filename, git_version):
"""Write a c file that defines the version functions.
Args:
filename: filename to write to.
git_version: the result of a git describe.
"""
if b"\"" in git_version or b"\\" in git_version:
git_version = "git_version_is_invalid" # do not cause build to fail!
contents = """/* Generated by gen_git_source.py */
#include <string>
const char* tf_git_version() {return "%s";}
const char* tf_compiler_version() {
#ifdef _MSC_VER
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
return "MSVC " TOSTRING(_MSC_FULL_VER);
#else
return __VERSION__;
#endif
}
const int tf_cxx11_abi_flag() {
#ifdef _GLIBCXX_USE_CXX11_ABI
return _GLIBCXX_USE_CXX11_ABI;
#else
return 0;
#endif
}
const int tf_monolithic_build() {
#ifdef TENSORFLOW_MONOLITHIC_BUILD
return 1;
#else
return 0;
#endif
}
""" % git_version
open(filename, "w").write(contents)
def generate(arglist, git_tag_override=None):
"""Generate version_info.cc as given `destination_file`.
Args:
arglist: should be a sequence that contains
spec, head_symlink, ref_symlink, destination_file.
`destination_file` is the filename where version_info.cc will be written
`spec` is a filename where the file contains a JSON dictionary
'git' bool that is true if the source is in a git repo
'path' base path of the source code
'branch' the name of the ref specification of the current branch/tag
`head_symlink` is a filename to HEAD that is cross-referenced against
what is contained in the json branch designation.
`ref_symlink` is unused in this script but passed, because the build
system uses that file to detect when commits happen.
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
Raises:
RuntimeError: If ./configure needs to be run, RuntimeError will be raised.
"""
# unused ref_symlink arg
spec, head_symlink, _, dest_file = arglist
data = json.load(open(spec))
git_version = None
if not data["git"]:
git_version = b"unknown"
else:
old_branch = data["branch"]
new_branch = parse_branch_ref(head_symlink)
if new_branch != old_branch:
raise RuntimeError(
"Run ./configure again, branch was '%s' but is now '%s'" %
(old_branch, new_branch))
git_version = get_git_version(data["path"], git_tag_override)
write_version_info(dest_file, git_version)
def raw_generate(output_file, source_dir, git_tag_override=None):
"""Simple generator used for cmake/make build systems.
This does not create any symlinks. It requires the build system
to build unconditionally.
Args:
output_file: Output filename for the version info cc
source_dir: Base path of the source code
git_tag_override: Override the value for the git tag. This is useful for
releases where we want to build the release before the git tag is
created.
"""
git_version = get_git_version(source_dir, git_tag_override)
write_version_info(output_file, git_version)
parser = argparse.ArgumentParser(description="""Git hash injection into bazel.
If used with --configure <path>, it will search for the git directory and put
symlinks into the source tree so that a bazel genrule can call --generate""")
parser.add_argument(
"--debug",
type=bool,
help="print debugging information about paths",
default=False)
parser.add_argument(
"--configure", type=str,
help="Path to configure as a git repo dependency tracking sentinel")
parser.add_argument(
"--gen_root_path", type=str,
help="Root path to place generated git files (created by --configure).")
parser.add_argument(
"--git_tag_override", type=str,
help="Override git tag value in the __git_version__ string. Useful when "
"creating release builds before the release tag is created.")
parser.add_argument(
"--generate",
type=str,
help="Generate given spec-file, HEAD-symlink-file, ref-symlink-file",
nargs="+")
parser.add_argument(
"--raw_generate",
type=str,
help="Generate version_info.cc (simpler version used for cmake/make)")
parser.add_argument(
"--source_dir",
type=str,
help="Base path of the source code (used for cmake/make)")
args = parser.parse_args()
if args.configure is not None:
if args.gen_root_path is None:
raise RuntimeError("Must pass --gen_root_path arg when running --configure")
configure(args.configure, args.gen_root_path, debug=args.debug)
elif args.generate is not None:
generate(args.generate, args.git_tag_override)
elif args.raw_generate is not None:
source_path = "."
if args.source_dir is not None:
source_path = args.source_dir
raw_generate(args.raw_generate, source_path, args.git_tag_override)
else:
raise RuntimeError("--configure or --generate or --raw_generate "
"must be used")
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import collections
UNKNOWN = np.nan
R = 1
FR = 2
F = 3
FL = 4
L = 5
BL = 6
B = 7
BR = 8
U = 9
UF = 10
UB = 11
UL = 12
UR = 13
UFL = 14
UFR = 15
UBL = 16
UBR = 17
D = 18
DF = 19
DB = 20
DL = 21
DR = 22
DFL = 23
DFR = 24
DBL = 25
DBR = 26
INT_TO_CODE = collections.OrderedDict([
(UNKNOWN, 'unknown'),
(R, 'right'),
(FR, 'frontright'),
(F, 'front'),
(FL, 'frontleft'),
(L, 'left'),
(BL, 'backleft'),
(B, 'back'),
(BR, 'backright'),
(U, 'up'),
(UF, 'upfront'),
(UB, 'upback'),
(UL, 'upleft'),
(UR, 'upright'),
(UFL, 'upfrontleft'),
(UFR, 'upfrontright'),
(UBL, 'upbackleft'),
(UBR, 'upbackright'),
(D, 'down'),
(DF, 'downfront'),
(DB, 'downback'),
(DL, 'downleft'),
(DR, 'downright'),
(DFL, 'downfrontleft'),
(DFR, 'downfrontright'),
(DBL, 'downbackleft'),
(DBR, 'downbackright'),
])
# HACK: mirrors ibeis viewpoint int distance encoding
VIEW_INT_DIST = {
# DIST 0 PAIRS
(B, B): 0, (BL, BL): 0, (BR, BR): 0, (D, D): 0, (DB, DB): 0,
(DBL, DBL): 0, (DBR, DBR): 0, (DF, DF): 0, (DFL, DFL): 0,
(DFR, DFR): 0, (DL, DL): 0, (DR, DR): 0, (F, F): 0, (FL, FL): 0,
(FR, FR): 0, (L, L): 0, (R, R): 0, (U, U): 0, (UB, UB): 0,
(UBL, UBL): 0, (UBR, UBR): 0, (UF, UF): 0, (UFL, UFL): 0,
(UFR, UFR): 0, (UL, UL): 0, (UR, UR): 0,
# DIST 1 PAIRS
(B, BL): 1, (B, BR): 1, (B, DB): 1, (B, DBL): 1, (B, DBR): 1,
(B, UB): 1, (B, UBL): 1, (B, UBR): 1, (BL, DBL): 1, (BL, L): 1,
(BL, UBL): 1, (BR, DBR): 1, (BR, R): 1, (BR, UBR): 1, (D, DB): 1,
(D, DBL): 1, (D, DBR): 1, (D, DF): 1, (D, DFL): 1, (D, DFR): 1,
(D, DL): 1, (D, DR): 1, (DB, DBL): 1, (DB, DBR): 1, (DBL, DL): 1,
(DBL, L): 1, (DBR, DR): 1, (DBR, R): 1, (DF, DFL): 1, (DF, DFR): 1,
(DF, F): 1, (DFL, DL): 1, (DFL, F): 1, (DFL, FL): 1, (DFL, L): 1,
(DFR, DR): 1, (DFR, F): 1, (DFR, FR): 1, (DFR, R): 1, (DL, L): 1,
(DR, R): 1, (F, FL): 1, (F, FR): 1, (F, UF): 1, (F, UFL): 1,
(F, UFR): 1, (FL, L): 1, (FL, UFL): 1, (FR, R): 1, (FR, UFR): 1,
(L, UBL): 1, (L, UFL): 1, (L, UL): 1, (R, UBR): 1, (R, UFR): 1,
(R, UR): 1, (U, UB): 1, (U, UBL): 1, (U, UBR): 1, (U, UF): 1,
(U, UFL): 1, (U, UFR): 1, (U, UL): 1, (U, UR): 1, (UB, UBL): 1,
(UB, UBR): 1, (UBL, UL): 1, (UBR, UR): 1, (UF, UFL): 1, (UF, UFR): 1,
(UFL, UL): 1, (UFR, UR): 1,
# DIST 2 PAIRS
(B, D): 2, (B, DL): 2, (B, DR): 2, (B, L): 2, (B, R): 2, (B, U): 2,
(B, UL): 2, (B, UR): 2, (BL, BR): 2, (BL, D): 2, (BL, DB): 2,
(BL, DBR): 2, (BL, DFL): 2, (BL, DL): 2, (BL, FL): 2, (BL, U): 2,
(BL, UB): 2, (BL, UBR): 2, (BL, UFL): 2, (BL, UL): 2, (BR, D): 2,
(BR, DB): 2, (BR, DBL): 2, (BR, DFR): 2, (BR, DR): 2, (BR, FR): 2,
(BR, U): 2, (BR, UB): 2, (BR, UBL): 2, (BR, UFR): 2, (BR, UR): 2,
(D, F): 2, (D, FL): 2, (D, FR): 2, (D, L): 2, (D, R): 2, (DB, DF): 2,
(DB, DFL): 2, (DB, DFR): 2, (DB, DL): 2, (DB, DR): 2, (DB, L): 2,
(DB, R): 2, (DB, UB): 2, (DB, UBL): 2, (DB, UBR): 2, (DBL, DBR): 2,
(DBL, DF): 2, (DBL, DFL): 2, (DBL, DFR): 2, (DBL, DR): 2, (DBL, FL): 2,
(DBL, UB): 2, (DBL, UBL): 2, (DBL, UBR): 2, (DBL, UFL): 2,
(DBL, UL): 2, (DBR, DF): 2, (DBR, DFL): 2, (DBR, DFR): 2, (DBR, DL): 2,
(DBR, FR): 2, (DBR, UB): 2, (DBR, UBL): 2, (DBR, UBR): 2,
(DBR, UFR): 2, (DBR, UR): 2, (DF, DL): 2, (DF, DR): 2, (DF, FL): 2,
(DF, FR): 2, (DF, L): 2, (DF, R): 2, (DF, UF): 2, (DF, UFL): 2,
(DF, UFR): 2, (DFL, DFR): 2, (DFL, DR): 2, (DFL, FR): 2, (DFL, UBL): 2,
(DFL, UF): 2, (DFL, UFL): 2, (DFL, UFR): 2, (DFL, UL): 2, (DFR, DL): 2,
(DFR, FL): 2, (DFR, UBR): 2, (DFR, UF): 2, (DFR, UFL): 2,
(DFR, UFR): 2, (DFR, UR): 2, (DL, DR): 2, (DL, F): 2, (DL, FL): 2,
(DL, UBL): 2, (DL, UFL): 2, (DL, UL): 2, (DR, F): 2, (DR, FR): 2,
(DR, UBR): 2, (DR, UFR): 2, (DR, UR): 2, (F, L): 2, (F, R): 2,
(F, U): 2, (F, UL): 2, (F, UR): 2, (FL, FR): 2, (FL, U): 2,
(FL, UBL): 2, (FL, UF): 2, (FL, UFR): 2, (FL, UL): 2, (FR, U): 2,
(FR, UBR): 2, (FR, UF): 2, (FR, UFL): 2, (FR, UR): 2, (L, U): 2,
(L, UB): 2, (L, UF): 2, (R, U): 2, (R, UB): 2, (R, UF): 2, (UB, UF): 2,
(UB, UFL): 2, (UB, UFR): 2, (UB, UL): 2, (UB, UR): 2, (UBL, UBR): 2,
(UBL, UF): 2, (UBL, UFL): 2, (UBL, UFR): 2, (UBL, UR): 2, (UBR, UF): 2,
(UBR, UFL): 2, (UBR, UFR): 2, (UBR, UL): 2, (UF, UL): 2, (UF, UR): 2,
(UFL, UFR): 2, (UFL, UR): 2, (UFR, UL): 2, (UL, UR): 2,
# DIST 3 PAIRS
(B, DF): 3, (B, DFL): 3, (B, DFR): 3, (B, FL): 3, (B, FR): 3,
(B, UF): 3, (B, UFL): 3, (B, UFR): 3, (BL, DF): 3, (BL, DFR): 3,
(BL, DR): 3, (BL, F): 3, (BL, R): 3, (BL, UF): 3, (BL, UFR): 3,
(BL, UR): 3, (BR, DF): 3, (BR, DFL): 3, (BR, DL): 3, (BR, F): 3,
(BR, L): 3, (BR, UF): 3, (BR, UFL): 3, (BR, UL): 3, (D, UB): 3,
(D, UBL): 3, (D, UBR): 3, (D, UF): 3, (D, UFL): 3, (D, UFR): 3,
(D, UL): 3, (D, UR): 3, (DB, F): 3, (DB, FL): 3, (DB, FR): 3, (DB, U): 3,
(DB, UFL): 3, (DB, UFR): 3, (DB, UL): 3, (DB, UR): 3, (DBL, F): 3,
(DBL, FR): 3, (DBL, R): 3, (DBL, U): 3, (DBL, UF): 3, (DBL, UR): 3,
(DBR, F): 3, (DBR, FL): 3, (DBR, L): 3, (DBR, U): 3, (DBR, UF): 3,
(DBR, UL): 3, (DF, U): 3, (DF, UBL): 3, (DF, UBR): 3, (DF, UL): 3,
(DF, UR): 3, (DFL, R): 3, (DFL, U): 3, (DFL, UB): 3, (DFL, UR): 3,
(DFR, L): 3, (DFR, U): 3, (DFR, UB): 3, (DFR, UL): 3, (DL, FR): 3,
(DL, R): 3, (DL, U): 3, (DL, UB): 3, (DL, UBR): 3, (DL, UF): 3,
(DL, UFR): 3, (DR, FL): 3, (DR, L): 3, (DR, U): 3, (DR, UB): 3,
(DR, UBL): 3, (DR, UF): 3, (DR, UFL): 3, (F, UB): 3, (F, UBL): 3,
(F, UBR): 3, (FL, R): 3, (FL, UB): 3, (FL, UBR): 3, (FL, UR): 3,
(FR, L): 3, (FR, UB): 3, (FR, UBL): 3, (FR, UL): 3, (L, UBR): 3,
(L, UFR): 3, (L, UR): 3, (R, UBL): 3, (R, UFL): 3, (R, UL): 3,
# DIST 4 PAIRS
(B, F): 4, (BL, FR): 4, (BR, FL): 4, (D, U): 4, (DB, UF): 4,
(DBL, UFR): 4, (DBR, UFL): 4, (DF, UB): 4, (DFL, UBR): 4,
(DFR, UBL): 4, (DL, UR): 4, (DR, UL): 4, (L, R): 4,
# UNDEFINED DIST PAIRS
(B, UNKNOWN): np.nan, (BL, UNKNOWN): np.nan, (BR, UNKNOWN): np.nan,
(D, UNKNOWN): np.nan, (DB, UNKNOWN): np.nan, (DBL, UNKNOWN): np.nan,
(DBR, UNKNOWN): np.nan, (DF, UNKNOWN): np.nan, (DFL, UNKNOWN): np.nan,
(DFR, UNKNOWN): np.nan, (DL, UNKNOWN): np.nan, (DR, UNKNOWN): np.nan,
(F, UNKNOWN): np.nan, (FL, UNKNOWN): np.nan, (FR, UNKNOWN): np.nan,
(L, UNKNOWN): np.nan, (R, UNKNOWN): np.nan, (U, UNKNOWN): np.nan,
(UB, UNKNOWN): np.nan, (UBL, UNKNOWN): np.nan, (UBR, UNKNOWN): np.nan,
(UF, UNKNOWN): np.nan, (UFL, UNKNOWN): np.nan, (UFR, UNKNOWN): np.nan,
(UL, UNKNOWN): np.nan, (UNKNOWN, B): np.nan, (UNKNOWN, BL): np.nan,
(UNKNOWN, BR): np.nan, (UNKNOWN, D): np.nan, (UNKNOWN, DB): np.nan,
(UNKNOWN, DBL): np.nan, (UNKNOWN, DBR): np.nan, (UNKNOWN, DF): np.nan,
(UNKNOWN, DFL): np.nan, (UNKNOWN, DFR): np.nan, (UNKNOWN, DL): np.nan,
(UNKNOWN, DR): np.nan, (UNKNOWN, F): np.nan, (UNKNOWN, FL): np.nan,
(UNKNOWN, FR): np.nan, (UNKNOWN, L): np.nan, (UNKNOWN, R): np.nan,
(UNKNOWN, U): np.nan, (UNKNOWN, UB): np.nan, (UNKNOWN, UBL): np.nan,
(UNKNOWN, UBR): np.nan, (UNKNOWN, UF): np.nan, (UNKNOWN, UFL): np.nan,
(UNKNOWN, UFR): np.nan, (UNKNOWN, UL): np.nan, (UNKNOWN, UR): np.nan,
(UR, UNKNOWN): np.nan, (UNKNOWN, UNKNOWN): np.nan,
}
# make distance symmetric
for (f1, f2), d in list(VIEW_INT_DIST.items()):
VIEW_INT_DIST[(f2, f1)] = d
# Make string based version
VIEW_CODE_DIST = {
(INT_TO_CODE[f1], INT_TO_CODE[f2]): d
for (f1, f2), d in VIEW_INT_DIST.items()
}
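# Hedged usage sketch: after the symmetrization above, viewpoint distances can
# be looked up by integer constant or by code string in either argument order.
# The helper name is made up and not part of any public API.
def _view_dist_demo():
    assert VIEW_INT_DIST[(B, F)] == VIEW_INT_DIST[(F, B)] == 4
    # Returns 3: 'up' and 'downback' are three neighbouring faces apart
    return VIEW_CODE_DIST[('up', 'downback')]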
def RhombicuboctahedronDistanceDemo():
import utool as ut
def rhombicuboctahedro_faces():
""" yields names of all 26 rhombicuboctahedron faces"""
face_axes = [['up', 'down'], ['front', 'back'], ['left', 'right']]
ordering = {f: p for p, fs in enumerate(face_axes) for f in fs}
for i in range(1, len(face_axes) + 1):
for axes in list(ut.combinations(face_axes, i)):
for combo in ut.product(*axes):
sortx = ut.argsort(ut.take(ordering, combo))
face = tuple(ut.take(combo, sortx))
yield face
# Each face is a node.
import networkx as nx
G = nx.Graph()
faces = list(rhombicuboctahedro_faces())
G.add_nodes_from(faces)
    # Two faces are connected if they share an edge or a vertex
# TODO: is there a more general definition?
face_axes = [['up', 'down'], ['front', 'back'], ['left', 'right']]
ordering = {f: p for p, fs in enumerate(face_axes) for f in fs}
# In this case faces might share an edge or vertex if their names intersect
edges = []
for face1, face2 in ut.combinations(faces, 2):
set1 = set(face1)
set2 = set(face2)
if len(set1.intersection(set2)) > 0:
diff1 = set1.difference(set2)
diff2 = set2.difference(set1)
sortx1 = ut.argsort(ut.take(ordering, diff1))
sortx2 = ut.argsort(ut.take(ordering, diff2))
# If they share a name that is on opposite poles, then they cannot
# share an edge or vertex.
if not list(set(sortx1).intersection(set(sortx2))):
edges.append((face1, face2))
# print('-----')
# print('Edge: {} {}'.format(face1, face2))
# print('diff1 = {!r}'.format(diff1))
# print('diff2 = {!r}'.format(diff2))
G.add_edges_from(edges)
# Build distance lookup table
lookup = {}
for face1, face2 in ut.combinations(faces, 2):
# key = tuple(sorted([''.join(face1), ''.join(face2)]))
key = tuple(sorted([face1, face2]))
dist = nx.shortest_path_length(G, face1, face2)
lookup[key] = dist
def convert(face):
if face is None:
return 'UNKNOWN'
else:
return ''.join([p[0].upper() for p in face])
dist_lookup = {
(convert(k1), convert(k2)): d
for (k1, k2), d in lookup.items()
}
for face in faces:
dist_lookup[(convert(face), convert(face))] = 0
dist_lookup[(convert(face), convert(None))] = None
dist_lookup[(convert(None), convert(face))] = None
dist_lookup[(convert(None), convert(None))] = None
# z = {'({}, {})'.format(k1, k2): d for (k1, k2), d in dist_lookup.items()}
# for i in range(0, 5):
# print(ub.repr2({k: v for k, v in z.items() if v == i}, si=True))
# i = None
# print(ub.repr2({k: v for k, v in z.items() if v == i}, si=True))
# z = ut.sort_dict(z, 'vals')
# print(ub.repr2(z, nl=2, si=True))
# if False:
# from ibeis import constants as const
# VIEW = const.VIEW
# viewint_dist_lookup = {
# (VIEW.CODE_TO_INT[f1], VIEW.CODE_TO_INT[f2]): d
# (f1, f2) for (f1, f2), d in viewcode_dist_lookup.items()
# }
# for k, v in viewcode_dist_lookup.items():
# if 'up' not in k[0] and 'down' not in k[0]:
# if 'up' not in k[1] and 'down' not in k[1]:
# print(k, v)
def visualize_connection_graph():
# node_to_group = {f: str(len(f)) for f in faces}
node_to_group = {}
for f in faces:
if 'up' in f:
node_to_group[f] = '0.' + str(len(f))
elif 'down' in f:
node_to_group[f] = '1.' + str(len(f))
else:
node_to_group[f] = '2.' + str(len(f))
nx.set_node_attributes(G, name='groupid', values=node_to_group)
node_to_label = {f: ''.join(ut.take_column(f, 0)).upper() for f in faces}
nx.set_node_attributes(G, name='label', values=node_to_label)
import plottool_ibeis as pt
pt.qt4ensure()
pt.show_nx(G, prog='neato', groupby='groupid')
visualize_connection_graph()
|
|
"""Functions to simulate scans and maps."""
import numpy as np
import numpy.random as ra
import os
from astropy.io import fits
from astropy.table import Table, vstack
import astropy.units as u
from collections.abc import Iterable
from .io import mkdir_p, locations
from .utils import tqdm, jit
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy import log
try:
import matplotlib.pyplot as plt
HAS_MPL = True
except ImportError:
HAS_MPL = False
__all__ = ["simulate_scan", "save_scan", "simulate_map"]
DEFAULT_PEAK_COUNTS = 100
COUNTS_TO_K = 0.03
DEFAULT_CAL_TEMP = 5
DEFAULT_CAL_OFFSET = DEFAULT_CAL_TEMP / COUNTS_TO_K
summary_header = """
SIMPLE = T / file does conform to FITS standard
BITPIX = 8 / number of bits per data pixel
NAXIS = 0 / number of data axes
EXTEND = T / FITS dataset may contain extensions
COMMENT FITS (Flexible Image Transport System) format is defined in 'Astronomy
COMMENT and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H
HIERARCH BackendName = 'NULL ' / Backend name
CREATOR = 'NULL ' / Software (incl. version)
DATE-OBS= '2016-10-03T14:59:08.753' / Observation time
EQUINOX = 0. / Equinox of RA, Dec
EXPTIME = 0. / Total integration time (seconds)
FITSVER = 'V.1.11 ' / FITS version
LST = 0 / Local sidereal time
HIERARCH LogFileName = 'NULL ' / Name of the log file
HIERARCH NUSEBANDS = 0 / Number of sections
OBJECT = 'W51 ' / Target source name
OBSID = 'NULL ' / Observer or operator initials
PROJID = 'NULL ' / ProjectID
HIERARCH RESTFREQ1 = 22235.08 / Rest frequency (MHz)
HIERARCH RESTFREQ2 = 22235.08 / Rest frequency (MHz)
HIERARCH RESTFREQ3 = 22235.08 / Rest frequency (MHz)
HIERARCH RESTFREQ4 = 22235.08 / Rest frequency (MHz)
HIERARCH ReceiverCode = 'CCB ' / Receiver name
HIERARCH RightAscension = 5.07757730974885 / Target right ascension (radians)
HIERARCH Declination = 0.253290907695677 / Target declination (radians)
SCANGEOM= 'NULL ' / Scan geometry
SCANMODE= 'NULL ' / Mapping mode
SCANTYPE= 'NULL ' / Scan astronomical type
SCANXVEL= 0. / Tracking rate (optional,OTF)
SWTCHMOD= 'NULL ' / Switch mode
HIERARCH ScheduleName = 'NULL ' / Name of the schedule
TELESCOP= 'SRT ' / Telescope name
VDEF = 'OP ' / Radial velocity definition
VFRAME = 'LSRK ' / Radial velocity reference frame
VRAD = 0 / Radial velocity
WOBUSED = 0 / Wobbler used?
"""
_REFERENCE_MJD = 57000.5
def _apply_spectrum_to_data(spec_func, counts, nbin, bw=1000):
"""
Examples
--------
>>> res = _apply_spectrum_to_data(lambda x: np.ones(x.size), 4, 3)
>>> np.allclose(res, [4., 4., 4.])
True
>>> res = _apply_spectrum_to_data(lambda x: np.ones(x.size), [4, 2], 3)
>>> np.allclose(res, [[4., 4., 4.], [2, 2, 2]])
True
>>> _apply_spectrum_to_data(lambda x: np.ones(x.size), 4, 1)
4
"""
if nbin == 1:
return counts
single = False
if not isinstance(counts, Iterable):
counts = [counts]
single = True
counts = np.asarray(counts)
df = bw / nbin
freqs = np.arange(0, bw, df)
single_spec = spec_func(freqs)
spec = np.zeros((len(counts), len(freqs)))
for i, c in enumerate(counts):
spec[i, :] += c * single_spec
if single:
return spec[0]
return spec
def _standard_source_spectrum(counts, nbin, bw=1000, sigma=1):
def spec_func(f):
f = f - bw / 2
return np.exp(-(f ** 2) / (2 * sigma ** 2))
return _apply_spectrum_to_data(spec_func, counts, nbin, bw)
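# Hedged check of the shape produced above: with the band split into nbin
# channels, the simulated source spectrum peaks at the central channel. The
# sigma used here is only chosen to make the Gaussian clearly resolved.
def _source_spectrum_peak_demo():
    spec = _standard_source_spectrum(10.0, nbin=64, bw=1000, sigma=100)
    # Returns 32, the central bin of the 64-channel band
    return int(np.argmax(spec))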
def _standard_bkg_spectrum(counts, nbin, bw=1000):
def spec_func(f):
sp = 1 + 0.1 * np.sin(2 * np.pi * 5 / bw * f) * (1 - f / bw)
sp -= 0.5 * f / bw
return sp
return _apply_spectrum_to_data(spec_func, counts, nbin, bw)
def create_summary(filename, key_dict=None):
if key_dict is None:
key_dict = {}
header = fits.Header.fromstring(summary_header, sep="\n")
for key, value in key_dict.items():
header[key] = value
primary_hdu = fits.PrimaryHDU(header=header)
hdul = fits.HDUList([primary_hdu])
hdul.writeto(filename, overwrite=True)
return filename
def _is_number(x):
""" "Test if a string or other is a number
Examples
--------
>>> _is_number('3')
True
>>> _is_number(3.)
True
>>> _is_number('a')
False
"""
try:
float(x)
return True
except (ValueError, TypeError):
return False
def _default_flat_shape(x):
"""A flat shape.
Examples
--------
>>> _default_flat_shape(4314)
100.0
>>> np.allclose(_default_flat_shape(np.arange(3)),
... np.array([100., 100., 100.]))
True
"""
return DEFAULT_PEAK_COUNTS + np.zeros(np.asarray(x).shape)
@jit(nopython=True)
def _2d_gauss(x, y, sigma=2.5 / 60.0):
"""A Gaussian beam"""
return np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
@jit(nopython=True)
def calibrator_scan_func(x):
return DEFAULT_PEAK_COUNTS * _2d_gauss(x, 0, sigma=2.5 / 60)
def sim_crossscans(
ncross,
caldir,
scan_func=calibrator_scan_func,
srcname="DummyCal",
channel_ratio=0.8,
baseline="flat",
nbin=1,
):
src_ra = 185
src_dec = 75
speed = 2.0 # arcmin/s
dt = 0.04
dtheta = speed * dt
length = 4 / dtheta
timedelta = 0
for i in tqdm(range(ncross)):
times, ras, scan0 = simulate_scan(
dt=dt,
length=length,
speed=speed,
shape=scan_func,
noise_amplitude=0.2,
center=0,
baseline=baseline,
nbin=nbin,
)
_, _, scan1 = simulate_scan(
dt=dt,
length=length,
speed=speed,
shape=scan_func,
noise_amplitude=0.2,
center=0,
baseline=baseline,
nbin=nbin,
)
ras = ras / np.cos(np.radians(src_dec)) + src_ra
if i % 2 != 0:
ras = ras[::-1]
decs = np.zeros_like(ras) + src_dec
save_scan(
times + timedelta,
ras,
decs,
{"Ch0": scan0, "Ch1": scan1 * channel_ratio},
filename=os.path.join(caldir, "{}_Ra.fits".format(i)),
src_ra=src_ra,
src_dec=src_dec,
srcname=srcname,
counts_to_K=(COUNTS_TO_K, COUNTS_TO_K / channel_ratio),
)
timedelta += times[-1] + 1
times, decs, scan0 = simulate_scan(
dt=dt,
length=length,
speed=speed,
shape=scan_func,
noise_amplitude=0.2,
center=src_dec,
baseline=baseline,
nbin=nbin,
)
_, _, scan1 = simulate_scan(
dt=dt,
length=length,
speed=speed,
shape=scan_func,
noise_amplitude=0.2,
center=src_dec,
baseline=baseline,
nbin=nbin,
)
if i % 2 != 0:
decs = decs[::-1]
ras = np.zeros_like(decs) + src_ra
save_scan(
times + timedelta,
ras,
decs,
{"Ch0": scan0, "Ch1": scan1 * channel_ratio},
filename=os.path.join(caldir, "{}_Dec.fits".format(i)),
src_ra=src_ra,
src_dec=src_dec,
srcname=srcname,
counts_to_K=(COUNTS_TO_K, COUNTS_TO_K / channel_ratio),
)
timedelta += times[-1] + 1
create_summary(
os.path.join(caldir, "summary.fits"),
{
"RightAscension": np.radians(src_ra),
"Declination": np.radians(src_dec),
"Object": srcname,
},
)
def _default_map_shape(x, y):
"""A flat map shape.
Examples
--------
>>> _default_map_shape(4314, 234)
100
>>> res = np.array([[ 100., 100., 100., 100.],
... [ 100., 100., 100., 100.],
... [ 100., 100., 100., 100.]])
>>> np.allclose(_default_map_shape(np.zeros((3, 4)), np.ones((3, 4))), res)
True
"""
x = np.asarray(x)
y = np.asarray(y)
# It will raise a ValueError when x and y are not compatible
return DEFAULT_PEAK_COUNTS + np.zeros_like(y) * np.zeros_like(x)
def sim_position_switching(
caldir,
srcname="Dummy",
nbin=1,
offset=np.radians(3),
strategy=None,
legacy_cal_format=False,
):
dt = 0.04
src_ra = 185
src_dec = 75
if strategy is None:
strategy = [1, 1, 1]
last_time = 0
for n_on in range(strategy[0]):
times, ras, on = simulate_scan(
dt=dt,
length=0,
speed=1,
baseline=(0, 10, 0),
noise_amplitude=0.2,
center=src_dec,
nbin=nbin,
)
times += last_time
last_time = times[0]
decs = np.zeros_like(ras) + src_dec
save_scan(
times,
ras,
decs,
{"Ch0": on, "Ch1": on},
filename=os.path.join(caldir, "ON_{}.fits".format(n_on)),
src_ra=src_ra,
src_dec=src_dec,
srcname=srcname,
counts_to_K=(COUNTS_TO_K, COUNTS_TO_K),
other_keywords={"SIGNAL": "SIGNAL"},
)
for n_off in range(strategy[1]):
times, _, off = simulate_scan(
dt=dt,
length=0,
speed=1,
baseline=(0, 10, 0),
shape=lambda x: 0,
noise_amplitude=0.2,
center=src_dec,
nbin=nbin,
)
times += last_time
last_time = times[0]
save_scan(
times,
ras + offset,
decs,
{"Ch0": off, "Ch1": off},
filename=os.path.join(caldir, "OFF_{}.fits".format(n_off)),
src_ra=src_ra,
src_dec=src_dec,
srcname=srcname,
counts_to_K=(COUNTS_TO_K, COUNTS_TO_K),
other_keywords={
"RightAscension Offset": offset,
"SIGNAL": "REFERENCE",
},
)
for n_cal in range(strategy[2]):
times, _, cal = simulate_scan(
dt=dt,
length=0,
speed=1,
baseline=(0, 10, 0),
shape=lambda x: 0,
noise_amplitude=0.2,
center=src_dec,
nbin=nbin,
calon=True,
)
times += last_time
last_time = times[0]
other_keywords_cal = {
"RightAscension Offset": offset,
"SIGNAL": "REFCAL",
}
other_columns = {}
if legacy_cal_format:
other_columns = {"flag_cal": 1}
other_keywords_cal["SIGNAL"] = "REFERENCE"
save_scan(
times,
ras + offset,
decs,
{"Ch0": cal, "Ch1": cal},
filename=os.path.join(caldir, "CAL_{}.fits".format(n_cal)),
src_ra=src_ra,
src_dec=src_dec,
srcname=srcname,
counts_to_K=(COUNTS_TO_K, COUNTS_TO_K),
other_keywords=other_keywords_cal,
other_columns=other_columns,
)
create_summary(
os.path.join(caldir, "summary.fits"),
{
"RightAscension": np.radians(src_ra),
"Declination": np.radians(src_dec),
"Object": srcname,
},
)
return caldir
def simulate_scan(
dt=0.04,
length=120.0,
speed=4.0,
shape=None,
noise_amplitude=1.0,
center=0.0,
baseline="flat",
nbin=1,
calon=False,
nsamples=None,
):
"""Simulate a scan.
Parameters
----------
dt : float
The integration time in seconds
length : float
Length of the scan in arcminutes
speed : float
Speed of the scan in arcminutes / second
shape : function
Function that describes the shape of the scan. If None, a
constant scan is assumed. The zero point of the scan is in the
*center* of it
noise_amplitude : float
Noise level in counts
center : float
Center coordinate in degrees
baseline : str, number or tuple
"flat", "slope" (linearly increasing/decreasing), "messy"
(random walk), a number (which gives an amplitude to the random-walk
baseline, that is 20 for "messy"), or a tuple (m, q, messy_amp) giving
the maximum and minimum absolute-value slope and intercept, and the
random-walk amplitude.
"""
if shape is None:
shape = _default_flat_shape
if nsamples is None and length == 0:
nsamples = 100
elif nsamples is None:
nsamples = np.rint(length / speed / dt)
times = np.arange(nsamples) * dt
# In degrees!
position = np.arange(-nsamples / 2, nsamples / 2) / nsamples * length / 60
scan_baseline = _create_baseline(position, baseline)
signal = _standard_source_spectrum(shape(position), nbin)
bkg = _standard_bkg_spectrum(scan_baseline, nbin)
scan_shape = signal + bkg
scan_shape += ra.normal(0, noise_amplitude, scan_shape.shape)
if calon:
scan_shape += DEFAULT_CAL_OFFSET
return times, position + center, scan_shape
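# Illustrative usage sketch (not part of the original module; the helper name
# and the numbers below are ours and purely indicative): simulate a short scan
# using the tuple form of ``baseline`` documented above.
def _example_simulate_scan():
    """Return a 10-arcmin scan with slope 2, intercept 50 and random-walk amplitude 5."""
    times, position, counts = simulate_scan(
        dt=0.04,
        length=10.0,
        speed=4.0,
        noise_amplitude=0.2,
        baseline=(2, 50, 5),
        nbin=4,
    )
    return times, position, counts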
def save_scan(
times,
ra,
dec,
channels,
filename="out.fits",
other_columns=None,
other_keywords=None,
scan_type=None,
src_ra=None,
src_dec=None,
srcname="Dummy",
counts_to_K=COUNTS_TO_K,
):
"""Save a simulated scan in fitszilla format.
Parameters
----------
times : iterable
times corresponding to each bin center, in seconds
ra : iterable
RA corresponding to each bin center
dec : iterable
Dec corresponding to each bin center
channels : {'Ch0': array([...]), 'Ch1': array([...]), ...}
Dictionary containing the count array. Keys represent the name of the
channel
filename : str
Output file name
srcname : str
Name of the source
counts_to_K : float, array or dict
Conversion factor between counts and K. If array, it has to be the same
length as channels.keys()
"""
if src_ra is None:
src_ra = np.mean(ra)
if src_dec is None:
src_dec = np.mean(dec)
if other_columns is None:
other_columns = {}
if other_keywords is None:
other_keywords = {}
# If it's a single value, make it into a list
if not isinstance(counts_to_K, Iterable):
counts_to_K = counts_to_K * np.ones(len(list(channels.keys())))
# If it's a list, make it into a dict
if not hasattr(counts_to_K, "keys"):
counts_to_K = dict(
[(ch, counts_to_K[i]) for i, ch in enumerate(channels.keys())]
)
curdir = os.path.abspath(os.path.dirname(__file__))
template = os.path.abspath(
os.path.join(curdir, "data", "scan_template.fits")
)
lchdulist = fits.open(template)
datahdu = lchdulist["DATA TABLE"]
temphdu = lchdulist["ANTENNA TEMP TABLE"]
secthdu = lchdulist["SECTION TABLE"]
rfinput = lchdulist["RF INPUTS"]
lchdulist[0].header["SOURCE"] = "Dummy"
lchdulist[0].header["ANTENNA"] = "SRT"
lchdulist[0].header["HIERARCH RightAscension"] = np.radians(src_ra)
lchdulist[0].header["HIERARCH Declination"] = np.radians(src_dec)
if scan_type is not None:
lchdulist[0].header["HIERARCH SubScanType"] = scan_type
for key in other_keywords.keys():
lchdulist[0].header[key] = other_keywords[key]
data_table_data = Table(datahdu.data)
data_table_data.remove_column("Ch0")
data_table_data.remove_column("Ch1")
obstimes = Time(
(times / 86400 + _REFERENCE_MJD) * u.day, format="mjd", scale="utc"
)
coords = SkyCoord(
ra, dec, unit=u.degree, location=locations["srt"], obstime=obstimes
)
altaz = coords.altaz
el = altaz.alt.rad
az = altaz.az.rad
newtable = Table(
names=["time", "raj2000", "decj2000", "el", "az"],
data=[obstimes.value, np.radians(ra), np.radians(dec), el, az],
)
for ch in channels.keys():
newtable[ch] = channels[ch]
for col in other_columns.keys():
newtable[col] = other_columns[col]
data_table_data = vstack([data_table_data, newtable])
hdu = fits.BinTableHDU(data_table_data, header=datahdu.header)
nrows = len(data_table_data)
datahdu.data = hdu.data
temptable = Table()
for ch in channels.keys():
dummy_data = newtable[ch]
if len(dummy_data.shape) == 2:
dummy_data = np.sum(dummy_data, axis=1)
temptable[ch] = dummy_data * counts_to_K[ch]
thdu = fits.BinTableHDU.from_columns(temphdu.data.columns, nrows=nrows)
for colname in temphdu.data.columns.names:
thdu.data[colname][:] = temptable[colname]
temphdu.data = thdu.data
shape = channels["Ch0"].shape
if len(shape) == 2:
secthdu.data["bins"] = shape[1]
# Sic
rfinput.data["calibratonMark"] = DEFAULT_CAL_TEMP
lchdulist[0].header["SOURCE"] = srcname
lchdulist.writeto(filename, overwrite=True)
lchdulist.close()
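# Illustrative usage sketch (not part of the original module; the helper name,
# file name and values are arbitrary): write a single fake scan with two
# identical channels.  It assumes the bundled ``data/scan_template.fits``
# used by ``save_scan`` is available.
def _example_save_scan():
    times = np.arange(100) * 0.04
    ras = np.linspace(184.9, 185.1, 100)
    decs = np.zeros_like(ras) + 75.0
    counts = np.zeros_like(ras) + 100.0
    save_scan(
        times,
        ras,
        decs,
        {"Ch0": counts, "Ch1": counts},
        filename="example_scan.fits",
        srcname="Dummy",
        counts_to_K=(COUNTS_TO_K, COUNTS_TO_K),
    )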
def _single_value_as_tuple(value, nvals=2):
"""If a value is single, return as a tuple.
Examples
--------
>>> np.all(_single_value_as_tuple(1) == (1, 1))
True
>>> np.all(_single_value_as_tuple((1, 1, 1)) == (1, 1, 1))
True
>>> np.all(_single_value_as_tuple(1, nvals=3) == (1, 1, 1))
True
"""
if isinstance(value, Iterable):
return value
return tuple([value] * nvals)
def _create_baseline(x, baseline_kind="flat"):
"""
Parameters
----------
x : float, array-like
The x values for the baseline
    baseline : str, number or tuple
"flat", "slope" (linearly increasing/decreasing), "messy"
(random walk), a number (which gives an amplitude to the random-walk
baseline, that is 20 for "messy"), or a tuple (m, q, messy_amp) giving
the maximum and minimum absolute-value slope and intercept, and the
random-walk amplitude.
"""
if baseline_kind == "flat":
mmin = mmax = 0
qmin = qmax = 0
stochastic_amp = 0
elif baseline_kind == "slope":
mmin, mmax = -5, 5
qmin, qmax = 0, 150
stochastic_amp = 0
elif baseline_kind == "messy":
mmin, mmax = 0, 0
qmin, qmax = 0, 0
stochastic_amp = 20
elif _is_number(baseline_kind):
mmin, mmax = 0, 0
qmin, qmax = 0, 0
stochastic_amp = float(baseline_kind)
elif isinstance(baseline_kind, Iterable) and not isinstance(
baseline_kind, str
):
m = _single_value_as_tuple(baseline_kind[0], nvals=2)
q = _single_value_as_tuple(baseline_kind[1], nvals=2)
mmin, mmax = m[0], m[1]
qmin, qmax = q[0], q[1]
stochastic_amp = float(baseline_kind[2])
else:
raise ValueError(
"baseline has to be 'flat', 'slope', 'messy' or a " "number"
)
n = len(x)
m = ra.uniform(mmin, mmax)
q = ra.uniform(qmin, qmax)
signs = np.random.choice([-1, 1], n)
stochastic = np.cumsum(signs) * stochastic_amp / np.sqrt(n)
baseline = m * x + q
return baseline + stochastic
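# Illustrative sketch (not part of the original module; the helper name is
# ours): the "flat" baseline is identically zero, while the tuple form
# (m, q, messy_amp) pins the slope, intercept and random-walk amplitude.
def _example_create_baseline():
    x = np.linspace(0, 1, 50)
    flat = _create_baseline(x, "flat")
    assert np.allclose(flat, 0.0)
    # slope fixed at 2, intercept at 50, no random walk
    sloped = _create_baseline(x, (2, 50, 0))
    assert np.allclose(sloped, 2 * x + 50)
    return flat, sloped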
def simulate_sun(**kwargs):
from astropy.coordinates import get_sun
coords = get_sun(Time(_REFERENCE_MJD * u.day, format="mjd", scale="utc"))
    # Discard user-supplied values for these keywords: they are fixed for the Sun
    for key in ["mean_ra", "mean_dec", "count_map", "start_time"]:
        kwargs.pop(key, None)
    mean_ra = coords.ra.to(u.deg).value
    mean_dec = coords.dec.to(u.deg).value
    count_map = _sun_map
return simulate_map(
mean_ra=mean_ra, mean_dec=mean_dec, count_map=count_map, **kwargs
)
def _sun_map(x, y, sigma=1011 / 3600):
    """A uniform disk of radius ``sigma`` degrees, mimicking the solar disk."""
    # It will raise a ValueError when x and y are not compatible
    sun_disk = np.zeros_like(x) * np.zeros_like(y)
    sun_disk[x ** 2 + y ** 2 < sigma ** 2] = 100.0
    return sun_disk
def simulate_map(
dt=0.04,
length_ra=120.0,
length_dec=120.0,
speed=4.0,
spacing=0.5,
count_map=None,
noise_amplitude=1.0,
width_ra=None,
width_dec=None,
outdir="sim/",
baseline="flat",
mean_ra=180,
mean_dec=70,
srcname="Dummy",
channel_ratio=1,
nbin=1,
debug=False,
start_time=0,
):
"""Simulate a map.
Parameters
----------
    dt : float
        The integration time in seconds
    length_ra, length_dec : float
        Length of the scans along RA and Dec, in arcminutes
    speed : float
        Speed of the scan in arcminutes / second
    spacing : float
        Spacing between scans, in arcminutes
    count_map : function
        Flux distribution function, centered on zero. If None, a flat map
        shape is used
    noise_amplitude : float
        Noise level in counts
    width_ra, width_dec : float
        Width of the mapped region along RA and Dec, in arcminutes.
        Default to ``length_ra`` and ``length_dec``
    outdir : str or iterable (str, str)
        If a single string, put all files in that directory; if two strings,
        put RA and DEC scans in the two directories.
    baseline : str, number or tuple
        "flat", "slope" (linearly increasing/decreasing), "messy"
        (random walk), a number (which gives an amplitude to the random-walk
        baseline, that is 20 for "messy"), or a tuple (m, q, messy_amp) giving
        the maximum and minimum absolute-value slope and intercept, and the
        random-walk amplitude.
    mean_ra, mean_dec : float
        Coordinates of the map center, in degrees
    srcname : str
        Name of the simulated source
    channel_ratio : float
        Ratio between the counts in the two channels
    nbin : int
        Number of spectral bins in each sample
    debug : bool
        If True (and matplotlib is available), plot the simulated scans
    start_time : float
        Start time of the first sample, in seconds
    """
if isinstance(outdir, str):
outdir = (outdir, outdir)
outdir_ra = outdir[0]
outdir_dec = outdir[1]
mkdir_p(outdir_ra)
mkdir_p(outdir_dec)
if count_map is None:
count_map = _default_map_shape
nbins_ra = int(np.rint(length_ra / speed / dt))
nbins_dec = int(np.rint(length_dec / speed / dt))
times_ra = np.arange(nbins_ra) * dt + start_time
times_dec = np.arange(nbins_dec) * dt + start_time
ra_array = (
np.arange(-nbins_ra / 2, nbins_ra / 2) / nbins_ra * length_ra / 60
)
dec_array = (
np.arange(-nbins_dec / 2, nbins_dec / 2) / nbins_dec * length_dec / 60
)
# In degrees!
if width_dec is None:
width_dec = length_dec
if width_ra is None:
width_ra = length_ra
    # Scans along RA, one for each Dec offset (saved as Ra*.fits)
if HAS_MPL and debug:
fig = plt.figure()
delta_decs = (
np.arange(-width_dec / 2, width_dec / 2 + spacing, spacing) / 60
)
log.info("Simulating dec scans...")
for i_d, delta_dec in enumerate(tqdm(delta_decs)):
start_dec = mean_dec + delta_dec
counts_clean = _standard_source_spectrum(
count_map(ra_array, delta_dec), nbin=nbin
)
baseline0 = _standard_bkg_spectrum(
_create_baseline(ra_array, baseline), nbin=nbin
)
baseline1 = _standard_bkg_spectrum(
_create_baseline(ra_array, baseline), nbin=nbin
)
counts0 = (
counts_clean
+ ra.normal(0, noise_amplitude, counts_clean.shape)
+ baseline0
)
counts1 = (
counts_clean
+ ra.normal(0, noise_amplitude, counts_clean.shape)
+ baseline1
)
actual_ra = mean_ra + ra_array / np.cos(np.radians(start_dec))
if i_d % 2 != 0:
actual_ra = actual_ra[::-1]
fname = os.path.join(outdir_ra, "Ra{}.fits".format(i_d))
other_keywords = {"HIERARCH Declination Offset": delta_dec}
save_scan(
times_ra,
actual_ra,
np.zeros_like(actual_ra) + start_dec,
{"Ch0": counts0, "Ch1": counts1 * channel_ratio},
filename=fname,
other_keywords=other_keywords,
src_ra=mean_ra,
src_dec=mean_dec,
srcname=srcname,
counts_to_K=(COUNTS_TO_K, COUNTS_TO_K / channel_ratio),
)
if HAS_MPL and debug:
plt.plot(ra_array, counts0)
plt.plot(ra_array, counts1)
if HAS_MPL and debug:
fig.savefig(os.path.join(outdir_ra, "allscans_ra.png"))
plt.close(fig)
fig = plt.figure()
delta_ras = np.arange(-width_ra / 2, width_ra / 2 + spacing, spacing) / 60
log.info("Simulating RA scans...")
# RA scans
for i_r, delta_ra in enumerate(tqdm(delta_ras)):
start_ra = delta_ra / np.cos(np.radians(mean_dec)) + mean_ra
counts_clean = _standard_source_spectrum(
count_map(delta_ra, dec_array), nbin=nbin
)
baseline0 = _standard_bkg_spectrum(
_create_baseline(dec_array, baseline), nbin=nbin
)
baseline1 = _standard_bkg_spectrum(
_create_baseline(dec_array, baseline), nbin=nbin
)
counts0 = (
counts_clean
+ ra.normal(0, noise_amplitude, counts_clean.shape)
+ baseline0
)
counts1 = (
counts_clean
+ ra.normal(0, noise_amplitude, counts_clean.shape)
+ baseline1
)
if i_r % 2 != 0:
dec_array = dec_array[::-1]
other_keywords = {"RightAscension Offset": delta_ra}
save_scan(
times_dec,
np.zeros_like(dec_array) + start_ra,
dec_array + mean_dec,
{"Ch0": counts0, "Ch1": counts1 * channel_ratio},
other_keywords=other_keywords,
filename=os.path.join(outdir_dec, "Dec{}.fits".format(i_r)),
src_ra=mean_ra,
src_dec=mean_dec,
srcname=srcname,
)
if HAS_MPL and debug:
plt.plot(dec_array, counts0)
plt.plot(dec_array, counts1)
if HAS_MPL and debug:
fig.savefig(os.path.join(outdir_dec, "allscans_dec.png"))
plt.close(fig)
log.info("Creating summary...")
create_summary(
os.path.join(outdir_ra, "summary.fits"),
{
"RightAscension": np.radians(mean_ra),
"Declination": np.radians(mean_dec),
"Object": srcname,
},
)
if outdir_ra == outdir_dec:
return outdir_ra, outdir_ra
create_summary(
os.path.join(outdir_dec, "summary.fits"),
{
"RightAscension": np.radians(mean_ra),
"Declination": np.radians(mean_dec),
"Object": srcname,
},
)
return outdir_ra, outdir_dec
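# Illustrative usage sketch (not part of the original module; the helper name,
# output directory and numbers are arbitrary): a small, quick map with the
# default flat count map.
def _example_simulate_map():
    return simulate_map(
        dt=0.04,
        length_ra=30.0,
        length_dec=30.0,
        speed=4.0,
        spacing=2.0,
        noise_amplitude=1.0,
        outdir="sim_example/",
        baseline="flat",
        mean_ra=180,
        mean_dec=70,
        srcname="Dummy",
    )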
def main_simulate(args=None):
"""Preprocess the data."""
import argparse
description = "Simulate a single scan or a map with a point source."
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-s", "--source-flux", type=float, default=1, help="Source flux in Jy"
)
parser.add_argument(
"-n",
"--noise-amplitude",
type=float,
default=1,
help="White noise amplitude",
)
parser.add_argument(
"-b",
"--baseline",
type=str,
default="flat",
help='Baseline kind: "flat", "slope" (linearly '
'increasing/decreasing), "messy" '
"(random walk) or a number (which gives an "
"amplitude to the random-walk baseline, that "
'would be 20 for "messy")',
)
parser.add_argument(
"-g",
"--geometry",
nargs=4,
type=float,
default=[120, 120, 120, 120],
help="Geometry specification: length_ra, length_dec, "
"width_ra, width_dec, in arcmins. A square map of"
" 2 degrees would be specified as 120 120 120 "
"120. A cross-like map, 2x2 degrees wide but only"
" along 1-degree stripes, is specified as 120 120"
" 60 60",
)
parser.add_argument(
"--beam-width",
type=float,
default=2.5,
help="Gaussian beam width in arcminutes",
)
parser.add_argument(
"--spacing",
type=float,
default=0.5,
help="Spacing between scans in arcminutes " "(default 0.5)",
)
parser.add_argument(
"-o",
"--outdir-root",
type=str,
default="sim",
help="Output directory root. Here, source and "
"calibrator scans/maps will be saved in "
"outdir/gauss_ra, outdir/gauss_dec, "
"outdir/calibrator1, outdir/calibrator2, where "
"outdir is the outdir root",
)
parser.add_argument(
"--scan-speed",
type=float,
default=4.0,
help="Scan speed in arcminutes/second",
)
parser.add_argument(
"--integration-time",
type=float,
default=0.04,
help="Integration time in seconds",
)
parser.add_argument(
"--spectral-bins",
type=int,
default=1,
help="Simulate a spectrum with this number of bins",
)
parser.add_argument(
"--no-cal",
action="store_true",
default=False,
help="Don't simulate calibrators",
)
parser.add_argument(
"--sun",
action="store_true",
default=False,
help="Simulate a map of the Sun",
)
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Plot stuff and be verbose",
)
args = parser.parse_args(args)
def local_gauss_src_func(x, y):
return (
args.source_flux
* DEFAULT_PEAK_COUNTS
* _2d_gauss(x, y, sigma=args.beam_width / 60)
)
def calibrator_scan_func(x):
return DEFAULT_PEAK_COUNTS * _2d_gauss(
x, 0, sigma=args.beam_width / 60
)
if not args.no_cal:
cal1 = os.path.join(args.outdir_root, "calibrator1")
mkdir_p(cal1)
sim_crossscans(
5,
cal1,
scan_func=calibrator_scan_func,
channel_ratio=0.9,
baseline=args.baseline,
nbin=args.spectral_bins,
)
cal2 = os.path.join(args.outdir_root, "calibrator2")
mkdir_p(cal2)
sim_crossscans(
5,
cal2,
scan_func=calibrator_scan_func,
srcname="DummyCal2",
channel_ratio=0.9,
baseline=args.baseline,
nbin=args.spectral_bins,
)
func = simulate_map
count_map = local_gauss_src_func
if args.sun:
func = simulate_sun
count_map = _sun_map
func(
dt=args.integration_time,
length_ra=args.geometry[0],
length_dec=args.geometry[1],
speed=args.scan_speed,
spacing=args.spacing,
noise_amplitude=args.noise_amplitude,
width_ra=args.geometry[2],
width_dec=args.geometry[3],
outdir=(
os.path.join(args.outdir_root, "gauss_ra"),
os.path.join(args.outdir_root, "gauss_dec"),
),
baseline=args.baseline,
mean_ra=180,
mean_dec=70,
srcname="Dummy",
channel_ratio=0.9,
count_map=count_map,
nbin=args.spectral_bins,
)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def multi_head(heads, head_weights=None):
"""Creates a `_Head` for multi-objective learning.
This class merges the output of multiple `_Head` objects.
Specifically:
* For training, sums losses of each head, calls `train_op_fn` with this
final loss.
* For eval, merges metrics by adding `head.name` suffix to the keys in eval
metrics, such as `precision/head1`, `precision/head2`.
* For prediction, merges predictions and updates keys in prediction dict to a
2-tuple, `(head.name, prediction_key)`. Merges `export_outputs` such that
by default the first head is served.
Usage:
```python
# In `input_fn` specify labels as a dict keyed by head name:
def input_fn():
features = ...
labels1 = ...
labels2 = ...
return features, {'head1': labels1, 'head2': labels2}
# In `model_fn`, specify logits as a dict keyed by head name:
def model_fn(features, labels, mode):
# Create simple heads and specify head name.
head1 = multi_class_head(n_classes=3, name='head1')
head2 = binary_classification_head(name='head2')
# Create multi-head from two simple heads.
head = multi_head([head1, head2])
# Create logits for each head, and combine them into a dict.
logits1, logits2 = logit_fn()
logits = {'head1': logits1, 'head2': logits2}
# Return the merged EstimatorSpec
return head.create_estimator_spec(..., logits=logits, ...)
# Create an estimator with this model_fn.
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=100)
```
Also supports `logits` as a `Tensor` of shape
`[D0, D1, ... DN, logits_dimension]`. It will split the `Tensor` along the
last dimension and distribute it appropriately among the heads. E.g.:
```python
def model_fn(features, labels, mode):
# Create simple heads and specify head name.
head1 = multi_class_head(n_classes=3, name='head1')
head2 = binary_classification_head(name='head2')
# Create multi-head from two simple heads.
head = multi_head([head1, head2])
# Create logits for the multihead.
logits = logit_fn(logits_dimension=head.logits_dimension)
# Return the merged EstimatorSpec
return head.create_estimator_spec(..., logits=logits, ...)
```
Args:
heads: List or tuple of `_Head` instances. All heads must have `name`
specified. The first head in the list is the default used at serving time.
head_weights: Optional list of weights, same length as `heads`. Used when
merging losses to calculate the weighted sum of losses from each head. If
`None`, all losses are weighted equally.
Returns:
    An instance of `_Head` that merges multiple heads.
Raises:
ValueError: If `heads` is empty.
ValueError: If any of the `heads` does not have `name` specified.
ValueError: If `heads` and `head_weights` have different size.
"""
if head_weights:
if len(head_weights) != len(heads):
raise ValueError(
'heads and head_weights must have the same size. '
'Given len(heads): {}. Given len(head_weights): {}.'.format(
len(heads), len(head_weights)))
if not heads:
raise ValueError('Must specify heads. Given: {}'.format(heads))
for head in heads:
if not head.name:
raise ValueError(
'All given heads must have name specified. '
'Given: {}'.format(head))
return _MultiHead(
heads=tuple(heads),
head_weights=tuple(head_weights) if head_weights else tuple())
def _no_op_train_fn(loss):
del loss
return control_flow_ops.no_op()
def _merge_losses(losses, head_weights=None):
"""Merges the given losses into one tensor."""
losses = tuple(losses)
with ops.name_scope(
'merge_losses', values=losses + (head_weights or tuple())):
if head_weights:
weighted_losses = []
for loss, weight in zip(losses, head_weights):
weighted_losses.append(math_ops.multiply(loss, weight))
else:
weighted_losses = losses
return math_ops.add_n(weighted_losses)
def _default_export_output(export_outputs, head_name):
"""Extracts the default export output from the given export_outputs dict."""
if len(export_outputs) == 1:
return next(six.itervalues(export_outputs))
for k, v in six.iteritems(export_outputs):
if k == _DEFAULT_SERVING_KEY:
return v
raise ValueError(
'{} did not specify default export_outputs. '
'Given: {} '
'Suggested fix: Use one of the heads in tf.contrib.estimator, or include '
'key {} in export_outputs.'.format(
head_name, export_outputs, _DEFAULT_SERVING_KEY))
class _MultiHead(head_lib._Head): # pylint:disable=protected-access
"""`_Head` for multi objective learning."""
def __init__(self, heads, head_weights):
self._logits_dimension = 0
for head in heads:
self._logits_dimension += head.logits_dimension
self._heads = heads
self._head_weights = head_weights
@property
def name(self):
return '_'.join([h.name for h in self._heads])
@property
def logits_dimension(self):
return self._logits_dimension
def create_loss(self, features, mode, logits, labels):
"""See `Head`."""
if isinstance(logits, dict):
logits_dict = logits
else:
logits_dict = self._split_logits(logits)
weighted_sum_losses = []
example_weight_sums = []
labels_by_head = {}
for head in self._heads:
(weighted_sum_loss,
example_weight_sum, processed_labels) = head.create_loss(
features, mode, logits_dict[head.name], labels[head.name])
weighted_sum_losses.append(weighted_sum_loss)
example_weight_sums.append(example_weight_sum)
labels_by_head[head.name] = processed_labels
weighted_sum_losses = tuple(weighted_sum_losses)
with ops.name_scope('merge_losses',
values=weighted_sum_losses + (self._head_weights or
tuple())):
if self._head_weights:
head_weighted_losses = []
head_weighted_example_weight_sums = []
for loss, example_weight_sum, weight in zip(weighted_sum_losses,
example_weight_sums,
self._head_weights):
head_weighted_losses.append(math_ops.multiply(loss, weight))
head_weighted_example_weight_sums.append(math_ops.multiply(
example_weight_sum, weight))
merged_weighted_sum_loss = math_ops.add_n(head_weighted_losses)
merged_example_weight_sum = math_ops.add_n(
head_weighted_example_weight_sums)
else:
merged_weighted_sum_loss = math_ops.add_n(weighted_sum_losses)
merged_example_weight_sum = math_ops.add_n(example_weight_sums)
return head_lib.LossSpec(
weighted_sum_loss=merged_weighted_sum_loss,
example_weight_sum=merged_example_weight_sum,
processed_labels=labels_by_head)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""See `_Head`."""
if isinstance(logits, dict):
logits_dict = logits
else:
logits_dict = self._split_logits(logits)
if labels and not isinstance(labels, dict):
raise ValueError('labels must be a dict. Given: {}'.format(labels))
all_estimator_spec = []
for head in self._heads:
head_name = head.name
all_estimator_spec.append(
head.create_estimator_spec(
features=features,
mode=mode,
logits=logits_dict[head_name],
labels=labels[head_name] if labels else None,
train_op_fn=_no_op_train_fn))
if mode == model_fn.ModeKeys.TRAIN:
if train_op_fn is None:
raise ValueError('train_op_fn can not be None in TRAIN mode.')
spec = self._merge_train(all_estimator_spec, train_op_fn)
with ops.name_scope(''):
summary.scalar(metric_keys.MetricKeys.LOSS, spec.loss)
return spec
if mode == model_fn.ModeKeys.PREDICT:
return self._merge_predict(all_estimator_spec)
if mode == model_fn.ModeKeys.EVAL:
return self._merge_eval(all_estimator_spec)
raise ValueError('mode={} unrecognized'.format(mode))
def _split_logits(self, logits):
"""Splits logits along the last dimension and returns a dict."""
logits_dict = {}
with ops.name_scope(None, 'split_logits', values=[logits]):
logits = ops.convert_to_tensor(logits)
batch_shape = array_ops.shape(logits)[:-1]
zeros_like_batch_shape = array_ops.zeros_like(batch_shape)
minus_ones_like_batch_shape = -1 * array_ops.ones_like(batch_shape)
begin_idx = 0
for head in self._heads:
begin_tensor = array_ops.concat(
[zeros_like_batch_shape, [begin_idx]], axis=0)
size_tensor = array_ops.concat(
[minus_ones_like_batch_shape, [head.logits_dimension]], axis=0)
logits_dict[head.name] = array_ops.slice(
logits, begin=begin_tensor, size=size_tensor)
begin_idx += head.logits_dimension
return logits_dict
def _merge_train(self, all_estimator_spec, train_op_fn):
"""Merges list of `EstimatorSpec` for training.
Args:
all_estimator_spec: list of `EstimatorSpec` for the individual heads.
train_op_fn: Function to create train op. See `create_estimator_spec`
documentation for more details.
Returns:
`EstimatorSpec` that merges all heads for TRAIN.
"""
losses = []
metrics = {}
for spec in all_estimator_spec:
losses.append(spec.loss)
# Metric keys already contain head.name.
metrics.update(spec.eval_metric_ops or {})
loss = _merge_losses(losses, self._head_weights)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops=metrics)
def _merge_predict(self, all_estimator_spec):
"""Merges list of `EstimatorSpec` for prediction.
Args:
all_estimator_spec: list of `EstimatorSpec` for the individual heads.
Returns:
`EstimatorSpec` that merges all heads for PREDICT.
"""
predictions = {}
export_outputs = {
_DEFAULT_SERVING_KEY: _default_export_output(
all_estimator_spec[0].export_outputs,
self._heads[0].name),
}
for head, spec in zip(self._heads, all_estimator_spec):
head_name = head.name
for k, v in six.iteritems(spec.export_outputs):
if k == _DEFAULT_SERVING_KEY:
key = head_name
else:
key = '%s/%s' % (k, head_name)
export_outputs[key] = v
for k, v in six.iteritems(spec.predictions):
predictions[(head_name, k)] = v
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs=export_outputs)
def _merge_eval(self, all_estimator_spec):
"""Merges list of `EstimatorSpec` for eval.
Args:
all_estimator_spec: list of `EstimatorSpec` for the individual heads.
Returns:
`EstimatorSpec` that merges all heads for EVAL.
"""
predictions = {}
metrics = {}
losses = []
with ops.name_scope('merge_eval'):
for head, spec in zip(self._heads, all_estimator_spec):
losses.append(spec.loss)
head_name = head.name
# Loss metric is not added by default.
loss_name = head_lib._summary_key( # pylint:disable=protected-access
head_name, metric_keys.MetricKeys.LOSS)
metrics[loss_name] = metrics_lib.mean(spec.loss, name=loss_name)
# Metric keys already contain head.name.
metrics.update(spec.eval_metric_ops or {})
for k, v in six.iteritems(spec.predictions):
predictions[(head_name, k)] = v
loss = _merge_losses(losses, self._head_weights)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=loss,
eval_metric_ops=metrics)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Library with a variant of appengine_rpc using httplib2.
The httplib2 module offers some of the features in appengine_rpc, with
one important one being a simple integration point for OAuth2 integration.
"""
import cStringIO
import logging
import os
import re
import types
import urllib
import urllib2
import httplib2
from oauth2client import client
from oauth2client import file as oauth2client_file
from oauth2client import tools
from google.appengine.tools.value_mixin import ValueMixin
logger = logging.getLogger('google.appengine.tools.appengine_rpc')
class Error(Exception):
pass
class AuthPermanentFail(Error):
"""Authentication will not succeed in the current context."""
class MemoryCache(object):
"""httplib2 Cache implementation which only caches locally."""
def __init__(self):
self.cache = {}
def get(self, key):
return self.cache.get(key)
def set(self, key, value):
self.cache[key] = value
def delete(self, key):
self.cache.pop(key, None)
def RaiseHttpError(url, response_info, response_body, extra_msg=''):
"""Raise a urllib2.HTTPError based on an httplib2 response tuple."""
if response_body is not None:
stream = cStringIO.StringIO()
stream.write(response_body)
stream.seek(0)
else:
stream = None
if not extra_msg:
msg = response_info.reason
else:
msg = response_info.reason + ' ' + extra_msg
raise urllib2.HTTPError(url, response_info.status, msg, response_info, stream)
class HttpRpcServerHttpLib2(object):
"""A variant of HttpRpcServer which uses httplib2.
This follows the same interface as appengine_rpc.AbstractRpcServer,
but is a totally separate implementation.
"""
def __init__(self, host, auth_function, user_agent, source,
host_override=None, extra_headers=None, save_cookies=False,
auth_tries=None, account_type=None, debug_data=True, secure=True,
ignore_certs=False, rpc_tries=3):
"""Creates a new HttpRpcServerHttpLib2.
Args:
host: The host to send requests to.
auth_function: Saved but ignored; may be used by subclasses.
user_agent: The user-agent string to send to the server. Specify None to
omit the user-agent header.
source: Saved but ignored; may be used by subclasses.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request. Values
supplied here will override other default headers that are supplied.
save_cookies: Saved but ignored; may be used by subclasses.
auth_tries: The number of times to attempt auth_function before failing.
account_type: Saved but ignored; may be used by subclasses.
debug_data: Whether debugging output should include data contents.
secure: If the requests sent using Send should be sent over HTTPS.
ignore_certs: If the certificate mismatches should be ignored.
rpc_tries: The number of rpc retries upon http server error (i.e.
Response code >= 500 and < 600) before failing.
"""
self.host = host
self.auth_function = auth_function
self.user_agent = user_agent
self.source = source
self.host_override = host_override
self.extra_headers = extra_headers or {}
self.save_cookies = save_cookies
self.auth_tries = auth_tries
self.account_type = account_type
self.debug_data = debug_data
self.secure = secure
self.ignore_certs = ignore_certs
self.rpc_tries = rpc_tries
self.scheme = secure and 'https' or 'http'
self.certpath = None
self.cert_file_available = False
if not self.ignore_certs:
self.certpath = os.path.normpath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'lib', 'cacerts',
'cacerts.txt'))
self.cert_file_available = os.path.exists(self.certpath)
self.memory_cache = MemoryCache()
def _Authenticate(self, http, saw_error):
"""Pre or Re-auth stuff...
Args:
http: An 'Http' object from httplib2.
saw_error: If the user has already tried to contact the server.
If they have, it's OK to prompt them. If not, we should not be asking
        them for auth info--it's possible it'll succeed w/o auth.
"""
raise NotImplementedError()
def Send(self, request_path, payload='',
content_type='application/octet-stream',
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
Raises:
AuthPermanentFail: If authorization failed in a permanent way.
urllib2.HTTPError: On most HTTP errors.
"""
self.http = httplib2.Http(
cache=self.memory_cache, ca_certs=self.certpath,
disable_ssl_certificate_validation=(not self.cert_file_available))
self.http.follow_redirects = False
self.http.timeout = timeout
url = '%s://%s%s' % (self.scheme, self.host, request_path)
if kwargs:
url += '?' + urllib.urlencode(sorted(kwargs.items()))
headers = {}
if self.extra_headers:
headers.update(self.extra_headers)
headers['X-appcfg-api-version'] = '1'
if payload is not None:
method = 'POST'
headers['content-length'] = str(len(payload))
headers['Content-Type'] = content_type
else:
method = 'GET'
if self.host_override:
headers['Host'] = self.host_override
tries = 0
auth_tries = [0]
def NeedAuth():
"""Marker that we need auth; it'll actually be tried next time around."""
auth_tries[0] += 1
if auth_tries[0] > self.auth_tries:
RaiseHttpError(url, response_info, response, 'Too many auth attempts.')
while tries < self.rpc_tries:
tries += 1
self._Authenticate(self.http, auth_tries[0] > 0)
logger.debug('Sending request to %s headers=%s body=%s',
url, headers,
self.debug_data and payload or payload and 'ELIDED' or '')
try:
response_info, response = self.http.request(
url, method=method, body=payload, headers=headers)
except client.AccessTokenRefreshError, e:
logger.info('Got access token error', exc_info=1)
response_info = httplib2.Response({'status': 401})
response_info.reason = str(e)
response = ''
status = response_info.status
if status == 200:
return response
logger.debug('Got http error %s, this is try #%s',
response_info.status, tries)
if status == 401:
NeedAuth()
continue
elif status >= 500 and status < 600:
continue
elif status == 302:
loc = response_info.get('location')
logger.debug('Got 302 redirect. Location: %s', loc)
if (loc.startswith('https://www.google.com/accounts/ServiceLogin') or
re.match(r'https://www.google.com/a/[a-z0-9.-]+/ServiceLogin',
loc)):
NeedAuth()
continue
elif loc.startswith('http://%s/_ah/login' % (self.host,)):
RaiseHttpError(url, response_info, response,
'dev_appserver login not supported')
else:
RaiseHttpError(url, response_info, response,
'Unexpected redirect to %s' % loc)
else:
logger.debug('Unexpected results: %s', response_info)
RaiseHttpError(url, response_info, response,
'Unexpected HTTP status %s' % status)
logging.info('Too many retries for url %s', url)
RaiseHttpError(url, response_info, response)
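# Illustrative sketch (not part of the original module; the class name is
# ours): the smallest concrete subclass, which attaches no credentials at all.
# It only shows where _Authenticate fits in; real callers should prefer
# HttpRpcServerOAuth2 below.
class NoAuthHttpRpcServer(HttpRpcServerHttpLib2):
  """Example RPC server that sends every request unauthenticated."""
  def _Authenticate(self, http, saw_error):
    """No-op: requests are sent without attaching credentials."""
    pass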
class NoStorage(client.Storage):
"""A no-op implementation of storage."""
def locked_get(self):
return None
def locked_put(self, credentials):
pass
class HttpRpcServerOAuth2(HttpRpcServerHttpLib2):
"""A variant of HttpRpcServer which uses oauth2.
This variant is specifically meant for interactive command line usage,
as it will attempt to open a browser and ask the user to enter
information from the resulting web page.
"""
class OAuth2Parameters(ValueMixin):
"""Class encapsulating parameters related to OAuth2 authentication."""
def __init__(self, access_token, client_id, client_secret, scope,
refresh_token, credential_file, token_uri=None):
self.access_token = access_token
self.client_id = client_id
self.client_secret = client_secret
self.scope = scope
self.refresh_token = refresh_token
self.credential_file = credential_file
self.token_uri = token_uri
def __init__(self, host, oauth2_parameters, user_agent, source,
host_override=None, extra_headers=None, save_cookies=False,
auth_tries=None, account_type=None, debug_data=True, secure=True,
ignore_certs=False, rpc_tries=3):
"""Creates a new HttpRpcServerOAuth2.
Args:
host: The host to send requests to.
oauth2_parameters: An object of type OAuth2Parameters (defined above)
that specifies all parameters related to OAuth2 authentication. (This
replaces the auth_function parameter in the parent class.)
user_agent: The user-agent string to send to the server. Specify None to
omit the user-agent header.
source: Saved but ignored.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request. Values
supplied here will override other default headers that are supplied.
save_cookies: If the refresh token should be saved.
auth_tries: The number of times to attempt auth_function before failing.
account_type: Ignored.
debug_data: Whether debugging output should include data contents.
secure: If the requests sent using Send should be sent over HTTPS.
ignore_certs: If the certificate mismatches should be ignored.
rpc_tries: The number of rpc retries upon http server error (i.e.
Response code >= 500 and < 600) before failing.
"""
super(HttpRpcServerOAuth2, self).__init__(
host, None, user_agent, source, host_override=host_override,
extra_headers=extra_headers, auth_tries=auth_tries,
debug_data=debug_data, secure=secure, ignore_certs=ignore_certs,
rpc_tries=rpc_tries)
if not isinstance(oauth2_parameters, self.OAuth2Parameters):
raise TypeError('oauth2_parameters must be an OAuth2Parameters.')
self.oauth2_parameters = oauth2_parameters
if save_cookies:
oauth2_credential_file = (oauth2_parameters.credential_file
or '~/.appcfg_oauth2_tokens')
self.storage = oauth2client_file.Storage(
os.path.expanduser(oauth2_credential_file))
else:
self.storage = NoStorage()
if any((oauth2_parameters.access_token, oauth2_parameters.refresh_token,
oauth2_parameters.token_uri)):
token_uri = (oauth2_parameters.token_uri or
('https://%s/o/oauth2/token' %
os.getenv('APPENGINE_AUTH_SERVER', 'accounts.google.com')))
self.credentials = client.OAuth2Credentials(
oauth2_parameters.access_token,
oauth2_parameters.client_id,
oauth2_parameters.client_secret,
oauth2_parameters.refresh_token,
None,
token_uri,
self.user_agent)
else:
self.credentials = self.storage.get()
def _Authenticate(self, http, needs_auth):
"""Pre or Re-auth stuff...
This will attempt to avoid making any OAuth related HTTP connections or
user interactions unless it's needed.
Args:
http: An 'Http' object from httplib2.
needs_auth: If the user has already tried to contact the server.
If they have, it's OK to prompt them. If not, we should not be asking
        them for auth info--it's possible it'll succeed w/o auth, but if we have
some credentials we'll use them anyway.
Raises:
AuthPermanentFail: The user has requested non-interactive auth but
the token is invalid.
"""
if needs_auth and (not self.credentials or self.credentials.invalid):
if self.oauth2_parameters.access_token:
logger.debug('_Authenticate skipping auth because user explicitly '
'supplied an access token.')
raise AuthPermanentFail('Access token is invalid.')
if self.oauth2_parameters.refresh_token:
logger.debug('_Authenticate skipping auth because user explicitly '
'supplied a refresh token.')
raise AuthPermanentFail('Refresh token is invalid.')
if self.oauth2_parameters.token_uri:
logger.debug('_Authenticate skipping auth because user explicitly '
'supplied a Token URI, for example for service account '
'authentication with Compute Engine')
raise AuthPermanentFail('Token URI did not yield a valid token: ' +
                                self.oauth2_parameters.token_uri)
logger.debug('_Authenticate requesting auth')
flow = client.OAuth2WebServerFlow(
client_id=self.oauth2_parameters.client_id,
client_secret=self.oauth2_parameters.client_secret,
scope=_ScopesToString(self.oauth2_parameters.scope),
user_agent=self.user_agent)
self.credentials = tools.run(flow, self.storage)
if self.credentials and not self.credentials.invalid:
if not self.credentials.access_token_expired or needs_auth:
logger.debug('_Authenticate configuring auth; needs_auth=%s',
needs_auth)
self.credentials.authorize(http)
return
logger.debug('_Authenticate skipped auth; needs_auth=%s', needs_auth)
def _ScopesToString(scopes):
"""Converts scope value to a string."""
if isinstance(scopes, types.StringTypes):
return scopes
else:
return ' '.join(scopes)
|
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Module performs Unit Tests for the ARMA class.
It can not be considered part of the active code but of the regression test system
"""
import xml.etree.ElementTree as ET
import sys, os
from scipy import stats
import pickle as pk
import numpy as np
import pandas as pd
import xarray as xr
# find location of crow, message handler
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)]+[os.pardir]*4+['framework'])))
sys.path.append(frameworkDir)
from utils.utils import find_crow
find_crow(frameworkDir)
from utils import randomUtils
import MessageHandler
# message handler
mh = MessageHandler.MessageHandler()
mh.initialize({'verbosity':'debug', 'callerLength':10, 'tagLength':10})
# input specs come mostly from the Models.ROM
from Models import ROM
# find location of ARMA
from SupervisedLearning import ARMA
print('Module undergoing testing:')
print(ARMA)
print('')
def createElement(tag,attrib=None,text=None):
"""
Method to create a dummy xml element readable by the distribution classes
@ In, tag, string, the node tag
@ In, attrib, dict, optional, the attribute of the xml node
    @ In, text, str, optional, the text content of the xml node
"""
if attrib is None:
attrib = {}
if text is None:
text = ''
element = ET.Element(tag,attrib)
element.text = text
return element
results = {"pass":0,"fail":0}
def checkFloat(comment,value,expected,tol=1e-10,update=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
if np.isnan(value) and np.isnan(expected):
res = True
elif np.isnan(value) or np.isnan(expected):
res = False
else:
res = abs(value - expected) <= tol
if update:
if not res:
print("checking float",comment,'|',value,"!=",expected)
results["fail"] += 1
else:
results["pass"] += 1
return res
def checkTrue(comment,res,update=True):
"""
This method is a pass-through for consistency and updating
@ In, comment, string, a comment printed out if it fails
@ In, res, bool, the tested value
@ In, update, bool, optional, if False then don't update results counter
    @ Out, res, bool, True if the test passes
"""
if update:
if res:
results["pass"] += 1
else:
print("checking bool",comment,'|',res,'is not True!')
results["fail"] += 1
return res
def checkSame(comment,value,expected,update=True):
"""
This method is aimed to compare two identical things
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = value == expected
if update:
if res:
results["pass"] += 1
else:
print("checking string",comment,'|',value,"!=",expected)
results["fail"] += 1
return res
def checkArray(comment,first,second,dtype,tol=1e-10,update=True):
"""
This method is aimed to compare two arrays
@ In, comment, string, a comment printed out if it fails
    @ In, first, array-like, the value to compare
    @ In, second, array-like, the expected value
    @ In, dtype, type, the type of the entries (float or str)
    @ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = True
if len(first) != len(second):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for i in range(len(first)):
if dtype == float:
pres = checkFloat('',first[i],second[i],tol,update=False)
elif dtype in (str,unicode):
pres = checkSame('',first[i],second[i],update=False)
if not pres:
print('checking array',comment,'|','entry "{}" does not match: {} != {}'.format(i,first[i],second[i]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkRlz(comment,first,second,tol=1e-10,update=True,skip=None):
"""
This method is aimed to compare two realization
@ In, comment, string, a comment printed out if it fails
@ In, first, dict, the first dict, the "calculated" value -> should be as obtained from the data object
@ In, second, dict, the second dict, the "expected" value -> should be as a realization submitted
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ In, skip, list, optional, keywords not to check
@ Out, res, bool, True if same
"""
if skip is None:
skip = []
res = True
if abs(len(first) - len(second)) > len(skip):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for key,val in first.items():
if key in skip:
continue
if isinstance(val,(float,int)):
pres = checkFloat('',val,second[key][0],tol,update=False)
elif isinstance(val,(str,unicode)):
pres = checkSame('',val,second[key][0],update=False)
elif isinstance(val,np.ndarray):
if isinstance(val[0],(float,int)):
          pres = np.abs(val - second[key]).sum() < 1e-20 # necessary due to roundoff
else:
pres = val == second[key]
elif isinstance(val,xr.DataArray):
if isinstance(val.item(0),(float,int)):
          pres = np.abs(val - second[key]).sum() < 1e-20 # necessary due to roundoff
else:
pres = val.equals(second[key])
else:
raise TypeError(type(val))
if not pres:
print('checking dict',comment,'|','entry "{}" does not match: {} != {}'.format(key,first[key],second[key]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkNone(comment,entry,update=True):
"""
Checks if entry is None.
@ In, comment, string, a comment printed out if it fails
@ In, entry, object, to test if against None
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if None
"""
res = entry is None
if update:
if res:
results["pass"] += 1
else:
print("checking answer",comment,'|','"{}" is not None!'.format(entry))
results["fail"] += 1
def checkFails(comment,errstr,function,update=True,args=None,kwargs=None):
"""
Checks if expected error occurs
@ In, comment, string, a comment printed out if it fails
@ In, errstr, str, expected fail message
@ In, function, method, method to run to test for failure
@ In, update, bool, optional, if False then don't update results counter
@ In, args, list, arguments to pass to function
@ In, kwargs, dict, keyword arguments to pass to function
@ Out, res, bool, True if failed as expected
"""
print('Error testing ...')
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
function(*args,**kwargs)
res = False
msg = 'Function call did not error!'
except Exception as e:
res = checkSame('',e.args[0],errstr,update=False)
if not res:
msg = 'Unexpected error message. \n Received: "{}"\n Expected: "{}"'.format(e.args[0],errstr)
if update:
if res:
results["pass"] += 1
print(' ... end Error testing (PASSED)')
else:
print("checking error",comment,'|',msg)
results["fail"] += 1
print(' ... end Error testing (FAILED)')
print('')
return res
######################################
# CONSTRUCTION #
######################################
def createARMAXml(targets, pivot, p, q, fourier=None):
if fourier is None:
fourier = []
xml = createElement('ROM',attrib={'name':'test', 'subType':'ARMA'})
xml.append(createElement('Target',text=','.join(targets+[pivot])))
xml.append(createElement('Features',text='scaling'))
xml.append(createElement('pivotParameter',text=pivot))
xml.append(createElement('P',text=str(p)))
xml.append(createElement('Q',text=str(q)))
if len(fourier):
xml.append(createElement('Fourier',text=','.join(str(f) for f in fourier)))
return xml
def createFromXML(xml):
inputSpec = ROM.getInputSpecification(xml)
rom = ROM()
rom._readMoreXML(xml)
arma = rom.supervisedContainer[0]
return rom, arma
def createARMA(targets, pivot, p, q, fourier=None):
xml = createARMAXml(targets, pivot, p, q, fourier)
rom, arma = createFromXML(xml)
return rom, arma
rom, arma = createARMA(['a','b'], 't', 6, 3, [86400,43200])
# TODO confirmation testing for correct construction
#############################################
# CDF, STATS OPERATIONS #
#############################################
def makeCDF(data, bins=70):
if bins is None:
bins = int(np.sqrt(len(data)+0.5))
# actually makes pdf and cdf, returns both
counts, edges = np.histogram(data, bins=bins, density=False)
counts = np.array(counts) / float(len(data))
return (edges, counts*bins), (edges, np.insert(np.cumsum(counts),0,0))
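# Illustrative sketch (not part of the original test; the helper name is ours):
# makeCDF follows np.histogram, so with B bins it returns B+1 edges, B pdf
# values, and B+1 cdf values (a leading zero is inserted in the cdf).
def _exampleMakeCDF():
  sample = np.random.normal(size=1000)
  (pdfEdges, pdfVals), (cdfEdges, cdfVals) = makeCDF(sample, bins=70)
  assert len(pdfEdges) == len(cdfEdges) == len(cdfVals) == len(pdfVals) + 1
  return pdfVals, cdfVals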
def plotCDF(edges, bins, ax, label, color, alpha=1.0):
for e,edge in enumerate(edges[:-1]):
if e == 0:
label = label
else:
label = None
ax.plot([edge,edges[e+1]], [bins[e],bins[e+1]], '.-', color=color, label=label, alpha=alpha)
def plotPDF(edges, bins, ax, label, color, s='.', alpha=1.0):
# like a pdf, with error bars for bin width
mids = 0.5*(edges[1:]+edges[:-1])
lows = edges[:-1]
highs = edges[1:]
ax.errorbar( mids, bins, xerr=[mids-lows, highs-mids], fmt=s+'-', color=color, label=label, alpha=alpha)
# Enabling plotting will help visualize the signals that are tested
# in the event they fail tests. Plotting should not be enabled in
# the regression system at this point.
plotting = False
if plotting:
import matplotlib.pyplot as plt
fig, (ax,ax2) = plt.subplots(1, 2, figsize=(16,12))
N = int(1e4)
# NOTE: evaluating is slow for 1e5, and very slow at 1e6
# Reference distribution of the data, skewed low (lognormal; a beta alternative is commented out below)
dist = stats.lognorm(0.3)
#dist = stats.beta(2.0, 3.0)
if plotting:
x = np.linspace(dist.ppf(0.001),dist.ppf(0.999),N)
pdf = dist.pdf(x)
cdf = dist.cdf(x)
ax.plot(x,cdf,'k-',label='true beta', lw=3)
ax2.plot(x,pdf,'k-',label='true beta', lw=3)
# random samples
data=pd.read_csv("signal.csv")
data=data.e_demand.values
if plotting:
opdf, ocdf = makeCDF(data)
plotCDF(ocdf[0], ocdf[1], ax, 'data', 'C0')
plotPDF(opdf[0], opdf[1], ax2, 'data', 'C0', s='x')
# characterize
params = arma._trainCDF(data)
if plotting:
ebins = params['bins']
ecdf = params['cdf']
epdf = params['pdf']
plotCDF(ebins, ecdf, ax, 'empirical', 'C1')
plotPDF(ebins, epdf, ax2, 'empirical', 'C1')
# gaussian for reference
if plotting:
gauss = stats.norm()
gx = np.linspace(-3,3,N)
gpdf = gauss.pdf(gx)
gcdf = gauss.cdf(gx)
ax.plot(gx,gcdf,'k:',label='true normal', lw=3)
ax2.plot(gx,gpdf,'k:',label='true normal', lw=3)
# gaussianize it
normed = arma._normalizeThroughCDF(data,params)
if plotting:
npdf, ncdf = makeCDF(normed)
plotCDF(ncdf[0], ncdf[1], ax, 'normed', 'C2')
plotPDF(npdf[0], npdf[1], ax2, 'normed', 'C2')
# undo gaussian
denormed = arma._denormalizeThroughCDF(normed, params)
if plotting:
dpdf, dcdf = makeCDF(denormed)
plotCDF(dcdf[0], dcdf[1], ax, 'denormed', 'C3')
plotPDF(dpdf[0], dpdf[1], ax2, 'denormed', 'C3')
# pre-normalized and post-normalized should be the same
delta = np.abs(data - denormed)
checkArray('CDF/ICDF consistency',data,denormed,float,tol=1e-12,update=True)
if plotting:
ax.legend(loc=0)
ax2.legend(loc=0)
ax.set_title('CDF')
ax2.set_title('PDF')
plt.show()
# train ARMA on data and check CDFs of results
rom, arma = createARMA(['a'], 't', 0, 0, [])
featureVals = np.zeros(1)
targetVals = np.zeros([1,len(data),2])
# "a"
targetVals[0,:,0] = data
# "t"
t = np.arange(len(data))
targetVals[0,:,1] = t
arma.__trainLocal__(featureVals,targetVals)
nsamp = 10
samples = np.zeros([nsamp,len(data)])
for n in range(nsamp):
ev = arma.__evaluateLocal__(np.array([1.0]))
samples[n,:] = ev['a']
# Enabling plotting will help visualize the signals that are tested
# in the event they fail tests. Plotting should not be enabled in
# the regression system at this point.
plotting = False
if plotting:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
figC, (axC1,axC2) = plt.subplots(1,2)
# samples
ax.plot(t, data, 'k-', label='original')
ostats = (np.average(data), np.std(data))
for n in range(nsamp):
  # use a local name, so the scipy ``stats`` module imported above is not shadowed
  sampStats = (np.average(samples[n,:]), np.std(samples[n,:]))
  checkFloat('Mean, sample {}'.format(n), ostats[0], sampStats[0], tol=3e-1)
  checkFloat('Std, sample {}'.format(n), ostats[1], sampStats[1], tol=6e-1)
if plotting:
ax.plot(t, samples[n,:], '-', color='C1', label='sample', alpha=0.2)
pdf,cdf = makeCDF(samples[n,:])
# PDF/CDF
plotPDF(pdf[0], pdf[1], axC2, 'sample'+str(n), 'C1', alpha=0.3)
plotCDF(cdf[0], cdf[1], axC1, 'sample'+str(n), 'C1', alpha=0.3)
if plotting:
# original
pdf,cdf = makeCDF(data)
plotPDF(pdf[0], pdf[1], axC2, 'original', 'k')
plotCDF(cdf[0], cdf[1], axC1, 'original', 'k')
ax.legend(loc=0)
plt.show()
#############################################
# RESEEDCOPIES, ENGINE #
#############################################
testVal=arma._trainARMA(data)
arma.amITrained=True
signal1=arma._generateARMASignal(testVal)
signal2=arma._generateARMASignal(testVal)
#Test the reseed = False
armaReF=arma
armaReF.reseedCopies=False
pklReF=pk.dumps(armaReF)
unpkReF=pk.loads(pklReF)
#signal 3 and 4 should be the same
signal3=armaReF._generateARMASignal(testVal)
signal4=unpkReF._generateARMASignal(testVal)
for n in range(len(data)):
checkFloat('signal 3, signal 4 ind{}'.format(n), signal3[n], signal4[n], tol=1e-5)
#Test the reseed = True
arma.reseedCopies=True
pklReT=pk.dumps(arma)
unpkReT=pk.loads(pklReT)
#signal 5 and 6 should not be the same
signal5=arma._generateARMASignal(testVal)
signal6=unpkReT._generateARMASignal(testVal)
for n in range(len(data)):
checkTrue('signal 5, signal 6 ind{}'.format(n),signal5[n]!=signal6[n])
# Test the engine with seed
eng=randomUtils.newRNG()
arma.setEngine(eng,seed=901017,count=0)
signal7=arma._generateARMASignal(testVal)
sig7=[0.39975177, -0.14531468, 0.13138866, -0.56565224, 0.06020252,
0.60752306, -0.29076173, -1.1758456, 0.41108591, -0.05735384]
for n in range(10):
checkFloat('signal 7, evaluation ind{}'.format(n), signal7[n], sig7[n], tol=1e-7)
#################
# TODO UNTESTED #
#################
# - Segmented
# - VARMA construction
# - Analytic VARMA/ARMA variances
# - Fourier analytic coefficients
# - Signal Reconstruction
print(results)
sys.exit(results["fail"])
"""
<TestInfo>
    <name>framework.test_ARMA</name>
    <author>talbpaul</author>
    <created>2017-10-20</created>
    <classesTested>ARMA</classesTested>
    <description>
       This test is a Unit Test for the ARMA class.
</description>
</TestInfo>
"""
|
|
#!/usr/bin/env python3
# Author: Robert Collazo <rob@helpsocial.com>
# Author: Jacob Schofield <jacob@helpsocial.com>
# Copyright (c) 2017 HelpSocial, Inc.
# See LICENSE for details
import getpass
import os
import sys
try:
import ujson as json
except ImportError:
import json
from argparse import ArgumentParser
from os.path import abspath, dirname, join as path_join
from time import time
# add the helpsocial directory to the path
# so that we can import it more cleanly
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from helpsocial import RestConnectClient, StreamingConnectClient
from helpsocial.exceptions import ApiException
from helpsocial.hooks import RequestPrinter, ResponsePrinter, StreamResponsePrinter
from helpsocial.utils import data_get
from helpsocial.routing.dispatcher import Dispatcher
from helpsocial.routing.worker import ConsolePrintWorker
def read_config(path):
"""Parse the json configuration file found at `path` into
a python dictionary.
:type path: string
:param path: Absolute path to the configuration file.
:rtype: dict
:return: The configuration file parsed into a dictionary.
"""
if not os.path.exists(path):
        raise IOError('{} does not exist.'.format(path))
with open(path, 'r') as file_:
return json.load(file_)
def authenticate(config):
"""Use the provided configuration to retrieve
the users auth token.
:type config: dict
:param config: a dictionary configuration object.
    :rtype: string
:return: the specified user's auth token.
"""
if data_get(config, 'stream.user_token') is not None:
return data_get(config, 'stream.user_token')
client = RestConnectClient(
data_get(config, 'auth.auth_scope'),
data_get(config, 'auth.api_key')
)
username = data_get(config, 'stream.username')
if username is None:
username = input('username: ')
password = data_get(config, 'stream.password')
if password is None:
password = getpass.getpass('password: ')
return data_get(client.authenticate(username, password), 'value')
def authorize_sse_stream(config_path):
"""Demo the retrieval of a SSE Stream authorization code.
:type config_path: string
:param config_path:
"""
config = read_config(config_path)
user_token = authenticate(config)
client = RestConnectClient(
data_get(config, 'auth.auth_scope'),
data_get(config, 'auth.api_key'),
user_token=user_token,
request_hooks=[RequestPrinter()],
response_hooks=[ResponsePrinter()]
)
authorization = client.get_sse_authorization()
print('\n\nRetrieved authorization token for user.')
print('Authorization: ' + authorization)
def sse_stream(config_path, authorization=None, ttl=None,
last_event_id=None, event_types=None):
"""Demo reading from a stream of server sent events. The events
are printed to the console using a `ConsolePrintWorker`. The demo can
    be stopped by issuing a keyboard interrupt or any other kill signal.
:type config_path: string
:param config_path:
:type authorization: string
:param authorization: SSE authorization token.
:type ttl: int
:param ttl: the stream time to live, after which it will disconnect automatically
:type last_event_id: int
:param last_event_id: Last event processed
:type event_types: list
:param event_types: event types to stream.
"""
config = read_config(config_path)
dispatcher = Dispatcher(ConsolePrintWorker())
user_token = data_get(config, 'auth.user_token', authenticate(config))
client = StreamingConnectClient(
data_get(config, 'auth.auth_scope'),
data_get(config, 'auth.api_key'),
dispatcher,
user_token=user_token,
host=data_get(config, 'api.host'),
ssl=data_get(config, 'api.ssl'),
request_hooks=[RequestPrinter()],
response_hooks=[StreamResponsePrinter()]
)
params = {
'last_event_id': last_event_id,
'event_types': event_types
}
try:
if authorization is None:
authorization = RestConnectClient(
data_get(config, 'auth.auth_scope'),
data_get(config, 'auth.api_key'),
user_token=user_token
).get_sse_authorization()
start = time()
client.sse(authorization, params=params, async=True)
        forever = ttl is None or ttl < 0
while client.is_alive():
if not forever and time() > (start + ttl):
break
except ApiException:
pass
except KeyboardInterrupt:
# We ignore the keyboard interrupt - The user sent it,
# and knows they sent it
pass
finally:
# Tell the client to stop the underlying
# stream thread.
client.shutdown()
def activity_stream(client, async=True, from_=None, to=None,
posted_by=None, type_id=None, network_id=None):
"""Open the activity JSON stream.
:type client: StreamingConnectClient
:param client:
:type async: bool
:param async:
:type from_: string
:param from_:
:type to: string
:param to:
:type posted_by: int
:param posted_by:
:type type_id: int
:param type_id:
:type network_id: int
:param network_id:
"""
params = {
'from': from_,
'to': to,
'posted_by': posted_by,
'type_id': type_id,
'network_id': network_id,
}
return client.activities(params=params, async=async)
def conversation_stream(client, async=True, from_=None, to=None,
last_conversation_id=None, with_closed=None):
"""Open the conversation JSON stream.
:type client: StreamingConnectClient
:param client:
:type async: bool
:param async:
:type from_: string
:param from_:
:type to: string
:param to:
:type last_conversation_id: int
:param last_conversation_id:
:type with_closed: bool
:param with_closed:
"""
params = {
'from': from_,
'to': to,
'last_conversation_id': last_conversation_id,
'with_closed': with_closed
}
return client.conversations(params=params, async=async)
def event_stream(client, async=True, from_=None, to=None,
last_event_id=None, event_types=None):
"""Open the event JSON stream.
:type client: StreamingConnectClient
:param client:
:type async: bool
:param async:
:type from_: string
:param from_:
:type to: string
:param to:
:type last_event_id: int
:param last_event_id: Last event processed
:type event_types: list
:param event_types: event types to stream (only available when selecting the events stream)
"""
params = {
'from': from_,
'to': to,
'last_event_id': last_event_id,
'event_types': event_types
}
return client.events(params=params, async=async)
def json_stream(stream, config_path, ttl=None, **kwargs):
"""Demo reading from a json stream. Each json entity is
printed to the console using a `ConsolePrintWorker`. The demo can
    be stopped by issuing a keyboard interrupt or any other kill signal.
:type stream: string
:param stream:
:type config_path: string
:param config_path:
:type ttl: int
:param ttl: the stream time to live, after which it will disconnect automatically
"""
config = read_config(config_path)
dispatcher = Dispatcher(ConsolePrintWorker())
user_token = data_get(config, 'auth.user_token', authenticate(config))
client = StreamingConnectClient(
data_get(config, 'auth.auth_scope'),
data_get(config, 'auth.api_key'),
dispatcher,
user_token=user_token,
host=data_get(config, 'api.host'),
ssl=data_get(config, 'api.ssl'),
request_hooks=[RequestPrinter()],
response_hooks=[StreamResponsePrinter()]
)
def default_stream(*args, **kwargs):
raise RuntimeError('Invalid stream option.')
streams = {
'activity': activity_stream,
'conversation': conversation_stream,
'events': event_stream,
}
try:
stream = streams.get(stream, default_stream)
start = time()
stream(client, async=True, **kwargs)
        forever = ttl is None or ttl < 0
while client.is_alive():
if not forever and time() > (start + ttl):
break
except ApiException:
pass
except KeyboardInterrupt:
pass
finally:
# Tell the client to stop the underlying
# stream thread.
client.shutdown()
class Command(object):
"""Wraps the command line argument parser, handling the stream example
command options.
usage:
    .. code-block:: python
command = Command()
command(sys.argv[1:])
"""
def __init__(self):
self._parser = ArgumentParser(
prog='stream',
description='Begin streaming the specified stream.'
)
self._configure()
def __call__(self, options):
args = vars(self._parser.parse_args(options))
func = args['func']
del args['func']
if 'from' in args:
args['from_'] = args['from']
del args['from']
func(**args)
def _configure(self):
self._subparsers = self._parser.add_subparsers(description='select stream command.')
self._activities_subparser()
self._conversations_subparser()
self._events_subparser()
self._sse_stream_subparser()
self._sse_authorization_subparser()
@staticmethod
def _add_ttl_option(parser):
"""Add the time time live option.
:type parser: argparse.ArgumentParser
:param parser:
"""
parser.add_argument('--ttl',
help='Control the length of time the stream will '
'run for in seconds. By default it will run '
                                 'until cancelled.',
type=int,
default=-1)
@staticmethod
def _add_config_option(parser):
"""Add the config option
:type parser: argparse.ArgumentParser
:param parser:
"""
parser.add_argument('--config',
dest='config_path',
help='Path to configuration file.',
default=path_join(dirname(abspath(__file__)), '.config.json'))
@staticmethod
def _add_default_stream_opts(parser):
Command._add_ttl_option(parser)
Command._add_config_option(parser)
@staticmethod
def _add_bounded_stream_opts(parser):
parser = parser.add_subparsers().add_parser(
'bounded',
description='Create a stream bounded between two points in time. [FROM, TO]'
)
parser.add_argument('from', metavar='FROM', help='Stream start (inclusive).')
parser.add_argument('to', metavar='TO', help='Stream stop (exclusive).')
def _sse_authorization_subparser(self):
parser = self._subparsers.add_parser('authorize-sse-stream',
description='Get authorization code for sse stream.')
Command._add_config_option(parser)
parser.set_defaults(func=authorize_sse_stream)
def _sse_stream_subparser(self):
parser = self._subparsers.add_parser('sse',
description='Stream Server Sent Events.')
Command._add_default_stream_opts(parser)
parser.add_argument('--authorization', help='SSE stream authorization code.')
parser.add_argument('--last-event-id',
type=int,
help='The id of the last event processed.')
parser.add_argument('--event-types',
nargs='*',
type=int,
help='Filter to specific event type(s).')
parser.set_defaults(func=sse_stream)
def _activities_subparser(self):
parser = self._subparsers.add_parser('activities',
description='Stream activity json.')
Command._add_default_stream_opts(parser)
Command._add_bounded_stream_opts(parser)
parser.set_defaults(func=json_stream, stream='activity')
def _conversations_subparser(self):
parser = self._subparsers.add_parser('conversations',
description='Stream conversation json.')
Command._add_default_stream_opts(parser)
Command._add_bounded_stream_opts(parser)
parser.set_defaults(func=json_stream, stream='conversation')
def _events_subparser(self):
parser = self._subparsers.add_parser('events',
description='Stream event json.')
Command._add_default_stream_opts(parser)
Command._add_bounded_stream_opts(parser)
parser.add_argument('--last-event-id',
type=int,
help='The id of the last event processed.')
parser.add_argument('--event-types',
nargs='*',
type=int,
help='Filter to specific event type(s).')
parser.set_defaults(func=json_stream, stream='events')
if __name__ == '__main__':
cmd = Command()
cmd(sys.argv[1:])
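# Example invocations (a sketch; subcommand and option names come from the
# argparse configuration above, and the file name stream.py is an assumption):
#   python stream.py authorize-sse-stream --config .config.json
#   python stream.py sse --ttl 60 --event-types 1 2
#   python stream.py events --last-event-id 100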
|
|
import numpy as np
import os
class ARLagResults(object):
"""
Results are from R vars::VARselect for sunspot data.
Comands run were
var_select <- VARselect(SUNACTIVITY, lag.max=16, type=c("const"))
"""
def __init__(self, type="const"):
# order of results is AIC, HQ, SC, FPE
if type == "const":
ic = [6.311751824815273, 6.321813007357017, 6.336872456958734,
551.009492543133547, 5.647615009344886, 5.662706783157502,
5.685295957560077, 283.614444209634655, 5.634199640773091,
5.654322005856580, 5.684440905060013, 279.835333966272003,
5.639415797766900, 5.664568754121261, 5.702217378125553,
281.299267441683185, 5.646102475432464, 5.676286023057697,
5.721464371862848, 283.187210932784524, 5.628416873122441,
5.663631012018546, 5.716339085624555, 278.223839284844701,
5.584204185137150, 5.624448915304128, 5.684686713710994,
266.191975554941564, 5.541163244029505, 5.586438565467356,
5.654206088675081, 254.979353737235556, 5.483155367013447,
5.533461279722170, 5.608758527730753, 240.611088468544949,
5.489939895595428, 5.545276399575022, 5.628103372384465,
242.251199397394288, 5.496713895370946, 5.557080990621412,
5.647437688231713, 243.900349905069504, 5.503539311586831,
5.568936998108170, 5.666823420519329, 245.573823561989144,
5.510365149977393, 5.580793427769605, 5.686209574981622,
247.259396991133599, 5.513740912139918, 5.589199781203001,
5.702145653215877, 248.099655693709479, 5.515627471325321,
5.596116931659277, 5.716592528473011, 248.572915484827206,
5.515935627515806, 5.601455679120634, 5.729461000735226,
248.654927915301300]
self.ic = np.asarray(ic).reshape(4,-1, order='F')
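        # Indexing sketch (an assumption about layout, not part of the quoted
        # results): with order='F' each consecutive group of four values fills a
        # column, so row 0 holds AIC, row 1 HQ, row 2 SC, row 3 FPE, and column
        # p-1 corresponds to lag order p, e.g. self.ic[0, 2] is the AIC at lag 3.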
class ARResultsOLS(object):
"""
Results of fitting an AR(9) model to the sunspot data.
Results were taken from Stata using the var command.
"""
def __init__(self, constant=True):
self.avobs = 300.
if constant:
self.params = [ 6.7430535917332, 1.1649421971129, -.40535742259304,
-.16653934246587, .14980629416032, -.09462417064796,
.00491001240749, .0504665930841, -.08635349190816,
.25349103194757]
# These are returned by stata VAR, using the (V)AR scale/sigma
# we return the true OLS bse by default
            # the Stata residuals can be achieved by np.sqrt(np.diag(res1.cov_params()))
self.bse_stata = [2.413485601, .0560359041, .0874490762,
.0900894414, .0899348339, .0900100797,
.0898385666, .0896997939, .0869773089,
.0559505756]
            # The values below are from gretl's ARIMA command with conditional maximum likelihood
self.bse_gretl = [2.45474, 0.0569939, 0.0889440, 0.0916295,
0.0914723, 0.0915488, 0.0913744, 0.0912332,
0.0884642, 0.0569071]
self.rmse = 15.1279294937327
self.fpe = 236.4827257929261
self.llf = -1235.559128419549
#NOTE: we use a different definition of these ic than Stata
# but our order selection results agree with R VARselect
# close to Stata for Lutkepohl but we penalize the ic for the trend terms
# self.bic = 8.427186938618863
# self.aic = 8.30372752279699
# self.hqic = 8.353136159250697
#NOTE: predictions were taken from gretl, but agree with Stata
# test predict
#TODO: remove one of the files
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"AROLSConstantPredict.csv")
predictresults = np.loadtxt(filename)
fv = predictresults[:300,0]
pv = predictresults[300:,1]
pv_lb = predictresults[300:,2]
pv_ub = predictresults[300:,3]
pv_se = predictresults[300:,4]
del predictresults
# cases - in sample predict
# n = -1, start = 0 (fitted values)
self.FVOLSnneg1start0 = fv
# n=-1, start=9
self.FVOLSnneg1start9 = fv
# n=-1, start=100
self.FVOLSnneg1start100 = fv[100-9:]
# n = 200, start = 0
self.FVOLSn200start0 = fv[:192]
# n = 200, start = 200
self.FVOLSn200start200 = np.hstack((fv[200-9:],pv[:101-9]))
# n = 200, start = -109 use above
self.FVOLSn200startneg109 = self.FVOLSn200start200
# n = 100, start = 325, post-sample forecasting
self.FVOLSn100start325 = np.hstack((fv[-1],pv))
# n = 301, start = 9
self.FVOLSn301start9 = np.hstack((fv,pv[:2]))
# n = 301, start = 0
self.FVOLSdefault = fv
# n = 4, start = 312
self.FVOLSn4start312 = np.hstack((fv[-1],pv[:8]))
# n = 15, start = 312
self.FVOLSn15start312 = np.hstack((fv[-1],pv[:19]))
elif not constant:
self.params = [1.19582389902985, -0.40591818219637,
-0.15813796884843, 0.16620079925202,
-0.08570200254617, 0.01876298948686,
0.06130211910707, -0.08461507700047,
0.27995084653313]
self.bse_stata = [.055645055, .088579237, .0912031179, .0909032462,
.0911161784, .0908611473, .0907743174, .0880993504,
.0558560278]
self.bse_gretl = [0.0564990, 0.0899386, 0.0926027, 0.0922983,
0.0925145, 0.0922555, 0.0921674, 0.0894513,
0.0567132]
self.rmse = 15.29712618677774
self.sigma = 226.9820074869752
self.llf = -1239.41217278661
# See note above
# self.bic = 8.433861292817106
# self.hqic = 8.367215591385756
# self.aic = 8.322747818577421
self.fpe = 241.0221316614273
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"AROLSNoConstantPredict.csv")
predictresults = np.loadtxt(filename)
fv = predictresults[:300,0]
pv = predictresults[300:,1]
pv_lb = predictresults[300:,2]
pv_ub = predictresults[300:,3]
pv_se = predictresults[300:,4]
del predictresults
# cases - in sample predict
# n = -1, start = 0 (fitted values)
self.FVOLSnneg1start0 = fv
# n=-1, start=9
self.FVOLSnneg1start9 = fv
# n=-1, start=100
self.FVOLSnneg1start100 = fv[100-9:]
# n = 200, start = 0
self.FVOLSn200start0 = fv[:192]
# n = 200, start = 200
self.FVOLSn200start200 = np.hstack((fv[200-9:],pv[:101-9]))
# n = 200, start = -109 use above
self.FVOLSn200startneg109 = self.FVOLSn200start200
# n = 100, start = 325, post-sample forecasting
self.FVOLSn100start325 = np.hstack((fv[-1],pv))
# n = 301, start = 9
self.FVOLSn301start9 = np.hstack((fv,pv[:2]))
# n = 301, start = 0
self.FVOLSdefault = fv
# n = 4, start = 312
self.FVOLSn4start312 = np.hstack((fv[-1],pv[:8]))
# n = 15, start = 312
self.FVOLSn15start312 = np.hstack((fv[-1],pv[:19]))
class ARResultsMLE(object):
"""
Results of fitting an AR(9) model to the sunspot data using exact MLE.
Results were taken from gretl.
"""
def __init__(self, constant=True):
self.avobs = 300
if constant:
# NOTE: Stata's estimated parameters differ from gretl
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"ARMLEConstantPredict.csv")
predictresults = np.loadtxt(filename, delimiter=",")
year = predictresults[:,0]
pv = predictresults[:,1]
# cases - in sample predict
# start = 0 (fitted values)
self.FVMLEdefault = pv[:309]
# start=9
self.FVMLEstart9end308 = pv[9:309]
# start=100, end=309
self.FVMLEstart100end308 = pv[100:309]
# start = 0, end
self.FVMLEstart0end200 = pv[:201]
# n = 200, start = 200
self.FVMLEstart200end334 = pv[200:]
# start = 309, end=334 post-sample forecasting
self.FVMLEstart308end334 = pv[308:]
# end = 310, start = 9
self.FVMLEstart9end309 = pv[9:310]
# end = 301, start = 0
self.FVMLEstart0end301 = pv[:302]
# end = 312, start = 4
self.FVMLEstart4end312 = pv[4:313]
# end = 7, start = 2
self.FVMLEstart2end7 = pv[2:8]
else:
pass
|
|
import ipaddress
import os
import threading
from enum import Enum
from functools import lru_cache
from pathlib import Path
from typing import Iterable, Callable, Optional, Tuple, List, Any, BinaryIO
import certifi
from OpenSSL.crypto import X509
from cryptography.hazmat.primitives.asymmetric import rsa
from OpenSSL import SSL, crypto
from mitmproxy import certs
# redeclared here for strict type checking
class Method(Enum):
TLS_SERVER_METHOD = SSL.TLS_SERVER_METHOD
TLS_CLIENT_METHOD = SSL.TLS_CLIENT_METHOD
try:
SSL._lib.TLS_server_method # type: ignore
except AttributeError as e: # pragma: no cover
raise RuntimeError("Your installation of the cryptography Python package is outdated.") from e
class Version(Enum):
UNBOUNDED = 0
SSL3 = SSL.SSL3_VERSION
TLS1 = SSL.TLS1_VERSION
TLS1_1 = SSL.TLS1_1_VERSION
TLS1_2 = SSL.TLS1_2_VERSION
TLS1_3 = SSL.TLS1_3_VERSION
class Verify(Enum):
VERIFY_NONE = SSL.VERIFY_NONE
VERIFY_PEER = SSL.VERIFY_PEER
DEFAULT_MIN_VERSION = Version.TLS1_2
DEFAULT_MAX_VERSION = Version.UNBOUNDED
DEFAULT_OPTIONS = (
SSL.OP_CIPHER_SERVER_PREFERENCE
| SSL.OP_NO_COMPRESSION
)
class MasterSecretLogger:
def __init__(self, filename: Path):
self.filename = filename.expanduser()
self.f: Optional[BinaryIO] = None
self.lock = threading.Lock()
# required for functools.wraps, which pyOpenSSL uses.
__name__ = "MasterSecretLogger"
def __call__(self, connection: SSL.Connection, keymaterial: bytes) -> None:
with self.lock:
if self.f is None:
self.filename.parent.mkdir(parents=True, exist_ok=True)
self.f = self.filename.open("ab")
self.f.write(b"\n")
self.f.write(keymaterial + b"\n")
self.f.flush()
def close(self):
with self.lock:
if self.f is not None:
self.f.close()
def make_master_secret_logger(filename: Optional[str]) -> Optional[MasterSecretLogger]:
if filename:
return MasterSecretLogger(Path(filename))
return None
log_master_secret = make_master_secret_logger(
os.getenv("MITMPROXY_SSLKEYLOGFILE") or os.getenv("SSLKEYLOGFILE")
)
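# Usage sketch: exporting SSLKEYLOGFILE (or MITMPROXY_SSLKEYLOGFILE) before
# start-up, e.g. SSLKEYLOGFILE=~/.mitmproxy/sslkeylogfile.txt, makes the callback
# above append TLS key material in the NSS key log format, which tools such as
# Wireshark can use to decrypt captured traffic. The path shown is illustrative.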
def _create_ssl_context(
*,
method: Method,
min_version: Version,
max_version: Version,
cipher_list: Optional[Iterable[str]],
) -> SSL.Context:
context = SSL.Context(method.value)
ok = SSL._lib.SSL_CTX_set_min_proto_version(context._context, min_version.value) # type: ignore
ok += SSL._lib.SSL_CTX_set_max_proto_version(context._context, max_version.value) # type: ignore
if ok != 2:
raise RuntimeError(
f"Error setting TLS versions ({min_version=}, {max_version=}). "
"The version you specified may be unavailable in your libssl."
)
# Options
context.set_options(DEFAULT_OPTIONS)
# Cipher List
if cipher_list is not None:
try:
context.set_cipher_list(b":".join(x.encode() for x in cipher_list))
except SSL.Error as e:
raise RuntimeError("SSL cipher specification error: {e}") from e
# SSLKEYLOGFILE
if log_master_secret:
context.set_keylog_callback(log_master_secret)
return context
@lru_cache(256)
def create_proxy_server_context(
*,
min_version: Version,
max_version: Version,
cipher_list: Optional[Tuple[str, ...]],
verify: Verify,
hostname: Optional[str],
ca_path: Optional[str],
ca_pemfile: Optional[str],
client_cert: Optional[str],
alpn_protos: Optional[Tuple[bytes, ...]],
) -> SSL.Context:
context: SSL.Context = _create_ssl_context(
method=Method.TLS_CLIENT_METHOD,
min_version=min_version,
max_version=max_version,
cipher_list=cipher_list,
)
if verify is not Verify.VERIFY_NONE and hostname is None:
raise ValueError("Cannot validate certificate hostname without SNI")
context.set_verify(verify.value, None)
if hostname is not None:
assert isinstance(hostname, str)
# Manually enable hostname verification on the context object.
# https://wiki.openssl.org/index.php/Hostname_validation
param = SSL._lib.SSL_CTX_get0_param(context._context) # type: ignore
# Matching on the CN is disabled in both Chrome and Firefox, so we disable it, too.
# https://www.chromestatus.com/feature/4981025180483584
SSL._lib.X509_VERIFY_PARAM_set_hostflags( # type: ignore
param,
SSL._lib.X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS | SSL._lib.X509_CHECK_FLAG_NEVER_CHECK_SUBJECT # type: ignore
)
try:
ip: bytes = ipaddress.ip_address(hostname).packed
except ValueError:
SSL._openssl_assert( # type: ignore
SSL._lib.X509_VERIFY_PARAM_set1_host(param, hostname.encode(), 0) == 1 # type: ignore
)
else:
SSL._openssl_assert( # type: ignore
SSL._lib.X509_VERIFY_PARAM_set1_ip(param, ip, len(ip)) == 1 # type: ignore
)
if ca_path is None and ca_pemfile is None:
ca_pemfile = certifi.where()
try:
context.load_verify_locations(ca_pemfile, ca_path)
except SSL.Error as e:
raise RuntimeError(f"Cannot load trusted certificates ({ca_pemfile=}, {ca_path=}).") from e
# Client Certs
if client_cert:
try:
context.use_privatekey_file(client_cert)
context.use_certificate_chain_file(client_cert)
except SSL.Error as e:
raise RuntimeError(f"Cannot load TLS client certificate: {e}") from e
if alpn_protos:
# advertise application layer protocols
context.set_alpn_protos(alpn_protos)
return context
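# Call sketch for create_proxy_server_context (values are illustrative
# assumptions; every argument is keyword-only and hashable, which is what lets
# the lru_cache above reuse contexts across identical configurations):
#   ctx = create_proxy_server_context(
#       min_version=Version.TLS1_2, max_version=Version.UNBOUNDED,
#       cipher_list=None, verify=Verify.VERIFY_PEER, hostname="example.com",
#       ca_path=None, ca_pemfile=None, client_cert=None,
#       alpn_protos=(b"h2", b"http/1.1"),
#   )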
@lru_cache(256)
def create_client_proxy_context(
*,
min_version: Version,
max_version: Version,
cipher_list: Optional[Tuple[str, ...]],
cert: certs.Cert,
key: rsa.RSAPrivateKey,
chain_file: Optional[Path],
alpn_select_callback: Optional[Callable[[SSL.Connection, List[bytes]], Any]],
request_client_cert: bool,
extra_chain_certs: Tuple[certs.Cert, ...],
dhparams: certs.DHParams,
) -> SSL.Context:
context: SSL.Context = _create_ssl_context(
method=Method.TLS_SERVER_METHOD,
min_version=min_version,
max_version=max_version,
cipher_list=cipher_list,
)
context.use_certificate(cert.to_pyopenssl())
context.use_privatekey(crypto.PKey.from_cryptography_key(key))
if chain_file is not None:
try:
context.load_verify_locations(str(chain_file), None)
except SSL.Error as e:
raise RuntimeError(f"Cannot load certificate chain ({chain_file}).") from e
if alpn_select_callback is not None:
assert callable(alpn_select_callback)
context.set_alpn_select_callback(alpn_select_callback)
if request_client_cert:
# The request_client_cert argument requires some explanation. We're
# supposed to be able to do this with no negative effects - if the
# client has no cert to present, we're notified and proceed as usual.
# Unfortunately, Android seems to have a bug (tested on 4.2.2) - when
# an Android client is asked to present a certificate it does not
# have, it hangs up, which is frankly bogus. Some time down the track
# we may be able to make the proper behaviour the default again, but
# until then we're conservative.
context.set_verify(Verify.VERIFY_PEER.value, accept_all)
else:
context.set_verify(Verify.VERIFY_NONE.value, None)
for i in extra_chain_certs:
context.add_extra_chain_cert(i.to_pyopenssl())
if dhparams:
SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams) # type: ignore
return context
def accept_all(
conn_: SSL.Connection,
x509: X509,
errno: int,
err_depth: int,
is_cert_verified: int,
) -> bool:
# Return true to prevent cert verification error
return True
def is_tls_record_magic(d):
"""
Returns:
True, if the passed bytes start with the TLS record magic bytes.
False, otherwise.
"""
d = d[:3]
# TLS ClientHello magic, works for SSLv3, TLSv1.0, TLSv1.1, TLSv1.2, and TLSv1.3
# http://www.moserware.com/2009/06/first-few-milliseconds-of-https.html#client-hello
# https://tls13.ulfheim.net/
return (
len(d) == 3 and
d[0] == 0x16 and
d[1] == 0x03 and
0x0 <= d[2] <= 0x03
)
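# Illustrative check (a sketch, not executed here): a TLS ClientHello record
# starts with the content-type byte 0x16 followed by a 0x03 0x0X version, so
# is_tls_record_magic(b"\x16\x03\x03") returns True while b"GET" returns False.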
|
|
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import getpass
import json
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common import pagination
from trove.common.stream_codecs import JsonCodec
from trove.common import utils as utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.couchdb import system
from trove.guestagent.datastore import service
from trove.guestagent.db import models
from trove.guestagent import pkg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
packager = pkg.Package()
COUCHDB_LIB_DIR = "/var/lib/couchdb"
COUCHDB_LOG_DIR = "/var/log/couchdb"
COUCHDB_CONFIG_DIR = "/etc/couchdb"
COUCHDB_BIN_DIR = "/var/run/couchdb"
class CouchDBApp(object):
"""
Handles installation and configuration of CouchDB
on a Trove instance.
"""
def __init__(self, status, state_change_wait_time=None):
"""
Sets default status and state_change_wait_time.
"""
self.state_change_wait_time = (
state_change_wait_time if state_change_wait_time else
CONF.state_change_wait_time
)
LOG.debug("state_change_wait_time = %s." % self.state_change_wait_time)
self.status = status
def install_if_needed(self, packages):
"""
Install CouchDB if needed, do nothing if it is already installed.
"""
LOG.info(_('Preparing guest as a CouchDB server.'))
if not packager.pkg_is_installed(packages):
LOG.debug("Installing packages: %s." % str(packages))
packager.pkg_install(packages, {}, system.TIME_OUT)
LOG.info(_("Finished installing CouchDB server."))
def change_permissions(self):
"""
When CouchDB is installed, a default user 'couchdb' is created.
        In order to start/stop/restart CouchDB service as the current
OS user, add the current OS user to the 'couchdb' group and provide
read/write access to the 'couchdb' group.
"""
try:
LOG.debug("Changing permissions.")
for dir in [COUCHDB_LIB_DIR, COUCHDB_LOG_DIR,
COUCHDB_BIN_DIR, COUCHDB_CONFIG_DIR]:
operating_system.chown(dir, 'couchdb', 'couchdb', as_root=True)
operating_system.chmod(dir, FileMode.ADD_GRP_RW, as_root=True)
operating_system.change_user_group(getpass.getuser(), 'couchdb',
as_root=True)
LOG.debug("Successfully changed permissions.")
except exception.ProcessExecutionError:
LOG.exception(_("Error changing permissions."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
self.status.stop_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time,
disable_on_boot=do_not_start_on_reboot, update_db=update_db)
def start_db(self, update_db=False):
self.status.start_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time,
enable_on_boot=True, update_db=update_db)
def restart(self):
self.status.restart_db_service(
system.SERVICE_CANDIDATES, self.state_change_wait_time)
def make_host_reachable(self):
try:
LOG.debug("Changing bind address to 0.0.0.0 .")
self.stop_db()
out, err = utils.execute_with_timeout(
system.UPDATE_BIND_ADDRESS, shell=True
)
self.start_db()
except exception.ProcessExecutionError:
LOG.exception(_("Error while trying to update bind address of"
" CouchDB server."))
def start_db_with_conf_changes(self, config_contents):
'''
Will not be implementing configuration change API for CouchDB in
the Kilo release. Currently all that this method does is to start
the CouchDB server without any configuration changes. Looks like
this needs to be implemented to enable volume resize on the guest
agent side.
'''
LOG.info(_("Starting CouchDB with configuration changes."))
self.start_db(True)
def store_admin_password(self, password):
LOG.debug('Storing the admin password.')
creds = CouchDBCredentials(username=system.COUCHDB_ADMIN_NAME,
password=password)
creds.write(system.COUCHDB_ADMIN_CREDS_FILE)
return creds
def create_admin_user(self, password):
'''
Creating the admin user, os_admin, for the couchdb instance
'''
LOG.debug('Creating the admin user.')
creds = self.store_admin_password(password)
out, err = utils.execute_with_timeout(
system.COUCHDB_CREATE_ADMIN % {'password': creds.password},
shell=True)
LOG.debug('Created admin user.')
def secure(self):
'''
Create the Trove admin user.
The service should not be running at this point.
'''
self.start_db(update_db=False)
password = utils.generate_random_password()
self.create_admin_user(password)
LOG.debug("CouchDB secure complete.")
@property
def admin_password(self):
creds = CouchDBCredentials()
creds.read(system.COUCHDB_ADMIN_CREDS_FILE)
return creds.password
class CouchDBAppStatus(service.BaseDbStatus):
"""
Handles all of the status updating for the CouchDB guest agent.
We can verify that CouchDB is running by running the command:
curl http://127.0.0.1:5984/
The response will be similar to:
{"couchdb":"Welcome","version":"1.6.0"}
"""
def _get_actual_db_status(self):
try:
out, err = utils.execute_with_timeout(
system.COUCHDB_SERVER_STATUS, shell=True
)
LOG.debug("CouchDB status = %r" % out)
server_status = json.loads(out)
status = server_status["couchdb"]
if status == 'Welcome':
LOG.debug("Status of CouchDB is active.")
return rd_instance.ServiceStatuses.RUNNING
else:
LOG.debug("Status of CouchDB is not active.")
return rd_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError:
LOG.exception(_("Error getting CouchDB status."))
return rd_instance.ServiceStatuses.SHUTDOWN
class CouchDBAdmin(object):
'''Handles administrative functions on CouchDB.'''
# user is cached by making it a class attribute
admin_user = None
def _admin_user(self):
if not type(self).admin_user:
creds = CouchDBCredentials()
creds.read(system.COUCHDB_ADMIN_CREDS_FILE)
user = models.CouchDBUser(creds.username, creds.password)
type(self).admin_user = user
return type(self).admin_user
def _is_modifiable_user(self, name):
if name in cfg.get_ignored_users():
return False
elif name == system.COUCHDB_ADMIN_NAME:
return False
return True
def _is_modifiable_database(self, name):
return name not in cfg.get_ignored_dbs()
def create_user(self, users):
LOG.debug("Creating user(s) for accessing CouchDB database(s).")
self._admin_user()
try:
for item in users:
user = models.CouchDBUser.deserialize_user(item)
try:
LOG.debug("Creating user: %s." % user.name)
utils.execute_with_timeout(
system.CREATE_USER_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
                     'username': user.name,
'password': user.password},
shell=True)
except exception.ProcessExecutionError as pe:
LOG.exception(_("Error creating user: %s.") % user.name)
pass
for database in user.databases:
mydb = models.CouchDBSchema.deserialize_schema(database)
try:
LOG.debug("Granting user: %s access to database: %s."
% (user.name, mydb.name))
out, err = utils.execute_with_timeout(
system.GRANT_ACCESS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': mydb.name,
'username': user.name},
shell=True)
except exception.ProcessExecutionError as pe:
LOG.debug("Error granting user: %s access to"
"database: %s." % (user.name, mydb.name))
LOG.debug(pe)
pass
except exception.ProcessExecutionError as pe:
LOG.exception(_("An error occurred creating users: %s.") %
pe.message)
pass
def delete_user(self, user):
LOG.debug("Delete a given CouchDB user.")
couchdb_user = models.CouchDBUser.deserialize_user(user)
db_names = self.list_database_names()
for db in db_names:
userlist = []
try:
out, err = utils.execute_with_timeout(
system.DB_ACCESS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': db},
shell=True)
except exception.ProcessExecutionError:
LOG.debug(
"Error while trying to get the users for database: %s." %
db)
continue
evalout = ast.literal_eval(out)
if evalout:
members = evalout['members']
names = members['names']
for i in range(0, len(names)):
couchdb_user.databases = db
userlist.append(names[i])
if couchdb_user.name in userlist:
userlist.remove(couchdb_user.name)
out2, err2 = utils.execute_with_timeout(
system.REVOKE_ACCESS_COMMAND % {
'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': db,
'username': userlist},
shell=True)
try:
out2, err = utils.execute_with_timeout(
system.DELETE_REV_ID %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password},
shell=True)
evalout2 = ast.literal_eval(out2)
rows = evalout2['rows']
userlist = []
for i in range(0, len(rows)):
row = rows[i]
username = "org.couchdb.user:" + couchdb_user.name
if row['key'] == username:
rev = row['value']
revid = rev['rev']
utils.execute_with_timeout(
system.DELETE_USER_COMMAND % {
'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'username': couchdb_user.name,
'revid': revid},
shell=True)
except exception.ProcessExecutionError as pe:
LOG.exception(_(
"There was an error while deleting user: %s.") % pe)
raise exception.GuestError(_("Unable to delete user: %s.") %
couchdb_user.name)
def list_users(self, limit=None, marker=None, include_marker=False):
'''List all users and the databases they have access to.'''
users = []
db_names = self.list_database_names()
try:
out, err = utils.execute_with_timeout(
system.ALL_USERS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password},
shell=True)
except exception.ProcessExecutionError:
LOG.debug("Error while trying to get list of all couchdb users")
evalout = ast.literal_eval(out)
rows = evalout['rows']
userlist = []
for i in range(0, len(rows)):
row = rows[i]
uname = row['key']
if not self._is_modifiable_user(uname):
break
elif uname[17:]:
userlist.append(uname[17:])
for i in range(len(userlist)):
user = models.CouchDBUser(userlist[i])
for db in db_names:
try:
out2, err = utils.execute_with_timeout(
system.DB_ACCESS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': db},
shell=True)
except exception.ProcessExecutionError:
LOG.debug(
"Error while trying to get users for database: %s."
% db)
continue
evalout2 = ast.literal_eval(out2)
if evalout2:
members = evalout2['members']
names = members['names']
for i in range(0, len(names)):
if user.name == names[i]:
user.databases = db
users.append(user.serialize())
next_marker = None
return users, next_marker
def get_user(self, username, hostname):
'''Get Information about the given user.'''
LOG.debug('Getting user %s.' % username)
user = self._get_user(username, hostname)
if not user:
return None
return user.serialize()
def _get_user(self, username, hostname):
user = models.CouchDBUser(username)
db_names = self.list_database_names()
for db in db_names:
try:
out, err = utils.execute_with_timeout(
system.DB_ACCESS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': db},
shell=True)
except exception.ProcessExecutionError:
LOG.debug(
"Error while trying to get the users for database: %s." %
db)
continue
evalout = ast.literal_eval(out)
if evalout:
members = evalout['members']
names = members['names']
for i in range(0, len(names)):
if user.name == names[i]:
user.databases = db
return user
def grant_access(self, username, databases):
if self._get_user(username, None).name != username:
raise exception.BadRequest(_(
                'Cannot grant access for non-existent user: '
'%(user)s') % {'user': username})
else:
user = models.CouchDBUser(username)
if not self._is_modifiable_user(user.name):
LOG.warning(_('Cannot grant access for reserved user '
'%(user)s') % {'user': username})
if not user:
raise exception.BadRequest(_(
                'Cannot grant access for reserved or non-existent user '
'%(user)s') % {'user': username})
for db_name in databases:
out, err = utils.execute_with_timeout(
system.GRANT_ACCESS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': db_name,
'username': username},
shell=True)
def revoke_access(self, username, database):
userlist = []
if self._is_modifiable_user(username):
out, err = utils.execute_with_timeout(
system.DB_ACCESS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': database},
shell=True)
evalout = ast.literal_eval(out)
members = evalout['members']
names = members['names']
for i in range(0, len(names)):
userlist.append(names[i])
if username in userlist:
userlist.remove(username)
out2, err2 = utils.execute_with_timeout(
system.REVOKE_ACCESS_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': database,
'username': userlist},
shell=True)
def list_access(self, username, hostname):
'''Returns a list of all databases which the user has access to'''
user = self._get_user(username, hostname)
return user.databases
def enable_root(self, root_pwd=None):
'''Create admin user root'''
root_user = models.CouchDBRootUser(password=root_pwd)
out, err = utils.execute_with_timeout(
system.ENABLE_ROOT %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'password': root_pwd},
shell=True)
return root_user.serialize()
def is_root_enabled(self):
'''Check if user root exists'''
out, err = utils.execute_with_timeout(
system.IS_ROOT_ENABLED %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password},
shell=True)
evalout = ast.literal_eval(out)
if evalout['root']:
return True
else:
return False
def create_database(self, databases):
'''Create the given database(s).'''
dbName = None
db_create_failed = []
LOG.debug("Creating CouchDB databases.")
for database in databases:
dbName = models.CouchDBSchema.deserialize_schema(database).name
if self._is_modifiable_database(dbName):
LOG.debug('Creating CouchDB database %s' % dbName)
try:
utils.execute_with_timeout(
system.CREATE_DB_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': dbName},
shell=True)
except exception.ProcessExecutionError:
LOG.exception(_(
"There was an error creating database: %s.") % dbName)
db_create_failed.append(dbName)
pass
else:
LOG.warning(_('Cannot create database with a reserved name '
'%(db)s') % {'db': dbName})
db_create_failed.append(dbName)
if len(db_create_failed) > 0:
LOG.exception(_("Creating the following databases failed: %s.") %
db_create_failed)
def list_database_names(self):
'''Get the list of database names.'''
out, err = utils.execute_with_timeout(
system.LIST_DB_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password},
shell=True)
        dbnames_list = ast.literal_eval(out)
for hidden in cfg.get_ignored_dbs():
if hidden in dbnames_list:
dbnames_list.remove(hidden)
return dbnames_list
def list_databases(self, limit=None, marker=None, include_marker=False):
'''Lists all the CouchDB databases.'''
databases = []
db_names = self.list_database_names()
pag_dblist, marker = pagination.paginate_list(db_names, limit, marker,
include_marker)
databases = [models.CouchDBSchema(db_name).serialize()
for db_name in pag_dblist]
LOG.debug('databases = ' + str(databases))
return databases, marker
def delete_database(self, database):
'''Delete the specified database.'''
dbName = models.CouchDBSchema.deserialize_schema(database).name
if self._is_modifiable_database(dbName):
try:
LOG.debug("Deleting CouchDB database: %s." % dbName)
utils.execute_with_timeout(
system.DELETE_DB_COMMAND %
{'admin_name': self._admin_user().name,
'admin_password': self._admin_user().password,
'dbname': dbName},
shell=True)
except exception.ProcessExecutionError:
LOG.exception(_(
"There was an error while deleting database:%s.") % dbName)
raise exception.GuestError(_("Unable to delete database: %s.")
% dbName)
else:
LOG.warning(_('Cannot delete a reserved database '
'%(db)s') % {'db': dbName})
class CouchDBCredentials(object):
"""Handles storing/retrieving credentials. Stored as json in files"""
def __init__(self, username=None, password=None):
self.username = username
self.password = password
def read(self, filename):
credentials = operating_system.read_file(filename, codec=JsonCodec())
self.username = credentials['username']
self.password = credentials['password']
def write(self, filename):
self.clear_file(filename)
credentials = {'username': self.username,
'password': self.password}
operating_system.write_file(filename, credentials, codec=JsonCodec())
operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW)
@staticmethod
def clear_file(filename):
LOG.debug("Creating clean file %s" % filename)
if operating_system.file_discovery([filename]):
operating_system.remove(filename)
# force file creation by just opening it
open(filename, 'wb')
operating_system.chmod(filename,
operating_system.FileMode.SET_USR_RW,
as_root=True)
|
|
from __future__ import print_function
import unittest
import warnings
warnings.resetwarnings()
warnings.simplefilter("ignore")
class TestBasic(unittest.TestCase):
def setUp(self):
import warnings
super().setUp()
warnings.simplefilter('ignore')
def test_table(self):
from rowgenerators.rowpipe import Table
t = Table('foobar')
t.add_column('i1',datatype='int')
t.add_column('i2', valuetype='int')
t.add_column('i3', valuetype='measure/int')
t.add_column('f1',datatype='float')
t.add_column('f2', valuetype='float')
t.add_column('f3', valuetype='measure/float')
self.assertEqual(6, len(list(t)))
for c in t:
print(c)
def test_expand_transform_1(self):
from rowgenerators.rowpipe import Table
from rowgenerators.rowpipe import RowProcessor
from contexttimer import Timer
from itertools import zip_longest
def doubleit(v):
return int(v) * 2
env = {
'doubleit': doubleit
}
t = Table('extable')
t.add_column('id', datatype='int')
t.add_column('b', datatype='int')
t.add_column('v1', datatype='int', transform='^row.a')
t.add_column('v2', datatype='int', transform='row.v1;doubleit')
t.add_column('v3', datatype='int', transform='^row.a;doubleit')
for c in t:
print('---',c)
for i, tr in enumerate(c.expanded_transform):
print(' ',i, len(list(tr)), list(tr))
headers = ['stage'] + list(c.name for c in t)
table = [[i] + [ tr.str(i) for tr in stage ] for i, stage in enumerate(t.stage_transforms)]
from tabulate import tabulate
print (tabulate(table, headers, tablefmt="rst"))
        N = 10  # number of rows Source yields
        class Source(object):
headers = 'a b'.split()
def __iter__(self):
for i in range(N):
yield i, 2*i
rp = RowProcessor(Source(), t, env=env, code_path='/tmp/rowgenerators/test_transform.py')
def test_expand_transform_2(self):
from rowgenerators.rowpipe import Table
from rowgenerators.rowpipe import RowProcessor
from contexttimer import Timer
from itertools import zip_longest
def doubleit(v):
return int(v) * 2
env = {
'doubleit': doubleit
}
t = Table('extable')
t.add_column('id', datatype='int')
t.add_column('v4', datatype='float', transform='^row.a;doubleit;doubleit')
t.add_column('v5', datatype='int', transform='^row.a;doubleit|doubleit')
t.add_column('v6', datatype='str', transform="^str('v6-string')")
for c in t:
print('---',c)
for i, tr in enumerate(c.expanded_transform):
print(' ',i, len(list(tr)), list(tr))
headers = ['stage'] + list(c.name for c in t)
table = [[i] + [ tr.str(i) for tr in stage ] for i, stage in enumerate(t.stage_transforms)]
from tabulate import tabulate
print (tabulate(table, headers, tablefmt="rst"))
        N = 10  # number of rows Source yields
        class Source(object):
headers = 'a b'.split()
def __iter__(self):
for i in range(N):
yield i, 2*i
rp = RowProcessor(Source(), t, env=env, code_path='/tmp/rowgenerators/test_transform.py')
# NOTE. This speed test is about 12x to 23x faster running in PyPy than CPython!
def test_basic_transform(self):
from rowgenerators.rowpipe import Table
from rowgenerators.rowpipe import RowProcessor
from contexttimer import Timer
def doubleit(v):
return int(v) * 2
env = {
'doubleit': doubleit
}
t = Table('foobar')
t.add_column('id', datatype='int')
t.add_column('a', datatype='int')
t.add_column('v1', datatype='int', transform='^row.a')
t.add_column('v2', datatype='int', transform='row.v1;doubleit')
t.add_column('v3', datatype='int', transform='^row.a;doubleit')
t.add_column('v4', datatype='float', transform='^row.a;doubleit;doubleit')
t.add_column('v5', datatype='int', transform='^row.a;doubleit|doubleit')
t.add_column('v6', datatype='float')
N = 20000
class Source(object):
headers = 'a b'.split()
def __iter__(self):
for i in range(N):
yield i, 2*i
rp = RowProcessor(Source(), t, env=env, code_path='/tmp/rowgenerators/test_transform.py')
print("Code: ", rp.code_path)
headers = rp.headers
for row in rp:
d = dict(zip(headers, row))
self.assertEqual(d['a'], d['v1'], d)
self.assertEqual(2 * d['a'], d['v2'], d)
self.assertEqual(2 * d['a'], d['v3'], d)
self.assertEqual(4 * d['a'], d['v4'], d)
self.assertEqual(4 * d['a'], d['v5'], d)
count = 0
row_sum = 0
with Timer() as t:
for row in rp:
count += 1
row_sum += round(sum(row[:6]))
self.assertEqual(2199890000, row_sum)
print('Rate=', float(N) / t.elapsed)
def test_init_transform(self):
from rowgenerators.rowpipe import Table
def expand_transform(code, datatype='int', valuetype=None):
t = Table('foobar')
c = t.add_column('c', datatype=datatype, valuetype=valuetype, transform=code)
return c.expanded_transform
print(expand_transform('^GeoidCensusTract|v.as_acs()'))
print(expand_transform('v.as_acs()'))
def test_many_transform(self):
from rowgenerators.rowpipe import Table
from rowgenerators.rowpipe import RowProcessor
from contexttimer import Timer
def doubleit(v):
return int(v) * 2
def printstuff(v, manager, accumulator):
print(type(accumulator), accumulator)
return v
def accumulate(v, accumulator):
from collections import deque
if not 'deque' in accumulator:
accumulator['deque'] = deque([0], 3)
accumulator['deque'].append(v)
return sum(accumulator['deque'])
def addtwo(x):
return x+2
env = {
'doubleit': doubleit,
'printstuff': printstuff,
'accumulate': accumulate,
'addtwo': addtwo
}
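        # Transform mini-language as exercised below (a sketch inferred from the
        # expected sums, not an authoritative specification):
        #   '^expr'  initializes the value in the first stage (e.g. '^row.a')
        #   ';expr'  appends another stage fed the previous stage's result
        #   '|expr'  pipes within the same stage
        #   row.<col>, manager.<attr> and callables from env are all available.
        # For example, 'accumulate' keeps a rolling deque of the last three values,
        # so over a = 0..9 the column sums to 0+1+3+6+9+12+15+18+21+24 = 109.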
transforms = [
('',45),
('^row.a', 45),
('^row.a;doubleit', 90),
('^row.a;doubleit;doubleit', 180),
('^row.a;doubleit|doubleit', 180),
('^row.a;row.c*2|doubleit', 180),
('^row.a;row.c/3|doubleit', 24),
('doubleit', 90),
('doubleit;doubleit', 180),
('doubleit|doubleit', 180),
('row.c*2|doubleit', 180),
('row.c/3|doubleit', 24),
('accumulate', 109),
('manager.factor_a*row.c', 450),
('addtwo(row.c)', 65),
]
N = 10
class Source(object):
headers = 'a'.split()
def __iter__(self):
for i in range(N):
yield (i,)
class Manager(object):
factor_a = 10
for i, (tr, final_sum) in enumerate(transforms):
t = Table('foobar')
t.add_column('c', datatype='int', transform=tr)
rp = RowProcessor(Source(), t, env=env,
manager=Manager(),
code_path='/tmp/rowgenerators/test_many_transform_{}.py'.format(i))
row_sum = 0
with Timer() as t:
for row in rp:
row_sum += sum(row)
self.assertEqual(final_sum, row_sum)
if __name__ == '__main__':
unittest.main()
|
|
"""
Various distributions from the database.
"""
import logging
from collections import Counter, defaultdict
from . import db
from .api import get_documents, get_submissions, get_submission
from .counter_utils import normalize
logger = logging.getLogger(__name__)
## Document distributions
def document_uniform(corpus_tag):
"""
The uniform distribution over documents
"""
docs = list(get_documents(corpus_tag))
return Counter({doc_id: 1./len(docs) for doc_id in docs})
def test_document_uniform():
tag = 'kbp2016'
P = document_uniform(tag)
assert len(P) == 15001
Z = sum(P.values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for documents is not normalized: Z = {}".format(Z)
_, prob = next(iter(P.items()))
assert prob == 1./15001
def document_entity(corpus_tag, seed_documents, mention_table="evaluation_mention"):
"""
    Constructs a distribution over documents based on links from @mention_table.
The probability of a document is proportional to how many links it shares.
"""
# TODO: Reweight documents and mentions with some sort of TF-IDF scoring.
distribution = Counter()
with db.CONN:
with db.CONN.cursor() as cur:
cur.execute("CREATE TEMPORARY TABLE _seed_document (doc_id TEXT NOT NULL) ON COMMIT DROP;")
db.execute_values(cur, "INSERT INTO _seed_document VALUES %s", seed_documents)
for row in db.select(r"""
WITH links AS (
SELECT DISTINCT plainto_tsquery(m.gloss) AS query
FROM {mention_table} m
JOIN _seed_document d ON (m.doc_id = d.doc_id)
WHERE m.canonical_span = m.span
),
document_links AS (
SELECT d.doc_id, query
FROM document_tag d, document_index i,
links l
WHERE d.tag = %(corpus_tag)s
AND i.doc_id = d.doc_id
AND i.tsvector @@ query
)
SELECT doc_id, COUNT(*) AS count
FROM document_links
GROUP BY doc_id;
""".format(mention_table=mention_table), cur, corpus_tag=corpus_tag):
distribution[row.doc_id] = row.count
return normalize(distribution)
def test_document_entity():
tag = 'kbp2016'
seed_docs = [(doc_id,) for doc_id, _ in zip(get_documents(tag), range(10))]
P = document_entity(tag, seed_docs, "suggested_mention")
assert len(P) == 14544
Z = sum(P.values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for documents is not normalized: Z = {}".format(Z)
## Submission distributions
def submission_instance(corpus_tag, submission_id=None):
if submission_id is not None:
assert get_submission(submission_id).corpus_tag == corpus_tag, "Submission {} is not on corpus {}".format(submission_id, corpus_tag)
where = "WHERE s.submission_id = %(submission_id)s"
else:
where = ""
distribution = defaultdict(Counter)
for row in db.select("""
WITH _counts AS (
SELECT submission_id, SUM(count) AS count
FROM submission_statistics s
GROUP BY submission_id
)
SELECT s.submission_id, s.doc_id, s.subject, s.object, 1./c.count AS prob
FROM submission_relation s
JOIN submission s_ ON (s_.id = s.submission_id AND s_.corpus_tag = %(corpus_tag)s)
JOIN _counts c ON (s.submission_id = c.submission_id)
{where}
""".format(where=where), corpus_tag=corpus_tag, submission_id=submission_id):
distribution[row.submission_id][row.doc_id, (row.subject.lower, row.subject.upper), (row.object.lower, row.object.upper)] = float(row.prob)
return distribution
def test_submission_instance():
tag = 'kbp2016'
Ps = submission_instance(tag)
for submission in get_submissions(tag):
Z = sum(Ps[submission.id].values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
def test_submission_instance_with_id():
tag = 'kbp2016'
submission = get_submissions(tag)[0]
Ps = submission_instance(tag, submission.id)
assert len(Ps) == 1 and submission.id in Ps
P = Ps[submission.id]
Z = sum(P.values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
def submission_relation(corpus_tag, submission_id=None):
if submission_id is not None:
assert get_submission(submission_id).corpus_tag == corpus_tag, "Submission {} is not on corpus {}".format(submission_id, corpus_tag)
where = "WHERE s.submission_id = %(submission_id)s"
else:
where = ""
distribution = defaultdict(Counter)
for row in db.select("""
WITH _counts AS (
SELECT submission_id, relation, SUM(count) AS count
FROM submission_statistics s
GROUP BY submission_id, relation),
_relation_counts AS (SELECT submission_id, COUNT(*) FROM _counts GROUP BY submission_id)
SELECT s.submission_id, s.doc_id, s.subject, s.object, (1./c.count)/(r.count) AS prob
FROM submission_relation s
JOIN submission s_ ON (s_.id = s.submission_id AND s_.corpus_tag = %(corpus_tag)s)
JOIN _counts c ON (s.submission_id = c.submission_id AND s.relation = c.relation)
JOIN _relation_counts r ON (s.submission_id = r.submission_id)
{where}
""".format(where=where), corpus_tag=corpus_tag, submission_id=submission_id):
distribution[row.submission_id][row.doc_id, (row.subject.lower, row.subject.upper), (row.object.lower, row.object.upper)] = float(row.prob)
return distribution
def test_submission_relation():
tag = 'kbp2016'
Ps = submission_relation(tag)
for submission in get_submissions(tag):
Z = sum(Ps[submission.id].values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
def test_submission_relation_by_id():
tag = 'kbp2016'
submission = get_submissions(tag)[0]
Ps = submission_relation(tag, submission.id)
assert len(Ps) == 1 and submission.id in Ps
P = Ps[submission.id]
Z = sum(P.values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
def submission_entity(corpus_tag, submission_id=None):
if submission_id is not None:
assert get_submission(submission_id).corpus_tag == corpus_tag, "Submission {} is not on corpus {}".format(submission_id, corpus_tag)
where = "WHERE s.submission_id = %(submission_id)s"
else:
where = ""
distribution = defaultdict(Counter)
for row in db.select("""
WITH _counts AS (
SELECT submission_id, subject_entity, SUM(count) AS count
FROM submission_statistics s
GROUP BY submission_id, subject_entity),
_entity_counts AS (SELECT submission_id, COUNT(*) FROM _counts GROUP BY submission_id)
SELECT s.submission_id, s.doc_id, s.subject, s.object, s.subject_entity, (1./c.count)/(ec.count) AS prob
FROM submission_entity_relation s
JOIN submission s_ ON (s_.id = s.submission_id AND s_.corpus_tag = %(corpus_tag)s)
JOIN _counts c ON (s.submission_id = c.submission_id AND c.subject_entity = s.subject_entity)
JOIN _entity_counts ec ON (s.submission_id = ec.submission_id)
{where}
""".format(where=where), corpus_tag=corpus_tag, submission_id=submission_id):
distribution[row.submission_id][row.doc_id, (row.subject.lower, row.subject.upper), (row.object.lower, row.object.upper)] = float(row.prob)
return distribution
def test_submission_entity():
tag = 'kbp2016'
Ps = submission_entity(tag)
for submission in get_submissions(tag):
Z = sum(Ps[submission.id].values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
def test_submission_entity_by_id():
tag = 'kbp2016'
submission = get_submissions(tag)[0]
Ps = submission_entity(tag, submission.id)
assert len(Ps) == 1 and submission.id in Ps
P = Ps[submission.id]
Z = sum(P.values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
def submission_entity_relation(corpus_tag, submission_id=None):
if submission_id is not None:
assert get_submission(submission_id).corpus_tag == corpus_tag, "Submission {} is not on corpus {}".format(submission_id, corpus_tag)
where = "WHERE s.submission_id = %(submission_id)s"
else:
where = ""
distribution = defaultdict(Counter)
for row in db.select("""
SELECT s.submission_id, s.doc_id, s.subject, s.object, s.subject_entity, (erc.count/ec.count)/(rc.count) AS likelihood
FROM submission_entity_relation s
JOIN submission s_ ON (s_.id = s.submission_id AND s_.corpus_tag = %(corpus_tag)s)
JOIN submission_relation_counts rc ON (s.submission_id = rc.submission_id AND rc.relation = s.relation)
JOIN submission_entity_counts ec ON (s.submission_id = ec.submission_id AND ec.subject_entity = s.subject_entity)
JOIN submission_entity_relation_counts erc ON (s.submission_id = erc.submission_id AND erc.subject_entity = s.subject_entity)
{where}
""".format(where=where), corpus_tag=corpus_tag, submission_id=submission_id):
distribution[row.submission_id][row.doc_id, (row.subject.lower, row.subject.upper), (row.object.lower, row.object.upper)] = float(row.likelihood)
for submission_id in distribution:
distribution[submission_id] = normalize(distribution[submission_id])
return distribution
def test_submission_entity_relation():
tag = 'kbp2016'
Ps = submission_entity_relation(tag)
for submission in get_submissions(tag):
Z = sum(Ps[submission.id].values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
def test_submission_entity_relation_by_id():
tag = 'kbp2016'
submission = get_submissions(tag)[0]
Ps = submission_entity_relation(tag, submission.id)
assert len(Ps) == 1 and submission.id in Ps
P = Ps[submission.id]
Z = sum(P.values())
assert abs(Z - 1.0) < 1.e-5, "Distribution for {} is not normalized: Z = {}".format(submission.id, Z)
## Obtaining samples from database.
def Y0(corpus_tag, submission_id=None):
"""
Use the document_sample table to get which documents have been exhaustively sampled.
"""
if submission_id is not None:
assert get_submission(submission_id).corpus_tag == corpus_tag, "Submission {} is not on corpus {}".format(submission_id, corpus_tag)
where = "AND s.id = %(submission_id)s"
else:
where = ""
ret = defaultdict(list)
# NOTE: Treating a missing submission_entries label as FALSE is OK here,
# BECAUSE document_sample contains the exhaustively annotated documents.
rows = db.select("""
SELECT s.id AS submission_id, r.doc_id, r.subject, r.object, COALESCE(s_.correct, FALSE) AS gx
FROM submission s
JOIN document_sample d ON (true)
JOIN document_tag t ON (d.doc_id = t.doc_id AND t.tag = %(corpus_tag)s)
JOIN evaluation_relation r ON (d.doc_id = r.doc_id)
LEFT JOIN submission_entries s_ ON (s.id = s_.submission_id AND r.doc_id = s_.doc_id AND r.subject = s_.subject AND r.object = s_.object)
WHERE s.corpus_tag = %(corpus_tag)s {where}
ORDER BY s.id, r.doc_id, r.subject, r.object
""".format(where=where), corpus_tag=corpus_tag, submission_id=submission_id)
for row in rows:
ret[row.submission_id].append(((row.doc_id, (row.subject.lower, row.subject.upper), (row.object.lower, row.object.upper)), row.gx))
return ret
def test_Y0():
corpus_tag = 'kbp2016'
Y0_ = Y0(corpus_tag)
for Y in Y0_.values():
assert len(Y) == 926
for Ys in zip(*Y0_.values()):
assert len(set(y[0] for y in Ys)) == 1
def test_Y0_by_id():
corpus_tag = 'kbp2016'
submission_id = 1
Y = Y0(corpus_tag, submission_id)[submission_id]
assert len(Y) == 926
def Xh(corpus_tag, distribution_type, submission_id = None):
if submission_id is not None:
assert get_submission(submission_id).corpus_tag == corpus_tag, "Submission {} is not on corpus {}".format(submission_id, corpus_tag)
where = "AND b.submission_id = %(submission_id)s"
else:
where = ""
ret = defaultdict(list)
rows = db.select("""
SELECT b.submission_id, d.doc_id, d.subject, d.object, s.correct AS fx
FROM sample_batch b
JOIN submission_sample d ON (b.id = d.batch_id)
JOIN submission_entries s ON (d.doc_id = s.doc_id AND d.subject = s.subject AND d.object = s.object AND b.submission_id = s.submission_id)
WHERE b.distribution_type = %(distribution_type)s {where}
ORDER BY d.doc_id, d.subject, d.object
""".format(where=where), submission_id=submission_id, distribution_type=distribution_type)
for row in rows:
ret[row.submission_id].append(((row.doc_id, (row.subject.lower, row.subject.upper), (row.object.lower, row.object.upper)), row.fx))
return ret
def test_Xhs():
corpus_tag = 'kbp2016'
distribution_type = "relation"
submission_id = 1
Xhs = Xh(corpus_tag, distribution_type)
assert len(Xhs) == 3
# TODO: KNOWN BUG
# something is wrong in how we ended up turking output
assert len(Xhs[submission_id]) == 255
def test_Xhs_by_id():
corpus_tag = 'kbp2016'
distribution_type = "relation"
submission_id = 1
Xh_ = Xh(corpus_tag, distribution_type, submission_id)[submission_id]
# TODO: KNOWN BUG
# something is wrong in how we ended up turking output
assert len(Xh_) == 255
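# A minimal sketch (not part of the original module) of how the proposal distributions
# and the labelled samples above could be combined into an importance-weighted estimate
# of precision. `proposal` is assumed to map (doc_id, subject_span, object_span) -> prob,
# as returned per submission by the distribution functions, and `samples` is assumed to
# be a list of ((doc_id, subject_span, object_span), correct) pairs, as returned by Xh().
def _example_importance_weighted_precision(proposal, samples, target=None):
    """Illustrative only: estimate E_target[correct] from samples drawn from `proposal`."""
    total_weight, weighted_correct = 0.0, 0.0
    for x, fx in samples:
        q = proposal.get(x, 0.0)
        if q == 0.0:
            continue  # defensively skip samples outside the proposal's support
        p = target[x] if target is not None else q
        w = p / q
        total_weight += w
        weighted_correct += w * float(fx)
    return weighted_correct / total_weight if total_weight > 0 else 0.0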
|
|
"""Config flow for HomeKit integration."""
import random
import string
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
CONF_DOMAINS,
CONF_ENTITIES,
CONF_ENTITY_ID,
CONF_NAME,
CONF_PORT,
)
from homeassistant.core import callback, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import (
CONF_EXCLUDE_DOMAINS,
CONF_EXCLUDE_ENTITIES,
CONF_INCLUDE_DOMAINS,
CONF_INCLUDE_ENTITIES,
)
from .const import (
CONF_AUTO_START,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_HOMEKIT_MODE,
CONF_VIDEO_CODEC,
DEFAULT_AUTO_START,
DEFAULT_CONFIG_FLOW_PORT,
DEFAULT_HOMEKIT_MODE,
HOMEKIT_MODE_ACCESSORY,
HOMEKIT_MODES,
SHORT_ACCESSORY_NAME,
SHORT_BRIDGE_NAME,
VIDEO_CODEC_COPY,
)
from .const import DOMAIN # pylint:disable=unused-import
from .util import async_find_next_available_port
CONF_CAMERA_COPY = "camera_copy"
CONF_INCLUDE_EXCLUDE_MODE = "include_exclude_mode"
MODE_INCLUDE = "include"
MODE_EXCLUDE = "exclude"
INCLUDE_EXCLUDE_MODES = [MODE_EXCLUDE, MODE_INCLUDE]
SUPPORTED_DOMAINS = [
"alarm_control_panel",
"automation",
"binary_sensor",
"camera",
"climate",
"cover",
"demo",
"device_tracker",
"fan",
"humidifier",
"input_boolean",
"light",
"lock",
"media_player",
"person",
"remote",
"scene",
"script",
"sensor",
"switch",
"vacuum",
"water_heater",
]
DEFAULT_DOMAINS = [
"alarm_control_panel",
"climate",
"cover",
"humidifier",
"fan",
"light",
"lock",
"media_player",
"switch",
"vacuum",
"water_heater",
]
DOMAINS_PREFER_ACCESSORY_MODE = ["camera", "media_player"]
CAMERA_DOMAIN = "camera"
CAMERA_ENTITY_PREFIX = f"{CAMERA_DOMAIN}."
_EMPTY_ENTITY_FILTER = {
CONF_INCLUDE_DOMAINS: [],
CONF_EXCLUDE_DOMAINS: [],
CONF_INCLUDE_ENTITIES: [],
CONF_EXCLUDE_ENTITIES: [],
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for HomeKit."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize config flow."""
self.hk_data = {}
self.entry_title = None
async def async_step_accessory_mode(self, user_input=None):
"""Choose specific entity in accessory mode."""
if user_input is not None:
entity_id = user_input[CONF_ENTITY_ID]
entity_filter = _EMPTY_ENTITY_FILTER.copy()
entity_filter[CONF_INCLUDE_ENTITIES] = [entity_id]
self.hk_data[CONF_FILTER] = entity_filter
if entity_id.startswith(CAMERA_ENTITY_PREFIX):
self.hk_data[CONF_ENTITY_CONFIG] = {
entity_id: {CONF_VIDEO_CODEC: VIDEO_CODEC_COPY}
}
return await self.async_step_pairing()
all_supported_entities = _async_get_matching_entities(
self.hass, domains=DOMAINS_PREFER_ACCESSORY_MODE
)
return self.async_show_form(
step_id="accessory_mode",
data_schema=vol.Schema(
{vol.Required(CONF_ENTITY_ID): vol.In(all_supported_entities)}
),
)
async def async_step_bridge_mode(self, user_input=None):
"""Choose specific domains in bridge mode."""
if user_input is not None:
entity_filter = _EMPTY_ENTITY_FILTER.copy()
entity_filter[CONF_INCLUDE_DOMAINS] = user_input[CONF_INCLUDE_DOMAINS]
self.hk_data[CONF_FILTER] = entity_filter
return await self.async_step_pairing()
default_domains = [] if self._async_current_names() else DEFAULT_DOMAINS
return self.async_show_form(
step_id="bridge_mode",
data_schema=vol.Schema(
{
vol.Required(
CONF_INCLUDE_DOMAINS, default=default_domains
): cv.multi_select(SUPPORTED_DOMAINS),
}
),
)
async def async_step_pairing(self, user_input=None):
"""Pairing instructions."""
if user_input is not None:
return self.async_create_entry(title=self.entry_title, data=self.hk_data)
self.hk_data[CONF_PORT] = await async_find_next_available_port(
self.hass, DEFAULT_CONFIG_FLOW_PORT
)
self.hk_data[CONF_NAME] = self._async_available_name(
self.hk_data[CONF_HOMEKIT_MODE]
)
self.entry_title = f"{self.hk_data[CONF_NAME]}:{self.hk_data[CONF_PORT]}"
return self.async_show_form(
step_id="pairing",
description_placeholders={CONF_NAME: self.hk_data[CONF_NAME]},
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
self.hk_data = {
CONF_HOMEKIT_MODE: user_input[CONF_HOMEKIT_MODE],
}
if user_input[CONF_HOMEKIT_MODE] == HOMEKIT_MODE_ACCESSORY:
return await self.async_step_accessory_mode()
return await self.async_step_bridge_mode()
homekit_mode = self.hk_data.get(CONF_HOMEKIT_MODE, DEFAULT_HOMEKIT_MODE)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOMEKIT_MODE, default=homekit_mode): vol.In(
HOMEKIT_MODES
)
}
),
errors=errors,
)
async def async_step_import(self, user_input=None):
"""Handle import from yaml."""
if not self._async_is_unique_name_port(user_input):
return self.async_abort(reason="port_name_in_use")
return self.async_create_entry(
title=f"{user_input[CONF_NAME]}:{user_input[CONF_PORT]}", data=user_input
)
@callback
def _async_current_names(self):
"""Return a set of bridge names."""
return {
entry.data[CONF_NAME]
for entry in self._async_current_entries()
if CONF_NAME in entry.data
}
@callback
def _async_available_name(self, homekit_mode):
"""Return an available for the bridge."""
base_name = SHORT_BRIDGE_NAME
if homekit_mode == HOMEKIT_MODE_ACCESSORY:
base_name = SHORT_ACCESSORY_NAME
# We always pick a RANDOM name to avoid Zeroconf
# name collisions. If the name has been seen before
# pairing will probably fail.
acceptable_chars = string.ascii_uppercase + string.digits
suggested_name = None
while not suggested_name or suggested_name in self._async_current_names():
trailer = "".join(random.choices(acceptable_chars, k=4))
suggested_name = f"{base_name} {trailer}"
return suggested_name
@callback
def _async_is_unique_name_port(self, user_input):
"""Determine is a name or port is already used."""
name = user_input[CONF_NAME]
port = user_input[CONF_PORT]
for entry in self._async_current_entries():
if entry.data[CONF_NAME] == name or entry.data[CONF_PORT] == port:
return False
return True
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for homekit."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
self.hk_options = {}
self.included_cameras = set()
async def async_step_yaml(self, user_input=None):
"""No options for yaml managed entries."""
if user_input is not None:
# Apparently not possible to abort an options flow
# at the moment
return self.async_create_entry(title="", data=self.config_entry.options)
return self.async_show_form(step_id="yaml")
async def async_step_advanced(self, user_input=None):
"""Choose advanced options."""
if not self.show_advanced_options or user_input is not None:
if user_input:
self.hk_options.update(user_input)
self.hk_options[CONF_AUTO_START] = self.hk_options.get(
CONF_AUTO_START, DEFAULT_AUTO_START
)
for key in (CONF_DOMAINS, CONF_ENTITIES):
if key in self.hk_options:
del self.hk_options[key]
return self.async_create_entry(title="", data=self.hk_options)
return self.async_show_form(
step_id="advanced",
data_schema=vol.Schema(
{
vol.Optional(
CONF_AUTO_START,
default=self.hk_options.get(
CONF_AUTO_START, DEFAULT_AUTO_START
),
): bool
}
),
)
async def async_step_cameras(self, user_input=None):
"""Choose camera config."""
if user_input is not None:
entity_config = self.hk_options[CONF_ENTITY_CONFIG]
for entity_id in self.included_cameras:
if entity_id in user_input[CONF_CAMERA_COPY]:
entity_config.setdefault(entity_id, {})[
CONF_VIDEO_CODEC
] = VIDEO_CODEC_COPY
elif (
entity_id in entity_config
and CONF_VIDEO_CODEC in entity_config[entity_id]
):
del entity_config[entity_id][CONF_VIDEO_CODEC]
return await self.async_step_advanced()
cameras_with_copy = []
entity_config = self.hk_options.setdefault(CONF_ENTITY_CONFIG, {})
for entity in self.included_cameras:
hk_entity_config = entity_config.get(entity, {})
if hk_entity_config.get(CONF_VIDEO_CODEC) == VIDEO_CODEC_COPY:
cameras_with_copy.append(entity)
data_schema = vol.Schema(
{
vol.Optional(
CONF_CAMERA_COPY,
default=cameras_with_copy,
): cv.multi_select(self.included_cameras),
}
)
return self.async_show_form(step_id="cameras", data_schema=data_schema)
async def async_step_include_exclude(self, user_input=None):
"""Choose entities to include or exclude from the domain."""
if user_input is not None:
entity_filter = _EMPTY_ENTITY_FILTER.copy()
if isinstance(user_input[CONF_ENTITIES], list):
entities = user_input[CONF_ENTITIES]
else:
entities = [user_input[CONF_ENTITIES]]
if (
self.hk_options[CONF_HOMEKIT_MODE] == HOMEKIT_MODE_ACCESSORY
or user_input[CONF_INCLUDE_EXCLUDE_MODE] == MODE_INCLUDE
):
entity_filter[CONF_INCLUDE_ENTITIES] = entities
# Include a selected domain wholesale only when none of its
# entities were explicitly included above
domains_with_entities_selected = _domains_set_from_entities(entities)
entity_filter[CONF_INCLUDE_DOMAINS] = [
domain
for domain in self.hk_options[CONF_DOMAINS]
if domain not in domains_with_entities_selected
]
self.included_cameras = {
entity_id
for entity_id in entities
if entity_id.startswith(CAMERA_ENTITY_PREFIX)
}
else:
entity_filter[CONF_INCLUDE_DOMAINS] = self.hk_options[CONF_DOMAINS]
entity_filter[CONF_EXCLUDE_ENTITIES] = entities
if CAMERA_DOMAIN in entity_filter[CONF_INCLUDE_DOMAINS]:
camera_entities = _async_get_matching_entities(
self.hass,
domains=[CAMERA_DOMAIN],
)
self.included_cameras = {
entity_id
for entity_id in camera_entities
if entity_id not in entities
}
else:
self.included_cameras = set()
self.hk_options[CONF_FILTER] = entity_filter
if self.included_cameras:
return await self.async_step_cameras()
return await self.async_step_advanced()
entity_filter = self.hk_options.get(CONF_FILTER, {})
all_supported_entities = _async_get_matching_entities(
self.hass,
domains=self.hk_options[CONF_DOMAINS],
)
data_schema = {}
entities = entity_filter.get(CONF_INCLUDE_ENTITIES, [])
if self.hk_options[CONF_HOMEKIT_MODE] == HOMEKIT_MODE_ACCESSORY:
entity_schema = vol.In
else:
if entities:
include_exclude_mode = MODE_INCLUDE
else:
include_exclude_mode = MODE_EXCLUDE
entities = entity_filter.get(CONF_EXCLUDE_ENTITIES, [])
data_schema[
vol.Required(CONF_INCLUDE_EXCLUDE_MODE, default=include_exclude_mode)
] = vol.In(INCLUDE_EXCLUDE_MODES)
entity_schema = cv.multi_select
data_schema[vol.Optional(CONF_ENTITIES, default=entities)] = entity_schema(
all_supported_entities
)
return self.async_show_form(
step_id="include_exclude", data_schema=vol.Schema(data_schema)
)
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if self.config_entry.source == SOURCE_IMPORT:
return await self.async_step_yaml(user_input)
if user_input is not None:
self.hk_options.update(user_input)
return await self.async_step_include_exclude()
self.hk_options = dict(self.config_entry.options)
entity_filter = self.hk_options.get(CONF_FILTER, {})
homekit_mode = self.hk_options.get(CONF_HOMEKIT_MODE, DEFAULT_HOMEKIT_MODE)
domains = entity_filter.get(CONF_INCLUDE_DOMAINS, [])
include_entities = entity_filter.get(CONF_INCLUDE_ENTITIES)
if include_entities:
domains.extend(_domains_set_from_entities(include_entities))
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(CONF_HOMEKIT_MODE, default=homekit_mode): vol.In(
HOMEKIT_MODES
),
vol.Required(
CONF_DOMAINS,
default=domains,
): cv.multi_select(SUPPORTED_DOMAINS),
}
),
)
def _async_get_matching_entities(hass, domains=None):
"""Fetch all entities or entities in the given domains."""
return {
state.entity_id: f"{state.entity_id} ({state.attributes.get(ATTR_FRIENDLY_NAME, state.entity_id)})"
for state in sorted(
hass.states.async_all(domains and set(domains)),
key=lambda item: item.entity_id,
)
}
def _domains_set_from_entities(entity_ids):
"""Build a set of domains for the given entity ids."""
domains = set()
for entity_id in entity_ids:
domains.add(split_entity_id(entity_id)[0])
return domains
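# A minimal illustration (not part of the original integration) of the helper above:
# _domains_set_from_entities collapses entity ids to their domain prefixes, which is how
# the options flow decides which whole domains stay included alongside explicit entities.
def _example_domains_set_from_entities():
    """Illustrative only."""
    assert _domains_set_from_entities(
        ["light.kitchen", "light.porch", "switch.fan"]
    ) == {"light", "switch"}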
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import testscenarios
from oslo import messaging
from oslo.messaging._drivers import common as exceptions
from oslo.messaging.openstack.common import jsonutils
from tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class NovaStyleException(Exception):
format = 'I am Nova'
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
message = self.format % kwargs
super(NovaStyleException, self).__init__(message)
class KwargsStyleException(NovaStyleException):
format = 'I am %(who)s'
def add_remote_postfix(ex):
ex_type = type(ex)
message = str(ex)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override,
'__unicode__': str_override})
new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__
try:
ex.__class__ = new_ex_type
except TypeError:
ex.args = (message,) + ex.args[1:]
return ex
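# A minimal illustration (not part of the original test suite) of what add_remote_postfix
# does: it rebinds the exception instance to a dynamically created "<Name>_Remote"
# subclass whose __str__ is frozen to the original message, mimicking a deserialized
# remote error.
def _example_add_remote_postfix():
    """Illustrative only."""
    ex = add_remote_postfix(NovaStyleException('boom'))
    assert ex.__class__.__name__ == 'NovaStyleException_Remote'
    assert str(ex) == 'boom'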
class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase):
_log_failure = [
('log_failure', dict(log_failure=True)),
('do_not_log_failure', dict(log_failure=False)),
]
_add_remote = [
('add_remote', dict(add_remote=True)),
('do_not_add_remote', dict(add_remote=False)),
]
_exception_types = [
('bog_standard', dict(cls=Exception,
args=['test'],
kwargs={},
clsname='Exception',
modname='exceptions',
msg='test')),
('nova_style', dict(cls=NovaStyleException,
args=[],
kwargs={},
clsname='NovaStyleException',
modname=__name__,
msg='I am Nova')),
('nova_style_with_msg', dict(cls=NovaStyleException,
args=['testing'],
kwargs={},
clsname='NovaStyleException',
modname=__name__,
msg='testing')),
('kwargs_style', dict(cls=KwargsStyleException,
args=[],
kwargs={'who': 'Oslo'},
clsname='KwargsStyleException',
modname=__name__,
msg='I am Oslo')),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._log_failure,
cls._add_remote,
cls._exception_types)
def setUp(self):
super(SerializeRemoteExceptionTestCase, self).setUp()
def test_serialize_remote_exception(self):
errors = []
def stub_error(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
errors.append(str(msg) % a)
self.stubs.Set(exceptions.LOG, 'error', stub_error)
try:
try:
raise self.cls(*self.args, **self.kwargs)
except Exception as ex:
if self.add_remote:
ex = add_remote_postfix(ex)
raise ex
except Exception:
exc_info = sys.exc_info()
serialized = exceptions.serialize_remote_exception(
exc_info, log_failure=self.log_failure)
failure = jsonutils.loads(serialized)
self.assertEqual(failure['class'], self.clsname, failure)
self.assertEqual(failure['module'], self.modname)
self.assertEqual(failure['message'], self.msg)
self.assertEqual(failure['args'], [self.msg])
self.assertEqual(failure['kwargs'], self.kwargs)
# Note: _Remote prefix not stripped from tracebacks
tb = ex.__class__.__name__ + ': ' + self.msg
self.assertIn(tb, ''.join(failure['tb']))
if self.log_failure:
self.assertTrue(len(errors) > 0, errors)
else:
self.assertEqual(len(errors), 0, errors)
SerializeRemoteExceptionTestCase.generate_scenarios()
class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase):
_standard_allowed = [__name__]
scenarios = [
('bog_standard',
dict(allowed=_standard_allowed,
clsname='Exception',
modname='exceptions',
cls=Exception,
args=['test'],
kwargs={},
str='test\ntraceback\ntraceback\n',
message='test',
remote_name='Exception',
remote_args=('test\ntraceback\ntraceback\n', ),
remote_kwargs={})),
('nova_style',
dict(allowed=_standard_allowed,
clsname='NovaStyleException',
modname=__name__,
cls=NovaStyleException,
args=[],
kwargs={},
str='test\ntraceback\ntraceback\n',
message='I am Nova',
remote_name='NovaStyleException_Remote',
remote_args=('I am Nova', ),
remote_kwargs={})),
('nova_style_with_msg',
dict(allowed=_standard_allowed,
clsname='NovaStyleException',
modname=__name__,
cls=NovaStyleException,
args=['testing'],
kwargs={},
str='test\ntraceback\ntraceback\n',
message='testing',
remote_name='NovaStyleException_Remote',
remote_args=('testing', ),
remote_kwargs={})),
('kwargs_style',
dict(allowed=_standard_allowed,
clsname='KwargsStyleException',
modname=__name__,
cls=KwargsStyleException,
args=[],
kwargs={'who': 'Oslo'},
str='test\ntraceback\ntraceback\n',
message='I am Oslo',
remote_name='KwargsStyleException_Remote',
remote_args=('I am Oslo', ),
remote_kwargs={})),
('not_allowed',
dict(allowed=[],
clsname='NovaStyleException',
modname=__name__,
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: NovaStyleException test\n"
"[u'traceback\\ntraceback\\n']."),
msg=("Remote error: NovaStyleException test\n"
"[u'traceback\\ntraceback\\n']."),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'NovaStyleException',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_module',
dict(allowed=['notexist'],
clsname='Exception',
modname='notexist',
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: Exception test\n"
"[u'traceback\\ntraceback\\n']."),
msg=("Remote error: Exception test\n"
"[u'traceback\\ntraceback\\n']."),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'Exception',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_exception',
dict(allowed=[],
clsname='FarcicalError',
modname='exceptions',
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: FarcicalError test\n"
"[u'traceback\\ntraceback\\n']."),
msg=("Remote error: FarcicalError test\n"
"[u'traceback\\ntraceback\\n']."),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'FarcicalError',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_kwarg',
dict(allowed=[],
clsname='Exception',
modname='exceptions',
cls=messaging.RemoteError,
args=[],
kwargs={'foobar': 'blaa'},
str=("Remote error: Exception test\n"
"[u'traceback\\ntraceback\\n']."),
msg=("Remote error: Exception test\n"
"[u'traceback\\ntraceback\\n']."),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'Exception',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('system_exit',
dict(allowed=[],
clsname='SystemExit',
modname='exceptions',
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: SystemExit test\n"
"[u'traceback\\ntraceback\\n']."),
msg=("Remote error: SystemExit test\n"
"[u'traceback\\ntraceback\\n']."),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'SystemExit',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
]
def test_deserialize_remote_exception(self):
failure = {
'class': self.clsname,
'module': self.modname,
'message': 'test',
'tb': ['traceback\ntraceback\n'],
'args': self.args,
'kwargs': self.kwargs,
}
serialized = jsonutils.dumps(failure)
ex = exceptions.deserialize_remote_exception(serialized, self.allowed)
self.assertIsInstance(ex, self.cls)
self.assertEqual(ex.__class__.__name__, self.remote_name)
self.assertEqual(str(ex), self.str)
if hasattr(self, 'msg'):
self.assertEqual(ex.msg, self.msg)
else:
self.assertEqual(ex.message, self.message)
self.assertEqual(ex.args, self.remote_args)
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Modules for RAG.
"""
import torch
import torch.cuda
import torch.nn
import torch.nn.functional as F
from typing import Any, Tuple, Dict, Optional, List, Union, Type
from parlai.agents.hugging_face.t5 import (
ParlaiT5Encoder,
ParlaiT5Decoder,
build_t5,
set_device,
)
from parlai.agents.transformer.modules import (
TransformerEncoder,
TransformerDecoder,
get_n_positions_from_options,
create_embeddings,
)
from parlai.core.dict import DictionaryAgent
from parlai.core.opt import Opt
from parlai.core.torch_generator_agent import TorchGeneratorModel
from parlai.utils.torch import padded_tensor
from parlai.agents.rag.retrievers import retriever_factory, Document
class RagModel(TorchGeneratorModel):
"""
RagModel.
The RagModel operates in the following phases:
1) retrieve: given a tokenized query, return relevant documents
2) expand: given queries and documents, expand the inputs n_docs times,
concatenating each document with a relevant context
3) encode: given expanded input, encode into encoder representations
4) decoding: given encoder outputs, compute n_docs decoder representations for
each batch item.
5) marginalize: given the decoded representations, marginalize over the documents
appropriately.
The RagModel overloads the `encoder` and `decoder` attributes of your standard
`TorchGeneratorModel` to accomplish the five phases above.
"""
def __init__(self, opt, dictionary, retriever_shared=None):
from parlai.agents.rag.rag import RAG_MODELS
self.pad_idx = dictionary[dictionary.null_token]
self.start_idx = dictionary[dictionary.start_token]
self.end_idx = dictionary[dictionary.end_token]
super().__init__(self.pad_idx, self.start_idx, self.end_idx)
self.fp16 = (
not opt['no_cuda'] and torch.cuda.is_available() and opt.get('fp16', False)
)
self.dict = dictionary
self.embeddings = create_embeddings(
dictionary, opt['embedding_size'], self.pad_idx
)
# attrs
self.rag_model_type = opt['rag_model_type']
self._rag_model_interface = RAG_MODELS[self.rag_model_type](opt, self.pad_idx)
self.generation_model = opt['generation_model']
self.n_extra_positions = opt['n_extra_positions']
self.n_positions = get_n_positions_from_options(opt) + opt['n_extra_positions']
assert opt['n_extra_positions'] >= 0
self.expanded_input_truncate = min(
opt['text_truncate'] or opt['truncate'], get_n_positions_from_options(opt)
)
if self.n_extra_positions > 0:
# This attribute is overloaded:
# when n_extra_positions == 0, it is the truncation length of the full expanded input;
# when > 0, it is the maximum length of the knowledge tokens.
self.expanded_input_truncate = self.n_extra_positions
self.min_doc_token_length = opt['min_doc_token_length']
# modules
self.retriever = retriever_factory(opt, dictionary, shared=retriever_shared)
self.seq2seq_encoder = self.build_encoder(
opt,
dictionary=dictionary,
embedding=self.embeddings,
padding_idx=self.pad_idx,
)
self.seq2seq_decoder = self.build_decoder(
opt, embedding=self.embeddings, padding_idx=self.pad_idx
)
@classmethod
def build_encoder(
cls,
opt: Opt,
*args,
dictionary: Optional[DictionaryAgent] = None,
embedding: Optional[torch.nn.Embedding] = None,
encoder_class: Optional[Type] = None,
**kwargs,
):
if encoder_class is None:
assert dictionary is not None
return RagEncoder(
opt=opt, dictionary=dictionary, embedding=embedding, **kwargs
)
else:
return encoder_class(opt, *args, **kwargs)
@classmethod
def build_decoder(
cls,
opt: Opt,
*args,
embedding: Optional[torch.nn.Embedding] = None,
n_positions: Optional[int] = None,
decoder_class: Optional[Type] = None,
**kwargs,
):
if decoder_class is None:
return RagDecoder(opt=opt, embedding=embedding, n_positions=n_positions)
else:
return decoder_class(opt, *args, **kwargs)
def tokenize_query(self, query: str) -> List[int]:
"""
Tokenize the query for the retriever.
"""
return self.retriever.tokenize_query(query)
def get_retriever_delimiter(self) -> str:
"""
Return the retriever's delimiter.
"""
return self.retriever.get_delimiter()
def encoder(
self,
input: torch.LongTensor,
input_lengths: torch.LongTensor,
query_vec: torch.LongTensor,
input_turns_cnt: torch.LongTensor,
positions: Optional[torch.LongTensor] = None,
segments: Optional[torch.LongTensor] = None,
) -> Tuple[
torch.Tensor,
torch.BoolTensor,
Optional[torch.LongTensor],
Optional[List[List[Document]]],
Optional[torch.Tensor],
]:
"""
Retrieve documents and expand input via concatenation.
Then, encode as usual in the seq2seq encoder.
:param input:
2D [bsz, seqlen] input to the encoder
:param input_lengths:
1D [bsz] lengths of each input item
:param query_vec:
2D [bsz*n_turns, seqlen] input for the retriever
:param input_turns_cnt:
1D [bsz] number of dialogue turns for each input example
:return (encoder_out, encoder_mask, input_turns_cnt, top_docs, top_doc_scores):
encoder_out: encoded representations of context/document pairs
encoder_mask: mask for enc_out
input_turns_cnt: pass along the input turns count for the decoder
top_docs: List of top Documents for each batch example
top_doc_scores: scores for each retrieved document.
"""
# Retrieve, get expanded input
if all([tensor is not None for tensor in [input_lengths, query_vec]]):
expanded_input, top_docs, top_doc_scores = self.retrieve_and_concat(
input, input_lengths, query_vec, input_turns_cnt
)
else:
expanded_input = input
top_docs = top_doc_scores = None
# Run through seq2seq encoder
tensor, mask = self.seq2seq_encoder(
expanded_input, positions, segments
) # type: ignore
return tensor, mask, input_turns_cnt, top_docs, top_doc_scores
def decoder(
self,
input: torch.LongTensor,
encoder_state: Tuple[Any, ...],
incr_state: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Tensor, Optional[Dict[str, Any]]]:
"""
Decode, RAG-Style.
Obtain decoder representations as usual, then marginalize appropriately.
:param input:
input for the decoder
:param encoder_state:
RAG encoder states
:param incr_state:
incremental decoder state
:return (output, new_incr_state):
return the output token distribution, as well as new incremental state.
"""
# 1. Get decoder outputs
enc_out, enc_mask, input_turns_cnt, docs, doc_scores = encoder_state
dec_out, new_incr_state = self.seq2seq_decoder(
input, (enc_out, enc_mask), incr_state
) # type: ignore
dec_out = self.decoder_output(dec_out)
if all([obj is not None for obj in [docs, doc_scores]]):
# 2. Get logprobs
n_docs = doc_scores.size(1)
out_probs = F.log_softmax(
dec_out, dim=-1, dtype=torch.float32 # type: ignore
).view(
input.shape[0] // n_docs, n_docs, -1, dec_out.size(-1)
) # [bsz * beam_size, n_docs, input_len, esz]
# 3. Marginalize
marginalized = self._rag_model_interface.marginalize(
out_probs, F.log_softmax(doc_scores, dim=1), input_turns_cnt
)
else:
# With RAG Sequence Generation, we do not marginalize over documents.
marginalized = dec_out
return marginalized, new_incr_state
def seq2seq_forward_pass(
self, xs: torch.LongTensor, ys: torch.LongTensor
) -> Tuple[torch.Tensor, torch.Tensor, Tuple[Any, ...]]:
"""
Simulate a standard seq2seq encoder/decoder forward pass.
Used in thorough decoding.
:param xs:
input tokens
:param ys:
teacher forced decoder outputs
:return (logits, preds, encoder_states):
logits: token output distribution
preds: max probability token at each output position
encoder_states: output states from the encoder
"""
encoder_states = self.seq2seq_encoder(xs) # type: ignore
bsz = ys.size(0)
seqlen = ys.size(1)
inputs = ys.narrow(1, 0, seqlen - 1)
dec_inputs = self._rag_model_interface.get_initial_forced_decoder_input(
bsz,
inputs,
n_docs=1,
start_idx=self.START_IDX,
end_idx=self.END_IDX,
input_turns_cnt=None,
)
latent, _ = self.seq2seq_decoder(
dec_inputs, encoder_states, None
) # type: ignore
logits = self.decoder_output(latent)
_, preds = logits.max(dim=-1)
return logits, preds, encoder_states
def decoder_output(self, latent: torch.Tensor) -> torch.Tensor:
"""
Output layer for the decoder; maps latent state to token distributions.
:param latent:
final representations from last decoder layer.
:return logits:
return output distribution over tokens.
"""
return F.linear(latent, self.embeddings.weight)
def retrieve_and_concat(
self,
input: torch.LongTensor,
input_lengths: torch.LongTensor,
query_vec: torch.LongTensor,
input_turns_cnt: torch.LongTensor,
) -> Tuple[torch.LongTensor, List[List[Document]], torch.Tensor]:
"""
Retrieve documents, concat with input.
:param input:
2D [bsz, seqlen] input to the encoder
:param input_lengths:
1D [bsz] lengths of each input item
:param query_vec:
2D [bsz*n_turns, seqlen] input for the retriever
:param input_turns_cnt:
1D [bsz] number of dialogue turns for each input example
:return (expanded_input, top_docs, top_doc_scores):
expanded_input: [bsz * n_docs, seqlen+doc_len] tensor of context/document inputs
top_docs: List of top documents for each input
top_doc_scores: document scores for each document
"""
# 1. Retrieve
top_docs, top_doc_scores = self.retriever.retrieve(query_vec)
# 2. Expand the input
if input_turns_cnt is not None:
input = input.repeat_interleave(input_turns_cnt, dim=0) # type: ignore
input_lengths = input_lengths.repeat_interleave(
input_turns_cnt, dim=0
) # type: ignore
expanded_input = self.concat_docs_and_input(
input, input_lengths, top_docs, top_doc_scores.size(1)
)
return expanded_input, top_docs, top_doc_scores
def concat_docs_and_input(
self,
input: torch.LongTensor,
input_lengths: torch.LongTensor,
top_docs: List[List[Document]],
max_num_docs: int,
right_padded: bool = True,
) -> torch.LongTensor:
"""
Add document tokens to input tokens.
:param input:
original input tokens
:param input_lengths:
original input lengths
:param top_docs:
list of n_docs top documents for each input sequence
:param max_num_docs:
maximum number of docs out of all examples
:param right_padded:
whether the input is right padded.
:return (tokens, lengths):
return expanded token vectors & corresponding lengths
"""
max_len = self.expanded_input_truncate
expanded_input = []
for i, docs in enumerate(top_docs):
for rank in range(len(docs)):
input_i = input[i, :]
doc = docs[rank]
doc_tokens = self.dict.txt2vec(doc.get_passage_str())
if self.generation_model == 'bart' and self.n_extra_positions <= 0:
# move SOS to start of passage since we append question to end
input_i = input_i[1:]
sample_doc_tokens = torch.LongTensor(
[self.start_idx] + doc_tokens
).to(input)
else:
sample_doc_tokens = torch.LongTensor(doc_tokens).to(input)
if self.n_extra_positions <= 0:
# Prepend document to text
input_i_len = input_lengths[i]
new_input_length = min(
self.expanded_input_truncate - self.min_doc_token_length,
input_i_len,
)
if right_padded:
input_i = input_i[input_i_len - new_input_length : input_i_len]
else:
input_i = input_i[input_i.size(0) - new_input_length :]
doc_max_len = max(max_len - len(input_i), 0)
sample_doc_tokens = sample_doc_tokens[:doc_max_len]
expanded_input.append(
torch.cat([sample_doc_tokens, input_i])[:max_len]
)
else:
# Append Document to text
sample_doc_tokens = sample_doc_tokens[:max_len]
input_i_new = input_i.new(
self.n_positions - self.n_extra_positions
).fill_(self.pad_idx)
input_i_new[input_i_new.size(0) - input_i.size(0) :] = input_i
expanded_input.append(torch.cat([input_i_new, sample_doc_tokens]))
# append extra null inputs if there are diff # of docs per input
expanded_input += [
input[i, :].new(input[i, :].size()).fill_(self.pad_idx)
] * (max_num_docs - len(docs))
expanded_input, _ = padded_tensor(
expanded_input,
fp16friendly=self.fp16 and right_padded,
max_len=max_len if self.n_extra_positions <= 0 else None,
pad_idx=self.pad_idx,
left_padded=not right_padded,
)
expanded_input = expanded_input.to(input.device)
return expanded_input # type: ignore
def output(self, tensor: torch.Tensor) -> torch.Tensor:
"""
RAG "output" is already scaled in RagModel.decoder.
"""
return tensor
def reorder_encoder_states(
self,
encoder_states: Tuple[torch.Tensor, ...],
indices: Union[List[int], torch.LongTensor],
) -> Tuple[torch.Tensor, ...]:
"""
Reorder the encoder states.
Each RAG Model type prepares encoder states for generation differently.
"""
if not torch.is_tensor(indices):
indices = torch.LongTensor(indices).to(
encoder_states[0].device
) # type: ignore
return self._rag_model_interface.reorder_encoder_states(encoder_states, indices)
def reorder_decoder_incremental_state(
self,
incremental_state: Dict[str, Any],
inds: Union[List[int], torch.LongTensor],
) -> Optional[Dict[int, dict]]:
"""
TODO: Determine how to do this
"""
return self._rag_model_interface.reorder_decoder_incremental_state(
incremental_state, inds, self.seq2seq_decoder
)
def decode_forced(
self, encoder_states: Tuple[torch.Tensor, ...], ys: torch.LongTensor
) -> Tuple[torch.Tensor, torch.LongTensor]:
"""
Decode with a fixed, true sequence, computing loss.
Override TGM.decode_forced to both:
1) handle BART eos/bos issues, and
2) appropriately get forced decoder input.
:param encoder_states:
encoder output states
:param ys:
teacher forced label
:return logits, preds:
logits: output token distribution (as logits, not probs)
preds: tokens corresponding with max probs according to output distribution.
"""
bsz = ys.size(0)
seqlen = ys.size(1)
inputs = ys.narrow(1, 0, seqlen - 1)
if (ys[:, 0] == self.START_IDX).any() and self.generation_model != 'bart':
raise AssertionError(
"The Beginning of Sentence token is automatically added to the "
"label in decode_forced, but you included it in the label. This means "
"your model will have a double BOS token, which is probably not what "
"you intended."
)
doc_scores = encoder_states[-1]
inputs = self._rag_model_interface.get_initial_forced_decoder_input(
bsz,
inputs,
n_docs=doc_scores.size(1) if doc_scores is not None else None,
start_idx=self.START_IDX,
end_idx=self.END_IDX,
input_turns_cnt=encoder_states[2],
)
latent, _ = self.decoder(inputs, encoder_states)
logits = self.output(latent)
_, preds = logits.max(dim=-1)
return logits, preds # type: ignore
class RagEncoder(TransformerEncoder):
"""
Subclass TransformerEncoder to use additional positions if desired.
"""
def __init__(
self,
opt: Opt,
dictionary: DictionaryAgent,
embedding: Optional[torch.nn.Embedding] = None,
padding_idx: int = 0,
):
"""
RagEncoder initialization.
The RAG seq2seq encoder is just a regular TransformerEncoder, optionally extended with n_extra_positions additional positions for appended document tokens.
"""
n_init_positions = get_n_positions_from_options(opt) + opt['n_extra_positions']
super().__init__(
opt=opt,
vocabulary_size=len(dictionary),
embedding=embedding,
padding_idx=padding_idx,
reduction_type='none',
n_positions=n_init_positions,
)
class RagDecoder(TransformerDecoder):
"""
RagDecoder is a subclass of TransformerDecoder.
No further modifications necessary.
"""
pass
class T5RagModel(RagModel):
"""
T5 For RAG.
"""
def __init__(self, opt, dictionary, retriever_shared=None):
opt['t5'] = build_t5(opt)
if opt['t5_model_parallel']:
opt['t5'].parallelize()
else:
opt['t5'].deparallelize()
super().__init__(opt, dictionary, retriever_shared)
self.embedding_size = opt['t5'].model_dim
self.t5 = opt.pop('t5', None)
@classmethod
def build_encoder(
cls,
opt: Opt,
*args,
dictionary: Optional[DictionaryAgent] = None,
embedding: Optional[torch.nn.Embedding] = None,
encoder_class: Optional[Type] = None,
**kwargs,
):
return RagModel.build_encoder(
opt,
encoder=opt['t5'].get_encoder(),
encoder_class=ParlaiT5Encoder,
**kwargs,
)
@classmethod
def build_decoder(
cls,
opt: Opt,
*args,
embedding: Optional[torch.nn.Embedding] = None,
n_positions: Optional[int] = None,
decoder_class: Optional[Type] = None,
**kwargs,
):
return RagModel.build_decoder(
opt,
decoder=opt['t5'].get_decoder(),
decoder_class=ParlaiT5Decoder,
**kwargs,
)
def reorder_decoder_incremental_state(
self, incremental_state: Dict[int, dict], inds: torch.Tensor
) -> Optional[Dict[int, dict]]:
return None
@set_device
def decoder_output(self, latent: torch.Tensor):
tensor = latent * (self.t5.model_dim ** -0.5)
logits = self.t5.lm_head(tensor)
return logits
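# A minimal sketch (an assumption, not the actual ParlAI implementation) of the
# RAG-Token style marginalization referred to in RagModel.decoder: per-document token
# log-probs of shape [bsz, n_docs, seqlen, vocab] are combined with document log-scores
# of shape [bsz, n_docs] by a log-sum-exp over the document dimension.
def _example_rag_token_marginalize(
    token_logprobs: torch.Tensor, doc_logprobs: torch.Tensor
) -> torch.Tensor:
    """Illustrative only: return [bsz, seqlen, vocab] log-probs marginalized over docs."""
    # broadcast the doc scores over the sequence and vocab dimensions,
    # then reduce over the n_docs dimension in log space
    return torch.logsumexp(
        token_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1), dim=1
    )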
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from sqlalchemy.orm import exc
from neutron.common import constants as q_const
from neutron.common import ipv6_utils as ipv6
from neutron.common import utils
from neutron.db import allowedaddresspairs_db as addr_pair
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix',
'egress': 'dest_ip_prefix'}
DHCP_RULE_PORT = {4: (67, 68, q_const.IPv4), 6: (547, 546, q_const.IPv6)}
class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin):
"""Mixin class to add agent-based security group implementation."""
def get_port_from_device(self, device):
"""Get port dict from device name on an agent.
Subclass must provide this method or get_ports_from_devices.
:param device: device name which identifies a port on the agent side.
What is specified in "device" depends on a plugin agent implementation.
For example, it is a port ID in OVS agent and netdev name in Linux
Bridge agent.
:return: port dict returned by DB plugin get_port(). In addition,
it must contain the following fields in the port dict returned.
- device
- security_groups
- security_group_rules,
- security_group_source_groups
- fixed_ips
"""
raise NotImplementedError(_("%s must implement get_port_from_device "
"or get_ports_from_devices.")
% self.__class__.__name__)
def get_ports_from_devices(self, devices):
"""Bulk method of get_port_from_device.
Subclasses may override this to provide better performance for DB
queries, backend calls, etc.
"""
return [self.get_port_from_device(device) for device in devices]
def create_security_group_rule(self, context, security_group_rule):
rule = super(SecurityGroupServerRpcMixin,
self).create_security_group_rule(context,
security_group_rule)
sgids = [rule['security_group_id']]
self.notifier.security_groups_rule_updated(context, sgids)
return rule
def create_security_group_rule_bulk(self, context, security_group_rules):
rules = super(SecurityGroupServerRpcMixin,
self).create_security_group_rule_bulk_native(
context, security_group_rules)
sgids = set([r['security_group_id'] for r in rules])
self.notifier.security_groups_rule_updated(context, list(sgids))
return rules
def delete_security_group_rule(self, context, sgrid):
rule = self.get_security_group_rule(context, sgrid)
super(SecurityGroupServerRpcMixin,
self).delete_security_group_rule(context, sgrid)
self.notifier.security_groups_rule_updated(context,
[rule['security_group_id']])
def update_security_group_on_port(self, context, id, port,
original_port, updated_port):
"""Update security groups on port.
This method returns a flag which indicates request notification
is required and does not perform notification itself.
It is because another changes for the port may require notification.
"""
need_notify = False
port_updates = port['port']
if (ext_sg.SECURITYGROUPS in port_updates and
not utils.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS),
port_updates[ext_sg.SECURITYGROUPS])):
# delete the port bindings and recreate them with the new rules
port_updates[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._delete_port_security_group_bindings(context, id)
self._process_port_create_security_group(
context,
updated_port,
port_updates[ext_sg.SECURITYGROUPS])
need_notify = True
else:
updated_port[ext_sg.SECURITYGROUPS] = (
original_port[ext_sg.SECURITYGROUPS])
return need_notify
def is_security_group_member_updated(self, context,
original_port, updated_port):
"""Check security group member updated or not.
This method returns a flag which indicates request notification
is required and does not perform notification itself.
It is because another changes for the port may require notification.
"""
need_notify = False
if (original_port['fixed_ips'] != updated_port['fixed_ips'] or
original_port['mac_address'] != updated_port['mac_address'] or
not utils.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS),
updated_port.get(ext_sg.SECURITYGROUPS))):
need_notify = True
return need_notify
def notify_security_groups_member_updated_bulk(self, context, ports):
"""Notify update event of security group members for ports.
The agent sets up iptables rules to allow ingress packets from the
DHCP server (as part of the provider rules), so we need to notify
the plugin agent of an update to the DHCP server IP address.
security_groups_provider_updated() only signals that an event
occurred; the plugin agent then fetches the updated provider rules
via a separate RPC call (security_group_rules_for_devices).
"""
security_groups_provider_updated = False
sec_groups = set()
for port in ports:
if port['device_owner'] == q_const.DEVICE_OWNER_DHCP:
security_groups_provider_updated = True
# For IPv6, provider rules need to be updated in case a router
# interface is created or updated after the VM port is created.
elif port['device_owner'] == q_const.DEVICE_OWNER_ROUTER_INTF:
if any(netaddr.IPAddress(fixed_ip['ip_address']).version == 6
for fixed_ip in port['fixed_ips']):
security_groups_provider_updated = True
else:
sec_groups |= set(port.get(ext_sg.SECURITYGROUPS))
if security_groups_provider_updated:
self.notifier.security_groups_provider_updated(context)
if sec_groups:
self.notifier.security_groups_member_updated(
context, list(sec_groups))
def notify_security_groups_member_updated(self, context, port):
self.notify_security_groups_member_updated_bulk(context, [port])
def security_group_info_for_ports(self, context, ports):
sg_info = {'devices': ports,
'security_groups': {},
'sg_member_ips': {}}
rules_in_db = self._select_rules_for_ports(context, ports)
remote_security_group_info = {}
for (port_id, rule_in_db) in rules_in_db:
remote_gid = rule_in_db.get('remote_group_id')
security_group_id = rule_in_db.get('security_group_id')
ethertype = rule_in_db['ethertype']
if ('security_group_source_groups'
not in sg_info['devices'][port_id]):
sg_info['devices'][port_id][
'security_group_source_groups'] = []
if remote_gid:
if (remote_gid
not in sg_info['devices'][port_id][
'security_group_source_groups']):
sg_info['devices'][port_id][
'security_group_source_groups'].append(remote_gid)
if remote_gid not in remote_security_group_info:
remote_security_group_info[remote_gid] = {}
if ethertype not in remote_security_group_info[remote_gid]:
# this set will be serialized into a list by rpc code
remote_security_group_info[remote_gid][ethertype] = set()
direction = rule_in_db['direction']
rule_dict = {
'direction': direction,
'ethertype': ethertype}
for key in ('protocol', 'port_range_min', 'port_range_max',
'remote_ip_prefix', 'remote_group_id'):
if rule_in_db.get(key):
if key == 'remote_ip_prefix':
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
rule_dict[direction_ip_prefix] = rule_in_db[key]
continue
rule_dict[key] = rule_in_db[key]
if security_group_id not in sg_info['security_groups']:
sg_info['security_groups'][security_group_id] = []
if rule_dict not in sg_info['security_groups'][security_group_id]:
sg_info['security_groups'][security_group_id].append(
rule_dict)
# Update the security groups info if they don't have any rules
sg_ids = self._select_sg_ids_for_ports(context, ports)
for (sg_id, ) in sg_ids:
if sg_id not in sg_info['security_groups']:
sg_info['security_groups'][sg_id] = []
sg_info['sg_member_ips'] = remote_security_group_info
# the provider rules do not belong to any security group, so these
# rules still reside in sg_info['devices'][port_id]
self._apply_provider_rule(context, sg_info['devices'])
return self._get_security_group_member_ips(context, sg_info)
def _get_security_group_member_ips(self, context, sg_info):
ips = self._select_ips_for_remote_group(
context, sg_info['sg_member_ips'].keys())
for sg_id, member_ips in ips.items():
for ip in member_ips:
ethertype = 'IPv%d' % netaddr.IPNetwork(ip).version
if ethertype in sg_info['sg_member_ips'][sg_id]:
sg_info['sg_member_ips'][sg_id][ethertype].add(ip)
return sg_info
def _select_sg_ids_for_ports(self, context, ports):
if not ports:
return []
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
query = context.session.query(sg_binding_sgid)
query = query.filter(sg_binding_port.in_(ports.keys()))
return query.all()
def _select_rules_for_ports(self, context, ports):
if not ports:
return []
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
sgr_sgid = sg_db.SecurityGroupRule.security_group_id
query = context.session.query(sg_binding_port,
sg_db.SecurityGroupRule)
query = query.join(sg_db.SecurityGroupRule,
sgr_sgid == sg_binding_sgid)
query = query.filter(sg_binding_port.in_(ports.keys()))
return query.all()
def _select_ips_for_remote_group(self, context, remote_group_ids):
ips_by_group = {}
if not remote_group_ids:
return ips_by_group
for remote_group_id in remote_group_ids:
ips_by_group[remote_group_id] = set()
ip_port = models_v2.IPAllocation.port_id
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id
# Join the security group binding table directly to the IP allocation
# table instead of via the Port table, to skip an unnecessary intermediary
query = context.session.query(sg_binding_sgid,
models_v2.IPAllocation.ip_address,
addr_pair.AllowedAddressPair.ip_address)
query = query.join(models_v2.IPAllocation,
ip_port == sg_binding_port)
# Outerjoin because address pairs may be null and we still want the
# IP for the port.
query = query.outerjoin(
addr_pair.AllowedAddressPair,
sg_binding_port == addr_pair.AllowedAddressPair.port_id)
query = query.filter(sg_binding_sgid.in_(remote_group_ids))
# Each allowed address pair IP record for a port beyond the 1st
# will have a duplicate regular IP in the query response since
# the relationship is 1-to-many. Dedup with a set
for security_group_id, ip_address, allowed_addr_ip in query:
ips_by_group[security_group_id].add(ip_address)
if allowed_addr_ip:
ips_by_group[security_group_id].add(allowed_addr_ip)
return ips_by_group
def _select_remote_group_ids(self, ports):
remote_group_ids = []
for port in ports.values():
for rule in port.get('security_group_rules'):
remote_group_id = rule.get('remote_group_id')
if remote_group_id:
remote_group_ids.append(remote_group_id)
return remote_group_ids
def _select_network_ids(self, ports):
return set((port['network_id'] for port in ports.values()))
def _select_dhcp_ips_for_network_ids(self, context, network_ids):
if not network_ids:
return {}
query = context.session.query(models_v2.Port.mac_address,
models_v2.Port.network_id,
models_v2.IPAllocation.ip_address)
query = query.join(models_v2.IPAllocation)
query = query.filter(models_v2.Port.network_id.in_(network_ids))
owner = q_const.DEVICE_OWNER_DHCP
query = query.filter(models_v2.Port.device_owner == owner)
ips = {}
for network_id in network_ids:
ips[network_id] = []
for mac_address, network_id, ip in query:
if (netaddr.IPAddress(ip).version == 6
and not netaddr.IPAddress(ip).is_link_local()):
ip = str(ipv6.get_ipv6_addr_by_EUI64(q_const.IPV6_LLA_PREFIX,
mac_address))
if ip not in ips[network_id]:
ips[network_id].append(ip)
return ips
def _select_ra_ips_for_network_ids(self, context, network_ids):
"""Select IP addresses to allow sending router advertisement from.
If OpenStack dnsmasq sends the RA, get the link-local address of the
gateway and allow RAs from that address.
The gateway port's link-local address can only be obtained when the
router is created before the VM instance is booted and the subnet is
attached to the router.
If OpenStack doesn't send the RA, allow RAs from the gateway IP.
Currently, the gateway IP needs to be link-local in order to send
RAs to the VM.
"""
if not network_ids:
return {}
ips = {}
for network_id in network_ids:
ips[network_id] = set([])
query = context.session.query(models_v2.Subnet)
subnets = query.filter(models_v2.Subnet.network_id.in_(network_ids))
for subnet in subnets:
gateway_ip = subnet['gateway_ip']
if subnet['ip_version'] != 6 or not gateway_ip:
continue
if not netaddr.IPAddress(gateway_ip).is_link_local():
if subnet['ipv6_ra_mode']:
gateway_ip = self._get_lla_gateway_ip_for_subnet(context,
subnet)
else:
# TODO(xuhanp):Figure out how to allow gateway IP from
# existing device to be global address and figure out the
# link local address by other method.
continue
if gateway_ip:
ips[subnet['network_id']].add(gateway_ip)
return ips
def _get_lla_gateway_ip_for_subnet(self, context, subnet):
query = context.session.query(models_v2.Port.mac_address)
query = query.join(models_v2.IPAllocation)
query = query.filter(
models_v2.IPAllocation.subnet_id == subnet['id'])
query = query.filter(
models_v2.IPAllocation.ip_address == subnet['gateway_ip'])
query = query.filter(
models_v2.Port.device_owner.in_(q_const.ROUTER_INTERFACE_OWNERS))
try:
mac_address = query.one()[0]
except (exc.NoResultFound, exc.MultipleResultsFound):
LOG.warn(_LW('No valid gateway port on subnet %s is '
'found for IPv6 RA'), subnet['id'])
return
lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
q_const.IPV6_LLA_PREFIX,
mac_address))
return lla_ip
def _convert_remote_group_id_to_ip_prefix(self, context, ports):
remote_group_ids = self._select_remote_group_ids(ports)
ips = self._select_ips_for_remote_group(context, remote_group_ids)
for port in ports.values():
updated_rule = []
for rule in port.get('security_group_rules'):
remote_group_id = rule.get('remote_group_id')
direction = rule.get('direction')
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
if not remote_group_id:
updated_rule.append(rule)
continue
port['security_group_source_groups'].append(remote_group_id)
base_rule = rule
for ip in ips[remote_group_id]:
if ip in port.get('fixed_ips', []):
continue
ip_rule = base_rule.copy()
version = netaddr.IPNetwork(ip).version
ethertype = 'IPv%s' % version
if base_rule['ethertype'] != ethertype:
continue
ip_rule[direction_ip_prefix] = str(
netaddr.IPNetwork(ip).cidr)
updated_rule.append(ip_rule)
port['security_group_rules'] = updated_rule
return ports
def _add_ingress_dhcp_rule(self, port, ips):
dhcp_ips = ips.get(port['network_id'])
for dhcp_ip in dhcp_ips:
source_port, dest_port, ethertype = DHCP_RULE_PORT[
netaddr.IPAddress(dhcp_ip).version]
dhcp_rule = {'direction': 'ingress',
'ethertype': ethertype,
'protocol': 'udp',
'port_range_min': dest_port,
'port_range_max': dest_port,
'source_port_range_min': source_port,
'source_port_range_max': source_port,
'source_ip_prefix': dhcp_ip}
port['security_group_rules'].append(dhcp_rule)
def _add_ingress_ra_rule(self, port, ips):
ra_ips = ips.get(port['network_id'])
for ra_ip in ra_ips:
ra_rule = {'direction': 'ingress',
'ethertype': q_const.IPv6,
'protocol': q_const.PROTO_NAME_ICMP_V6,
'source_ip_prefix': ra_ip,
'source_port_range_min': q_const.ICMPV6_TYPE_RA}
port['security_group_rules'].append(ra_rule)
def _apply_provider_rule(self, context, ports):
network_ids = self._select_network_ids(ports)
ips_dhcp = self._select_dhcp_ips_for_network_ids(context, network_ids)
ips_ra = self._select_ra_ips_for_network_ids(context, network_ids)
for port in ports.values():
self._add_ingress_ra_rule(port, ips_ra)
self._add_ingress_dhcp_rule(port, ips_dhcp)
def security_group_rules_for_ports(self, context, ports):
rules_in_db = self._select_rules_for_ports(context, ports)
for (port_id, rule_in_db) in rules_in_db:
port = ports[port_id]
direction = rule_in_db['direction']
rule_dict = {
'security_group_id': rule_in_db['security_group_id'],
'direction': direction,
'ethertype': rule_in_db['ethertype'],
}
for key in ('protocol', 'port_range_min', 'port_range_max',
'remote_ip_prefix', 'remote_group_id'):
if rule_in_db.get(key):
if key == 'remote_ip_prefix':
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
rule_dict[direction_ip_prefix] = rule_in_db[key]
continue
rule_dict[key] = rule_in_db[key]
port['security_group_rules'].append(rule_dict)
self._apply_provider_rule(context, ports)
return self._convert_remote_group_id_to_ip_prefix(context, ports)
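# A minimal illustration (not part of the original mixin) of the provider rule that
# _add_ingress_dhcp_rule builds for an IPv4 DHCP server at, e.g., 10.0.0.2: ingress UDP
# from server port 67 to client port 68, scoped to the DHCP server address.
_EXAMPLE_IPV4_DHCP_PROVIDER_RULE = {
    'direction': 'ingress',
    'ethertype': q_const.IPv4,
    'protocol': 'udp',
    'port_range_min': 68,
    'port_range_max': 68,
    'source_port_range_min': 67,
    'source_port_range_max': 67,
    'source_ip_prefix': '10.0.0.2',
}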
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AnomalyAlertingConfigurationLogicType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""cross metrics operator
should be specified when setting up multiple metric alerting configurations
"""
AND_ENUM = "AND"
OR_ENUM = "OR"
XOR = "XOR"
class AnomalyDetectionConfigurationLogicType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""condition operator
should be specified when combining multiple detection conditions
"""
AND_ENUM = "AND"
OR_ENUM = "OR"
class AnomalyDetectorDirection(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""detection direction
"""
BOTH = "Both"
DOWN = "Down"
UP = "Up"
class AnomalyScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Anomaly scope
"""
ALL = "All"
DIMENSION = "Dimension"
TOP_N = "TopN"
class AnomalyStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""anomaly status
only return for alerting anomaly result
"""
ACTIVE = "Active"
RESOLVED = "Resolved"
class AnomalyValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AUTO_DETECT = "AutoDetect"
ANOMALY = "Anomaly"
NOT_ANOMALY = "NotAnomaly"
class AuthenticationTypeEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""authentication type for corresponding data source
"""
BASIC = "Basic"
MANAGED_IDENTITY = "ManagedIdentity"
AZURE_SQL_CONNECTION_STRING = "AzureSQLConnectionString"
DATA_LAKE_GEN2_SHARED_KEY = "DataLakeGen2SharedKey"
SERVICE_PRINCIPAL = "ServicePrincipal"
SERVICE_PRINCIPAL_IN_KV = "ServicePrincipalInKV"
class ChangePointValue(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
AUTO_DETECT = "AutoDetect"
CHANGE_POINT = "ChangePoint"
NOT_CHANGE_POINT = "NotChangePoint"
class DataSourceCredentialType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of data source credential
"""
AZURE_SQL_CONNECTION_STRING = "AzureSQLConnectionString"
DATA_LAKE_GEN2_SHARED_KEY = "DataLakeGen2SharedKey"
SERVICE_PRINCIPAL = "ServicePrincipal"
SERVICE_PRINCIPAL_IN_KV = "ServicePrincipalInKV"
class DataSourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""data source type
"""
AZURE_APPLICATION_INSIGHTS = "AzureApplicationInsights"
AZURE_BLOB = "AzureBlob"
AZURE_COSMOS_DB = "AzureCosmosDB"
AZURE_DATA_EXPLORER = "AzureDataExplorer"
AZURE_DATA_LAKE_STORAGE_GEN2 = "AzureDataLakeStorageGen2"
AZURE_EVENT_HUBS = "AzureEventHubs"
AZURE_LOG_ANALYTICS = "AzureLogAnalytics"
AZURE_TABLE = "AzureTable"
INFLUX_DB = "InfluxDB"
MONGO_DB = "MongoDB"
MY_SQL = "MySql"
POSTGRE_SQL = "PostgreSql"
SQL_SERVER = "SqlServer"
class Direction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""value filter direction
"""
BOTH = "Both"
DOWN = "Down"
UP = "Up"
class EntityStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""data feed status
"""
ACTIVE = "Active"
PAUSED = "Paused"
class FeedbackQueryTimeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""time mode to filter feedback
"""
METRIC_TIMESTAMP = "MetricTimestamp"
FEEDBACK_CREATED_TIME = "FeedbackCreatedTime"
class FeedbackType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""feedback type
"""
ANOMALY = "Anomaly"
CHANGE_POINT = "ChangePoint"
PERIOD = "Period"
COMMENT = "Comment"
class FillMissingPointType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""the type of fill missing point for anomaly detection
"""
SMART_FILLING = "SmartFilling"
PREVIOUS_VALUE = "PreviousValue"
CUSTOM_VALUE = "CustomValue"
NO_FILLING = "NoFilling"
class Granularity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""granularity of the time series
"""
YEARLY = "Yearly"
MONTHLY = "Monthly"
WEEKLY = "Weekly"
DAILY = "Daily"
HOURLY = "Hourly"
MINUTELY = "Minutely"
CUSTOM = "Custom"
class HookType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""hook type
"""
WEBHOOK = "Webhook"
EMAIL = "Email"
class IncidentStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""incident status
only return for alerting incident result
"""
ACTIVE = "Active"
RESOLVED = "Resolved"
class IngestionStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""latest ingestion task status for this data slice.
"""
NOT_STARTED = "NotStarted"
SCHEDULED = "Scheduled"
RUNNING = "Running"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
NO_DATA = "NoData"
ERROR = "Error"
PAUSED = "Paused"
class NeedRollupEnum(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""mark if the data feed need rollup
"""
NO_ROLLUP = "NoRollup"
NEED_ROLLUP = "NeedRollup"
ALREADY_ROLLUP = "AlreadyRollup"
class PeriodType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""the type of setting period
"""
AUTO_DETECT = "AutoDetect"
ASSIGN_VALUE = "AssignValue"
class RollUpMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""roll up method
"""
NONE = "None"
SUM = "Sum"
MAX = "Max"
MIN = "Min"
AVG = "Avg"
COUNT = "Count"
class Severity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""min alert severity
"""
LOW = "Low"
MEDIUM = "Medium"
HIGH = "High"
class SnoozeScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""snooze scope
"""
METRIC = "Metric"
SERIES = "Series"
class TimeMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""time mode
"""
ANOMALY_TIME = "AnomalyTime"
CREATED_TIME = "CreatedTime"
MODIFIED_TIME = "ModifiedTime"
class ValueType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""data used to implement value filter
"""
VALUE = "Value"
MEAN = "Mean"
class ViewMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""data feed access mode, default is Private
"""
PRIVATE = "Private"
PUBLIC = "Public"
|
|
# -*- coding: utf-8 -*-
"""Module to provide the backend for the aws federation proxy service"""
from __future__ import print_function, absolute_import, unicode_literals, division
import json
import time
import logging
import requests
from six.moves.urllib.parse import quote_plus
from yamlreader import data_merge
from boto.sts import STSConnection
from .util import _get_item_from_module
def log_function_call(old_func):
"""Log Timings of function calls."""
def new_func(self, *args, **kwargs):
start = time.time()
try:
retval = old_func(self, *args, **kwargs)
except Exception as exc:
stop = time.time()
self.logger.debug(
"%s(%s, %s) raised Exception %s after %.3f seconds",
old_func.__name__, args, kwargs, exc, stop - start)
raise
stop = time.time()
self.logger.debug(
"%s(%s, %s) took %.3f seconds and returned %s",
old_func.__name__, args, kwargs, stop - start, retval)
return retval
return new_func
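# Usage sketch (the class below is hypothetical and not part of this module):
# any object that exposes a `self.logger` can decorate its methods with
# @log_function_call to get debug-level timing of calls and raised exceptions.
class _TimedExample(object):
    def __init__(self):
        self.logger = logging.getLogger(__name__)
    @log_function_call
    def add(self, a, b):
        return a + b
# e.g. _TimedExample().add(1, 2) logs roughly:
# "add((1, 2), {}) took 0.000 seconds and returned 3"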
class AWSError(Exception):
"""Exception class for throwing AWSError exceptions"""
pass
class ConfigurationError(Exception):
"""Exception class for throwing ConfigurationError exceptions"""
pass
class PermissionError(Exception):
"""Exception class for throwing PermissionError exceptions"""
pass
class AWSFederationProxy(object):
"""For a given user, fetch AWS accounts/roles and retrieve credentials"""
def __init__(self, user, config, account_config, logger=None):
default_config = {
'aws': {
'access_key': None,
'secret_key': None
},
'provider': {
'class': 'Provider'
}
}
self.logger = logger or logging.getLogger(__name__)
self.user = user
self.application_config = data_merge(default_config, config)
self.account_config = account_config
self.provider = None
self._setup_provider()
def _setup_provider(self):
"""Import and set up provider module from given config"""
try:
provider_config = self.application_config['provider']
provider_module_name = provider_config['module']
except KeyError:
message = "No module defined in 'provider' configuration."
raise ConfigurationError(message)
provider_class_name = self.application_config['provider']['class']
try:
provider_class = _get_item_from_module(provider_module_name,
provider_class_name)
except Exception as exc:
raise ConfigurationError(str(exc))
try:
self.provider = provider_class(
user=self.user,
config=self.application_config['provider'],
logger=self.logger)
except Exception as error:
message = 'Could not instantiate provider "{class_name}": {error}'
raise ConfigurationError(message.format(
class_name=provider_class_name, error=error))
@log_function_call
def get_account_and_role_dict(self):
"""Get all accounts and roles for the user"""
return self.provider.get_accounts_and_roles()
def check_user_permissions(self, account_alias, role):
"""Check if a user has permissions to access a role.
Raise exception if access is not granted."""
accounts_and_roles = self.get_account_and_role_dict()
permitted_roles = accounts_and_roles.get(account_alias, [])
for permitted_role, reason in permitted_roles:
if role == permitted_role:
self.logger.info(
"Giving user '%s' access to account '%s' role '%s': %s",
self.user, account_alias, role, reason)
return
message = ("User '{user}' may not access role '{role}' in "
"account '{account}'")
message = message.format(user=self.user,
role=role,
account=account_alias)
self.logger.warn(message)
raise PermissionError(message)
@log_function_call
def get_aws_credentials(self, account_alias, role):
"""Get temporary credentials from AWS"""
self.check_user_permissions(account_alias, role)
try:
account_id = self.account_config[account_alias]['id']
except Exception:
message = "No Configuration for account '{account}'."
raise ConfigurationError(message.format(account=account_alias))
arn = "arn:aws:iam::{account_id}:role/{role}".format(
account_id=account_id, role=role)
key_id = self.application_config['aws']['access_key']
secret_key = self.application_config['aws']['secret_key']
try:
sts_connection = STSConnection(
aws_access_key_id=key_id,
aws_secret_access_key=secret_key)
assumed_role_object = sts_connection.assume_role(
role_arn=arn,
role_session_name=self.user)
except Exception as error:
if getattr(error, 'status', None) == 403:
raise PermissionError(str(error))
self.logger.exception("AWS STS failed with: {exc_vars}".format(
exc_vars=vars(error)))
raise AWSError(str(error))
return assumed_role_object.credentials
@staticmethod
def _generate_urlencoded_json_credentials(credentials):
"""Return urlencoded json-string with given credentials"""
json_temp_credentials = (
'{{'
'"sessionId":"{access_key}",'
'"sessionKey":"{secret_key}",'
'"sessionToken":"{session_token}"'
'}}'
)
try:
json_temp_credentials = json_temp_credentials.format(
**credentials.to_dict())
except KeyError as error:
raise Exception('Missing Key {0} in credentials'.format(error))
return quote_plus(json_temp_credentials)
@classmethod
def _get_signin_token(cls, credentials):
"""Return signin token for given credentials"""
request_url = (
"https://signin.aws.amazon.com/federation"
"?Action=getSigninToken"
"&SessionDuration=43200"
"&Session=" +
cls._generate_urlencoded_json_credentials(credentials))
reply = requests.get(request_url)
if reply.status_code != 200:
message = 'Could not get session from AWS: Error {0} {1}'
raise AWSError(message.format(reply.status_code, reply.reason))
# reply.text is a JSON document with a single element named SigninToken
return json.loads(reply.text)["SigninToken"]
@log_function_call
def _construct_console_url(self, signin_token, callback_url):
"""Construct and return string with URL to aws console"""
# Create URL that will let users sign in to the console using the
# sign-in token. This URL must be used within 15 minutes of when the
# sign-in token was issued.
request_url_template = (
"https://signin.aws.amazon.com/federation"
"?Action=login"
"&Issuer={callbackurl}"
"&Destination={destination}"
"&SigninToken={signin_token}")
return request_url_template.format(
callbackurl=quote_plus(callback_url),
destination=quote_plus("https://console.aws.amazon.com/"),
signin_token=signin_token)
def get_console_url(self, credentials, callback_url):
"""Return Console URL for given credentials"""
token = self._get_signin_token(credentials)
return self._construct_console_url(token, callback_url)
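# Usage sketch (every name below is hypothetical, not part of this module): wire
# up an AWSFederationProxy, exchange an account/role pair for temporary
# credentials, and build a console sign-in URL. A real deployment would load
# `config` and `account_config` from its settings files.
def _example_get_console_url():
    config = {
        'provider': {'module': 'my_project.providers', 'class': 'Provider'},
        'aws': {'access_key': 'hypothetical-key', 'secret_key': 'hypothetical-secret'},
    }
    account_config = {'dev-account': {'id': '123456789012'}}
    proxy = AWSFederationProxy(user='jdoe', config=config,
                               account_config=account_config)
    credentials = proxy.get_aws_credentials('dev-account', 'developer-role')
    return proxy.get_console_url(credentials, 'https://proxy.example.com/')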
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util, model_helper, brew, build
@unittest.skipIf(build.CAFFE2_NO_OPERATOR_SCHEMA,
'Built with CAFFE2_NO_OPERATOR_SCHEMA')
class TestShapeInference(test_util.TestCase):
def testShapeInferenceSimpleFC(self):
m = model_helper.ModelHelper(name="test_model")
brew.fc(m, "data", "fc1", dim_in=96, dim_out=32)
brew.fc(m, "fc1", "fc2", dim_in=32, dim_out=55)
for b in [0, 64]:
(shapes, types) = workspace.InferShapesAndTypes(
[m.param_init_net, m.net],
{'data': [b, 96]}
)
self.assertEquals(shapes['data'], [b, 96])
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [b, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [b, 55])
def testFCAxis2(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FC(["x", "w", "b"], ["y"], axis=2)
workspace.FeedBlob("x", np.random.rand(4, 20, 36).astype(np.float32))
workspace.FeedBlob("w", np.random.rand(36, 36).astype(np.float32))
workspace.FeedBlob("b", np.random.rand(36,).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testFCTransposed(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FCTransposed(["x", "wt", "b"], ["y"])
workspace.FeedBlob("x", np.random.rand(20, 36).astype(np.float32))
workspace.FeedBlob("wt", np.random.rand(36, 48).astype(np.float32))
workspace.FeedBlob("b", np.random.rand(48,).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceSlice(self):
model = model_helper.ModelHelper(name="test_model")
model.net.Slice(["x"], ["y"], starts=[0, 0, 0, 0], ends=[-1, -1, -3, -1])
workspace.FeedBlob("x", np.random.rand(64, 1, 255, 384).astype(np.float32))
slice_starts = np.array([0, 0, 0, 0]).astype(np.int32)
slice_ends = np.array([-1, -1, -3, -1]).astype(np.int32)
slice_starts = model.net.GivenTensorIntFill(
[], shape=[4], values=slice_starts)
slice_ends = model.net.GivenTensorIntFill(
[], shape=[4], values=slice_ends)
model.net.Slice(["x2", slice_starts, slice_ends], ["y2"])
workspace.FeedBlob("x2", np.random.rand(64, 1, 255, 384).astype(np.float32))
self.InferTensorRunAndCompare(model, ["y2"])
def testShapeInferenceDistances(self):
model = model_helper.ModelHelper(name="test_model")
model.net.L1Distance(["x1", "y1"], "dl1_D1")
model.net.SquaredL2Distance(["x1", "y1"], "dl2_D1")
model.net.CosineSimilarity(["x1", "y1"], "dcos_D1")
model.net.DotProduct(["x1", "y1"], "ddot_D1")
model.net.DotProductWithPadding(["x1", "y1"], "ddotpad_D1")
model.net.L1Distance(["x2", "y2"], "dl1_D2")
model.net.SquaredL2Distance(["x2", "y2"], "dl2_D2")
model.net.CosineSimilarity(["x2", "y2"], "dcos_D2")
model.net.DotProduct(["x2", "y2"], "ddot_D2")
model.net.DotProductWithPadding(["x2", "z2"], "ddotpad_D2")
workspace.FeedBlob("x1", np.random.rand(10).astype(np.float32))
workspace.FeedBlob("y1", np.random.rand(10).astype(np.float32))
workspace.FeedBlob("x2", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("y2", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("z2", np.random.rand(10, 4).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceReduceBackFrontX(self):
model = model_helper.ModelHelper(name="test_model")
model.net.ReduceBackSum(["x"], ["x_back_sum"])
model.net.ReduceBackMean(["x"], ["x_back_mean"])
model.net.ReduceBackMax(["x"], ["x_back_max"])
model.net.ReduceFrontSum(["x"], ["x_front_sum"])
model.net.ReduceFrontMean(["x"], ["x_front_mean"])
model.net.ReduceFrontMax(["x"], ["x_front_max"])
workspace.FeedBlob("x", np.random.rand(10, 12, 18).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testGather(self):
model = model_helper.ModelHelper(name="test_model")
model.net.Gather(["X", "idx"], "Y")
workspace.FeedBlob("X", np.random.rand(100, 4, 5).astype(np.float32))
workspace.FeedBlob("idx", np.array([[3, 18], [99, 4], [2, 5]]).astype(np.int32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceConvNet(self):
model = model_helper.ModelHelper(name="convtest")
model.NHWC2NCHW("data", "data_nchw")
brew.conv(model, "data_nchw", 'conv1', 3, 64,
weight_init=("MSRAFill", {}), kernel=7,
stride=2, pad=3, no_bias=0)
brew.spatial_bn(model, 'conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3, is_test=False)
brew.relu(model, 'conv1_spatbn_relu', 'conv1_spatbn_relu')
brew.max_pool(model, 'conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
brew.fc(model, 'pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
brew.dropout(model, 'fc', 'fc_drop', is_test=False)
model.Sigmoid('fc_drop', 'fc_sigm')
brew.softmax(model, 'fc_sigm', 'softmax')
model.LabelCrossEntropy(['softmax', 'label'], 'xent')
loss = model.AveragedLoss('xent', 'loss')
model.AddGradientOperators([loss])
LR = model.param_init_net.ConstantFill(
[], 'LR', shape=[1], value=0.1
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
)
workspace.FeedBlob(
"data",
np.random.rand(16, 227, 227, 3).astype(np.float32),
)
workspace.FeedBlob(
"label",
(100 * np.random.rand(16)).astype(np.int32),
)
# Then do the automatic comparison test: run the net once to
# initialize everything
self.InferTensorRunAndCompare(model)
def testShapeInferenceTranspose(self):
model = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"tensor",
np.random.rand(4, 2, 3, 3, 5).astype(np.float32)
)
# Testing with axes undefined
brew.transpose(
model,
["tensor"],
"transpose",
)
self.InferTensorRunAndCompare(model)
# Testing with axes defined
brew.transpose(
model,
["tensor"],
"transpose",
axes=np.random.permutation(5)
)
return self.InferTensorRunAndCompare(model)
def testShapeInferencePad(self):
model = model_helper.ModelHelper(name="padtest")
model.PadImage("data", 'padded', pad_t=100, pad_l=37, pad_b=28,
pad_r=20, mode="constant", order="NCHW")
workspace.FeedBlob(
"data",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceTwoClass(self):
model = model_helper.ModelHelper(name="twoclass")
model.MakeTwoClass("v", "v2")
workspace.FeedBlob("v", np.random.rand(32).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferencePadZero(self):
model = model_helper.ModelHelper(name="padtest")
model.PadImage("data", 'padded', pad=0, mode="constant",
order="NCHW")
workspace.FeedBlob(
"data",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceMatMul(self):
model = model_helper.ModelHelper(name="test_model")
model.MatMul(["x", "y"], "MatMul")
workspace.FeedBlob("x", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("y", np.random.rand(5, 10).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceSoftmaxWithLoss(self):
model = model_helper.ModelHelper(name="test_model")
model.SoftmaxWithLoss(
["logits", "labels"],
["softmax", "loss"],
)
# 2D Shape of [batch_size, num_classes]
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
# Shape of size batch_size with all values [0, num_classes)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=(4, 1)).astype(np.int32),
)
self.InferTensorRunAndCompare(model)
# Testing with 1D labels arg
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=4).astype(np.int32),
)
self.InferTensorRunAndCompare(model)
# Testing with weight_tensor
model.SoftmaxWithLoss(
["logits", "labels", "weight_tensor"],
["softmax", "loss"],
)
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=4).astype(np.int32),
)
workspace.FeedBlob(
"weight_tensor",
np.random.rand(4).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test spatial model
model = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"img",
np.random.rand(32, 19, 33, 28).astype(np.float32)
)
workspace.FeedBlob(
"img_labels",
(np.random.rand(32, 33, 28) * 19).astype(np.int32)
)
model.SpatialSoftmaxWithLoss(
["img", "img_labels"],
["softmax_img", "loss"],
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceIm2Col(self):
# Test with NCHW
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
order="NCHW")
workspace.FeedBlob(
"X",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test with NHWC
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
order="NHWC")
workspace.FeedBlob(
"X",
np.random.rand(16, 228, 228, 3).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test with different width and height
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel_h=8, kernel_w=4,
dilation=2, stride=2)
workspace.FeedBlob(
"X",
np.random.rand(16, 3, 228, 114).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceTile(self):
m = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"tensor",
np.random.rand(4, 2, 3, 3, 5).astype(np.float32)
)
# Testing with axes undefined
for i in range(0, 4):
m.net.Tile(
"tensor", "tiled_tensor_{}".format(i), tiles=5, axis=i)
self.InferTensorRunAndCompare(m)
def testShapeInferenceFlatten(self):
model = model_helper.ModelHelper(name="test_model")
model.FlattenToVec("X", "FlatVec")
model.FlattenToVec("empty", "EmptyFlatVec")
workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
self.InferTensorRunAndCompare(model)
# test Flatten with default axis (=1)
model = model_helper.ModelHelper(name="test_model")
model.Flatten("X", "Flat")
model.Flatten("empty", "EmptyFlat")
workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
self.InferTensorRunAndCompare(model)
# test Flatten with axis
model = model_helper.ModelHelper(name="test_model")
x = np.random.randn(17, 5, 13)
for axis in range(x.ndim + 1):
model.Flatten("x", "Flat", axis=axis)
workspace.FeedBlob("x", x)
self.InferTensorRunAndCompare(model)
empty = np.random.randn(0, 5, 13)
for axis in range(empty.ndim + 1):
model.Flatten("empty", "Flat", axis=axis)
workspace.FeedBlob("empty", empty)
self.InferTensorRunAndCompare(model)
def testShapeInferenceReshape(self):
model = model_helper.ModelHelper(name="test_model")
model.Reshape("X", ["Reshaped", "Old_Shape"], shape=[8, 0, -1, 2])
workspace.FeedBlob("X", np.random.rand(4, 26, 32).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceUnique(self):
for n in [0, 1]:
model = model_helper.ModelHelper(name="test_model")
model.Unique("X", ["Y"])
model.Unique("X", ["Z", "remap"])
workspace.FeedBlob("X", np.random.rand(n).astype(np.int64))
self.InferTensorRunAndCompare(model)
def testLengthsSum(self):
model = model_helper.ModelHelper(name="test_model")
model.LengthsSum(["X", "length"], ["sum"])
workspace.FeedBlob("X", np.random.rand(6, 32).astype(np.float32))
workspace.FeedBlob("length", np.array([1, 2, 3], dtype=np.int32))
self.InferTensorRunAndCompare(model)
def testLengthsPad(self):
model = model_helper.ModelHelper(name="test_model")
model.LengthsPad(
["X", "length"],
["X_padded"],
target_length=10,
padding_value=-1.0,
)
workspace.FeedBlob("X", np.random.rand(6, 32).astype(np.float32))
workspace.FeedBlob("length", np.array([1, 2, 3], dtype=np.int32))
self.InferTensorRunAndCompare(model)
def testConcat(self):
net = core.Net("concat")
net.Concat(["A", "B"], ["C", "splits"], axis=1)
net.Concat(["C", "D"], ["E"], order="NCHW")
net.Concat(["E", "F"], ["G"], add_axis=1, order="NHWC")
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{
'A': [10, 12, 9, 10],
'B': [10, 9, 9, 10],
'D': [10, 2, 9, 10],
'F': [10, 23, 9, 10]
}
)
self.assertEqual(shapes['C'], [10, 21, 9, 10])
self.assertEqual(shapes['splits'], [2])
self.assertEqual(shapes['E'], [10, 23, 9, 10])
self.assertEqual(shapes['G'], [10, 23, 9, 2, 10])
def testConcatInt32(self):
net = core.Net("concat")
net.Concat(["A", "B"], ["C", "splits"], axis=1)
net.Concat(["C", "D"], ["E"], order="NCHW")
net.Concat(["E", "F"], ["G"], add_axis=1, order="NHWC")
(shapes, types) = workspace.InferShapesAndTypes(
[net],
blob_dimensions={
'A': [10, 12, 9, 10],
'B': [10, 9, 9, 10],
'D': [10, 2, 9, 10],
'F': [10, 23, 9, 10]
},
blob_types={
'A': core.DataType.INT32,
'B': core.DataType.INT32,
'D': core.DataType.INT32,
'F': core.DataType.INT32,
}
)
self.assertEqual(shapes['C'], [10, 21, 9, 10])
self.assertEqual(shapes['splits'], [2])
self.assertEqual(shapes['E'], [10, 23, 9, 10])
self.assertEqual(shapes['G'], [10, 23, 9, 2, 10])
self.assertEqual(types['C'], core.DataType.INT32)
self.assertEqual(types['splits'], core.DataType.INT32)
self.assertEqual(types['E'], core.DataType.INT32)
self.assertEqual(types['G'], core.DataType.INT32)
def testSqueeze(self):
net = core.Net("sq")
net.Squeeze(["data"], ["data_squeezed"], dims=[3, 1])
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{'data': [64, 1, 96, 1, 4]}
)
self.assertEqual(shapes['data_squeezed'], [64, 96, 4])
def testCast(self):
model = model_helper.ModelHelper(name="test_model")
types = [
('bool', np.bool, caffe2_pb2.TensorProto.BOOL),
#('byte', None, caffe2_pb2.TensorProto.BYTE),
('int8', np.int8, caffe2_pb2.TensorProto.INT8),
('uint8', np.uint8, caffe2_pb2.TensorProto.UINT8),
('int16', np.int16, caffe2_pb2.TensorProto.INT16),
('uint16', np.uint16, caffe2_pb2.TensorProto.UINT16),
#('float16', np.float16, caffe2_pb2.TensorProto.FLOAT16),
('int32', np.int32, caffe2_pb2.TensorProto.INT32),
('float', np.float32, caffe2_pb2.TensorProto.FLOAT),
('int64', np.int64, caffe2_pb2.TensorProto.INT64),
('double', np.float64, caffe2_pb2.TensorProto.DOUBLE),
#('string', None, caffe2_pb2.TensorProto.STRING),
]
for (xstr, xnp, _) in types:
xname = 'X%s' % xstr
workspace.FeedBlob(xname, np.random.rand(1).astype(xnp))
for (ystr, _, yc2) in types:
yname = 'Y%s_to_%s' % (xstr, ystr)
model.Cast(xname, yname, to=yc2)
self.InferTensorRunAndCompare(model)
def testShapeInferenceRoiPool(self):
for is_test in [True, False]:
model = model_helper.ModelHelper(name="test_model")
outputs = ['Y'] if is_test else ['Y', 'argmaxes']
model.net.RoIPool(
['X', 'R'], outputs, pooled_h=4, pooled_w=5, is_test=is_test)
workspace.FeedBlob(
"X",
np.random.rand(100, 3, 4, 5).astype(np.float32))
workspace.FeedBlob(
"R",
np.random.rand(2, 5).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferencePow(self):
model = model_helper.ModelHelper(name="powtest")
model.Pow("x", 'y', exponent=-1.0)
workspace.FeedBlob('x', np.random.rand(1, 2, 3, 4).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testInt8Conversion(self):
model = model_helper.ModelHelper(name="fp32_int8_conversion_test")
model.FloatToFused8BitRowwiseQuantized('x', 'x_8bit')
model.Fused8BitRowwiseQuantizedToFloat('x_8bit', 'x_recovered')
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float32))
self.InferTensorRunAndCompare(model)
x = workspace.FetchBlob('x')
x_recovered = workspace.FetchBlob('x_recovered')
# TODO: find a tighter bound
assert(np.allclose(x, x_recovered, atol=1e-2))
def testHalfInt8Conversion(self):
model = model_helper.ModelHelper(name="fp16_int8_conversion_test")
model.HalfFloatToFused8BitRowwiseQuantized('x', 'x_8bit')
model.Fused8BitRowwiseQuantizedToHalfFloat('x_8bit', 'x_recovered')
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float16))
self.InferTensorRunAndCompare(model)
x = workspace.FetchBlob('x')
x_recovered = workspace.FetchBlob('x_recovered')
# TODO: find a tighter bound
assert(np.allclose(x, x_recovered, atol=1e-2))
def testShapeOp(self):
model = model_helper.ModelHelper(name="shape_op_test")
model.Shape('x', 'y')
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float32))
self.InferTensorRunAndCompare(model)
def InferTensorRunAndCompare(self, model, expected_uninferred_blobs=None):
'''
Runs shape inference, then runs the model and checks that the
inferred shapes and types agree with the actual ones.
'expected_uninferred_blobs' is the list of blobs for which type and
shape cannot be inferred.
'''
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
)
# Create and run the nets
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
workspace.RunNet(model.Proto().name)
# ... and then check the inferred shapes and types against the actual ones
correct_shapes = {}
correct_types = {}
for b in workspace.Blobs():
arr = workspace.FetchBlob(b)
correct_shapes[b] = arr.shape
if type(arr) is np.ndarray:
if arr.dtype == np.dtype('float32'):
correct_types[b] = caffe2_pb2.TensorProto.FLOAT
elif arr.dtype == np.dtype('int32'):
correct_types[b] = caffe2_pb2.TensorProto.INT32
# BYTE
# STRING
elif arr.dtype == np.dtype('bool'):
correct_types[b] = caffe2_pb2.TensorProto.BOOL
elif arr.dtype == np.dtype('uint8'):
correct_types[b] = caffe2_pb2.TensorProto.UINT8
elif arr.dtype == np.dtype('int8'):
correct_types[b] = caffe2_pb2.TensorProto.INT8
elif arr.dtype == np.dtype('uint16'):
correct_types[b] = caffe2_pb2.TensorProto.UINT16
elif arr.dtype == np.dtype('int16'):
correct_types[b] = caffe2_pb2.TensorProto.INT16
elif arr.dtype == np.dtype('int64'):
correct_types[b] = caffe2_pb2.TensorProto.INT64
elif arr.dtype == np.dtype('float16'):
correct_types[b] = caffe2_pb2.TensorProto.FLOAT16
elif arr.dtype == np.dtype('float64'):
correct_types[b] = caffe2_pb2.TensorProto.DOUBLE
else:
correct_types[b] = "unknown {}".format(arr.dtype)
else:
correct_types[b] = str(type(arr))
if expected_uninferred_blobs is None:
expected_uninferred_blobs = []
for b in correct_shapes:
# skip blobs for which shape couldn't be inferred
if b in expected_uninferred_blobs:
continue
self.assertTrue(
np.array_equal(
np.array(shapes[b]).astype(np.int32),
np.array(correct_shapes[b]).astype(np.int32)
),
"Shape {} mismatch: {} vs. correct {}".format(
b, shapes[b], correct_shapes[b]
)
)
self.assertFalse(
b not in types and b in correct_types,
"Type for {} not defined".format(b),
)
self.assertEqual(
types[b],
correct_types[b],
"Type {} mismatch: {} vs. {}".format(
b, types[b], correct_types[b],
)
)
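# Standalone sketch (not one of the unit tests above): the bare
# InferShapesAndTypes() flow that InferTensorRunAndCompare() automates -- build a
# small FC model, ask for the inferred shapes, then run the nets and compare
# against the actual blob shape. Blob and model names are illustrative.
def _example_infer_fc_shapes(batch_size=8):
    m = model_helper.ModelHelper(name="shape_sketch")
    brew.fc(m, "data", "fc_out", dim_in=16, dim_out=4)
    shapes, types = workspace.InferShapesAndTypes(
        [m.param_init_net, m.net], {'data': [batch_size, 16]})
    workspace.FeedBlob("data", np.random.rand(batch_size, 16).astype(np.float32))
    workspace.RunNetOnce(m.param_init_net)
    workspace.RunNetOnce(m.net)
    actual = workspace.FetchBlob("fc_out").shape
    assert list(actual) == shapes["fc_out"] == [batch_size, 4]
    return shapes, types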
if __name__ == "__main__":
unittest.main()
|
|
""" Exmples of usage with tests.
Tests in this file represent examples taken from JSON-RPC specification.
http://www.jsonrpc.org/specification#examples
"""
import sys
import json
from ..manager import JSONRPCResponseManager
from ..jsonrpc2 import JSONRPC20Request, JSONRPC20BatchRequest
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
def isjsonequal(json1, json2):
return json.loads(json1) == json.loads(json2)
class TestJSONRPCExamples(unittest.TestCase):
def setUp(self):
self.dispatcher = {
"subtract": lambda a, b: a - b,
}
def test_rpc_call_with_positional_parameters(self):
req = '{"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": 1}' # noqa
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "result": 19, "id": 1}'
))
req = '{"jsonrpc": "2.0", "method": "subtract", "params": [23, 42], "id": 2}' # noqa
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "result": -19, "id": 2}'
))
def test_rpc_call_with_named_parameters(self):
def subtract(minuend=None, subtrahend=None):
return minuend - subtrahend
dispatcher = {
"subtract": subtract,
"sum": lambda *args: sum(args),
"get_data": lambda: ["hello", 5],
}
req = '{"jsonrpc": "2.0", "method": "subtract", "params": {"subtrahend": 23, "minuend": 42}, "id": 3}' # noqa
response = JSONRPCResponseManager.handle(req, dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "result": 19, "id": 3}'
))
req = '{"jsonrpc": "2.0", "method": "subtract", "params": {"minuend": 42, "subtrahend": 23}, "id": 4}' # noqa
response = JSONRPCResponseManager.handle(req, dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "result": 19, "id": 4}',
))
def test_notification(self):
req = '{"jsonrpc": "2.0", "method": "update", "params": [1,2,3,4,5]}'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertEqual(response, None)
req = '{"jsonrpc": "2.0", "method": "foobar"}'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertEqual(response, None)
def test_rpc_call_of_non_existent_method(self):
req = '{"jsonrpc": "2.0", "method": "foobar", "id": "1"}'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found"}, "id": "1"}' # noqa
))
def test_rpc_call_with_invalid_json(self):
req = '{"jsonrpc": "2.0", "method": "foobar, "params": "bar", "baz]'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error"}, "id": null}' # noqa
))
def test_rpc_call_with_invalid_request_object(self):
req = '{"jsonrpc": "2.0", "method": 1, "params": "bar"}'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null}' # noqa
))
def test_rpc_call_batch_invalid_json(self):
req = """[
{"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},
{"jsonrpc": "2.0", "method"
]"""
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error"}, "id": null}' # noqa
))
def test_rpc_call_with_an_empty_array(self):
req = '[]'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null}' # noqa
))
def test_rpc_call_with_rpc_call_with_an_invalid_batch_but_not_empty(self):
req = '[1]'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isjsonequal(
response.json,
'{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null}' # noqa
))
def test_rpc_call_with_invalid_batch(self):
req = '[1,2,3]'
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(
response,
json.loads("""[
{"jsonrpc": "2.0", "error": {"code": -32600,
"message": "Invalid Request"}, "id": null},
{"jsonrpc": "2.0", "error": {"code": -32600,
"message": "Invalid Request"}, "id": null},
{"jsonrpc": "2.0", "error": {"code": -32600,
"message": "Invalid Request"}, "id": null}
]""")
)
def test_rpc_call_batch(self):
req = """[
{"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},
{"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
{"jsonrpc": "2.0", "method": "subtract",
"params": [42,23], "id": "2"},
{"foo": "boo"},
{"jsonrpc": "2.0", "method": "foo.get",
"params": {"name": "myself"}, "id": "5"},
{"jsonrpc": "2.0", "method": "get_data", "id": "9"}
]"""
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(
response,
json.loads("""[
{"jsonrpc": "2.0", "result": 7, "id": "1"},
{"jsonrpc": "2.0", "result": 19, "id": "2"},
{"jsonrpc": "2.0", "error": {"code": -32600,
"message": "Invalid Request"}, "id": null},
{"jsonrpc": "2.0", "error": {"code": -32601,
"message": "Method not found"}, "id": "5"},
{"jsonrpc": "2.0", "result": ["hello", 5], "id": "9"}
]""")
)
def test_rpc_call_batch_all_notifications(self):
req = """[
{"jsonrpc": "2.0", "method": "notify_sum", "params": [1,2,4]},
{"jsonrpc": "2.0", "method": "notify_hello", "params": [7]}
]"""
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertEqual(response, None)
def test_rpc_call_response_request(self):
req = '{"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": 1}' # noqa
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isinstance(
response.request,
JSONRPC20Request
))
self.assertTrue(isjsonequal(
response.request.json,
req
))
def test_rpc_call_response_request_batch(self):
req = """[
{"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},
{"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
{"jsonrpc": "2.0", "method": "subtract",
"params": [42,23], "id": "2"},
{"jsonrpc": "2.0", "method": "foo.get",
"params": {"name": "myself"}, "id": "5"},
{"jsonrpc": "2.0", "method": "get_data", "id": "9"}
]"""
response = JSONRPCResponseManager.handle(req, self.dispatcher)
self.assertTrue(isinstance(
response.request,
JSONRPC20BatchRequest
))
self.assertTrue(isjsonequal(
response.request.json,
req
))
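# Standalone sketch (not one of the specification examples above): calling
# JSONRPCResponseManager.handle() directly. The dispatcher maps method names to
# plain callables; the "echo" method below is hypothetical.
def _example_handle_request():
    dispatcher = {"echo": lambda s: s}
    req = '{"jsonrpc": "2.0", "method": "echo", "params": ["hello"], "id": 1}'
    response = JSONRPCResponseManager.handle(req, dispatcher)
    assert json.loads(response.json) == {
        "jsonrpc": "2.0", "result": "hello", "id": 1}
    return response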
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Token provider interface."""
import abc
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
CONF = config.CONF
LOG = log.getLogger(__name__)
SHOULD_CACHE = cache.should_cache_fn('token')
# NOTE(blk-u): The config options are not available at import time.
EXPIRATION_TIME = lambda: CONF.token.cache_time
# supported token versions
V2 = 'v2.0'
V3 = 'v3.0'
VERSIONS = frozenset([V2, V3])
# default token providers
PKI_PROVIDER = 'keystone.token.providers.pki.Provider'
UUID_PROVIDER = 'keystone.token.providers.uuid.Provider'
class UnsupportedTokenVersionException(Exception):
"""Token version is unrecognizable or unsupported."""
pass
@dependency.requires('token_api')
@dependency.provider('token_provider_api')
class Manager(manager.Manager):
"""Default pivot point for the token provider backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
@classmethod
def get_token_provider(cls):
"""Return package path to the configured token provider.
The value should come from ``keystone.conf`` ``[token] provider``,
however this method ensures backwards compatibility for
``keystone.conf`` ``[signing] token_format`` until Havana + 2.
Return the provider based on ``token_format`` if ``provider`` is not
set. Otherwise, ignore ``token_format`` and return the configured
``provider`` instead.
"""
if CONF.token.provider is not None:
# NOTE(gyee): we are deprecating CONF.signing.token_format. This
# code is to ensure the token provider configuration agrees with
# CONF.signing.token_format.
if (CONF.signing.token_format and
((CONF.token.provider == PKI_PROVIDER and
CONF.signing.token_format != 'PKI') or
(CONF.token.provider == UUID_PROVIDER and
CONF.signing.token_format != 'UUID'))):
raise exception.UnexpectedError(
_('keystone.conf [signing] token_format (deprecated) '
'conflicts with keystone.conf [token] provider'))
return CONF.token.provider
else:
if not CONF.signing.token_format:
# No token provider and no format, so use default (PKI)
return PKI_PROVIDER
msg = _('keystone.conf [signing] token_format is deprecated in '
'favor of keystone.conf [token] provider')
if CONF.signing.token_format == 'PKI':
LOG.warning(msg)
return PKI_PROVIDER
elif CONF.signing.token_format == 'UUID':
LOG.warning(msg)
return UUID_PROVIDER
else:
raise exception.UnexpectedError(
_('Unrecognized keystone.conf [signing] token_format: '
'expected either \'UUID\' or \'PKI\''))
def __init__(self):
super(Manager, self).__init__(self.get_token_provider())
def validate_token(self, token_id, belongs_to=None):
unique_id = self.token_api.unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_token(unique_id)
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
return token
def validate_v2_token(self, token_id, belongs_to=None):
unique_id = self.token_api.unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_v2_token(unique_id)
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
return token
def validate_v3_token(self, token_id):
unique_id = self.token_api.unique_id(token_id)
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
token = self._validate_v3_token(unique_id)
self._is_valid_token(token)
return token
def check_v2_token(self, token_id, belongs_to=None):
"""Check the validity of the given V2 token.
:param token_id: identity of the token
:param belongs_to: optional identity of the scoped project
:returns: None
:raises: keystone.exception.Unauthorized
"""
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
unique_id = self.token_api.unique_id(token_id)
self.validate_v2_token(unique_id, belongs_to=belongs_to)
def check_v3_token(self, token_id):
"""Check the validity of the given V3 token.
:param token_id: identity of the token
:returns: None
:raises: keystone.exception.Unauthorized
"""
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
unique_id = self.token_api.unique_id(token_id)
self.validate_v3_token(unique_id)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def _validate_token(self, token_id):
return self.driver.validate_token(token_id)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def _validate_v2_token(self, token_id):
return self.driver.validate_v2_token(token_id)
@cache.on_arguments(should_cache_fn=SHOULD_CACHE,
expiration_time=EXPIRATION_TIME)
def _validate_v3_token(self, token_id):
return self.driver.validate_v3_token(token_id)
def _is_valid_token(self, token):
# Verify the token has not expired.
current_time = timeutils.normalize_time(timeutils.utcnow())
try:
# Get the data we need from the correct location (V2 and V3 tokens
# differ in structure; try V3 first, fall back to V2)
token_data = token.get('token', token.get('access'))
expires_at = token_data.get('expires_at',
token_data.get('expires'))
if not expires_at:
expires_at = token_data['token']['expires']
expiry = timeutils.normalize_time(
timeutils.parse_isotime(expires_at))
if current_time < expiry:
# Token has not expired and has not been revoked.
return None
except Exception:
LOG.exception(_('Unexpected error or malformed token determining '
'token expiry: %s'), token)
# FIXME(morganfainberg): This error message needs to be updated to
# reflect the token couldn't be found, but this change needs to wait
# until Icehouse due to string freeze in Havana. This should be:
# "Failed to find valid token" or something similar.
raise exception.TokenNotFound(_('Failed to validate token'))
def _token_belongs_to(self, token, belongs_to):
"""Check if the token belongs to the right tenant.
This is only used on v2 tokens. The structural validity of the token
will have already been checked before this method is called.
"""
if belongs_to:
token_data = token['access']['token']
if ('tenant' not in token_data or
token_data['tenant']['id'] != belongs_to):
raise exception.Unauthorized()
def invalidate_individual_token_cache(self, token_id):
# NOTE(morganfainberg): invalidate takes the exact same arguments as
# the normal method, this means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._validate_token.invalidate(self, token_id)
self._validate_v2_token.invalidate(self, token_id)
self._validate_v3_token.invalidate(self, token_id)
@six.add_metaclass(abc.ABCMeta)
class Provider(object):
"""Interface description for a Token provider."""
@abc.abstractmethod
def get_token_version(self, token_data):
"""Return the version of the given token data.
If the given token data is unrecognizable,
UnsupportedTokenVersionException is raised.
:param token_data: token_data
:type token_data: dict
:returns: token version string
:raises: keystone.token.provider.UnsupportedTokenVersionException
"""
raise exception.NotImplemented()
@abc.abstractmethod
def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
"""Issue a V2 token.
:param token_ref: token data to generate token from
:type token_ref: dict
:param roles_ref: optional roles list
:type roles_ref: dict
:param catalog_ref: optional catalog information
:type catalog_ref: dict
:returns: (token_id, token_data)
"""
raise exception.NotImplemented()
@abc.abstractmethod
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
metadata_ref=None, include_catalog=True):
"""Issue a V3 Token.
:param user_id: identity of the user
:type user_id: string
:param method_names: names of authentication methods
:type method_names: list
:param expires_at: optional time the token will expire
:type expires_at: string
:param project_id: optional project identity
:type project_id: string
:param domain_id: optional domain identity
:type domain_id: string
:param auth_context: optional context from the authorization plugins
:type auth_context: dict
:param metadata_ref: optional metadata reference
:type metadata_ref: dict
:param include_catalog: optional, include the catalog in token data
:type include_catalog: boolean
:returns: (token_id, token_data)
"""
raise exception.NotImplemented()
@abc.abstractmethod
def revoke_token(self, token_id):
"""Revoke a given token.
:param token_id: identity of the token
:type token_id: string
:returns: None.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def validate_token(self, token_id):
"""Detect token version and validate token and return the token data.
Must raise Unauthorized exception if unable to validate token.
:param token_id: identity of the token
:type token_id: string
:returns: token_data
:raises: keystone.exception.TokenNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def validate_v2_token(self, token_id):
"""Validate the given V2 token and return the token data.
Must raise Unauthorized exception if unable to validate token.
:param token_id: identity of the token
:type token_id: string
:returns: token data
:raises: keystone.exception.TokenNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def validate_v3_token(self, token_id):
"""Validate the given V3 token and return the token_data.
:param token_id: identity of the token
:type token_id: string
:returns: token data
:raises: keystone.exception.TokenNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def _get_token_id(self, token_data):
"""Generate the token_id based upon the data in token_data.
:param token_data: token information
:type token_data: dict
:returns: token identifier
"""
raise exception.NotImplemented()
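# Sketch only (not shipped with Keystone): the minimal surface a concrete token
# provider must implement. The bodies below are placeholders; real providers
# such as the configured PKI_PROVIDER or UUID_PROVIDER supply the actual logic.
class _ExampleProvider(Provider):
    def get_token_version(self, token_data):
        # V3 token data is keyed by 'token', V2 data by 'access' (see
        # Manager._is_valid_token above); anything else is unsupported.
        if 'token' in token_data:
            return V3
        if 'access' in token_data:
            return V2
        raise UnsupportedTokenVersionException()
    def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
        raise exception.NotImplemented()
    def issue_v3_token(self, user_id, method_names, expires_at=None,
                       project_id=None, domain_id=None, auth_context=None,
                       metadata_ref=None, include_catalog=True):
        raise exception.NotImplemented()
    def revoke_token(self, token_id):
        raise exception.NotImplemented()
    def validate_token(self, token_id):
        raise exception.NotImplemented()
    def validate_v2_token(self, token_id):
        raise exception.NotImplemented()
    def validate_v3_token(self, token_id):
        raise exception.NotImplemented()
    def _get_token_id(self, token_data):
        raise exception.NotImplemented()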
|
|
import json
import logging
import os.path
import tornado.ioloop
import tornado.web
import tornado.websocket
from smokematic.baster import Baster
from smokematic.blower import Blower
from smokematic.probe import Probe
from smokematic.controller import Controller
class StatusWebSocket(tornado.websocket.WebSocketHandler):
"""
WebSocket that feeds status data to the remote web client
"""
def open(self):
"""
Sets up the periodic status sending and also sends all data points
collected this execution
"""
self._update_handle = tornado.ioloop.PeriodicCallback(
self.send_update_info,
5000
)
self._update_handle.start()
self.send_full_info()
def on_message(self, message):
"""
Not used as client does not send messages
"""
pass
def send_full_info(self):
"""
Sends all collected data points to initialize the client
"""
controller = self.application.settings['controller']
stat_points = controller.get_stat_history(1)
initial_message_data = {}
for time_offset, data in stat_points.items():
initial_message_data[time_offset] = {
'pit_temp': data.pit_temp,
'food_temp': data.food_temps,
'setpoint': data.setpoint,
'blower_speed': data.blower_speed}
self.write_message({
'type': 'initial',
'data': initial_message_data})
def send_update_info(self):
"""
Gets called periodically to send a data snapshot to the client
"""
self.write_message({
'type': 'update',
'data': {
'pit_temp': self.application.settings['probes']['pit'].get_temp(),
'food_temp': [probe.get_temp() for probe in self.application.settings['probes']['food']],
'setpoint': self.application.settings['controller'].get_setpoint(),
'food_alarms': self.application.settings['food_alarms'],
'blower_speed': self.application.settings['blower'].get_speed()}})
def on_close(self):
"""
Stops the periodic callback
"""
self._update_handle.stop()
class AlarmsHandler(tornado.web.RequestHandler):
"""
RequestHandler that handles all operations related to the food item alarms
"""
def get(self):
"""
Sends the current list of food item alarm setpoints
"""
self.content_type = 'application/json'
self.finish('{}\n'.format(
json.dumps(
{
'status': 'success',
'data': {
'food_alarms': self.application.settings['food_alarms']}})))
def put(self):
"""
Receives and processes a complete list of food item alarm set points
"""
try:
data = json.loads(self.request.body)
food_alarms = data['food_alarms']
numeric_alarms = []
for alarm in food_alarms:
numeric_alarms.append(float(alarm))
if len(numeric_alarms) != len(self.application.settings['food_alarms']):
raise ValueError('Insufficient number of alarms declared')
self.application.settings['food_alarms'] = numeric_alarms
ret_dict = {
'status': 'success',
'data': {'food_alarms': numeric_alarms}}
self.set_status(200)
except KeyError:
ret_dict = {
'status': 'fail',
'data': {'message': 'food_alarms setting must be present'}}
self.set_status(400)
except ValueError:
ret_dict = {
'status': 'fail',
'data': {'message': 'all food_alarms must be present in JSON'}}
self.set_status(400)
self.content_type = 'application/json'
self.finish('{}\n'.format(json.dumps(ret_dict)))
class BasteHandler(tornado.web.RequestHandler):
"""
RequestHandler that handles all operations related to the basting/mopping
"""
def get(self):
"""
Sends the current basting/mopping settings
"""
baster = self.application.settings['baster']
baster_settings = baster.get_settings()
self.content_type = 'application/json'
self.finish('{}\n'.format(
json.dumps(
{
'status': 'success',
'data': {
'frequency': baster_settings[0],
'duration': baster_settings[1]}})))
def put(self):
"""
Receives and processes the basting settings update. Also causes an
immediate baste.
"""
baster = self.application.settings['baster']
try:
data = json.loads(self.request.body)
duration = float(data['duration'])
frequency = float(data['frequency'])
try:
baster.config(frequency, duration)
ret_dict = {
'status': 'success',
'data': {'duration': duration, 'frequency': frequency}}
self.set_status(200)
except ValueError as e:
ret_dict = {
'status': 'fail',
'data': {'message' : str(e)}}
self.set_status(400)
except Exception as e:
ret_dict = {
'status': 'error',
'message': str(e)}
self.set_status(500)
except KeyError:
ret_dict = {
'status': 'fail',
'data': {
'message': 'frequency and duration setting must be present'}}
self.set_status(400)
except ValueError:
ret_dict = {
'status': 'fail',
'data': {
'message': 'frequency and duration setting must be present in JSON'}}
self.set_status(400)
self.content_type = 'application/json'
self.finish('{}\n'.format(json.dumps(ret_dict)))
class OverrideHandler(tornado.web.RequestHandler):
"""
RequestHandler that handles all operations related to the manual
temperature override
"""
def get(self):
"""
Sends the current manual temperature override settings
"""
controller = self.application.settings['controller']
override_status = controller.get_state() == Controller.OVERRIDE
self.content_type = 'application/json'
self.finish('{}\n'.format(
json.dumps(
{
'status': 'success',
'data': {
'override': override_status,
'temperature': controller.get_setpoint() if override_status else None}})))
def put(self):
"""
Receives and processes the manual temperature override update
"""
try:
data = json.loads(self.request.body)
controller = self.application.settings['controller']
temperature = float(data['temperature'])
try:
controller.override_temp(temperature)
ret_dict = {
'status': 'success',
'data': {'temperature': temperature}}
self.set_status(200)
except ValueError as e:
ret_dict = {
'status': 'fail',
'data': {'temperature': str(e)}}
self.set_status(400)
except Exception as e:
ret_dict = {
'status': 'error',
'message': str(e)}
self.set_status(500)
except KeyError:
ret_dict = {
'status': 'fail',
'data': {'temperature': 'temperature setting must be present'}}
self.set_status(400)
except ValueError:
ret_dict = {
'status': 'fail',
'data': {
'temperature': 'temperature setting must be present in JSON'}}
self.set_status(400)
self.content_type = 'application/json'
self.finish('{}\n'.format(json.dumps(ret_dict)))
def delete(self):
"""
Removes the manual temperature override
"""
controller = self.application.settings['controller']
ret_dict = {}
if controller.get_state() != Controller.OVERRIDE:
ret_dict = {
'status': 'fail',
'data': 'Currently not in override mode'
}
self.set_status(400)
else:
controller.resume_profile()
ret_dict = {
'status': 'success',
'data': 'Cooking profile resumed'
}
self.content_type = 'application/json'
self.finish('{}\n'.format(json.dumps(ret_dict)))
class ProfileHandler(tornado.web.RequestHandler):
"""
RequestHandler that handles all operations related to the cooking profiles
"""
def get(self):
"""
Generates and sends a cooking profile based on the observed pit
conditions, sampled every 5 minutes
"""
controller = self.application.settings['controller']
self.set_header('Content-Type', 'application/octet-stream')
self.set_header(
'Content-Disposition',
'attachment; filename=cooking_profile.json'
)
stat_points = controller.get_stat_history(5)
self.finish('{}\n'.format(json.dumps({k:v.pit_temp for k, v in stat_points.items()})))
def put(self):
"""
Receives and processes a new cooking profile. Clears all previously stored data.
"""
try:
data = json.loads(self.request.body)
controller = self.application.settings['controller']
input_profile = data['profile']
profile = {}
for k, v in input_profile.items():
profile[int(k)] = float(v)
try:
controller.set_profile(profile)
ret_dict = {
'status': 'success',
'data': {'profile': profile}}
self.set_status(200)
except ValueError as e:
ret_dict = {
'status': 'fail',
'data': {'profile' : str(e)}}
self.set_status(400)
except Exception as e:
ret_dict = {
'status': 'error',
'message': str(e)}
self.set_status(500)
except KeyError:
ret_dict = {
'status': 'fail',
'data': {'profile': 'profile setting must be present'}}
self.set_status(400)
except ValueError:
ret_dict = {
'status': 'fail',
'data': {'profile': 'profile setting must be present in JSON'}}
self.set_status(400)
self.content_type = 'application/json'
self.finish('{}\n'.format(json.dumps(ret_dict)))
class PidHandler(tornado.web.RequestHandler):
"""
RequestHandler that handles all operations related to the PID controls
"""
def get(self):
"""
Sends the current PID controller settings
"""
controller = self.application.settings['controller']
coefficients = controller.get_pid_coefficients()
self.content_type = 'application/json'
self.finish('{}\n'.format(
json.dumps(
{
'status': 'success',
'data': {
'coefficients': {
'p': coefficients[0],
'i': coefficients[1],
'd': coefficients[2]}}})))
def put(self):
"""
Receives and processes new PID settings.
"""
try:
data = json.loads(self.request.body)
controller = self.application.settings['controller']
input_coefficients = data['coefficients']
coefficients = {}
for k, v in input_coefficients.items():
coefficients[k] = float(v)
try:
controller.set_pid_coefficients(
coefficients['p'],
coefficients['i'],
coefficients['d'])
ret_dict = {
'status': 'success',
'data': {'coefficients': coefficients}}
self.set_status(200)
except ValueError as e:
ret_dict = {
'status': 'fail',
'data': {'coefficients' : str(e)}}
self.set_status(400)
except Exception as e:
ret_dict = {
'status': 'error',
'message': str(e)}
self.set_status(500)
except KeyError:
ret_dict = {
'status': 'fail',
'data': {
'coefficients': 'coefficients setting must be present with p, i, and d values'}}
self.set_status(400)
except ValueError:
ret_dict = {
'status': 'fail',
'data': {'coefficients': 'coefficients setting must be present in JSON'}}
self.set_status(400)
self.content_type = 'application/json'
self.finish('{}\n'.format(json.dumps(ret_dict)))
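# Client-side sketch (hypothetical host and port, not part of this module): the
# PID handler above expects a JSON body with a "coefficients" object holding
# "p", "i" and "d" values and replies with a jsend-style status document. The
# `requests` library is assumed to be available on the client.
def _example_update_pid(host='localhost', port=8080):
    import requests
    payload = {'coefficients': {'p': 4.0, 'i': 0.02, 'd': 2.5}}
    reply = requests.put(
        'http://{0}:{1}/pid'.format(host, port), data=json.dumps(payload))
    return reply.json()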
def main(config):
"""
Initializes all the Smokematic peripherals and web request handlers
:param config: The configuration dictionary
:type config: dict
"""
logging_mapping = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL
}
logging_level = logging_mapping[config['logging']['level']]
logging.basicConfig(level=logging_level)
# Tornado is a bit chatty on the log so never go to DEBUG
tornado_logger = logging.getLogger('tornado')
tornado_logger.setLevel(max(logging_level, logging.INFO))
current_path = os.path.dirname(__file__)
blower = Blower(config['blower']['pin'])
baster = Baster(config['baster']['pin'])
pit_probe = Probe(
config['pit_probe']['pin'],
config['pit_probe']['sh_a'],
config['pit_probe']['sh_b'],
config['pit_probe']['sh_c']
)
food_probes = []
food_alarms = []
for food_probe in config['food_probes']:
food_probes.append(
Probe(
food_probe['pin'],
food_probe['sh_a'],
food_probe['sh_b'],
food_probe['sh_c']
)
)
food_alarms.append(None)
controller = Controller(
blower,
pit_probe,
*food_probes)
controller.set_pid_coefficients(
config['pid_coefficients']['k_p'],
config['pid_coefficients']['k_i'],
config['pid_coefficients']['k_d'],
)
controller.set_profile({0: config['initial_setpoint']})
application = tornado.web.Application(
[
(r'/status', StatusWebSocket),
(r'/profile', ProfileHandler),
(r'/override', OverrideHandler),
(r'/pid', PidHandler),
(r'/alarms', AlarmsHandler),
(r'/baste', BasteHandler)],
static_path=os.path.join(current_path, 'webgui'),
blower=blower,
baster=baster,
controller=controller,
food_alarms=food_alarms,
probes={'food': food_probes, 'pit': pit_probe})
application.listen(config['server']['port'])
tornado.ioloop.IOLoop.instance().start()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.core import exceptions
from telemetry.timeline import trace_data
from telemetry.timeline import model
class InspectorNetworkException(Exception):
pass
class InspectorNetworkResponseData(object):
def __init__(self, inspector_network, params, initiator):
"""Creates a new InspectorNetworkResponseData instance.
Args:
inspector_network: InspectorNetwork instance.
params: the 'params' field of the devtools Network.responseReceived event.
initiator: initiator of the request, as gathered from
Network.requestWillBeSent.
"""
self._inspector_network = inspector_network
self._request_id = params['requestId']
self._timestamp = params['timestamp']
self._initiator = initiator
self._response = params['response']
if not self._response:
raise InspectorNetworkException('response must exist')
# Response headers.
headers = self._response['headers']
self._header_map = {}
for k, v in headers.iteritems():
# Camel-case header keys.
self._header_map[k.title()] = v
# Request headers.
self._request_header_map = {}
if 'requestHeaders' in self._response:
# Camel-case header keys.
for k, v in self._response['requestHeaders'].iteritems():
self._request_header_map[k.title()] = v
self._body = None
self._base64_encoded = False
if self._inspector_network:
self._served_from_cache = (
self._inspector_network.HTTPResponseServedFromCache(self._request_id))
else:
self._served_from_cache = False
# Whether constructed from a timeline event.
self._from_event = False
@property
def status(self):
return self._response['status']
@property
def status_text(self):
    # devtools reports response fields in camelCase (cf. 'requestHeaders' above).
    return self._response['statusText']
@property
def headers(self):
return self._header_map
@property
def request_headers(self):
return self._request_header_map
@property
def timestamp(self):
return self._timestamp
@property
def timing(self):
if 'timing' in self._response:
return self._response['timing']
return None
@property
def url(self):
return self._response['url']
@property
def request_id(self):
return self._request_id
@property
def served_from_cache(self):
return self._served_from_cache
@property
def initiator(self):
return self._initiator
def GetHeader(self, name):
if name in self.headers:
return self.headers[name]
return None
def GetBody(self, timeout=60):
if not self._body and not self._from_event:
self._body, self._base64_encoded = (
self._inspector_network.GetHTTPResponseBody(self._request_id, timeout))
return self._body, self._base64_encoded
def AsTimelineEvent(self):
event = {}
event['type'] = 'HTTPResponse'
event['startTime'] = self.timestamp
# There is no end time. Just return the timestamp instead.
event['endTime'] = self.timestamp
event['requestId'] = self.request_id
event['response'] = self._response
event['body'], event['base64_encoded_body'] = self.GetBody()
event['served_from_cache'] = self.served_from_cache
event['initiator'] = self._initiator
return event
@staticmethod
def FromTimelineEvent(event):
assert event.name == 'HTTPResponse'
params = {}
params['timestamp'] = event.start
params['requestId'] = event.args['requestId']
params['response'] = event.args['response']
recorded = InspectorNetworkResponseData(None, params, None)
# pylint: disable=protected-access
recorded._body = event.args['body']
recorded._base64_encoded = event.args['base64_encoded_body']
recorded._served_from_cache = event.args['served_from_cache']
recorded._initiator = event.args.get('initiator', None)
recorded._from_event = True
return recorded
class InspectorNetwork(object):
def __init__(self, inspector_websocket):
self._inspector_websocket = inspector_websocket
self._http_responses = []
self._served_from_cache = set()
self._timeline_recorder = None
self._initiators = {}
self._finished = {}
def ClearCache(self, timeout=60):
"""Clears the browser's disk and memory cache."""
res = self._inspector_websocket.SyncRequest({
'method': 'Network.canClearBrowserCache'
}, timeout)
assert res['result'], 'Cache clearing is not supported by this browser.'
self._inspector_websocket.SyncRequest({
'method': 'Network.clearBrowserCache'
}, timeout)
def StartMonitoringNetwork(self, timeout=60):
"""Starts monitoring network notifications and recording HTTP responses."""
self.ClearResponseData()
self._inspector_websocket.RegisterDomain(
'Network',
self._OnNetworkNotification)
request = {
'method': 'Network.enable'
}
self._inspector_websocket.SyncRequest(request, timeout)
def StopMonitoringNetwork(self, timeout=60):
"""Stops monitoring network notifications and recording HTTP responses."""
request = {
'method': 'Network.disable'
}
self._inspector_websocket.SyncRequest(request, timeout)
# There may be queued messages that don't appear until the SyncRequest
# happens. Wait to unregister until after sending the disable command.
self._inspector_websocket.UnregisterDomain('Network')
def GetResponseData(self):
"""Returns all recorded HTTP responses."""
return [self._AugmentResponse(rsp) for rsp in self._http_responses]
def ClearResponseData(self):
"""Clears recorded HTTP responses."""
self._http_responses = []
self._served_from_cache.clear()
self._initiators.clear()
def _AugmentResponse(self, response):
"""Augments an InspectorNetworkResponseData for final output.
Join the loadingFinished timing event to the response. This event is
timestamped with epoch seconds. In the response timing object, all timing
aside from requestTime is in millis relative to requestTime, so
loadingFinished is converted to be consistent.
Args:
response: an InspectorNetworkResponseData instance to augment.
Returns:
      The same response, modified as described above.
"""
if response.timing is None:
return response
if response.request_id not in self._finished:
response.timing['loadingFinished'] = -1
else:
delta_ms = 1000 * (self._finished[response.request_id] -
response.timing['requestTime'])
if delta_ms < 0:
delta_ms = -1
response.timing['loadingFinished'] = delta_ms
return response
def _OnNetworkNotification(self, msg):
if msg['method'] == 'Network.requestWillBeSent':
self._ProcessRequestWillBeSent(msg['params'])
if msg['method'] == 'Network.responseReceived':
self._RecordHTTPResponse(msg['params'])
elif msg['method'] == 'Network.requestServedFromCache':
self._served_from_cache.add(msg['params']['requestId'])
elif msg['method'] == 'Network.loadingFinished':
assert msg['params']['requestId'] not in self._finished
self._finished[msg['params']['requestId']] = msg['params']['timestamp']
def _ProcessRequestWillBeSent(self, params):
request_id = params['requestId']
self._initiators[request_id] = params['initiator']
def _RecordHTTPResponse(self, params):
required_fields = ['requestId', 'timestamp', 'response']
for field in required_fields:
if field not in params:
logging.warning('HTTP Response missing required field: %s', field)
return
request_id = params['requestId']
assert request_id in self._initiators
initiator = self._initiators[request_id]
self._http_responses.append(
InspectorNetworkResponseData(self, params, initiator))
def GetHTTPResponseBody(self, request_id, timeout=60):
try:
res = self._inspector_websocket.SyncRequest({
'method': 'Network.getResponseBody',
'params': {
'requestId': request_id,
}
}, timeout)
except exceptions.TimeoutException:
      logging.warning('Timeout while fetching body for %s', request_id)
return None, False
if 'error' in res:
return None, False
return res['result']['body'], res['result']['base64Encoded']
def HTTPResponseServedFromCache(self, request_id):
return request_id and request_id in self._served_from_cache
@property
def timeline_recorder(self):
if not self._timeline_recorder:
self._timeline_recorder = TimelineRecorder(self)
return self._timeline_recorder
class TimelineRecorder(object):
def __init__(self, inspector_network):
self._inspector_network = inspector_network
self._is_recording = False
def Start(self):
assert not self._is_recording, 'Start should only be called once.'
self._is_recording = True
self._inspector_network.StartMonitoringNetwork()
def Stop(self):
if not self._is_recording:
return None
responses = self._inspector_network.GetResponseData()
events = [r.AsTimelineEvent() for r in list(responses)]
self._inspector_network.StopMonitoringNetwork()
self._is_recording = False
if len(events) == 0:
return None
builder = trace_data.TraceDataBuilder()
builder.AddEventsTo(trace_data.INSPECTOR_TRACE_PART, events)
return model.TimelineModel(builder.AsData(), shift_world_to_zero=False)
|
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
from murano.api.v1 import environments
from murano.api.v1 import sessions
from murano.db import models
from murano.db import session as db_session
from murano.services import states
import murano.tests.unit.api.base as tb
from webob import exc
class TestSessionsApi(tb.ControllerTest, tb.MuranoApiTestCase):
def setUp(self):
super(TestSessionsApi, self).setUp()
self.environments_controller = environments.Controller()
self.sessions_controller = sessions.Controller()
self.fixture = self.useFixture(config_fixture.Config())
self.fixture.conf(args=[])
def test_deploy_session(self):
CREDENTIALS = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
ENVIRONMENT_ID = response_body['id']
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
SESSION_ID = response_body['id']
request = self._post(
'/environments/{environment_id}/sessions/'
'{session_id}/deploy'.format(environment_id=ENVIRONMENT_ID,
session_id=SESSION_ID),
b'',
**CREDENTIALS
)
response = request.get_response(self.api)
self.assertEqual(response.status_code, 200)
request = self._get(
'/environments/{environment_id}/sessions/{session_id}'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
self.assertIn(response_body['state'], [states.SessionState.DEPLOYED,
states.SessionState.DEPLOYING])
def test_cant_deploy_from_another_tenant(self):
"""Test to prevent deployment under another tenant user's creds
If user from one tenant uses session id and environment id
of user from another tenant - he is not able to deploy
the environment.
Bug: #1382026
"""
CREDENTIALS_1 = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
CREDENTIALS_2 = {'tenant': 'test_tenant_2', 'user': 'test_user_2'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
# Create environment for user #1
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS_1
)
response_body = jsonutils.loads(request.get_response(self.api).body)
self.assertEqual(CREDENTIALS_1['tenant'],
response_body['tenant_id'])
ENVIRONMENT_ID = response_body['id']
# Create session of user #1
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS_1
)
response_body = jsonutils.loads(request.get_response(self.api).body)
SESSION_ID = response_body['id']
# Deploy the environment using environment id and session id of user #1
# by user #2
request = self._post(
'/environments/{environment_id}/sessions/'
'{session_id}/deploy'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS_2
)
response = request.get_response(self.api)
# Should be forbidden!
self.assertEqual(403, response.status_code)
def test_session_show(self):
CREDENTIALS_1 = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
CREDENTIALS_2 = {'tenant': 'test_tenant_2', 'user': 'test_user_2'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
# Create environment for user #1
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS_1
)
response_body = jsonutils.loads(request.get_response(self.api).body)
self.assertEqual(CREDENTIALS_1['tenant'],
response_body['tenant_id'])
ENVIRONMENT_ID = response_body['id']
# Create session of user #1
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS_1
)
response_body = jsonutils.loads(request.get_response(self.api).body)
SESSION_ID = response_body['id']
# Show environment with correct credentials
request = self._get(
'/environments/{environment_id}/sessions/{session_id}'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS_1
)
response_body = jsonutils.loads(request.get_response(self.api).body)
self.assertEqual(SESSION_ID, response_body['id'])
# Show environment with incorrect credentials
request = self._get(
'/environments/{environment_id}/sessions/{session_id}'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS_2
)
response = request.get_response(self.api)
self.assertEqual(403, response.status_code)
def test_session_delete(self):
CREDENTIALS = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
# Create environment
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
self.assertEqual(CREDENTIALS['tenant'],
response_body['tenant_id'])
ENVIRONMENT_ID = response_body['id']
# Create session
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
SESSION_ID = response_body['id']
# Delete session
request = self._delete(
'/environments/{environment_id}/delete/{session_id}'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS
)
response = self.sessions_controller.delete(
request, ENVIRONMENT_ID, SESSION_ID)
# Make sure the session was deleted
request = self._get(
'/environments/{environment_id}/sessions/{session_id}'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS
)
response = request.get_response(self.api)
self.assertEqual(404, response.status_code)
unit = db_session.get_session()
session = unit.query(models.Session).get(SESSION_ID)
self.assertIsNone(session)
@mock.patch('murano.db.services.environments.EnvironmentServices.'
'get_status')
def test_configure_handle_exc(self, mock_function):
"""Test whether env status in DEPLOYING, DELETING throws exception."""
CREDENTIALS = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
ENVIRONMENT_ID = response_body['id']
env_statuses = [states.EnvironmentStatus.DEPLOYING,
states.EnvironmentStatus.DELETING]
for env_status in env_statuses:
mock_function.return_value = env_status
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS
)
response = request.get_response(self.api)
self.assertEqual(response.status_code, 403)
self.assertEqual(mock_function.call_count, len(env_statuses))
def test_show_handle_exc(self):
"""Test whether invalid user/invalid session throws exception."""
CREDENTIALS = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
ENVIRONMENT_ID = response_body['id']
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
SESSION_ID = response_body['id']
unit = db_session.get_session()
environment = unit.query(models.Environment).get(ENVIRONMENT_ID)
mock_context = mock.MagicMock(user_id=None,
tenant=environment.tenant_id)
mock_request = mock.MagicMock(context=mock_context)
self.assertRaises(exc.HTTPUnauthorized,
self.sessions_controller.show,
mock_request,
ENVIRONMENT_ID,
SESSION_ID)
with mock.patch('murano.db.services.sessions.SessionServices.'
'validate') as mock_validate:
mock_validate.return_value = False
request = self._get(
'/environments/{environment_id}/sessions/{session_id}'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS
)
response = request.get_response(self.api)
self.assertEqual(response.status_code, 403)
def test_delete_handle_exc(self):
"""Test whether invalid user/invalid session throws exception."""
CREDENTIALS = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
ENVIRONMENT_ID = response_body['id']
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
SESSION_ID = response_body['id']
unit = db_session.get_session()
environment = unit.query(models.Environment).get(ENVIRONMENT_ID)
mock_context = mock.MagicMock(user_id=None,
tenant=environment.tenant_id)
mock_request = mock.MagicMock(context=mock_context)
self.assertRaises(exc.HTTPUnauthorized,
self.sessions_controller.delete, mock_request,
ENVIRONMENT_ID, SESSION_ID)
with mock.patch('murano.services.states.SessionState') as mock_state:
unit = db_session.get_session()
session = unit.query(models.Session).get(SESSION_ID)
mock_state.DEPLOYING = session.state
request = self._delete(
'/environments/{environment_id}/delete/{session_id}'
.format(environment_id=ENVIRONMENT_ID, session_id=SESSION_ID),
b'',
**CREDENTIALS
)
self.assertRaises(exc.HTTPForbidden,
self.sessions_controller.delete, request,
ENVIRONMENT_ID, SESSION_ID)
def test_deploy_handle_exc(self):
"""Test whether invalid user/invalid session throws exception."""
CREDENTIALS = {'tenant': 'test_tenant_1', 'user': 'test_user_1'}
self._set_policy_rules(
{'create_environment': '@'}
)
self.expect_policy_check('create_environment')
request = self._post(
'/environments',
jsonutils.dump_as_bytes({'name': 'test_environment_1'}),
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
ENVIRONMENT_ID = response_body['id']
request = self._post(
'/environments/{environment_id}/configure'
.format(environment_id=ENVIRONMENT_ID),
b'',
**CREDENTIALS
)
response_body = jsonutils.loads(request.get_response(self.api).body)
SESSION_ID = response_body['id']
with mock.patch('murano.db.services.sessions.SessionServices.'
'validate') as mock_validate:
mock_validate.return_value = False
request = self._post(
'/environments/{environment_id}/sessions/'
'{session_id}/deploy'.format(environment_id=ENVIRONMENT_ID,
session_id=SESSION_ID),
b'',
**CREDENTIALS
)
self.assertRaises(exc.HTTPForbidden,
self.sessions_controller.deploy, request,
ENVIRONMENT_ID, SESSION_ID)
with mock.patch('murano.db.services.sessions.SessionServices.'
'validate') as mock_validate:
with mock.patch('murano.services.states.SessionState')\
as mock_state:
mock_validate.return_value = True
mock_state.OPENED = 'NOT OPENED STATE'
request = self._post(
'/environments/{environment_id}/deploy/{session_id}'
.format(environment_id=ENVIRONMENT_ID,
session_id=SESSION_ID),
b'',
**CREDENTIALS
)
self.assertRaises(exc.HTTPForbidden,
self.sessions_controller.deploy, request,
ENVIRONMENT_ID, SESSION_ID)
|
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import json
import tempfile
import shutil
from botocore.docs.bcdoc.restdoc import DocumentStructure
from tests import mock
from tests import unittest
from botocore.compat import OrderedDict
from botocore.hooks import HierarchicalEmitter
from botocore.model import ServiceModel, OperationModel
from botocore.client import ClientCreator
from botocore.configprovider import ConfigValueStore
from botocore.loaders import Loader
class BaseDocsTest(unittest.TestCase):
def setUp(self):
self.root_dir = tempfile.mkdtemp()
self.version_dirs = os.path.join(
self.root_dir, 'myservice', '2014-01-01')
os.makedirs(self.version_dirs)
self.model_file = os.path.join(self.version_dirs, 'service-2.json')
self.waiter_model_file = os.path.join(
self.version_dirs, 'waiters-2.json')
self.paginator_model_file = os.path.join(
self.version_dirs, 'paginators-1.json')
self.example_model_file = os.path.join(
self.version_dirs, 'examples-1.json')
self.json_model = {}
self.nested_json_model = {}
self._setup_models()
self.build_models()
self.events = HierarchicalEmitter()
self.setup_client()
self.doc_name = 'MyDoc'
self.doc_structure = DocumentStructure(self.doc_name, target='html')
def tearDown(self):
shutil.rmtree(self.root_dir)
def setup_client(self):
with open(self.example_model_file, 'w') as f:
json.dump(self.example_json_model, f)
with open(self.waiter_model_file, 'w') as f:
json.dump(self.waiter_json_model, f)
with open(self.paginator_model_file, 'w') as f:
json.dump(self.paginator_json_model, f)
with open(self.model_file, 'w') as f:
json.dump(self.json_model, f)
self.loader = Loader(extra_search_paths=[self.root_dir])
endpoint_resolver = mock.Mock()
endpoint_resolver.construct_endpoint.return_value = {
'hostname': 'foo.us-east-1',
'partition': 'aws',
'endpointName': 'us-east-1',
'signatureVersions': ['v4']
}
self.creator = ClientCreator(
loader=self.loader, endpoint_resolver=endpoint_resolver,
user_agent='user-agent', event_emitter=self.events,
retry_handler_factory=mock.Mock(),
retry_config_translator=mock.Mock(),
exceptions_factory=mock.Mock(),
config_store=ConfigValueStore()
)
self.client = self.creator.create_client('myservice', 'us-east-1')
def _setup_models(self):
self.json_model = {
'metadata': {
'apiVersion': '2014-01-01',
'endpointPrefix': 'myservice',
'signatureVersion': 'v4',
'serviceFullName': 'AWS MyService',
'uid': 'myservice-2014-01-01',
'protocol': 'query',
'serviceId': 'MyService',
},
'operations': {
'SampleOperation': {
'name': 'SampleOperation',
'input': {'shape': 'SampleOperationInputOutput'},
'output': {'shape': 'SampleOperationInputOutput'}
}
},
'shapes': {
'SampleOperationInputOutput': {
'type': 'structure',
'members': OrderedDict()
},
'String': {
'type': 'string'
}
},
'documentation': 'AWS MyService Description'
}
self.waiter_json_model = {
"version": 2,
"waiters": {
"SampleOperationComplete": {
"delay": 15,
"operation": "SampleOperation",
"maxAttempts": 40,
"acceptors": [
{"expected": "complete",
"matcher": "pathAll",
"state": "success",
"argument": "Biz"},
{"expected": "failed",
"matcher": "pathAny",
"state": "failure",
"argument": "Biz"}
]
}
}
}
self.paginator_json_model = {
"pagination": {
"SampleOperation": {
"input_token": "NextResult",
"output_token": "NextResult",
"limit_key": "MaxResults",
"result_key": "Biz"
}
}
}
self.example_json_model = {
"version": 1,
"examples": {
"SampleOperation": [{
"id": "sample-id",
"title": "sample-title",
"description": "Sample Description.",
"input": OrderedDict([
("Biz", "foo"),
]),
"comments": {
"input": {
"Biz": "bar"
},
}
}]
}
}
def build_models(self):
self.service_model = ServiceModel(self.json_model)
self.operation_model = OperationModel(
self.json_model['operations']['SampleOperation'],
self.service_model
)
def add_shape(self, shape):
shape_name = list(shape.keys())[0]
self.json_model['shapes'][shape_name] = shape[shape_name]
def add_shape_to_params(self, param_name, shape_name, documentation=None,
is_required=False):
params_shape = self.json_model['shapes']['SampleOperationInputOutput']
member = {'shape': shape_name}
if documentation is not None:
member['documentation'] = documentation
params_shape['members'][param_name] = member
if is_required:
required_list = params_shape.get('required', [])
required_list.append(param_name)
params_shape['required'] = required_list
def add_shape_to_errors(self, shape_name):
operation = self.json_model['operations']['SampleOperation']
errors = operation.get('errors', [])
errors.append({'shape': shape_name})
operation['errors'] = errors
def assert_contains_line(self, line):
contents = self.doc_structure.flush_structure().decode('utf-8')
self.assertIn(line, contents)
def assert_contains_lines_in_order(self, lines):
contents = self.doc_structure.flush_structure().decode('utf-8')
for line in lines:
self.assertIn(line, contents)
beginning = contents.find(line)
contents = contents[(beginning + len(line)):]
def assert_not_contains_line(self, line):
contents = self.doc_structure.flush_structure().decode('utf-8')
self.assertNotIn(line, contents)
def assert_not_contains_lines(self, lines):
contents = self.doc_structure.flush_structure().decode('utf-8')
for line in lines:
self.assertNotIn(line, contents)
|
|
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.contrib.admin.utils import NestedObjects
from django.contrib.auth import get_permission_codename
# from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError
from django.core.exceptions import ImproperlyConfigured
# from django.core.paginator import EmptyPage
# from django.core.paginator import PageNotAnInteger
# from django.core.paginator import Paginator
from django.core.urlresolvers import reverse as django_reverse
from django.db import router
from django.db.models import Q
from django.forms.fields import CharField
from django.forms.fields import FloatField
from django.forms.fields import DecimalField
from django.forms.models import ModelChoiceField
from django.http import Http404
from django.http import QueryDict
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import DetailView
from django.views.generic import UpdateView
from django.views.generic.base import View
from django.views.generic.base import ContextMixin
from django.views.generic.edit import BaseFormView
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.detail import SingleObjectTemplateResponseMixin
# from django.template.loader import get_template
# from django.template.loader import select_template
# from django.template import TemplateDoesNotExist
from django.utils.encoding import force_text
from django.utils.html import format_html
# from django.utils.timezone import make_aware
# from django.utils.timezone import get_current_timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from .mixins import ModuleSearchMixin
from .mixins import ModuleAjaxMixin
# from .mixins import ModuleFilesMixin
from .mixins import ModuleFormMixin
from .mixins import ReadOnlyMixin
from djangobmf.permissions import AjaxPermission
from djangobmf.permissions import ModuleClonePermission
from djangobmf.permissions import ModuleCreatePermission
from djangobmf.permissions import ModuleDeletePermission
from djangobmf.permissions import ModuleUpdatePermission
from djangobmf.signals import activity_create
from djangobmf.signals import activity_update
# from djangobmf.utils.deprecation import RemovedInNextBMFVersionWarning
# from rest_framework.reverse import reverse
import copy
# import datetime
import logging
import operator
# import types
# import warnings
from functools import reduce
# from django_filters.views import FilterView
logger = logging.getLogger(__name__)
# --- detail, forms and api ---------------------------------------------------
class ModuleDetail(SingleObjectTemplateResponseMixin, ContextMixin, View):
context_object_name = 'object'
template_name_suffix = '_bmfdetail'
default_template = "djangobmf/api/detail-default.html"
model = None
module = None
def get_object(self):
raise ImproperlyConfigured(
"ModuleDetail must be provided with an object or "
"an implementation of 'get_object()'"
)
def get(self, request, object=None, *args, **kwargs):
self.object = object or self.get_object()
context = self.get_context_data(**{
self.context_object_name: self.object,
})
return self.render_to_response(context)
def get_template_names(self):
return super(ModuleDetail, self).get_template_names() + [self.default_template]
class ModuleCloneView(ModuleFormMixin, ModuleAjaxMixin, UpdateView):
"""
clone a object
"""
default_permission_classes = [ModuleClonePermission, AjaxPermission]
context_object_name = 'object'
template_name_suffix = '_bmfclone'
fields = []
def get_template_names(self):
return super(ModuleCloneView, self).get_template_names() \
+ ["djangobmf/module_clone_default.html"]
def clone_object(self, formdata, instance):
pass
def clone_related_objects(self, formdata, old_object, new_object):
pass
def form_object_save(self, form):
self.object = form.save()
def form_valid(self, form):
# messages.success(self.request, 'Object cloned')
old_object = copy.copy(self.object)
self.clone_object(form.cleaned_data, form.instance)
form.instance.pk = None
if form.instance._bmfmeta.workflow:
setattr(
form.instance,
form.instance._bmfmeta.workflow_field_name,
form.instance._bmfmeta.workflow.default
)
form.instance.created_by = self.request.user
form.instance.modified_by = self.request.user
self.form_object_save(form)
self.clone_related_objects(form.cleaned_data, old_object, self.object)
activity_create.send(sender=self.object.__class__, instance=self.object)
return self.render_valid_form({
'object_pk': self.object.pk,
# 'redirect': self.object.bmfmodule_detail(),
'message': True,
'reload': True,
})
class ModuleUpdateView(ModuleFormMixin, ModuleAjaxMixin, ReadOnlyMixin, UpdateView):
"""
"""
permission_classes = [ModuleUpdatePermission, AjaxPermission]
context_object_name = 'object'
template_name_suffix = '_bmfupdate'
exclude = []
def get_template_names(self):
return super(ModuleUpdateView, self).get_template_names() \
+ ["djangobmf/module_update_default.html"]
def form_valid(self, form):
# messages.success(self.request, 'Object updated')
form.instance.modified_by = self.request.user
# TODO: get the values of all observed fields
self.object = form.save()
# TODO: compare the lists of observed fields
# TODO: generate change signal
# return dict([(field, getattr(self, field)) for field in self._bmfmeta.observed_fields])
activity_update.send(sender=self.object.__class__, instance=self.object)
if self.model._bmfmeta.only_related:
return self.render_valid_form({
'object_pk': self.object.pk,
'message': True,
'reload': True,
})
else:
return self.render_valid_form({
'object_pk': self.object.pk,
# 'redirect': self.object.bmfmodule_detail(),
'message': True,
'reload': True,
})
class ModuleCreateView(ModuleFormMixin, ModuleAjaxMixin, ReadOnlyMixin, CreateView):
"""
create a new instance
"""
permission_classes = [ModuleCreatePermission, AjaxPermission]
context_object_name = 'object'
template_name_suffix = '_bmfcreate'
def get_template_names(self):
return super(ModuleCreateView, self).get_template_names() \
+ ["djangobmf/module_create_default.html"]
def form_object_save(self, form):
self.object = form.save()
activity_create.send(sender=self.object.__class__, instance=self.object)
def form_valid(self, form):
# messages.success(self.request, 'Object created')
form.instance.modified_by = self.request.user
form.instance.created_by = self.request.user
self.form_object_save(form)
return self.render_valid_form({
'object_pk': self.object.pk,
'message': True,
'reload': True,
})
class ModuleDeleteView(ModuleAjaxMixin, DeleteView):
"""
delete an instance
"""
permission_classes = [ModuleDeletePermission, AjaxPermission]
context_object_name = 'object'
template_name_suffix = '_bmfdelete'
def get_template_names(self):
return super(ModuleDeleteView, self).get_template_names() \
+ ["djangobmf/module_delete.html"]
def get_deleted_objects(self):
collector = NestedObjects(using=router.db_for_write(self.model))
collector.collect([self.object])
perms_needed = set()
def format_callback(obj):
p = '%s.%s' % (
obj._meta.app_label,
get_permission_codename('delete', obj._meta)
)
if not self.request.user.has_perm(p):
perms_needed.add(obj._meta.verbose_name)
registered = self.request.djangobmf_appconfig.has_module(obj.__class__)
# only show bmf modules
if not registered:
return None
return format_html(
'{0}: {1}',
obj._meta.verbose_name,
obj
)
def format_protected_callback(obj):
# if obj.__class__ in self.request.djangobmf_site.modules and not obj._bmfmeta.only_related:
# return format_html(
# '{0}: <a href="{1}">{2}</a>',
# obj._meta.verbose_name,
# obj.bmfmodule_detail(),
# obj
# )
# else:
return format_html(
'{0}: {1}',
obj._meta.verbose_name,
obj
)
to_delete = collector.nested(format_callback)
protected = [
format_protected_callback(obj) for obj in collector.protected
]
return to_delete, perms_needed, protected
def get_success_url(self):
# TODO redirect to active dashboard
return django_reverse('djangobmf:dashboard', kwargs={
'dashboard': None,
})
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
if self.model._bmfmeta.only_related:
return self.render_valid_form({
'message': ugettext('Object deleted'),
'reload': True,
})
else:
return self.render_valid_form({
'redirect': self.request.GET.get('redirect', success_url),
'message': ugettext('Object deleted'),
})
def clean_list(self, lst):
if not isinstance(lst, (list, tuple)):
return lst
else:
return [x for x in map(self.clean_list, lst) if x]
def get_context_data(self, **kwargs):
context = super(ModuleDeleteView, self).get_context_data(**kwargs)
to_delete, perms_needed, protected = self.get_deleted_objects()
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {
"name": force_text(self.model._meta.verbose_name)
}
else:
title = _("Are you sure?")
context['deleted_objects'] = self.clean_list(to_delete)
context['object_name'] = self.model._meta.verbose_name
context['perms_lacking'] = perms_needed
context['protected'] = protected
context['title'] = title
return context
class ModuleWorkflowView(ModuleAjaxMixin, DetailView):
"""
update the state of a workflow
"""
permission_classes = [AjaxPermission]
context_object_name = 'object'
template_name_suffix = '_bmfworkflow'
def get_template_names(self):
return super(ModuleWorkflowView, self).get_template_names() \
+ ["djangobmf/module_workflow.html"]
def get_permissions(self, perms):
info = self.model._meta.app_label, self.model._meta.model_name
perms.append('%s.change_%s' % info)
perms.append('%s.view_%s' % info)
return super(ModuleWorkflowView, self).get_permissions(perms)
def get(self, request, transition, *args, **kwargs):
self.object = self.get_object()
try:
success_url = self.object._bmfmeta.workflow.transition(transition, self.request.user)
except ValidationError as e:
return self.render_to_response({
'error': e,
})
return self.render_valid_form({
'message': True,
'redirect': success_url,
'reload': not bool(success_url),
})
class ModuleFormAPI(ModuleFormMixin, ModuleAjaxMixin, ModuleSearchMixin, SingleObjectMixin, BaseFormView):
"""
"""
permission_classes = [AjaxPermission]
model = None
queryset = None
form_view = None
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
"""
if hasattr(self, 'object'):
return self.object
# Use a custom queryset if provided; this is required for subclasses
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get('pk', None)
if pk is None:
return None
try:
obj = queryset.get(pk=pk)
except ObjectDoesNotExist:
raise Http404(_("No %(verbose_name)s found matching the query") % {
'verbose_name': queryset.model._meta.verbose_name
})
return obj
def get_field(self, form, auto_id):
"""
        Get the field of this form that matches the given auto_id value;
        needed for ajax interaction (search)
"""
for field in form:
if field.auto_id == auto_id:
return field
return None
def get_all_fields(self, form):
"""
        Get all the fields in this form;
        needed for ajax interaction (changed value)
"""
return [field for field in form]
def get_changes(self, form):
"""
        Needed for ajax calls. Returns the fields whose values changed during validation.
"""
# do form validation
valid = form.is_valid()
# also do model clean's, which are usually done, if the model is valid
try:
form.instance.clean()
except ValidationError:
pass
data = []
for field in self.get_all_fields(form):
# input-type fields
val_instance = getattr(field.form.instance, field.name, None)
if isinstance(field.field, (CharField, DecimalField, FloatField)):
if not field.value() and val_instance:
data.append({'field': field.auto_id, 'value': val_instance})
continue
if isinstance(field.field, ModelChoiceField):
                try:  # inline formsets cause attribute errors
if val_instance and field.value() != str(val_instance.pk):
data.append({'field': field.auto_id, 'value': val_instance.pk, 'name': str(val_instance)})
except AttributeError:
pass
continue
logger.info("Formatting is missing for %s" % field.field.__class__)
logger.debug("Form (%s) changes: %s" % (
'valid' if valid else 'invalid',
data
))
return valid, data
    # Don't react to GET requests
def get(self, request, *args, **kwargs):
raise Http404
def post(self, request, *args, **kwargs):
form_class = self.form_view(model=self.model, object=self.get_object()).get_form_class()
data = self.request.POST['form'].encode('ASCII')
form = form_class(
prefix=self.get_prefix(),
data=QueryDict(data),
instance=self.get_object())
if "search" in self.request.GET:
# do form validation to fill form.instance with data
valid = form.is_valid()
field = self.get_field(form, self.request.POST['field'])
if not field:
logger.info("Field %s was not found" % self.request.POST['field'])
raise Http404
qs = field.field.queryset
# use permissions from module
try:
module = self.request.djangobmf_appconfig.get_module(qs.model)
qs = module.permissions().filter_queryset(
qs,
self.request.user,
)
except KeyError:
pass
func = getattr(form.instance, 'get_%s_queryset' % field.name, None)
if func:
qs = func(qs)
if self.request.POST['string']:
for bit in self.normalize_query(self.request.POST['string']):
lookups = [self.construct_search(str(f)) for f in qs.model._bmfmeta.search_fields]
queries = [Q(**{l: bit}) for l in lookups]
qs = qs.filter(reduce(operator.or_, queries))
data = []
for item in qs:
data.append({'pk': item.pk, 'value': str(item)})
return self.render_to_json_response(data)
if "changed" in self.request.GET:
"""
            validate one form and compare it to a new form created with the validated instance
"""
valid, data = self.get_changes(form)
return self.render_to_json_response(data)
raise Http404
def get_form_kwargs(self):
kwargs = super(ModuleFormAPI, self).get_form_kwargs()
kwargs.update({
'instance': self.get_object(),
})
return kwargs
|
|
import os
from datetime import datetime
# Django settings for tapiriik project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = ["tapiriik.com", ".tapiriik.com", "localhost"]
USE_X_FORWARDED_HOST = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'C:/wamp/www/tapiriik/tapiriik/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE_JS = {
'tapiriik-js': {
'source_filenames': (
'js/jquery.address-1.5.min.js',
'js/tapiriik.js',
),
'output_filename': 'js/tapiriik.min.js',
},
'tapiriik-user-js': {
'source_filenames': (
'js/jstz.min.js',
'js/tapiriik-ng.js',
),
'output_filename': 'js/tapiriik-user.min.js',
}
}
PIPELINE_CSS = {
'tapiriik-css': {
'source_filenames': (
'css/style.css',
),
'output_filename': 'css/style.min.css',
},
}
PIPELINE_DISABLE_WRAPPER = True
# Make this unique, and don't share it with anybody.
# and yes, this is overridden in local_settings.py
SECRET_KEY = 'vag26gs^t+_y0msoemqo%_5gb*th(i!v$l6##bq9tu2ggcsn13'
# In production, webservers must have only the public key
CREDENTIAL_STORAGE_PUBLIC_KEY = b"NotTheRealKeyFYI"
CREDENTIAL_STORAGE_PRIVATE_KEY = None
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'tapiriik.web.startup.Startup',
'tapiriik.web.startup.ServiceWebStartup',
'tapiriik.auth.SessionAuth'
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies" # file-based sessions on windows are terrible
ROOT_URLCONF = 'tapiriik.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tapiriik.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"I:/wamp/www/tapiriik/tapiriik/web/templates",
)
TEMPLATE_CONTEXT_PROCESSORS = (
'tapiriik.web.views.ab_experiment_context',
'tapiriik.web.context_processors.user',
'tapiriik.web.context_processors.config',
'tapiriik.web.context_processors.js_bridge',
'tapiriik.web.context_processors.stats',
'tapiriik.web.context_processors.providers',
'tapiriik.web.context_processors.celebration_mode',
'django.core.context_processors.static',)
INSTALLED_APPS = (
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tapiriik.web',
'pipeline'
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'ERROR',
'class': 'logging.StreamHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'tapiriik.testing.MongoDBTestRunner'
MONGO_HOST = "localhost"
MONGO_REPLICA_SET = None
MONGO_CLIENT_OPTIONS = {}
MONGO_FULL_WRITE_CONCERN = 1
REDIS_HOST = "localhost"
REDIS_CLIENT_OPTIONS = {}
WEB_ROOT = 'http://localhost:8000'
PP_WEBSCR = "https://www.sandbox.paypal.com/cgi-bin/webscr"
PP_BUTTON_ID = "XD6G9Z7VMRM3Q"
PP_RECEIVER_ID = "NR6NTNSRT7NDJ"
PAYMENT_AMOUNT = 2
PAYMENT_SYNC_DAYS = 365.25
PAYMENT_CURRENCY = "USD"
# Celebration mode config
# Because why not, I'm waiting for my account to get to the front of the sync queue.
CELEBRATION_MODES = {
(
datetime(day=21, month=6, year=datetime.now().year, hour=0, minute=0),
datetime(day=21, month=6, year=datetime.now().year, hour=23, minute=59)
): {
"Logo": "tapiriik-inuktitut.png",
"Subtitle": "National Aboriginal Day",
"TitleText": "Our Home on Native Land"
}
}
# Hidden from regular signup
SOFT_LAUNCH_SERVICES = []
# Visibly disabled + excluded from synchronization
DISABLED_SERVICES = []
# Rejected by synchronization worker
REJECTED_SERVICES = []
# Services no longer available - will be removed across the site + excluded from sync.
WITHDRAWN_SERVICES = []
# Where to put per-user sync logs
USER_SYNC_LOGS = "./"
# Set at startup
SITE_VER = "unknown"
# Cache lots of stuff to make local debugging faster
AGGRESSIVE_CACHE = True
# Diagnostics auth, None = no auth
DIAG_AUTH_TOTP_SECRET = DIAG_AUTH_PASSWORD = None
SPORTTRACKS_OPENFIT_ENDPOINT = "https://api.sporttracks.mobi/api/v2"
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = './sent_emails'
WORKER_INDEX = int(os.environ.get("TAPIRIIK_WORKER_INDEX", 0))
# Used for distributing outgoing calls across multiple interfaces
HTTP_SOURCE_ADDR = "0.0.0.0"
RABBITMQ_BROKER_URL = "amqp://guest@localhost//"
RABBITMQ_USER_QUEUE_STATS_URL = "http://guest:guest@localhost:15672/api/queues/%2F/tapiriik-users?lengths_age=3600&lengths_incr=60&msg_rates_age=3600&msg_rates_incr=60"
GARMIN_CONNECT_USER_WATCH_ACCOUNTS = {}
from .local_settings import *
|
|
"""
This software is an implementation of
Deep MRI brain extraction: A 3D convolutional neural network for skull stripping
You can download the paper at http://dx.doi.org/10.1016/j.neuroimage.2016.01.024
If you use this software for your projects please cite:
Kleesiek and Urban et al, Deep MRI brain extraction: A 3D convolutional neural network for skull stripping,
NeuroImage, Volume 129, April 2016, Pages 460-469.
The MIT License (MIT)
Copyright (c) 2016 Gregor Urban, Jens Kleesiek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy
import numpy as np
import time
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
def max_pool_along_second_axis(sym_input, pool_factor):
""" for MLP and 2D conv"""
s = None
for i in range(pool_factor):
t = sym_input[:,i::pool_factor]
if s is None:
s = t
else:
s = T.maximum(s, t)
return s
class PerceptronLayer(object):
def __init__(self, input, n_in, n_out, batchsize, bDropoutEnabled_, ActivationFunction = 'tanh',
InputNoise=None, W=None, input_layer=None, b_experimental_inhibition_groups=False, flatW=False):
"""
Typical hidden layer of a MLP: units are fully-connected.
Weight matrix W is of shape (n_in,n_out), the bias vector b is of shape (n_out,).
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int or tuple/list
:param n_out: number of hidden units
:ActivationFunction: relu,sigmoid,tanh
:InputNoise: theano.shared, float32 range 0 to 1 (0 = no noise)
"""
self.input_layer = input_layer
self.ActivationFunction = ActivationFunction
if np.all(np.prod(n_out)==n_out):
self.output_shape = (batchsize, n_out)
else:
self.output_shape = n_out#(batchsize, n_out)
n_out = np.prod(n_out[1:])
if InputNoise!=None:
self.InputNoise=InputNoise
print("PerceptronLayer::"+str(PerceptronLayer)+"InputNoise =",InputNoise)
rng = numpy.random.RandomState(int(time.time()))
theano_rng = RandomStreams(rng.randint(2 ** 30))
self.input = theano_rng.binomial(size=input.shape, n=1, p=1 - self.InputNoise,dtype=theano.config.floatX) * input
else:
self.input = input
self.InputNoise=None
print("PerceptronLayer( #Inputs =",n_in,"#Outputs =",n_out,")")
if W==None:
W_values = numpy.asarray(
numpy.random.uniform(
-numpy.sqrt(6. / (n_in + n_out)),
numpy.sqrt(6. / (n_in + n_out)),
(n_in, n_out)),
dtype=theano.config.floatX)
if flatW:
self.flatW = theano.shared(value=W_values.flatten(), name='W_perceptron_flat_'+str(n_in)+'.'+str(n_out), borrow=True)
self.W = self.flatW.reshape(W_values.shape)
else:
self.W = theano.shared(value=W_values, name='W_perceptron'+str(n_in)+'.'+str(n_out), borrow=True)
else:
print("Directly using given W (",W,"), not training on it in this layer!") #as this should happen in the other layer where this W came from.
self.W = W
b_values = numpy.asarray(numpy.random.uniform(-1e-8,1e-8,(n_out,)), dtype=theano.config.floatX) #1e-2*numpy.ones((n_out,), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, name='b_perceptron'+str(n_in)+'.'+str(n_out), borrow=True)
self.conv_output = None
lin_output = T.dot(self.input,self.W) # + self.b
self.Activation_noise = None
if bDropoutEnabled_:
print("Dropout...")
self.Activation_noise = theano.shared(np.float32(0.5))
rng = T.shared_randomstreams.RandomStreams(int(time.time()))
#(n_out,)
self.dropout_gate = np.float32(2.)*rng.binomial(lin_output.shape,np.float32( 1.), np.float32(1.)-self.Activation_noise ,dtype=theano.config.floatX) #rng.binomial((n_out,),1,1-self.Activations_noise_min,dtype=theano.config.floatX)
lin_output = lin_output * self.dropout_gate#.dimshuffle(('x', 0)) #( 1 - T.maximum(0.8-self.output, T.zeros_like(self.output))*
lin_output = lin_output + self.b #add b after dropout
if ActivationFunction=='tanh': #range = [-1,1]
self.output = T.tanh(lin_output)# shape: (batch_size, num_outputs)
elif ActivationFunction=='relu' or ActivationFunction=='ReLU': #rectified linear unit ,range = [0,inf]
self.ActivationFunction = 'relu'
self.output = T.maximum(lin_output,T.zeros_like(lin_output)) # 137.524226165 iterations/sec
elif ActivationFunction=='abs': #symmetrically rectified linear unit ,range = [0,inf]
self.output = T.abs_(lin_output)
elif ActivationFunction=='sigmoid': #range = [0,1]
print("WARNING: sig() used! Consider using abs() or relu() instead") # (abs > relu > tanh > sigmoid)
b_values = 0.5*numpy.ones( (n_out,), dtype=theano.config.floatX)
self.b.set_value(b_values)
self.output = T.nnet.sigmoid(lin_output)#1/(1 + T.exp(-lin_output))
elif ActivationFunction=='linear':
if b_experimental_inhibition_groups==0:
print("Warning: linear activation function! I hope this is the output layer?")
self.output = (lin_output)
elif ActivationFunction.startswith("maxout"):
r=int(ActivationFunction.split(" ")[1])
assert r>=2
            n_out = n_out // r  # keep the unit count an integer
self.output = max_pool_along_second_axis(lin_output,r)
else:
            raise NotImplementedError("options are: ActivationFunction={tanh, relu, abs, sigmoid, linear, maxout <r>}")
self.lin_output=lin_output
self.class_probabilities = T.nnet.softmax(lin_output)# shape: (batch_size, num_outputs), num_outputs being e.g. the number of classes
# compute prediction as class whose probability is maximal (in symbolic form)
self.class_prediction = T.argmax(self.class_probabilities, axis=1)# shape: (batch_size,)
if len(self.output_shape)>2:
self.output = self.output.reshape(self.output_shape)
self.n_in = n_in
if W==None:
try:
a = self.flatW
self.params = [self.flatW, self.b]
except:
self.params = [self.W, self.b]
else:
self.params = [self.b]
def random_sparse_initialization(self, num_nonzero = 15, scale = 1.):
""" exactly <num_nonzero> incoming weights per neuron will have a value of <scale>, the others will have a tiny random value"""
n_in = self.n_in
n_out = self.output_shape[1]
print("MLP::random_sparse_initialization::(num_nonzero =",num_nonzero,", scale =",scale,")")
assert n_in > num_nonzero
w = numpy.asarray(numpy.random.uniform(
-numpy.sqrt(0.1 / (n_in + n_out)),
numpy.sqrt(0.1 / (n_in + n_out)),
(n_in, n_out)), dtype=theano.config.floatX)
base = np.random.permutation(range(n_in))
for i in range(n_out):
pick = np.random.permutation(base)[:num_nonzero]
w[:,i][pick] = scale
self.W.set_value(w)
def randomize_weights(self, scale_w = 1.0):
n_in = self.n_in
n_out = self.output_shape[1]
self.W.set_value(numpy.asarray(numpy.random.uniform(
-numpy.sqrt(scale_w * 6. / (n_in + n_out)),
numpy.sqrt(scale_w * 6. / (n_in + n_out)),
(n_in, n_out)), dtype=theano.config.floatX))
if self.ActivationFunction == 'relu':
b = 1.
elif self.ActivationFunction == 'sigmoid':
b=0.5
else:
b=0
self.b.set_value(b * numpy.ones((n_out,), dtype=theano.config.floatX))
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
return -T.mean(T.log(self.class_probabilities)[T.arange(y.shape[0]), y])
def negative_log_likelihood_modulated_margin(self, y, modulation=1, margin=0.7, penalty_multiplier = 0):
print("negative_log_likelihood_modulated_margin:: Penalty down to ",100.*penalty_multiplier,"% if prediction is close to the target! Threshold is",margin)
penalty_multiplier = np.float32(penalty_multiplier)
margin = np.float32(margin)
selected = self.class_probabilities[T.arange(y.shape[0]),y]
r = modulation*T.log(selected)
return -T.mean(r*(selected<margin) + (0 if penalty_multiplier==0 else penalty_multiplier*r*(selected>=margin)) )
def negative_log_likelihood_array(self, y):
"""Return the negative log-likelihood of the prediction
of this model under a given target distribution for each element of the batch individually.
"""
return -T.log(self.class_probabilities)[T.arange(y.shape[0]), y]
def negative_log_likelihood_weighted(self, y, weight_vect):
"""
weight_vect must be a vector of float32 of length = number_of_classes.
Values: 1.0 (default), w < 1.0 (less important), w > 1.0 (more important class)
"""
return -T.mean( weight_vect.dimshuffle('x',0)*(T.log(self.class_probabilities )[T.arange(y.shape[0]), y]))
def squared_distance(self, Target, Mask = None):
"""Target is the TARGET image (vectorized), -> shape(x) = (batchsize, imgsize**2)
output: scalar float32
mask: vectorized, 1==hole, 0==no_hole (== DOES NOT TRAIN ON NON-HOLES)
"""
        if Mask is None:
            return T.mean( (self.output - Target)**2 )
        else:
            print("squared_distance::Masked")
            return T.mean( ((self.output - Target)*T.concatenate( (Mask,Mask,Mask),axis=1 ) )**2 ) #assuming RGB input
def squared_distance_array(self, Target, Mask = None):
"""Target is the TARGET image (vectorized), -> shape(x) = (batchsize, imgsize**2)
output: scalar float32
mask: vectorized, 1==hole, 0==no_hole (== DOES NOT TRAIN ON NON-HOLES)
"""
        if Mask is None:
            return T.mean( (self.output - Target)**2 ,axis=1)
        else:
            return T.mean( ((self.output - Target)*T.concatenate( (Mask,Mask,Mask),axis=1 ))**2 ,axis=1) #assuming RGB input
def __make_window(self):
print("window is on 32x32, fixed sigma, assuming RGB.")
denom = 29.8
x0= 16
sig = 19
fun = lambda z,x,y: (32/denom* np.exp(-(abs(x - x0))**3/(2*sig**3)))*(32/denom*np.exp(-(abs(y - x0))**3/(2*sig**3)))#, {x, 0, 32}, {y, 0, 32}
return np.fromfunction(fun,(3,32,32))
def cross_entropy(self, Target, Mask = None):#, index, new_shape):
"""Target is the TARGET image (vectorized), -> shape(x) = (batchsize, imgsize**2)
output: scalar float32
"""
        if Mask is None:
            #XX = window#T.TensorConstant(T.TensorType(theano.config.floatX,[True,False])(),data=window)
            return -T.mean( (T.log(self.class_probabilities )*Target + T.log(1.0 - self.class_probabilities)*(1.0-Target)) )# #.reshape(new_shape)[index[0]:index[2],index[1]:index[3]]
        else:
            print("cross_entropy::Masked, no window")
            return -T.mean( (T.log(self.class_probabilities )*Target + T.log(1.0 - self.class_probabilities)*(1.0-Target))*T.concatenate( (Mask,Mask,Mask),axis=1 ) )# #.reshape(new_shape)[index[0]:index[2],index[1]:index[3]] #assuming RGB input
def cross_entropy_array(self, Target, Mask = None):#, index, new_shape):
"""Target is the TARGET image (vectorized), -> shape(x) = (batchsize, imgsize**2)
the output is of length: <batchsize>, Use cross_entropy() to get a scalar output.
"""
        if Mask is None:
            return -T.mean( (T.log(self.class_probabilities )*Target + T.log(1.0 - self.class_probabilities)*(1.0-Target)) ,axis=1)
        else:
            return -T.mean( (T.log(self.class_probabilities )*Target + T.log(1.0 - self.class_probabilities)*(1.0-Target) )*T.concatenate( (Mask,Mask,Mask),axis=1 ),axis=1) #assuming RGB input
def errors(self, y):
""" Return a float representing the rel. number of errors in the minibatch (0 to 1=all wrong)
0-1 loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of class_prediction
if y.ndim != self.class_prediction.ndim:
raise TypeError('y should have the same shape as self.class_prediction',
('y', y.type, 'class_prediction', self.class_prediction.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.class_prediction, y), dtype='float32')
else:
raise NotImplementedError()
def CompileAutoencoderTrainingFunction(self, cnn_symbolic_input_x, cnn_symbolic_SGD_LR , b_use_cross_entropy_err=True, mode="sgd"):
"""
using no momentum
cnn_symbolic_input_x = cnn.x
"""
        all_params = list(self.params)  # copy: += below would otherwise extend self.params in place
        xin = self.input_layer
        layerz = 1
        while xin is not None:
            all_params += xin.params
            xin = xin.input_layer
            layerz += 1
print("CompileAutoencoderTrainingFunction... ChainLength =",layerz)
TARGET = T.fmatrix('x_raw_input')
        if not b_use_cross_entropy_err:
print("Using squared error (not using cross_entropy): training on output (e.g. sigmoid!) directly instead of softmax")
cost = self.squared_distance(TARGET)
else:
cost = self.cross_entropy(TARGET)
# create a list of gradients for all model parameters
self.output_layer_Gradients = T.grad(cost, all_params)
assert len(all_params)==len(self.output_layer_Gradients)
if mode!="sgd":
RPROP_penalty=0.25
RPORP_gain=0.25
self.RPROP_LRs=[]
self.last_grads=[]
for para in all_params:
self.RPROP_LRs.append(theano.shared( 1e-4*np.ones(para.get_value().shape,dtype=theano.config.floatX) , name=para.name+str('_RPORP') , borrow=0))
self.last_grads.append(theano.shared( np.zeros(para.get_value().shape,dtype=theano.config.floatX) , name=para.name+str('_LG') , borrow=0))
else:
self.last_grads = self.RPROP_LRs = [0]*len(all_params)
self.SGD_updates=[]
for param_i, grad_i, last_grad_i, pLR_i in zip(all_params, self.output_layer_Gradients, self.last_grads, self.RPROP_LRs ):
print("warning: not sgd")
if mode=="sgd":
self.SGD_updates.append((param_i, param_i - cnn_symbolic_SGD_LR * grad_i ))#last_grad_i )) # use if Global_use_unique_LR==1 (1-self.SGD_weight_decay)*param_i
else:
self.SGD_updates.append((pLR_i, T.minimum( T.maximum( pLR_i * ( 1 - np.float32(RPROP_penalty)* ((last_grad_i*grad_i) < -1e-9) + np.float32(RPORP_gain)* ((last_grad_i*grad_i) > 1e-11) ) , 2e-7*T.ones_like(pLR_i) ),8e-3 * T.ones_like(pLR_i)) ))
self.SGD_updates.append((param_i, param_i - pLR_i * grad_i/(T.abs_(grad_i) + 1e-6) ))
self.SGD_updates.append((last_grad_i, grad_i ))
self.train_model_regression = theano.function([cnn_symbolic_input_x, TARGET], cost, updates=self.SGD_updates)# first input: image with holes etc. second input: clean image
self.show_reconstruction = theano.function([cnn_symbolic_input_x], self.output if b_use_cross_entropy_err==False else self.class_probabilities) #input: holed or normal....
return self.train_model_regression, self.show_reconstruction
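# --- Illustrative sketch (not part of the original code) --------------------
# The two pieces of machinery above that are easiest to misread are the
# negative log-likelihood cost and the RPROP-style update built in
# CompileAutoencoderTrainingFunction.  The plain-numpy helpers below mirror
# those expressions one-to-one; the function names and defaults are ours,
# added purely for illustration.
def _nll_sketch(class_probabilities, y):
    """Mean of -log p(correct class), as in negative_log_likelihood()."""
    import numpy as np
    return -np.mean(np.log(class_probabilities[np.arange(len(y)), y]))
def _rprop_step_sketch(param, grad, last_grad, step, penalty=0.25, gain=0.25):
    """One RPROP-style step: adapt the per-parameter step size by the
    gradient-sign agreement, clip it to [2e-7, 8e-3], then move by the
    gradient's sign only (grad/(|grad|+eps))."""
    import numpy as np
    sign_stable = (last_grad * grad) > 1e-11
    sign_flipped = (last_grad * grad) < -1e-9
    step = np.clip(step * (1.0 - penalty * sign_flipped + gain * sign_stable),
                   2e-7, 8e-3)
    new_param = param - step * grad / (np.abs(grad) + 1e-6)
    return new_param, step, grad  # returned grad becomes last_grad next call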
|
|
from weblib.error import ResponseNotValid
from tests.util import BaseGrabTestCase, build_grab, build_spider
from grab import Grab
from grab.spider import base
from grab.spider import Spider, Task, SpiderMisuseError, NoTaskHandler
from grab.spider.error import SpiderError
class SimpleSpider(Spider):
def task_baz(self, grab, unused_task):
pass
class TestSpiderTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_task_priority(self):
# Automatic random priority
base.RANDOM_TASK_PRIORITY_RANGE = (10, 20)
bot = build_spider(SimpleSpider, priority_mode='random')
bot.setup_queue()
task = Task('baz', url='http://xxx.com')
self.assertEqual(task.priority, None)
bot.add_task(task)
self.assertTrue(10 <= task.priority <= 20)
# Automatic constant priority
base.DEFAULT_TASK_PRIORITY = 33
bot = build_spider(SimpleSpider, priority_mode='const')
bot.setup_queue()
task = Task('baz', url='http://xxx.com')
self.assertEqual(task.priority, None)
bot.add_task(task)
self.assertEqual(33, task.priority)
        # Automatic priority does not override explicitly set priority
base.DEFAULT_TASK_PRIORITY = 33
bot = build_spider(SimpleSpider, priority_mode='const')
bot.setup_queue()
task = Task('baz', url='http://xxx.com', priority=1)
self.assertEqual(1, task.priority)
bot.add_task(task)
self.assertEqual(1, task.priority)
self.assertRaises(SpiderMisuseError,
lambda: SimpleSpider(priority_mode='foo'))
def test_task_url(self):
bot = build_spider(SimpleSpider, )
bot.setup_queue()
task = Task('baz', url='http://xxx.com')
self.assertEqual('http://xxx.com', task.url)
bot.add_task(task)
self.assertEqual('http://xxx.com', task.url)
self.assertEqual(None, task.grab_config)
grab = Grab(url='http://yyy.com')
task = Task('baz', grab=grab)
bot.add_task(task)
self.assertEqual('http://yyy.com', task.url)
self.assertEqual('http://yyy.com', task.grab_config['url'])
def test_task_clone(self):
bot = build_spider(SimpleSpider, )
bot.setup_queue()
task = Task('baz', url='http://xxx.com')
bot.add_task(task.clone())
# Pass grab to clone
task = Task('baz', url='http://xxx.com')
grab = Grab()
grab.setup(url='zzz')
bot.add_task(task.clone(grab=grab))
# Pass grab_config to clone
task = Task('baz', url='http://xxx.com')
grab = Grab()
grab.setup(url='zzz')
bot.add_task(task.clone(grab_config=grab.config))
def test_task_clone_with_url_param(self):
task = Task('baz', url='http://example.com/path')
task2 = task.clone(url='http://example.com/new')
self.assertEqual(task2.name, 'baz')
self.assertEqual(task2.url, 'http://example.com/new')
def test_task_useragent(self):
bot = build_spider(SimpleSpider, )
bot.setup_queue()
grab = Grab()
grab.setup(url=self.server.get_url())
grab.setup(user_agent='Foo')
task = Task('baz', grab=grab)
bot.add_task(task.clone())
bot.run()
self.assertEqual(self.server.request['headers']['User-Agent'], 'Foo')
def test_task_nohandler_error(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider, )
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
self.assertRaises(NoTaskHandler, bot.run)
def test_task_raw(self):
class TestSpider(Spider):
def task_page(self, grab, unused_task):
self.stat.collect('codes', grab.doc.code)
self.server.response['code'] = 502
bot = build_spider(TestSpider, network_try_limit=1)
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.add_task(Task('page', url=self.server.get_url()))
bot.run()
self.assertEqual(0, len(bot.stat.collections['codes']))
bot = build_spider(TestSpider, network_try_limit=1)
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url(), raw=True))
bot.add_task(Task('page', url=self.server.get_url(), raw=True))
bot.run()
self.assertEqual(2, len(bot.stat.collections['codes']))
def test_task_callback(self):
class TestSpider(Spider):
def task_page(self, unused_grab, unused_task):
self.meta['tokens'].append('0_handler')
class FuncWithState(object):
def __init__(self, tokens):
self.tokens = tokens
def __call__(self, grab, task):
self.tokens.append('1_func')
tokens = []
func = FuncWithState(tokens)
bot = build_spider(TestSpider, )
bot.meta['tokens'] = tokens
bot.setup_queue()
# classic handler
bot.add_task(Task('page', url=self.server.get_url()))
        # callback option overrides the classic handler
bot.add_task(Task('page', url=self.server.get_url(), callback=func))
# callback and null task name
bot.add_task(Task(name=None, url=self.server.get_url(), callback=func))
# callback and default task name
bot.add_task(Task(url=self.server.get_url(), callback=func))
bot.run()
self.assertEqual(['0_handler', '1_func', '1_func', '1_func'],
sorted(tokens))
def test_task_url_and_grab_options(self):
class TestSpider(Spider):
def setup(self):
# pylint: disable=attribute-defined-outside-init
self.done = False
def task_page(self, unused_grab, unused_task):
# pylint: disable=attribute-defined-outside-init
self.done = True
bot = build_spider(TestSpider, )
bot.setup_queue()
grab = Grab()
grab.setup(url=self.server.get_url())
self.assertRaises(SpiderMisuseError, Task,
'page', grab=grab, url=self.server.get_url())
def test_task_invalid_name(self):
self.assertRaises(SpiderMisuseError, Task,
'generator', url='http://example.com')
def test_task_constructor_invalid_args(self):
# no url, no grab, no grab_config
self.assertRaises(SpiderMisuseError, Task, 'foo')
# both url and grab_config
self.assertRaises(SpiderMisuseError, Task, 'foo',
url=1, grab_config=1)
# both grab and grab_config
self.assertRaises(SpiderMisuseError, Task, 'foo',
grab=1, grab_config=1)
def test_task_clone_invalid_args(self):
task = Task('foo', url='http://example.com')
# both url and grab
self.assertRaises(SpiderMisuseError, task.clone,
url=1, grab=1)
# both url and grab_config
self.assertRaises(SpiderMisuseError, task.clone,
url=1, grab_config=1)
# both grab_config and grab
self.assertRaises(SpiderMisuseError, task.clone,
grab=1, grab_config=1)
def test_task_clone_grab_config_and_url(self):
grab = build_grab()
grab.setup(url='http://foo.com/')
task = Task('foo', grab=grab)
task2 = task.clone(url='http://bar.com/')
self.assertEqual(task2.url, 'http://bar.com/')
self.assertEqual(task2.grab_config['url'], 'http://bar.com/')
def test_task_clone_kwargs(self):
grab = build_grab()
grab.setup(url='http://foo.com/')
task = Task('foo', grab=grab, foo=1)
task2 = task.clone(foo=2)
self.assertEqual(2, task2.foo) # pylint: disable=no-member
def test_task_comparison(self):
task1 = Task('foo', url='http://foo.com/', priority=1)
task2 = Task('foo', url='http://foo.com/', priority=2)
task3 = Task('foo', url='http://foo.com/')
        # If both tasks have priorities then the tasks are
        # compared by their priorities
        self.assertTrue(task1 < task2)
        # If any of the compared tasks does not have a priority
        # then the tasks are equal
self.assertTrue(task1 == task3)
self.assertTrue(task3 == task3)
def test_task_get_fallback_handler(self):
class TestSpider(Spider):
def do_smth(self, task):
pass
def task_bar_fallback(self, task):
pass
task1 = Task('foo', url='http://foo.com/', fallback_name='do_smth')
task2 = Task('bar', url='http://foo.com/')
task3 = Task(url='http://foo.com/')
bot = build_spider(TestSpider, )
self.assertEqual(task1.get_fallback_handler(bot), bot.do_smth)
self.assertEqual(task2.get_fallback_handler(bot),
bot.task_bar_fallback)
self.assertEqual(task3.get_fallback_handler(bot), None)
def test_update_grab_instance(self):
class TestSpider(Spider):
def update_grab_instance(self, grab):
grab.setup(timeout=77)
def task_generator(self):
yield Task('page', url=self.meta['server'].get_url())
yield Task('page', grab=Grab(url=self.meta['server'].get_url(),
timeout=1))
def task_page(self, grab, unused_task):
self.stat.collect('points', grab.config['timeout'])
bot = build_spider(TestSpider, meta={'server': self.server})
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.add_task(Task('page', grab=Grab(url=self.server.get_url(),
timeout=1)))
bot.run()
self.assertEqual(set([77]), set(bot.stat.collections['points']))
def test_create_grab_instance(self):
class TestSpider(Spider):
def create_grab_instance(self, **kwargs):
grab = super(TestSpider, self).create_grab_instance(**kwargs)
grab.setup(timeout=77)
return grab
def task_generator(self):
yield Task('page', url=self.meta['server'].get_url())
yield Task('page', grab=Grab(url=self.meta['server'].get_url(),
timeout=76))
def task_page(self, grab, unused_task):
self.stat.collect('points', grab.config['timeout'])
bot = build_spider(TestSpider, meta={'server': self.server})
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.add_task(Task('page', grab=Grab(url=self.server.get_url(),
timeout=75)))
bot.run()
self.assertEqual(set([77, 76, 75]),
set(bot.stat.collections['points']))
def test_add_task_invalid_url_no_error(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider, )
bot.setup_queue()
bot.add_task(Task('page', url='zz://zz'))
self.assertEqual(0, bot.task_queue.size())
bot.add_task(Task('page', url='zz://zz'), raise_error=False)
self.assertEqual(0, bot.task_queue.size())
bot.add_task(Task('page', url='http://example.com/'))
self.assertEqual(1, bot.task_queue.size())
def test_add_task_invalid_url_raise_error(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider, )
bot.setup_queue()
self.assertRaises(SpiderError, bot.add_task,
Task('page', url='zz://zz'), raise_error=True)
self.assertEqual(0, bot.task_queue.size())
bot.add_task(Task('page', url='http://example.com/'))
self.assertEqual(1, bot.task_queue.size())
def test_worker_restored(self):
class TestSpider(Spider):
def task_page(self, unused_grab, unused_task):
pass
bot = build_spider(
TestSpider,
parser_requests_per_process=2,
)
bot.setup_queue()
for _ in range(5):
bot.add_task(Task('page', url=self.server.get_url()))
bot.run()
self.assertTrue(bot.stat.counters['parser:worker-restarted'] == 2)
def test_task_clone_post_request(self):
class TestSpider(Spider):
def task_foo(self, unused_grab, task):
if not task.get('fin'):
yield task.clone(fin=True)
bot = build_spider(TestSpider)
bot.setup_queue()
grab = Grab()
grab.setup(url=self.server.get_url(), post={'x': 'y'})
task = Task('foo', grab=grab)
bot.add_task(task)
bot.run()
self.assertEqual('POST', self.server.request['method'])
def test_response_not_valid(self):
class SomeSimpleSpider(Spider):
def task_page(self, unused_grab, unused_task):
self.stat.inc('xxx')
raise ResponseNotValid
bot = build_spider(SomeSimpleSpider)
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.run()
self.assertEqual(bot.task_try_limit, bot.stat.counters['xxx'])
def test_task_clone_without_modification(self):
class TestSpider(Spider):
def task_page(self, grab, unused_task):
grab2 = grab.clone()
yield Task('page2', grab=grab2)
def task_page2(self, grab, task):
pass
bot = build_spider(TestSpider)
bot.setup_queue()
task = Task('page', url=self.server.get_url())
bot.add_task(task)
bot.run()
self.assertEqual(1, bot.stat.counters['spider:task-page'])
self.assertEqual(1, bot.stat.counters['spider:task-page2'])
def test_task_generator_no_yield(self):
class TestSpider(Spider):
def task_page(self, unused_grab, unused_task):
self.stat.inc('foo')
def task_generator(self):
# pylint: disable=using-constant-test
if False:
yield None
bot = build_spider(TestSpider)
bot.setup_queue()
task = Task('page', url=self.server.get_url())
bot.add_task(task)
bot.run()
self.assertEqual(1, bot.stat.counters['foo'])
def test_initial_urls(self):
url = self.server.get_url()
class TestSpider(Spider):
initial_urls = [url]
def task_initial(self, unused_grab, unused_task):
self.stat.inc('foo', 1)
bot = build_spider(TestSpider)
bot.run()
self.assertEqual(1, bot.stat.counters['foo'])
|
|
"""Interface specification for publishing and receiving data in MCL.
This module defines an interface for publishing and receiving data in MCL. This
is done by providing abstract objects for broadcasting and listening for
data. The interface defined by these objects helps ensure new interface
implementations will integrate with MCL.
The following abstract objects are defined:
- :class:`~.abstract.Connection`
- :class:`.RawBroadcaster`
- :class:`.RawListener`
For examples of how to use :mod:`.abstract` to integrate a new network
interface into MCL see :mod:`.network.udp`.
.. sectionauthor:: Asher Bender <a.bender@acfr.usyd.edu.au>
.. codeauthor:: Asher Bender <a.bender@acfr.usyd.edu.au>
"""
import abc
import sys
import keyword
import operator
import textwrap
import mcl.event.event
class _ConnectionMeta(type):
"""Meta-class for manufacturing network interface connection objects.
The :class:`._ConnectionMeta` object is a meta-class designed to
manufacture MCL network interface :class:`~.abstract.Connection`
classes. Connection objects behave like namedtuples. The meta-class works
by dynamically adding mandatory and optional attributes to a class
definition at run time if and ONLY if the class inherits from
:class:`~.abstract.Connection`.
Classes that inherit from :class:`~.abstract.Connection` must implement the
attributes `mandatory`, `broadcaster` and `listener` where:
- `mandatory` is a list of strings defining the names of mandatory
          connection attributes that must be present when instances of the new
          :class:`~.abstract.Connection` objects are created. During instantiation the input
list *args is mapped to the attributes defined by `mandatory`. If
`mandatory` is not present, a TypeError will be raised.
- `broadcaster` is a reference to the :class:`.RawBroadcaster` object
associated with the :class:`~.abstract.Connection` object.
- `listener` is a reference to the :class:`.RawListener` object
associated with the :class:`~.abstract.Connection` object.
Classes that inherit from :class:`~.abstract.Connection` can optionally
implement the `optional` attribute where:
- `optional` is a dictionary of optional connection parameters and
their defaults. Keywords represent attribute names and the
corresponding value represents the default value. During
instantiation of the new Connection object, **kwargs is mapped to the
attributes defined by `optional`. Note that `optional` is not
required.
These attributes are used to manufacture an object to contain the
definition. See :class:`~.abstract.Connection` for implementation detail.
Note that classes that do not inherit from :class:`~.abstract.Connection`
will be left unmodified. These are the :class:`~.abstract.Connection`
object and objects which sub-class a sub-class of
:class:`~.abstract.Connection`.
Raises:
TypeError: If the parent class is a :class:`~.abstract.Connection`
object or any of the mandatory or optional attributes are
ill-specified.
ValueError: If any of the mandatory or optional attribute names are
ill-specified.
"""
def __new__(cls, name, bases, dct):
"""Manufacture a network interface connection class.
Manufacture a network interface class for objects inheriting from
:class:`~.abstract.Connection`. This is done by searching the input
dictionary `dct` for the keys `mandatory` and `optional` where:
- `mandatory` is a list of strings defining the names of mandatory
              connection attributes that must be present when instances of the new
              :class:`~.abstract.Connection` objects are created. During instantiation the
input list *args is mapped to the attributes defined by
`mandatory`. If `mandatory` is not present, a TypeError will be
raised.
- `broadcaster` is a reference to the :class:`.RawBroadcaster`
object associated with the :class:`~.abstract.Connection`
object.
- `listener` is a reference to the :class:`.RawListener` object
associated with the :class:`~.abstract.Connection` object.
- `optional` is a dictionary of optional connection parameters and
their defaults. Keywords represent attribute names and the
corresponding value represents the default value. During
instantiation of the new Connection object, the input dictionary
**kwargs is mapped to the attributes defined by
`optional`. `optional` is not required.
A new connection class is manufactured using the definition specified
by the attributes. Note that none of the attribute names can be set to
`mandatory`, `broadcaster` or `listener`.
Args:
cls (class): is the class being instantiated.
name (string): is the name of the new class.
bases (tuple): base classes of the new class.
dct (dict): dictionary mapping the class attribute names to objects.
Returns:
:class:`~.abstract.Connection`: sub-class of
:class:`~.abstract.Connection` with attributes defined by the
original `mandatory` and `optional` attributes.
Raises:
TypeError: If the mandatory or optional attributes are
ill-specified.
ValueError: If any of the mandatory or optional attribute names are
ill-specified.
"""
# NOTE: This code essentially manufactures a 'namedtuple' object using
# code adapted from the python library:
#
# https://docs.python.org/2/library/collections.html#collections.namedtuple
#
# This allows the attributes in the object to be immutable
        # (read-only) once created. Note that all of the objects that are
# manufactured also inherit from the Connection() class.
# Do not look for 'mandatory'/'optional' attributes in the Connection()
# base class.
if (name == 'Connection') and (bases == (tuple,)):
return super(_ConnectionMeta, cls).__new__(cls, name, bases, dct)
# Do not look for 'mandatory'/'optional' attributes in sub-classes of
# the Connection() base class.
elif bases != (Connection,):
return super(_ConnectionMeta, cls).__new__(cls, name, bases, dct)
# Objects inheriting from Connection() are required to have a
# 'mandatory' attribute. The 'optional' and 'docstring' are optional.
mandatory = dct.get('mandatory', {})
broadcaster = dct.get('broadcaster', None)
listener = dct.get('listener', None)
optional = dct.get('optional', {})
# Ensure 'mandatory' is a list or tuple of strings.
if (((not isinstance(mandatory, (list, tuple))) or
(not all(isinstance(item, basestring) for item in mandatory)))):
msg = "'mandatory' must be a string or a list/tuple or strings."
raise TypeError(msg)
# Ensure 'broadcaster' is a RawBroadcaster() object.
if not broadcaster or not issubclass(broadcaster, RawBroadcaster):
msg = "'broadcaster' must reference a RawBroadcaster() sub-class."
raise TypeError(msg)
# Ensure 'listener' is a RawListener() object.
if not listener or not issubclass(listener, RawListener):
msg = "'listener' must reference a RawListener() sub-class."
raise TypeError(msg)
        # Ensure 'optional' is a dictionary.
if not isinstance(optional, (dict,)):
msg = "'optional' must be a dictionary."
raise TypeError(msg)
# Ensure all keys in 'optional' are a string.
if not all(isinstance(key, basestring) for key in optional.keys()):
msg = "All keys in 'optional' must be strings."
raise TypeError(msg)
# Add optional fields.
attrs = tuple(list(mandatory) + list(optional.keys()))
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template
# injection attacks.
for attr in (name,) + attrs:
if not all(c.isalnum() or c == '_' for c in attr):
msg = 'Type names and field names can only contain '
msg += 'alphanumeric characters and underscores: %r'
raise ValueError(msg % attr)
if keyword.iskeyword(attr):
msg = 'Type names and field names cannot be a keyword: %r'
raise ValueError(msg % attr)
if attr[0].isdigit():
msg = 'Type names and field names cannot start with a number: '
msg += '%r'
raise ValueError(msg % attr)
# Detect duplicate attribute names.
invalid = ['_mandatory', '_optional', 'broadcaster', 'listener', ]
seen_attr = set()
for attr in attrs:
if attr in invalid:
msg = "Field names cannot be %r." % invalid
raise ValueError(msg)
if attr.startswith('_'):
msg = 'Field names cannot start with an underscore: %r' % attr
raise ValueError(msg)
if attr in seen_attr:
raise ValueError('Encountered duplicate field name: %r' % attr)
seen_attr.add(attr)
# Create 'prototype' for defining a new object.
numfields = len(attrs)
inputtxt = ', '.join(mandatory)
if optional:
for key, value in optional.iteritems():
inputtxt += ", %s=%r" % (key, value)
# Create strings for arguments and printing.
argtxt = repr(attrs).replace("'", "")[1:-1]
reprtxt = ', '.join('%s=%%r' % attr for attr in attrs)
# Create mapping object (key-value pairs).
dicttxt = ['%r: t[%d]' % (n, p) for p, n in enumerate(attrs)]
dicttxt = ', '.join(dicttxt)
def execute_template(template, key, namespace={}):
template = textwrap.dedent(template)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
return namespace[key]
__new__ = execute_template("""
def __new__(cls, %s):
return tuple.__new__(cls, (%s))
""" % (inputtxt, argtxt), '__new__')
_make = execute_template("""
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %d:
msg = 'Expected %d arguments, got %%d' %% len(result)
raise TypeError(msg)
return result
""" % (name, numfields, numfields), '_make')
__repr__ = execute_template("""
def __repr__(self):
return '%s(%s)' %% self
""" % (name, reprtxt), '__repr__')
to_dict = execute_template("""
def to_dict(t):
'Return a new dict which maps field names to their values'
return {%s}
""" % (dicttxt), 'to_dict')
from_dict = execute_template("""
@classmethod
def from_dict(cls, dictionary):
'''Make a new %s object from a dictionary
If optional attributes are not specified, their default values are
used.
'''
# Gather mandatory attributes.
args = list()
for attr in %r:
if attr not in dictionary:
msg = "Expected the attribute: '%%s'." %% attr
raise AttributeError(msg)
else:
args.append(dictionary[attr])
# Gather optional attributes.
for attr, value in cls._optional.iteritems():
if attr in dictionary:
args.append(dictionary[attr])
else:
args.append(value)
return tuple.__new__(cls, tuple(args))
""" % (name, mandatory), 'from_dict')
_replace = execute_template("""
def _replace(self, **kwds):
'Return a new %s object replacing specified fields with new values'
result = self._make(map(kwds.pop, %r, self))
if kwds:
msg = 'Got unexpected field names: %%r' %% kwds.keys()
raise ValueError(msg)
return result
""" % (name, attrs), '_replace')
def __getnewargs__(self): # pragma: no cover
return tuple(self)
# Remove specification.
if 'mandatory' in dct: del dct['mandatory']
if 'optional' in dct: del dct['optional']
# Add methods to class definition.
dct['__slots__'] = ()
dct['_mandatory'] = mandatory
dct['_optional'] = optional
dct['__new__'] = __new__
dct['_make'] = _make
dct['__repr__'] = __repr__
dct['to_dict'] = to_dict
dct['from_dict'] = from_dict
dct['_replace'] = _replace
dct['__getnewargs__'] = __getnewargs__
# Add broadcaster and listener.
dct['broadcaster'] = property(lambda self: broadcaster)
dct['listener'] = property(lambda self: listener)
# Add properties (read-only access).
for i, attr in enumerate(attrs):
dct[attr] = property(operator.itemgetter(i))
# Create object.
obj = super(_ConnectionMeta, cls).__new__(cls, name, bases, dct)
# For pickling to work, the __module__ variable needs to be set to the
# frame where the named tuple is created. Bypass this step in
        # environments where sys._getframe is not defined (Jython for example).
if hasattr(sys, '_getframe'):
obj.__module__ = sys._getframe(1).f_globals.get('__name__',
'__main__')
return obj
class Connection(tuple):
"""Base class for MCL network interface connection objects.
The :class:`~.abstract.Connection` object provides a base class for
defining MCL network interface connection objects. Classes that inherit
from :class:`~.abstract.Connection` **must** implement the attributes
`mandatory`, `broadcaster` and `listener` where:
- `mandatory` is a list of strings defining the names of mandatory
          connection attributes that must be present when instances of the new
          :class:`~.abstract.Connection` objects are created. During instantiation the input
list \*args is mapped to the attributes defined by `mandatory`. If
`mandatory` is not present, a :exc:`~python:exceptions.TypeError`
will be raised.
- `broadcaster` is a reference to the :class:`.RawBroadcaster` object
associated with the :class:`~.abstract.Connection` object.
- `listener` is a reference to the :class:`.RawListener` object
associated with the :class:`~.abstract.Connection` object.
Classes that inherit from :class:`~.abstract.Connection` can **optionally**
implement the attribute `optional` where:
- `optional` is a dictionary of optional connection parameters and
their defaults. Keywords represent attribute names and the
corresponding value represents the default value. During
instantiation of the new Connection object, \**kwargs is mapped to
the attributes defined by `optional`.
These attributes form the definition of the network interface connection
and allow :class:`~.abstract.Connection` to manufacture a connection class
adhering to the specified definition. None of the attribute names can be
set to `mandatory`, `broadcaster`, `listener` or `optional`.
:class:`~.abstract.Connection` objects behave like
:obj:`python:collections.namedtuple` objects. That is,
:class:`~.abstract.Connection` objects have fields accessible by attribute
lookup as well as being indexable and iterable. However, since
:class:`~.abstract.Connection` objects are tuple-like, the data they
contain is immutable after instantiation.
Example usage:
.. testcode::
from mcl.network.abstract import Connection
from mcl.network.abstract import RawListener
from mcl.network.abstract import RawBroadcaster
# Define new connection object WITH NO optional parameters (abstract
# RawBroadcaster/Listener used for illustration).
class ExampleConnection(Connection):
mandatory = ('A',)
broadcaster = RawBroadcaster
listener = RawListener
# Instantiate connection object.
example = ExampleConnection('A')
print example
# Define new connection object WITH optional parameters.
class ExampleConnection(Connection):
mandatory = ('A',)
optional = {'B': 1, 'C': 2, 'D': 3}
broadcaster = RawBroadcaster
listener = RawListener
# Instantiate connection object.
example = ExampleConnection('A', D=5)
print example
.. testoutput::
:hide:
ExampleConnection(A='A')
ExampleConnection(A='A', C=2, B=1, D=5)
Raises:
TypeError: If the mandatory or optional attributes are ill-specified.
ValueError: If any of the mandatory or optional attribute names are
ill-specified.
"""
__metaclass__ = _ConnectionMeta
class RawBroadcaster(object):
"""Abstract base class for sending data over a network interface.
The :class:`.RawBroadcaster` is an abstract base class designed to provide
a template for objects in the MCL ecosystem which broadcast data over a
network interface. Broadcasters inheriting from this template are likely to
integrate safely with the MCL system.
Args:
connection (:class:`~.abstract.Connection`): Connection object.
topic (str): Default topic associated with the network interface.
Attributes:
connection (:class:`~.abstract.Connection`): Connection object.
topic (str): Default topic associated with the network interface.
is_open (bool): Returns :data:`True` if the network interface is
open. Otherwise returns :data:`False`.
counter (int): Number of broadcasts issued.
Raises:
TypeError: If any of the inputs are ill-specified.
"""
# Ensure abstract methods are redefined in child classes.
__metaclass__ = abc.ABCMeta
def __init__(self, connection, topic=None):
"""Document the __init__ method at the class level."""
# Ensure the connection object is properly specified.
if not isinstance(connection, Connection):
msg = "The argument 'connection' must be an instance of a "
msg += "Connection() subclass."
raise TypeError(msg)
# Broadcasters can only store ONE default topic. Enforce this behaviour
# by only accepting a string.
if topic is not None and not isinstance(topic, basestring):
raise TypeError("The argument 'topic' must be None or a string.")
# Save connection parameters.
self.__connection = connection
self.__topic = topic
@property
def connection(self):
return self.__connection
@property
def topic(self):
return self.__topic
@abc.abstractproperty
def is_open(self):
pass # pragma: no cover
@abc.abstractmethod
def _open(self):
"""Virtual: Open connection to network interface.
Returns:
:class:`bool`: Returns :data:`True` if the network interface was
opened. If the network interface was already opened, the
request is ignored and the method returns :data:`False`.
"""
pass # pragma: no cover
@abc.abstractmethod
def publish(self, data, topic=None):
"""Virtual: Send data over network interface.
Args:
data (obj): Serialisable object to publish over the network
interface.
topic (str): Topic associated with published data. This option will
temporarily override the topic specified during instantiation.
"""
        # Ensure topic is a string.
if topic is not None and not isinstance(topic, basestring):
raise TypeError("The argument 'topic' must be None or a string.")
@abc.abstractmethod
def close(self):
"""Virtual: Close connection to network interface.
Returns:
:class:`bool`: Returns :data:`True` if the network interface was
closed. If the network interface was already closed, the
request is ignored and the method returns :data:`False`.
"""
pass # pragma: no cover
class RawListener(mcl.event.event.Event):
"""Abstract base class for receiving data over a network interface.
The :class:`.RawListener` is an abstract base class designed to provide a
template for objects in the MCL ecosystem which listen for data over a
network interface. Listeners inheriting from this template are likely to
integrate safely with the MCL system.
Network data is made available to subscribers by issuing callbacks, when
data arrives, in the following format::
{'topic': str,
'payload': obj()}
where:
- **<topic>** is a string containing the topic associated with the
received data.
- **<payload>** is the received (serialisable) data.
.. note::
:class:`.RawListener` implements the event-based programming paradigm
by inheriting from :class:`.Event`. Data can be issued to callback
functions by calling the RawListener.__trigger__ method. This method
has been removed from the public API to prevent *users* from calling
        the method. In concrete implementations of the :class:`.RawListener`,
*developers* can call the '__trigger__' method in I/O loops when
network data is available.
Args:
connection (:class:`~.abstract.Connection`): Connection object.
topics (str or list): Topics associated with the network interface
represented as either a string or list of strings.
Attributes:
connection (:class:`~.abstract.Connection`): Connection object.
topics (str or list): Topics associated with the network interface.
is_open (bool): Returns :data:`True` if the network interface is
open. Otherwise returns :data:`False`.
counter (int): Number of broadcasts received.
Raises:
TypeError: If any of the inputs are ill-specified.
"""
def __init__(self, connection, topics=None):
"""Document the __init__ method at the class level."""
# Ensure the connection object is properly specified.
if not isinstance(connection, Connection):
msg = "The argument 'connection' must be a Connection() subclass."
raise TypeError(msg)
        # Listeners can subscribe to one or more topics. Enforce this behaviour
        # by only accepting None, a string or a list of strings.
        if topics is not None and not isinstance(topics, basestring) and not \
                all(isinstance(item, basestring) for item in topics):
            msg = "The argument 'topics' must be None, a string or a list of "
            msg += "strings."
            raise TypeError(msg)
# Save connection parameters.
self.__connection = connection
self.__topics = topics
# Initialise Event() object.
super(RawListener, self).__init__()
@property
def connection(self):
return self.__connection
@property
def topics(self):
return self.__topics
@abc.abstractproperty
def is_open(self):
pass # pragma: no cover
@abc.abstractmethod
def _open(self):
"""Virtual: Open connection to network interface.
Returns:
:class:`bool`: Returns :data:`True` if the network interface was
opened. If the network interface was already opened, the
request is ignored and the method returns :data:`False`.
"""
pass # pragma: no cover
@abc.abstractmethod
def close(self):
"""Virtual: Close connection to network interface.
Returns:
:class:`bool`: Returns :data:`True` if the network interface was
closed. If the network interface was already closed, the
request is ignored and the method returns :data:`False`.
"""
pass # pragma: no cover
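# --- Illustrative sketch (not part of the original module) ------------------
# Connection subclasses behave like namedtuples: the names in `mandatory`
# become positional fields, the `optional` entries become keyword fields with
# defaults, and the manufactured class gains to_dict()/from_dict()/_replace().
# The class, field names and values below are ours, chosen purely for
# demonstration, and assume the module's Python 2 environment.
def _connection_sketch():
    class DemoConnection(Connection):
        mandatory = ('channel',)
        optional = {'port': 26000}
        broadcaster = RawBroadcaster
        listener = RawListener
    connection = DemoConnection('control', port=26001)
    assert connection.channel == 'control'
    assert connection.port == 26001
    assert connection.to_dict() == {'channel': 'control', 'port': 26001}
    assert DemoConnection.from_dict({'channel': 'control'}).port == 26000
    return connection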
|
|
"""
This module contains data types used by Scrapy which are not included in the
Python Standard Library.
This module must not depend on any module outside the Standard Library.
"""
import copy
import six
import warnings
from collections import OrderedDict
from scrapy.exceptions import ScrapyDeprecationWarning
class MultiValueDictKeyError(KeyError):
def __init__(self, *args, **kwargs):
warnings.warn(
"scrapy.utils.datatypes.MultiValueDictKeyError is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2
)
super(MultiValueDictKeyError, self).__init__(*args, **kwargs)
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
warnings.warn("scrapy.utils.datatypes.MultiValueDict is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2)
dict.__init__(self, key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, dict.__repr__(self))
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = dict.__getitem__(self, key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
dict.__setitem__(self, key, [value])
def __copy__(self):
return self.__class__(dict.items(self))
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def get(self, key, default=None):
"Returns the default value if the requested data doesn't exist"
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"Returns an empty list if the requested data doesn't exist"
try:
return dict.__getitem__(self, key)
except KeyError:
return []
def setlist(self, key, list_):
dict.__setitem__(self, key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"Appends an item to the internal list associated with key"
self.setlistdefault(key, [])
dict.__setitem__(self, key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def lists(self):
"Returns a list of (key, list) pairs."
return dict.items(self)
def values(self):
"Returns a list of the last value on every key list."
return [self[key] for key in self.keys()]
def copy(self):
"Returns a copy of this object."
return self.__deepcopy__()
def update(self, *args, **kwargs):
"update() extends rather than replaces existing key lists. Also accepts keyword args."
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in six.iteritems(kwargs):
self.setlistdefault(key, []).append(value)
class SiteNode(object):
"""Class to represent a site node (page, image or any other file)"""
def __init__(self, url):
warnings.warn(
"scrapy.utils.datatypes.SiteNode is deprecated "
"and will be removed in future releases.",
category=ScrapyDeprecationWarning,
stacklevel=2
)
self.url = url
self.itemnames = []
self.children = []
self.parent = None
def add_child(self, node):
self.children.append(node)
node.parent = self
def to_string(self, level=0):
s = "%s%s\n" % (' '*level, self.url)
if self.itemnames:
for n in self.itemnames:
s += "%sScraped: %s\n" % (' '*(level+1), n)
for node in self.children:
s += node.to_string(level+1)
return s
class CaselessDict(dict):
__slots__ = ()
def __init__(self, seq=None):
super(CaselessDict, self).__init__()
if seq:
self.update(seq)
def __getitem__(self, key):
return dict.__getitem__(self, self.normkey(key))
def __setitem__(self, key, value):
dict.__setitem__(self, self.normkey(key), self.normvalue(value))
def __delitem__(self, key):
dict.__delitem__(self, self.normkey(key))
def __contains__(self, key):
return dict.__contains__(self, self.normkey(key))
has_key = __contains__
def __copy__(self):
return self.__class__(self)
copy = __copy__
def normkey(self, key):
"""Method to normalize dictionary key access"""
return key.lower()
def normvalue(self, value):
"""Method to normalize values prior to be setted"""
return value
def get(self, key, def_val=None):
return dict.get(self, self.normkey(key), self.normvalue(def_val))
def setdefault(self, key, def_val=None):
return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))
def update(self, seq):
seq = seq.items() if isinstance(seq, dict) else seq
iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)
super(CaselessDict, self).update(iseq)
@classmethod
def fromkeys(cls, keys, value=None):
return cls((k, value) for k in keys)
def pop(self, key, *args):
return dict.pop(self, self.normkey(key), *args)
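# --- Illustrative sketch (not part of the original module) ------------------
# CaselessDict routes every key through normkey() (lower-casing by default),
# so reads, writes and membership tests are case-insensitive.  The helper
# name and example values are ours.
def _caselessdict_sketch():
    headers = CaselessDict({'Content-Type': 'text/html'})
    assert headers['content-type'] == 'text/html'
    assert 'CONTENT-TYPE' in headers
    headers['ACCEPT'] = '*/*'
    assert headers.get('accept') == '*/*'
    return headers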
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_.keys():
return dict_.getlist(key)
return []
def items(self):
item_list = []
for dict_ in self.dicts:
item_list.extend(dict_.items())
return item_list
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
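# --- Illustrative sketch (not part of the original module) ------------------
# MergeDict looks a key up in each wrapped dictionary in order; the first
# dictionary that contains the key wins.  The helper name and values are ours.
def _mergedict_sketch():
    overrides = {'timeout': 5}
    defaults = {'timeout': 30, 'retries': 2}
    merged = MergeDict(overrides, defaults)
    assert merged['timeout'] == 5               # found in the first dict
    assert merged['retries'] == 2               # falls through to the second
    assert merged.get('missing', 'n/a') == 'n/a'
    return merged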
class LocalCache(OrderedDict):
"""Dictionary with a finite number of keys.
    Older items expire first.
"""
def __init__(self, limit=None):
super(LocalCache, self).__init__()
self.limit = limit
def __setitem__(self, key, value):
while len(self) >= self.limit:
self.popitem(last=False)
super(LocalCache, self).__setitem__(key, value)
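# --- Illustrative sketch (not part of the original module) ------------------
# LocalCache is an OrderedDict bounded by `limit`: once full, inserting a new
# key evicts the oldest entry first.  The helper name and values are ours.
def _localcache_sketch():
    cache = LocalCache(limit=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3                  # evicts 'a', the oldest entry
    assert 'a' not in cache
    assert list(cache.keys()) == ['b', 'c']
    return cache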
class SequenceExclude(object):
"""Object to test if an item is NOT within some sequence."""
def __init__(self, seq):
self.seq = seq
def __contains__(self, item):
return item not in self.seq
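# --- Illustrative sketch (not part of the original module) ------------------
# SequenceExclude inverts the `in` test: membership means the item is NOT in
# the wrapped sequence.  The helper name and values are ours.
def _sequenceexclude_sketch():
    allowed_ports = SequenceExclude(range(900, 1000))
    assert 80 in allowed_ports       # 80 lies outside the excluded range
    assert 950 not in allowed_ports  # 950 lies inside the excluded range
    return allowed_ports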
|
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
import sys
import six
_UCS2_MAXUNICODE = 65535
if six.PY3:
long = int
else:
import re # pylint: disable=g-import-not-at-top
_SURROGATE_PATTERN = re.compile(six.u(r'[\ud800-\udfff]'))
from google.protobuf.internal import containers
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask, result_type):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = six.indexbytes(buffer, pos)
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
result = result_type(result)
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(bits, result_type):
"""Like _VarintDecoder() but decodes signed values."""
signbit = 1 << (bits - 1)
mask = (1 << bits) - 1
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = six.indexbytes(buffer, pos)
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
result = (result ^ signbit) - signbit
result = result_type(result)
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
_DecodeVarint = _VarintDecoder((1 << 64) - 1, long)
_DecodeSignedVarint = _SignedVarintDecoder(64, long)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int)
_DecodeSignedVarint32 = _SignedVarintDecoder(32, int)
def ReadTag(buffer, pos):
"""Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
Args:
buffer: memoryview object of the encoded bytes
pos: int of the current position to start from
Returns:
Tuple[bytes, int] of the tag data and new position.
"""
start = pos
while six.indexbytes(buffer, pos) & 0x80:
pos += 1
pos += 1
tag_bytes = buffer[start:pos].tobytes()
return tag_bytes, pos
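# --- Illustrative sketch (not part of the original module) ------------------
# Varints store seven payload bits per byte, least-significant group first,
# with the high bit marking continuation: b'\xac\x02' is 0x2c + (0x02 << 7),
# i.e. 300.  ReadTag returns the raw tag bytes plus the new position.  The
# helper name below is ours, added purely for illustration.
def _varint_sketch():
  value, new_pos = _DecodeVarint(b'\xac\x02', 0)
  assert (value, new_pos) == (300, 2)
  tag_bytes, pos = ReadTag(memoryview(b'\x08\x96\x01'), 0)
  assert tag_bytes == b'\x08' and pos == 1
  return value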
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
"""
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(field_dict[key], pos) = decode_value(buffer, pos)
if pos > end:
del field_dict[key] # Discard corrupt value.
raise _DecodeError('Truncated message.')
return pos
return DecodeField
return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
"""
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
"""Decode serialized float to a float and new position.
Args:
buffer: memoryview of the serialized bytes
pos: int, position in the memory view to start at.
Returns:
Tuple[float, int] of the deserialized float value and new position
in the serialized data.
"""
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos].tobytes()
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
# If at least one significand bit is set...
if float_bytes[0:3] != b'\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
if float_bytes[3:4] == b'\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
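# Illustrative note (not part of the original module): the special-casing in
# InnerDecode above maps the three non-finite little-endian float32 patterns
# directly, e.g.
#
#   b'\x00\x00\x80\x7f'  ->  _POS_INF  (exponent all ones, significand zero)
#   b'\x00\x00\x80\xff'  ->  _NEG_INF  (same, with the sign bit set)
#   b'\x00\x00\xc0\x7f'  ->  _NAN      (exponent all ones, significand non-zero)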
def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
"""Decode serialized double to a double and new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
Returns:
Tuple[float, int] of the decoded double value and new position
in the serialized data.
"""
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos].tobytes()
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7:8] in b'\x7F\xFF')
and (double_bytes[6:7] >= b'\xF0')
and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
def EnumDecoder(field_number, is_repeated, is_packed, key, new_default):
enum_type = key.enum_type
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
"""Decode serialized packed enum to its value and a new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
value_start_pos = pos
(element, pos) = _DecodeSignedVarint32(buffer, pos)
# pylint: disable=protected-access
if element in enum_type.values_by_number:
value.append(element)
else:
if not message._unknown_fields:
message._unknown_fields = []
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_VARINT)
message._unknown_fields.append(
(tag_bytes, buffer[value_start_pos:pos].tobytes()))
# pylint: enable=protected-access
if pos > endpoint:
if element in enum_type.values_by_number:
del value[-1] # Discard corrupt value.
else:
del message._unknown_fields[-1]
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
"""Decode serialized repeated enum to its value and a new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = _DecodeSignedVarint32(buffer, pos)
# pylint: disable=protected-access
if element in enum_type.values_by_number:
value.append(element)
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append(
(tag_bytes, buffer[pos:new_pos].tobytes()))
# pylint: enable=protected-access
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
"""Decode serialized repeated enum to its value and a new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
value_start_pos = pos
(enum_value, pos) = _DecodeSignedVarint32(buffer, pos)
if pos > end:
raise _DecodeError('Truncated message.')
# pylint: disable=protected-access
if enum_value in enum_type.values_by_number:
field_dict[key] = enum_value
else:
if not message._unknown_fields:
message._unknown_fields = []
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_VARINT)
message._unknown_fields.append(
(tag_bytes, buffer[value_start_pos:pos].tobytes()))
# pylint: enable=protected-access
return pos
return DecodeField
# --------------------------------------------------------------------
Int32Decoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
Int64Decoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)
UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)
SInt32Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()
BoolDecoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default,
is_strict_utf8=False):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
local_unicode = six.text_type
def _ConvertToUnicode(memview):
"""Convert byte to unicode."""
byte_str = memview.tobytes()
try:
value = local_unicode(byte_str, 'utf-8')
except UnicodeDecodeError as e:
# add more information to the error message and re-raise it.
e.reason = '%s in field: %s' % (e, key.full_name)
raise
if is_strict_utf8 and six.PY2 and sys.maxunicode > _UCS2_MAXUNICODE:
# Only do the check for python2 ucs4 when is_strict_utf8 enabled
if _SURROGATE_PATTERN.search(value):
        reason = ('String field %s contains invalid UTF-8 data when parsing '
                  'a protocol buffer: surrogates not allowed. Use '
                  'the bytes type if you intend to send raw bytes.') % (
key.full_name)
raise message.DecodeError(reason)
return value
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(_ConvertToUnicode(buffer[pos:new_pos]))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
return new_pos
return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos].tobytes())
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos].tobytes()
return new_pos
return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field."""
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a message field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
return new_pos
return DecodeField
# --------------------------------------------------------------------
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(descriptor):
"""Returns a decoder for a MessageSet item.
The parameter is the message Descriptor.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
"""Decode serialized message set to its value and new position.
Args:
buffer: memoryview of the serialized bytes.
pos: int, position in the memory view to start at.
end: int, end position of serialized data
message: Message object to store unknown fields in
field_dict: Map[Descriptor, Any] to store decoded values in.
Returns:
int, new position in serialized data.
"""
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
# Technically, type_id and message can appear in any order, so we need
# a little loop here.
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
        pos = local_SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = message.Extensions._FindExtensionByNumber(type_id)
# pylint: disable=protected-access
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(
extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append(
(MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes()))
# pylint: enable=protected-access
return pos
return DecodeItem
# --------------------------------------------------------------------
def MapDecoder(field_descriptor, new_default, is_message_map):
"""Returns a decoder for a map field."""
key = field_descriptor
tag_bytes = encoder.TagBytes(field_descriptor.number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
local_DecodeVarint = _DecodeVarint
# Can't read _concrete_class yet; might not be initialized.
message_type = field_descriptor.message_type
def DecodeMap(buffer, pos, end, message, field_dict):
submsg = message_type._concrete_class()
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
submsg.Clear()
if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
if is_message_map:
value[submsg.key].MergeFrom(submsg.value)
else:
value[submsg.key] = submsg.value
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeMap
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
# Previously ord(buffer[pos]) raised IndexError when pos is out of range.
# With this code, ord(b'') raises TypeError. Both are handled in
# python_message.py to generate a 'Truncated message' error.
while ord(buffer[pos:pos+1].tobytes()) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _DecodeFixed64(buffer, pos):
"""Decode a fixed64."""
new_pos = pos + 8
return (struct.unpack('<Q', buffer[pos:new_pos])[0], new_pos)
def _SkipLengthDelimited(buffer, pos, end):
"""Skip a length-delimited value. Returns the new position."""
(size, pos) = _DecodeVarint(buffer, pos)
pos += size
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos
def _DecodeUnknownFieldSet(buffer, pos, end_pos=None):
"""Decode UnknownFieldSet. Returns the UnknownFieldSet and new position."""
unknown_field_set = containers.UnknownFieldSet()
while end_pos is None or pos < end_pos:
(tag_bytes, pos) = ReadTag(buffer, pos)
(tag, _) = _DecodeVarint(tag_bytes, 0)
field_number, wire_type = wire_format.UnpackTag(tag)
if wire_type == wire_format.WIRETYPE_END_GROUP:
break
(data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
# pylint: disable=protected-access
unknown_field_set._add(field_number, wire_type, data)
return (unknown_field_set, pos)
def _DecodeUnknownField(buffer, pos, wire_type):
"""Decode a unknown field. Returns the UnknownField and new position."""
if wire_type == wire_format.WIRETYPE_VARINT:
(data, pos) = _DecodeVarint(buffer, pos)
elif wire_type == wire_format.WIRETYPE_FIXED64:
(data, pos) = _DecodeFixed64(buffer, pos)
elif wire_type == wire_format.WIRETYPE_FIXED32:
(data, pos) = _DecodeFixed32(buffer, pos)
elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
(size, pos) = _DecodeVarint(buffer, pos)
data = buffer[pos:pos+size]
pos += size
elif wire_type == wire_format.WIRETYPE_START_GROUP:
(data, pos) = _DecodeUnknownFieldSet(buffer, pos)
elif wire_type == wire_format.WIRETYPE_END_GROUP:
return (0, -1)
else:
raise _DecodeError('Wrong wire type in tag.')
return (data, pos)
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _DecodeFixed32(buffer, pos):
"""Decode a fixed32."""
new_pos = pos + 4
return (struct.unpack('<I', buffer[pos:new_pos])[0], new_pos)
def _RaiseInvalidWireType(buffer, pos, end):
"""Skip function for unknown wire types. Raises an exception."""
raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
"""Constructs the SkipField function."""
WIRETYPE_TO_SKIPPER = [
_SkipVarint,
_SkipFixed64,
_SkipLengthDelimited,
_SkipGroup,
_EndGroup,
_SkipFixed32,
_RaiseInvalidWireType,
_RaiseInvalidWireType,
]
wiretype_mask = wire_format.TAG_TYPE_MASK
def SkipField(buffer, pos, end, tag_bytes):
"""Skips a field with the specified tag.
|pos| should point to the byte immediately after the tag.
Returns:
The new position (after the tag value), or -1 if the tag is an end-group
tag (in which case the calling loop should break).
"""
# The wire type is always in the first byte since varints are little-endian.
wire_type = ord(tag_bytes[0:1]) & wiretype_mask
return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
return SkipField
SkipField = _FieldSkipper()
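# Illustrative note (not part of the original module): SkipField dispatches on
# the low three bits of the first tag byte. For example, with the tag byte
# 0x08 (field 1, wire type VARINT) followed by the varint 150 (0x96 0x01):
#
#   SkipField(memoryview(b'\x08\x96\x01'), 1, 3, b'\x08')
#   # -> 3: _SkipVarint consumes the two value bytes and returns the new pos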
|
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Classes to handle advanced configuration in simple to complex applications.
Allows loading the configuration from a file or from command line
options, generating a sample configuration file, or displaying the
program's usage. Fills the gap between optik/optparse and ConfigParser
by adding data types (which are also available as a standalone optik
extension in the `optik_ext` module).
Quick start: simplest usage
---------------------------
.. python ::
>>> import sys
>>> from logilab.common.configuration import Configuration
>>> options = [('dothis', {'type':'yn', 'default': True, 'metavar': '<y or n>'}),
... ('value', {'type': 'string', 'metavar': '<string>'}),
... ('multiple', {'type': 'csv', 'default': ('yop',),
... 'metavar': '<comma separated values>',
... 'help': 'you can also document the option'}),
... ('number', {'type': 'int', 'default':2, 'metavar':'<int>'}),
... ]
>>> config = Configuration(options=options, name='My config')
>>> print config['dothis']
True
>>> print config['value']
None
>>> print config['multiple']
('yop',)
>>> print config['number']
2
>>> print config.help()
Usage: [options]
Options:
-h, --help show this help message and exit
--dothis=<y or n>
--value=<string>
--multiple=<comma separated values>
you can also document the option [current: none]
--number=<int>
>>> f = open('myconfig.ini', 'w')
>>> f.write('''[MY CONFIG]
... number = 3
... dothis = no
... multiple = 1,2,3
... ''')
>>> f.close()
>>> config.load_file_configuration('myconfig.ini')
>>> print config['dothis']
False
>>> print config['value']
None
>>> print config['multiple']
['1', '2', '3']
>>> print config['number']
3
>>> sys.argv = ['mon prog', '--value', 'bacon', '--multiple', '4,5,6',
... 'nonoptionargument']
>>> print config.load_command_line_configuration()
['nonoptionargument']
>>> print config['value']
bacon
>>> config.generate_config()
# class for simple configurations which don't need the
# manager / providers model and prefer delegation to inheritance
#
# configuration values are accessible through a dict like interface
#
[MY CONFIG]
dothis=no
value=bacon
# you can also document the option
multiple=4,5,6
number=3
>>>
"""
__docformat__ = "restructuredtext en"
__all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn',
'ConfigurationMixIn', 'Configuration',
'OptionsManager2ConfigurationAdapter')
import os
import sys
import re
from os.path import exists, expanduser
from copy import copy
from ConfigParser import ConfigParser, NoOptionError, NoSectionError, \
DuplicateSectionError
from warnings import warn
from logilab.common.compat import callable, raw_input, str_encode as _encode
from logilab.common.textutils import normalize_text, unquote
from logilab.common import optik_ext as optparse
OptionError = optparse.OptionError
REQUIRED = []
class UnsupportedAction(Exception):
"""raised by set_option when it doesn't know what to do for an action"""
def _get_encoding(encoding, stream):
encoding = encoding or getattr(stream, 'encoding', None)
if not encoding:
import locale
encoding = locale.getpreferredencoding()
return encoding
# validation functions ########################################################
def choice_validator(optdict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
if not value in optdict['choices']:
msg = "option %s: invalid value: %r, should be in %s"
raise optparse.OptionValueError(msg % (name, value, optdict['choices']))
return value
def multiple_choice_validator(optdict, name, value):
"""validate and return a converted value for option of type 'choice'
"""
choices = optdict['choices']
values = optparse.check_csv(None, name, value)
for value in values:
if not value in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise optparse.OptionValueError(msg % (name, value, choices))
return values
def csv_validator(optdict, name, value):
"""validate and return a converted value for option of type 'csv'
"""
return optparse.check_csv(None, name, value)
def yn_validator(optdict, name, value):
"""validate and return a converted value for option of type 'yn'
"""
return optparse.check_yn(None, name, value)
def named_validator(optdict, name, value):
"""validate and return a converted value for option of type 'named'
"""
return optparse.check_named(None, name, value)
def file_validator(optdict, name, value):
"""validate and return a filepath for option of type 'file'"""
return optparse.check_file(None, name, value)
def color_validator(optdict, name, value):
"""validate and return a valid color for option of type 'color'"""
return optparse.check_color(None, name, value)
def password_validator(optdict, name, value):
"""validate and return a string for option of type 'password'"""
return optparse.check_password(None, name, value)
def date_validator(optdict, name, value):
"""validate and return a mx DateTime object for option of type 'date'"""
return optparse.check_date(None, name, value)
def time_validator(optdict, name, value):
"""validate and return a time object for option of type 'time'"""
return optparse.check_time(None, name, value)
def bytes_validator(optdict, name, value):
"""validate and return an integer for option of type 'bytes'"""
return optparse.check_bytes(None, name, value)
VALIDATORS = {'string': unquote,
'int': int,
'float': float,
'file': file_validator,
'font': unquote,
'color': color_validator,
'regexp': re.compile,
'csv': csv_validator,
'yn': yn_validator,
'bool': yn_validator,
'named': named_validator,
'password': password_validator,
'date': date_validator,
'time': time_validator,
'bytes': bytes_validator,
'choice': choice_validator,
'multiple_choice': multiple_choice_validator,
}
def _call_validator(opttype, optdict, option, value):
if opttype not in VALIDATORS:
raise Exception('Unsupported type "%s"' % opttype)
try:
return VALIDATORS[opttype](optdict, option, value)
except TypeError:
try:
return VALIDATORS[opttype](value)
except optparse.OptionValueError:
raise
except:
raise optparse.OptionValueError('%s value (%r) should be of type %s' %
(option, value, opttype))
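# Illustrative note (not part of the original module): validators are tried
# first with the (optdict, option, value) signature and then with just the
# value, so plain converters such as int() work unchanged:
#
#   _call_validator('int', {}, 'number', '3')        # -> 3 (falls back to int('3'))
#   _call_validator('csv', {}, 'multiple', '1,2,3')  # -> ['1', '2', '3']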
# user input functions ########################################################
def input_password(optdict, question='password:'):
from getpass import getpass
while True:
value = getpass(question)
value2 = getpass('confirm: ')
if value == value2:
return value
print 'password mismatch, try again'
def input_string(optdict, question):
value = raw_input(question).strip()
return value or None
def _make_input_function(opttype):
def input_validator(optdict, question):
while True:
value = raw_input(question)
if not value.strip():
return None
try:
return _call_validator(opttype, optdict, None, value)
except optparse.OptionValueError, ex:
msg = str(ex).split(':', 1)[-1].strip()
print 'bad value: %s' % msg
return input_validator
INPUT_FUNCTIONS = {
'string': input_string,
'password': input_password,
}
for opttype in VALIDATORS.keys():
INPUT_FUNCTIONS.setdefault(opttype, _make_input_function(opttype))
def expand_default(self, option):
"""monkey patch OptionParser.expand_default since we have a particular
way to handle defaults to avoid overriding values in the configuration
file
"""
if self.parser is None or not self.default_tag:
return option.help
optname = option._long_opts[0][2:]
try:
provider = self.parser.options_manager._all_options[optname]
except KeyError:
value = None
else:
optdict = provider.get_option_def(optname)
optname = provider.option_name(optname, optdict)
value = getattr(provider.config, optname, optdict)
value = format_option_value(optdict, value)
if value is optparse.NO_DEFAULT or not value:
value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(value))
def convert(value, optdict, name=''):
"""return a validated value for an option according to its type
optional argument name is only used for error message formatting
"""
try:
_type = optdict['type']
except KeyError:
# FIXME
return value
return _call_validator(_type, optdict, name, value)
def comment(string):
"""return string as a comment"""
lines = [line.strip() for line in string.splitlines()]
return '# ' + ('%s# ' % os.linesep).join(lines)
def format_time(value):
if not value:
return '0'
if value != int(value):
return '%.2fs' % value
value = int(value)
nbmin, nbsec = divmod(value, 60)
if nbsec:
return '%ss' % value
nbhour, nbmin_ = divmod(nbmin, 60)
if nbmin_:
return '%smin' % nbmin
nbday, nbhour_ = divmod(nbhour, 24)
if nbhour_:
return '%sh' % nbhour
return '%sd' % nbday
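# Illustrative note (not part of the original module): format_time() only
# moves to a larger unit while the value divides evenly, e.g.
#
#   format_time(0.5)   # -> '0.50s'
#   format_time(90)    # -> '90s'   (not a whole number of minutes)
#   format_time(120)   # -> '2min'
#   format_time(3600)  # -> '1h'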
def format_bytes(value):
if not value:
return '0'
if value != int(value):
return '%.2fB' % value
value = int(value)
prevunit = 'B'
for unit in ('KB', 'MB', 'GB', 'TB'):
next, remain = divmod(value, 1024)
if remain:
return '%s%s' % (value, prevunit)
prevunit = unit
value = next
return '%s%s' % (value, unit)
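# Illustrative note (not part of the original module): format_bytes() behaves
# the same way, only switching unit while the remainder is zero:
#
#   format_bytes(500)      # -> '500B'
#   format_bytes(1536)     # -> '1536B' (not a whole number of KB)
#   format_bytes(2048)     # -> '2KB'
#   format_bytes(1048576)  # -> '1MB'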
def format_option_value(optdict, value):
"""return the user input's value from a 'compiled' value"""
if isinstance(value, (list, tuple)):
value = ','.join(value)
elif isinstance(value, dict):
value = ','.join(['%s:%s' % (k, v) for k, v in value.items()])
elif hasattr(value, 'match'): # optdict.get('type') == 'regexp'
# compiled regexp
value = value.pattern
elif optdict.get('type') == 'yn':
value = value and 'yes' or 'no'
elif isinstance(value, (str, unicode)) and value.isspace():
value = "'%s'" % value
elif optdict.get('type') == 'time' and isinstance(value, (float, int, long)):
value = format_time(value)
elif optdict.get('type') == 'bytes' and hasattr(value, '__int__'):
value = format_bytes(value)
return value
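# Illustrative note (not part of the original module): some conversions
# performed by format_option_value():
#
#   format_option_value({}, ['a', 'b'])           # -> 'a,b'
#   format_option_value({'type': 'yn'}, True)     # -> 'yes'
#   format_option_value({}, re.compile('\d+'))    # -> '\d+' (the pattern source)
#   format_option_value({'type': 'time'}, 120)    # -> '2min'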
def ini_format_section(stream, section, options, encoding=None, doc=None):
"""format an options section using the INI format"""
encoding = _get_encoding(encoding, stream)
if doc:
print >> stream, _encode(comment(doc), encoding)
print >> stream, '[%s]' % section
ini_format(stream, options, encoding)
def ini_format(stream, options, encoding):
"""format options using the INI format"""
for optname, optdict, value in options:
value = format_option_value(optdict, value)
help = optdict.get('help')
if help:
help = normalize_text(help, line_len=79, indent='# ')
print >> stream
print >> stream, _encode(help, encoding)
else:
print >> stream
if value is None:
print >> stream, '#%s=' % optname
else:
value = _encode(value, encoding).strip()
print >> stream, '%s=%s' % (optname, value)
format_section = ini_format_section
def rest_format_section(stream, section, options, encoding=None, doc=None):
"""format an options section using the INI format"""
encoding = _get_encoding(encoding, stream)
if section:
print >> stream, '%s\n%s' % (section, "'"*len(section))
if doc:
print >> stream, _encode(normalize_text(doc, line_len=79, indent=''),
encoding)
print >> stream
for optname, optdict, value in options:
help = optdict.get('help')
print >> stream, ':%s:' % optname
if help:
help = normalize_text(help, line_len=79, indent=' ')
print >> stream, _encode(help, encoding)
if value:
value = _encode(format_option_value(optdict, value), encoding)
print >> stream, ''
print >> stream, ' Default: ``%s``' % value.replace("`` ", "```` ``")
class OptionsManagerMixIn(object):
"""MixIn to handle a configuration from both a configuration file and
command line options
"""
def __init__(self, usage, config_file=None, version=None, quiet=0):
self.config_file = config_file
self.reset_parsers(usage, version=version)
# list of registered options providers
self.options_providers = []
# dictionary associating option name to checker
self._all_options = {}
self._short_options = {}
self._nocallback_options = {}
self._mygroups = dict()
# verbosity
self.quiet = quiet
self._maxlevel = 0
def reset_parsers(self, usage='', version=None):
# configuration file parser
self.cfgfile_parser = ConfigParser()
# command line parser
self.cmdline_parser = optparse.OptionParser(usage=usage, version=version)
self.cmdline_parser.options_manager = self
self._optik_option_attrs = set(self.cmdline_parser.option_class.ATTRS)
def register_options_provider(self, provider, own_group=True):
"""register an options provider"""
        assert provider.priority <= 0, "provider's priority can't be > 0"
for i in range(len(self.options_providers)):
if provider.priority > self.options_providers[i].priority:
self.options_providers.insert(i, provider)
break
else:
self.options_providers.append(provider)
non_group_spec_options = [option for option in provider.options
if 'group' not in option[1]]
groups = getattr(provider, 'option_groups', ())
if own_group and non_group_spec_options:
self.add_option_group(provider.name.upper(), provider.__doc__,
non_group_spec_options, provider)
else:
for opt, optdict in non_group_spec_options:
self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
for gname, gdoc in groups:
gname = gname.upper()
goptions = [option for option in provider.options
if option[1].get('group', '').upper() == gname]
self.add_option_group(gname, gdoc, goptions, provider)
def add_option_group(self, group_name, doc, options, provider):
"""add an option group including the listed options
"""
assert options
# add option group to the command line parser
if group_name in self._mygroups:
group = self._mygroups[group_name]
else:
group = optparse.OptionGroup(self.cmdline_parser,
title=group_name.capitalize())
self.cmdline_parser.add_option_group(group)
group.level = provider.level
self._mygroups[group_name] = group
# add section to the config file
if group_name != "DEFAULT":
self.cfgfile_parser.add_section(group_name)
# add provider's specific options
for opt, optdict in options:
self.add_optik_option(provider, group, opt, optdict)
def add_optik_option(self, provider, optikcontainer, opt, optdict):
if 'inputlevel' in optdict:
warn('[0.50] "inputlevel" in option dictionary for %s is deprecated,'
' use "level"' % opt, DeprecationWarning)
optdict['level'] = optdict.pop('inputlevel')
args, optdict = self.optik_option(provider, opt, optdict)
option = optikcontainer.add_option(*args, **optdict)
self._all_options[opt] = provider
self._maxlevel = max(self._maxlevel, option.level or 0)
def optik_option(self, provider, opt, optdict):
"""get our personal option definition and return a suitable form for
use with optik/optparse
"""
optdict = copy(optdict)
others = {}
if 'action' in optdict:
self._nocallback_options[provider] = opt
else:
optdict['action'] = 'callback'
optdict['callback'] = self.cb_set_provider_option
# default is handled here and *must not* be given to optik if you
# want the whole machinery to work
if 'default' in optdict:
if (optparse.OPTPARSE_FORMAT_DEFAULT and 'help' in optdict and
optdict.get('default') is not None and
not optdict['action'] in ('store_true', 'store_false')):
optdict['help'] += ' [current: %default]'
del optdict['default']
args = ['--' + str(opt)]
if 'short' in optdict:
self._short_options[optdict['short']] = opt
args.append('-' + optdict['short'])
del optdict['short']
# cleanup option definition dict before giving it to optik
for key in optdict.keys():
if not key in self._optik_option_attrs:
optdict.pop(key)
return args, optdict
def cb_set_provider_option(self, option, opt, value, parser):
"""optik callback for option setting"""
if opt.startswith('--'):
# remove -- on long option
opt = opt[2:]
else:
# short option, get its long equivalent
opt = self._short_options[opt[1:]]
# trick since we can't set action='store_true' on options
if value is None:
value = 1
self.global_set_option(opt, value)
def global_set_option(self, opt, value):
"""set option on the correct option provider"""
self._all_options[opt].set_option(opt, value)
def generate_config(self, stream=None, skipsections=(), encoding=None):
"""write a configuration file according to the current configuration
into the given stream or stdout
"""
options_by_section = {}
sections = []
for provider in self.options_providers:
for section, options in provider.options_by_section():
if section is None:
section = provider.name
if section in skipsections:
continue
options = [(n, d, v) for (n, d, v) in options
if d.get('type') is not None]
if not options:
continue
if not section in sections:
sections.append(section)
alloptions = options_by_section.setdefault(section, [])
alloptions += options
stream = stream or sys.stdout
encoding = _get_encoding(encoding, stream)
printed = False
for section in sections:
if printed:
print >> stream, '\n'
format_section(stream, section.upper(), options_by_section[section],
encoding)
printed = True
def generate_manpage(self, pkginfo, section=1, stream=None):
"""write a man page for the current configuration into the given
stream or stdout
"""
self._monkeypatch_expand_default()
try:
optparse.generate_manpage(self.cmdline_parser, pkginfo,
section, stream=stream or sys.stdout,
level=self._maxlevel)
finally:
self._unmonkeypatch_expand_default()
# initialization methods ##################################################
def load_provider_defaults(self):
"""initialize configuration using default values"""
for provider in self.options_providers:
provider.load_defaults()
def load_file_configuration(self, config_file=None):
"""load the configuration from file"""
self.read_config_file(config_file)
self.load_config_file()
def read_config_file(self, config_file=None):
"""read the configuration file but do not load it (i.e. dispatching
values to each options provider)
"""
helplevel = 1
while helplevel <= self._maxlevel:
opt = '-'.join(['long'] * helplevel) + '-help'
if opt in self._all_options:
break # already processed
def helpfunc(option, opt, val, p, level=helplevel):
print self.help(level)
sys.exit(0)
helpmsg = '%s verbose help.' % ' '.join(['more'] * helplevel)
optdict = {'action' : 'callback', 'callback' : helpfunc,
'help' : helpmsg}
provider = self.options_providers[0]
self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
provider.options += ( (opt, optdict), )
helplevel += 1
if config_file is None:
config_file = self.config_file
if config_file is not None:
config_file = expanduser(config_file)
if config_file and exists(config_file):
parser = self.cfgfile_parser
parser.read([config_file])
            # normalize sections' titles
for sect, values in parser._sections.items():
if not sect.isupper() and values:
parser._sections[sect.upper()] = values
elif not self.quiet:
msg = 'No config file found, using default configuration'
print >> sys.stderr, msg
return
def input_config(self, onlysection=None, inputlevel=0, stream=None):
"""interactively get configuration values by asking to the user and generate
a configuration file
"""
if onlysection is not None:
onlysection = onlysection.upper()
for provider in self.options_providers:
for section, option, optdict in provider.all_options():
if onlysection is not None and section != onlysection:
continue
if not 'type' in optdict:
# ignore action without type (callback, store_true...)
continue
provider.input_option(option, optdict, inputlevel)
# now we can generate the configuration file
if stream is not None:
self.generate_config(stream)
def load_config_file(self):
"""dispatch values previously read from a configuration file to each
        options provider
"""
parser = self.cfgfile_parser
for provider in self.options_providers:
for section, option, optdict in provider.all_options():
try:
value = parser.get(section, option)
provider.set_option(option, value, optdict=optdict)
except (NoSectionError, NoOptionError), ex:
continue
def load_configuration(self, **kwargs):
"""override configuration according to given parameters
"""
for opt, opt_value in kwargs.items():
opt = opt.replace('_', '-')
provider = self._all_options[opt]
provider.set_option(opt, opt_value)
def load_command_line_configuration(self, args=None):
"""override configuration according to command line parameters
return additional arguments
"""
self._monkeypatch_expand_default()
try:
if args is None:
args = sys.argv[1:]
else:
args = list(args)
(options, args) = self.cmdline_parser.parse_args(args=args)
for provider in self._nocallback_options.keys():
config = provider.config
for attr in config.__dict__.keys():
value = getattr(options, attr, None)
if value is None:
continue
setattr(config, attr, value)
return args
finally:
self._unmonkeypatch_expand_default()
# help methods ############################################################
def add_help_section(self, title, description, level=0):
"""add a dummy option section for help purpose """
group = optparse.OptionGroup(self.cmdline_parser,
title=title.capitalize(),
description=description)
group.level = level
self._maxlevel = max(self._maxlevel, level)
self.cmdline_parser.add_option_group(group)
def _monkeypatch_expand_default(self):
# monkey patch optparse to deal with our default values
try:
self.__expand_default_backup = optparse.HelpFormatter.expand_default
optparse.HelpFormatter.expand_default = expand_default
except AttributeError:
# python < 2.4: nothing to be done
pass
def _unmonkeypatch_expand_default(self):
# remove monkey patch
if hasattr(optparse.HelpFormatter, 'expand_default'):
# unpatch optparse to avoid side effects
optparse.HelpFormatter.expand_default = self.__expand_default_backup
def help(self, level=0):
"""return the usage string for available options """
self.cmdline_parser.formatter.output_level = level
self._monkeypatch_expand_default()
try:
return self.cmdline_parser.format_help()
finally:
self._unmonkeypatch_expand_default()
class Method(object):
"""used to ease late binding of default method (so you can define options
on the class using default methods on the configuration instance)
"""
def __init__(self, methname):
self.method = methname
self._inst = None
def bind(self, instance):
"""bind the method to its instance"""
if self._inst is None:
self._inst = instance
def __call__(self, *args, **kwargs):
assert self._inst, 'unbound method'
return getattr(self._inst, self.method)(*args, **kwargs)
class OptionsProviderMixIn(object):
"""Mixin to provide options to an OptionsManager"""
# those attributes should be overridden
priority = -1
name = 'default'
options = ()
level = 0
def __init__(self):
self.config = optparse.Values()
for option in self.options:
try:
option, optdict = option
except ValueError:
raise Exception('Bad option: %r' % option)
if isinstance(optdict.get('default'), Method):
optdict['default'].bind(self)
elif isinstance(optdict.get('callback'), Method):
optdict['callback'].bind(self)
self.load_defaults()
def load_defaults(self):
"""initialize the provider using default values"""
for opt, optdict in self.options:
action = optdict.get('action')
if action != 'callback':
                # callback actions have no default
default = self.option_default(opt, optdict)
if default is REQUIRED:
continue
self.set_option(opt, default, action, optdict)
def option_default(self, opt, optdict=None):
"""return the default value for an option"""
if optdict is None:
optdict = self.get_option_def(opt)
default = optdict.get('default')
if callable(default):
default = default()
return default
def option_name(self, opt, optdict=None):
"""get the config attribute corresponding to opt
"""
if optdict is None:
optdict = self.get_option_def(opt)
return optdict.get('dest', opt.replace('-', '_'))
def option_value(self, opt):
"""get the current value for the given option"""
return getattr(self.config, self.option_name(opt), None)
def set_option(self, opt, value, action=None, optdict=None):
"""method called to set an option (registered in the options list)
"""
# print "************ setting option", opt," to value", value
if optdict is None:
optdict = self.get_option_def(opt)
if value is not None:
value = convert(value, optdict, opt)
if action is None:
action = optdict.get('action', 'store')
if optdict.get('type') == 'named': # XXX need specific handling
optname = self.option_name(opt, optdict)
currentvalue = getattr(self.config, optname, None)
if currentvalue:
currentvalue.update(value)
value = currentvalue
if action == 'store':
setattr(self.config, self.option_name(opt, optdict), value)
elif action in ('store_true', 'count'):
setattr(self.config, self.option_name(opt, optdict), 0)
elif action == 'store_false':
setattr(self.config, self.option_name(opt, optdict), 1)
elif action == 'append':
opt = self.option_name(opt, optdict)
_list = getattr(self.config, opt, None)
if _list is None:
if isinstance(value, (list, tuple)):
_list = value
elif value is not None:
_list = []
_list.append(value)
setattr(self.config, opt, _list)
elif isinstance(_list, tuple):
setattr(self.config, opt, _list + (value,))
else:
_list.append(value)
elif action == 'callback':
optdict['callback'](None, opt, value, None)
else:
raise UnsupportedAction(action)
def input_option(self, option, optdict, inputlevel=99):
default = self.option_default(option, optdict)
if default is REQUIRED:
defaultstr = '(required): '
elif optdict.get('level', 0) > inputlevel:
return
elif optdict['type'] == 'password' or default is None:
defaultstr = ': '
else:
defaultstr = '(default: %s): ' % format_option_value(optdict, default)
print ':%s:' % option
print optdict.get('help') or option
inputfunc = INPUT_FUNCTIONS[optdict['type']]
value = inputfunc(optdict, defaultstr)
while default is REQUIRED and not value:
print 'please specify a value'
value = inputfunc(optdict, '%s: ' % option)
if value is None and default is not None:
value = default
self.set_option(option, value, optdict=optdict)
def get_option_def(self, opt):
"""return the dictionary defining an option given it's name"""
assert self.options
for option in self.options:
if option[0] == opt:
return option[1]
raise OptionError('no such option %s in section %r'
% (opt, self.name), opt)
def all_options(self):
"""return an iterator on available options for this provider
        options are actually described by a 3-tuple:
(section, option name, option dictionary)
"""
for section, options in self.options_by_section():
if section is None:
if self.name is None:
continue
section = self.name.upper()
for option, optiondict, value in options:
yield section, option, optiondict
def options_by_section(self):
"""return an iterator on options grouped by section
(section, [list of (optname, optdict, optvalue)])
"""
sections = {}
for optname, optdict in self.options:
sections.setdefault(optdict.get('group'), []).append(
(optname, optdict, self.option_value(optname)))
if None in sections:
yield None, sections.pop(None)
for section, options in sections.items():
yield section.upper(), options
def options_and_values(self, options=None):
if options is None:
options = self.options
for optname, optdict in options:
yield (optname, optdict, self.option_value(optname))
class ConfigurationMixIn(OptionsManagerMixIn, OptionsProviderMixIn):
"""basic mixin for simple configurations which don't need the
manager / providers model
"""
def __init__(self, *args, **kwargs):
if not args:
kwargs.setdefault('usage', '')
kwargs.setdefault('quiet', 1)
OptionsManagerMixIn.__init__(self, *args, **kwargs)
OptionsProviderMixIn.__init__(self)
if not getattr(self, 'option_groups', None):
self.option_groups = []
for option, optdict in self.options:
try:
gdef = (optdict['group'].upper(), '')
except KeyError:
continue
if not gdef in self.option_groups:
self.option_groups.append(gdef)
self.register_options_provider(self, own_group=0)
def register_options(self, options):
"""add some options to the configuration"""
options_by_group = {}
for optname, optdict in options:
options_by_group.setdefault(optdict.get('group', self.name.upper()), []).append((optname, optdict))
for group, options in options_by_group.items():
self.add_option_group(group, None, options, self)
self.options += tuple(options)
def load_defaults(self):
OptionsProviderMixIn.load_defaults(self)
def __iter__(self):
return iter(self.config.__dict__.iteritems())
def __getitem__(self, key):
try:
return getattr(self.config, self.option_name(key))
except (optparse.OptionValueError, AttributeError):
raise KeyError(key)
def __setitem__(self, key, value):
self.set_option(key, value)
def get(self, key, default=None):
try:
return getattr(self.config, self.option_name(key))
except (OptionError, AttributeError):
return default
class Configuration(ConfigurationMixIn):
"""class for simple configurations which don't need the
manager / providers model and prefer delegation to inheritance
configuration values are accessible through a dict like interface
"""
def __init__(self, config_file=None, options=None, name=None,
usage=None, doc=None, version=None):
if options is not None:
self.options = options
if name is not None:
self.name = name
if doc is not None:
self.__doc__ = doc
super(Configuration, self).__init__(config_file=config_file, usage=usage, version=version)
class OptionsManager2ConfigurationAdapter(object):
"""Adapt an option manager to behave like a
`logilab.common.configuration.Configuration` instance
"""
def __init__(self, provider):
self.config = provider
def __getattr__(self, key):
return getattr(self.config, key)
def __getitem__(self, key):
provider = self.config._all_options[key]
try:
return getattr(provider.config, provider.option_name(key))
except AttributeError:
raise KeyError(key)
def __setitem__(self, key, value):
self.config.global_set_option(self.config.option_name(key), value)
def get(self, key, default=None):
provider = self.config._all_options[key]
try:
return getattr(provider.config, provider.option_name(key))
except AttributeError:
return default
def read_old_config(newconfig, changes, configfile):
"""initialize newconfig from a deprecated configuration file
possible changes:
* ('renamed', oldname, newname)
* ('moved', option, oldgroup, newgroup)
* ('typechanged', option, oldtype, newvalue)
"""
# build an index of changes
changesindex = {}
for action in changes:
if action[0] == 'moved':
option, oldgroup, newgroup = action[1:]
changesindex.setdefault(option, []).append((action[0], oldgroup, newgroup))
continue
if action[0] == 'renamed':
oldname, newname = action[1:]
changesindex.setdefault(newname, []).append((action[0], oldname))
continue
if action[0] == 'typechanged':
option, oldtype, newvalue = action[1:]
changesindex.setdefault(option, []).append((action[0], oldtype, newvalue))
continue
if action[1] in ('added', 'removed'):
continue # nothing to do here
raise Exception('unknown change %s' % action[0])
# build a config object able to read the old config
options = []
for optname, optdef in newconfig.options:
for action in changesindex.pop(optname, ()):
if action[0] == 'moved':
oldgroup, newgroup = action[1:]
optdef = optdef.copy()
optdef['group'] = oldgroup
elif action[0] == 'renamed':
optname = action[1]
elif action[0] == 'typechanged':
oldtype = action[1]
optdef = optdef.copy()
optdef['type'] = oldtype
options.append((optname, optdef))
if changesindex:
raise Exception('unapplied changes: %s' % changesindex)
oldconfig = Configuration(options=options, name=newconfig.name)
# read the old config
oldconfig.load_file_configuration(configfile)
# apply values reverting changes
changes.reverse()
done = set()
for action in changes:
if action[0] == 'renamed':
oldname, newname = action[1:]
newconfig[newname] = oldconfig[oldname]
done.add(newname)
elif action[0] == 'typechanged':
optname, oldtype, newvalue = action[1:]
newconfig[optname] = newvalue
done.add(optname)
for optname, optdef in newconfig.options:
if optdef.get('type') and not optname in done:
newconfig.set_option(optname, oldconfig[optname], optdict=optdef)
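# Illustrative note (not part of the original module): a possible `changes`
# list for read_old_config(), using made-up option names:
#
#   changes = [('renamed', 'old-name', 'new-name'),
#              ('moved', 'some-option', 'OLDGROUP', 'NEWGROUP'),
#              ('typechanged', 'threshold', 'int', 42)]
#   read_old_config(newconfig, changes, 'old_settings.ini')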
def merge_options(options):
"""preprocess options to remove duplicate"""
alloptions = {}
options = list(options)
for i in range(len(options)-1, -1, -1):
optname, optdict = options[i]
if optname in alloptions:
options.pop(i)
alloptions[optname].update(optdict)
else:
alloptions[optname] = optdict
return tuple(options)
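# Illustrative note (not part of the original module): when the same option is
# declared twice, merge_options() keeps the last occurrence and folds the
# earlier definitions into its dictionary:
#
#   merge_options([('a', {'short': 'a'}), ('b', {}), ('a', {'help': 'doc'})])
#   # -> (('b', {}), ('a', {'help': 'doc', 'short': 'a'}))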
|
|
# coding: utf-8
import json
import os
import re
from datetime import datetime, date, time, timedelta
from bson import json_util
from django.conf import settings
from django.core.files.base import File, ContentFile
from django.core.files.temp import NamedTemporaryFile
from django.core.files.storage import get_storage_class
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.utils.text import slugify
from openpyxl.utils.datetime import to_excel, time_to_days, timedelta_to_days
from openpyxl.workbook import Workbook
from pyxform.constants import SELECT_ALL_THAT_APPLY
from pyxform.question import Question
from pyxform.section import Section, RepeatingSection
from onadata.apps.logger.models import Attachment, Instance, XForm
from onadata.apps.viewer.models.export import Export
from onadata.apps.api.mongo_helper import MongoHelper
from onadata.libs.utils.viewer_tools import create_attachments_zipfile
from onadata.libs.utils.common_tags import (
ID,
XFORM_ID_STRING,
STATUS,
ATTACHMENTS,
GEOLOCATION,
DELETEDAT,
USERFORM_ID,
INDEX,
PARENT_INDEX,
PARENT_TABLE_NAME,
SUBMISSION_TIME,
UUID,
TAGS,
NOTES
)
# this is Mongo Collection where we will store the parsed submissions
xform_instances = settings.MONGO_DB.instances
QUESTION_TYPES_TO_EXCLUDE = [
'note',
]
GEOPOINT_BIND_TYPE = "geopoint"
def to_str(row, key, encode_dates=False, empty_on_none=True):
val = row.get(key)
if empty_on_none and val is None:
return ''
if encode_dates and isinstance(val, datetime):
return val.strftime('%Y-%m-%dT%H:%M:%S%z')
if encode_dates and isinstance(val, date):
return val.strftime('%Y-%m-%d')
if isinstance(val, bytes):
return val.decode()
if not isinstance(val, str):
return str(val)
return val
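# Hedged examples of the coercion above (the rows are made up):
#   to_str({'age': 7}, 'age')                          -> '7'
#   to_str({'note': None}, 'note')                     -> ''
#   to_str({'when': date(2020, 1, 2)}, 'when', True)   -> '2020-01-02'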
def question_types_to_exclude(_type):
return _type in QUESTION_TYPES_TO_EXCLUDE
class DictOrganizer:
def set_dict_iterator(self, dict_iterator):
self._dict_iterator = dict_iterator
# Every section will get its own table
# I need to think of an easy way to flatten out a dictionary
# parent name, index, table name, data
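    # Hedged illustration of the flattening (keys are hypothetical): a nested
    # submission such as {"survey": {"name": "s1", "kids": [{"age": 2}]}}
    # comes back from get_observation_from_dict as one table per section:
    #   {"survey": [{"_parent_table_name": "", "_parent_index": -1,
    #                "name": "s1", "_index": 0}],
    #    "kids":   [{"_parent_table_name": "survey", "_parent_index": 0,
    #                "age": 2, "_index": 0}]}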
def _build_obs_from_dict(self, d, obs, table_name,
parent_table_name, parent_index):
if table_name not in obs:
obs[table_name] = []
this_index = len(obs[table_name])
obs[table_name].append({
"_parent_table_name": parent_table_name,
"_parent_index": parent_index,
})
for k, v in d.items():
if type(v) != dict and type(v) != list:
assert k not in obs[table_name][-1]
obs[table_name][-1][k] = v
obs[table_name][-1]["_index"] = this_index
for k, v in d.items():
if type(v) == dict:
kwargs = {
"d": v,
"obs": obs,
"table_name": k,
"parent_table_name": table_name,
"parent_index": this_index
}
self._build_obs_from_dict(**kwargs)
if type(v) == list:
for i, item in enumerate(v):
kwargs = {
"d": item,
"obs": obs,
"table_name": k,
"parent_table_name": table_name,
"parent_index": this_index,
}
self._build_obs_from_dict(**kwargs)
return obs
def get_observation_from_dict(self, d):
result = {}
assert len(d.keys()) == 1
root_name = list(d)[0]
kwargs = {
"d": d[root_name],
"obs": result,
"table_name": root_name,
"parent_table_name": "",
"parent_index": -1,
}
self._build_obs_from_dict(**kwargs)
return result
def dict_to_joined_export(data, index, indices, name):
"""
Converts a dict into one or more tabular datasets
"""
output = {}
# TODO: test for _geolocation and attachment lists
if isinstance(data, dict):
for key, val in data.items():
if isinstance(val, list) and key not in [NOTES, TAGS]:
output[key] = []
for child in val:
if key not in indices:
indices[key] = 0
indices[key] += 1
child_index = indices[key]
new_output = dict_to_joined_export(
child, child_index, indices, key)
d = {INDEX: child_index, PARENT_INDEX: index,
PARENT_TABLE_NAME: name}
# iterate over keys within new_output and append to
# main output
for out_key, out_val in new_output.items():
if isinstance(out_val, list):
if out_key not in output:
output[out_key] = []
output[out_key].extend(out_val)
else:
d.update(out_val)
output[key].append(d)
else:
if name not in output:
output[name] = {}
if key in [TAGS]:
output[name][key] = ",".join(val)
elif key in [NOTES]:
output[name][key] = "\r\n".join(
[v['note'] for v in val])
else:
output[name][key] = val
return output
class ExportBuilder:
IGNORED_COLUMNS = [
XFORM_ID_STRING,
STATUS,
ATTACHMENTS,
GEOLOCATION,
DELETEDAT, # no longer used but may persist in old submissions
]
    # fields we export that are not part of the form's structure
EXTRA_FIELDS = [ID, UUID, SUBMISSION_TIME, INDEX, PARENT_TABLE_NAME,
PARENT_INDEX, TAGS, NOTES]
SPLIT_SELECT_MULTIPLES = True
BINARY_SELECT_MULTIPLES = False
# column group delimiters
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
TYPES_TO_CONVERT = ['int', 'decimal', 'date'] # , 'dateTime']
CONVERT_FUNCS = {
'int': lambda x: int(x),
'decimal': lambda x: float(x),
'date': lambda x: ExportBuilder.string_to_date_with_xls_validation(x),
'dateTime': lambda x: datetime.strptime(x[:19], '%Y-%m-%dT%H:%M:%S')
}
XLS_SHEET_NAME_MAX_CHARS = 31
@classmethod
def string_to_date_with_xls_validation(cls, date_str):
date_obj = datetime.strptime(date_str, '%Y-%m-%d').date()
try:
            # openpyxl 2.0.5 exposed SharedDate().datetime_to_julian(date_obj);
            # that class is gone from recent openpyxl, so the helpers below are
            # used to perform the same date validation instead.
if isinstance(date_obj, datetime):
to_excel(date_obj)
elif isinstance(date_obj, date):
to_excel(date_obj)
elif isinstance(date_obj, time):
time_to_days(date_obj)
elif isinstance(date_obj, timedelta):
timedelta_to_days(date_obj)
except ValueError:
return date_str
else:
return date_obj
@classmethod
def format_field_title(cls, abbreviated_xpath, field_delimiter):
if field_delimiter != '/':
return field_delimiter.join(abbreviated_xpath.split('/'))
return abbreviated_xpath
def set_survey(self, survey):
# TODO resolve circular import
from onadata.apps.viewer.models.data_dictionary import\
DataDictionary
def build_sections(
current_section, survey_element, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter='/'):
for child in survey_element.children:
current_section_name = current_section['name']
                # if it is a section, recurse
if isinstance(child, Section):
                    # if it's repeating, build a new section
if isinstance(child, RepeatingSection):
# section_name in recursive call changes
section = {
'name': child.get_abbreviated_xpath(),
'elements': []}
self.sections.append(section)
build_sections(
section, child, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter)
else:
                        # it's a group, recurse using the same section
build_sections(
current_section, child, sections, select_multiples,
gps_fields, encoded_fields, field_delimiter)
elif isinstance(child, Question) and child.bind.get("type")\
not in QUESTION_TYPES_TO_EXCLUDE:
# add to survey_sections
if isinstance(child, Question):
child_xpath = child.get_abbreviated_xpath()
current_section['elements'].append({
'title': ExportBuilder.format_field_title(
child.get_abbreviated_xpath(),
field_delimiter),
'xpath': child_xpath,
'type': child.bind.get("type")
})
if MongoHelper.is_attribute_invalid(child_xpath):
if current_section_name not in encoded_fields:
encoded_fields[current_section_name] = {}
encoded_fields[current_section_name].update(
{child_xpath: MongoHelper.encode(child_xpath)})
                        # if it's a select multiple, make columns out of its choices
if child.type == SELECT_ALL_THAT_APPLY\
and self.SPLIT_SELECT_MULTIPLES:
for c in child.children:
_xpath = c.get_abbreviated_xpath()
_title = ExportBuilder.format_field_title(
_xpath, field_delimiter)
choice = {
'title': _title,
'xpath': _xpath,
'type': 'string'
}
if choice not in current_section['elements']:
current_section['elements'].append(choice)
_append_xpaths_to_section(
current_section_name, select_multiples,
child.get_abbreviated_xpath(),
[c.get_abbreviated_xpath()
for c in child.children])
# split gps fields within this section
if child.bind.get("type") == GEOPOINT_BIND_TYPE:
# add columns for geopoint components
xpaths = DataDictionary.get_additional_geopoint_xpaths(
child.get_abbreviated_xpath())
current_section['elements'].extend(
[
{
'title': ExportBuilder.format_field_title(
xpath, field_delimiter),
'xpath': xpath,
'type': 'decimal'
}
for xpath in xpaths
])
_append_xpaths_to_section(
current_section_name, gps_fields,
child.get_abbreviated_xpath(), xpaths)
def _append_xpaths_to_section(current_section_name, field_list, xpath,
xpaths):
if current_section_name not in field_list:
field_list[current_section_name] = {}
            field_list[current_section_name][xpath] = xpaths
self.survey = survey
self.select_multiples = {}
self.gps_fields = {}
self.encoded_fields = {}
main_section = {'name': survey.name, 'elements': []}
self.sections = [main_section]
build_sections(
main_section, self.survey, self.sections,
self.select_multiples, self.gps_fields, self.encoded_fields,
self.GROUP_DELIMITER)
def section_by_name(self, name):
matches = [s for s in self.sections if s['name'] == name]
        assert len(matches) == 1
return matches[0]
@classmethod
def split_select_multiples(cls, row, select_multiples):
# for each select_multiple, get the associated data and split it
for xpath, choices in select_multiples.items():
# get the data matching this xpath
data = row.get(xpath)
selections = []
if data:
selections = [
'{0}/{1}'.format(
xpath, selection) for selection in data.split()]
if not cls.BINARY_SELECT_MULTIPLES:
row.update(dict(
[(choice, choice in selections if selections else None)
for choice in choices]))
else:
YES = 1
NO = 0
row.update(dict(
[(choice, YES if choice in selections else NO)
for choice in choices]))
return row
@classmethod
def split_gps_components(cls, row, gps_fields):
# for each gps_field, get associated data and split it
for xpath, gps_components in gps_fields.items():
data = row.get(xpath)
if data:
gps_parts = data.split()
if len(gps_parts) > 0:
row.update(zip(gps_components, gps_parts))
return row
@classmethod
def decode_mongo_encoded_fields(cls, row, encoded_fields):
for xpath, encoded_xpath in encoded_fields.items():
if row.get(encoded_xpath):
val = row.pop(encoded_xpath)
row.update({xpath: val})
return row
@classmethod
def decode_mongo_encoded_section_names(cls, data):
return dict([(MongoHelper.decode(k), v) for k, v in data.items()])
@classmethod
def convert_type(cls, value, data_type):
"""
Convert data to its native type e.g. string '1' to int 1
@param value: the string value to convert
@param data_type: the native data type to convert to
@return: the converted value
"""
func = ExportBuilder.CONVERT_FUNCS.get(data_type, lambda x: x)
try:
return func(value)
except ValueError:
return value
def pre_process_row(self, row, section):
"""
Split select multiples, gps and decode . and $
"""
section_name = section['name']
# first decode fields so that subsequent lookups
# have decoded field names
if section_name in self.encoded_fields:
row = ExportBuilder.decode_mongo_encoded_fields(
row, self.encoded_fields[section_name])
if self.SPLIT_SELECT_MULTIPLES and\
section_name in self.select_multiples:
row = ExportBuilder.split_select_multiples(
row, self.select_multiples[section_name])
if section_name in self.gps_fields:
row = ExportBuilder.split_gps_components(
row, self.gps_fields[section_name])
# convert to native types
for elm in section['elements']:
            # only convert if it's in our list and it's not empty, just to
            # optimize
value = row.get(elm['xpath'])
if elm['type'] in ExportBuilder.TYPES_TO_CONVERT\
and value is not None and value != '':
row[elm['xpath']] = ExportBuilder.convert_type(
value, elm['type'])
return row
@classmethod
def get_valid_sheet_name(cls, desired_name, existing_names):
        # a sheet name has to be <= 31 characters and not a duplicate of an
        # existing sheet
        # truncate the sheet name to XLS_SHEET_NAME_MAX_CHARS
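        # Hedged example: a 40-character name is cut to 31 characters; if that
        # truncated name already exists it becomes the first 30 characters
        # plus "1", then "2", and so on until it no longer collides.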
new_sheet_name = \
desired_name[:cls.XLS_SHEET_NAME_MAX_CHARS]
        # make sure it's unique within the list
i = 1
generated_name = new_sheet_name
while generated_name in existing_names:
digit_length = len(str(i))
allowed_name_len = cls.XLS_SHEET_NAME_MAX_CHARS - \
digit_length
# make name the required len
if len(generated_name) > allowed_name_len:
generated_name = generated_name[:allowed_name_len]
generated_name = "{0}{1}".format(generated_name, i)
i += 1
return generated_name
def to_xls_export(self, path, data, *args):
def write_row(data, work_sheet, fields, work_sheet_titles):
# update parent_table with the generated sheet's title
data[PARENT_TABLE_NAME] = work_sheet_titles.get(
data.get(PARENT_TABLE_NAME))
work_sheet.append([data.get(f) for f in fields])
wb = Workbook(write_only=True)
work_sheets = {}
# map of section_names to generated_names
work_sheet_titles = {}
for section in self.sections:
section_name = section['name']
work_sheet_title = ExportBuilder.get_valid_sheet_name(
"_".join(section_name.split("/")), list(work_sheet_titles.values()))
work_sheet_titles[section_name] = work_sheet_title
work_sheets[section_name] = wb.create_sheet(
title=work_sheet_title)
# write the headers
for section in self.sections:
section_name = section['name']
headers = [
element['title'] for element in
section['elements']] + self.EXTRA_FIELDS
# get the worksheet
ws = work_sheets[section_name]
ws.append(headers)
index = 1
indices = {}
survey_name = self.survey.name
for d in data:
joined_export = dict_to_joined_export(d, index, indices,
survey_name)
output = ExportBuilder.decode_mongo_encoded_section_names(
joined_export)
# attach meta fields (index, parent_index, parent_table)
# output has keys for every section
if survey_name not in output:
output[survey_name] = {}
output[survey_name][INDEX] = index
output[survey_name][PARENT_INDEX] = -1
for section in self.sections:
# get data for this section and write to xls
section_name = section['name']
fields = [
element['xpath'] for element in
section['elements']] + self.EXTRA_FIELDS
ws = work_sheets[section_name]
# section might not exist within the output, e.g. data was
# not provided for said repeat - write test to check this
row = output.get(section_name, None)
if type(row) == dict:
write_row(
self.pre_process_row(row, section),
ws, fields, work_sheet_titles)
elif type(row) == list:
for child_row in row:
write_row(
self.pre_process_row(child_row, section),
ws, fields, work_sheet_titles)
index += 1
wb.save(filename=path)
def to_flat_csv_export(
self, path, data, username, id_string, filter_query):
# TODO resolve circular import
from onadata.apps.viewer.pandas_mongo_bridge import\
CSVDataFrameBuilder
csv_builder = CSVDataFrameBuilder(
username, id_string, filter_query, self.GROUP_DELIMITER,
self.SPLIT_SELECT_MULTIPLES, self.BINARY_SELECT_MULTIPLES)
csv_builder.export_to(path)
def dict_to_flat_export(d, parent_index=0):
pass
def generate_export(export_type, extension, username, id_string,
export_id=None, filter_query=None, group_delimiter='/',
split_select_multiples=True,
binary_select_multiples=False):
"""
Create appropriate export object given the export type
"""
export_type_func_map = {
Export.XLS_EXPORT: 'to_xls_export',
Export.CSV_EXPORT: 'to_flat_csv_export',
}
xform = XForm.objects.get(
user__username__iexact=username, id_string__exact=id_string)
# query mongo for the cursor
records = query_mongo(username, id_string, filter_query)
export_builder = ExportBuilder()
export_builder.GROUP_DELIMITER = group_delimiter
export_builder.SPLIT_SELECT_MULTIPLES = split_select_multiples
export_builder.BINARY_SELECT_MULTIPLES = binary_select_multiples
export_builder.set_survey(xform.data_dictionary().survey)
prefix = slugify('{}_export__{}__{}'.format(export_type, username, id_string))
temp_file = NamedTemporaryFile(prefix=prefix, suffix=("." + extension))
# get the export function by export type
func = getattr(export_builder, export_type_func_map[export_type])
    func(temp_file.name, records, username, id_string, filter_query)
# generate filename
basename = "%s_%s" % (
id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
# check filename is unique
while not Export.is_filename_unique(xform, filename):
filename = increment_index_in_filename(filename)
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
# TODO: if s3 storage, make private - how will we protect local storage??
storage = get_storage_class()()
# seek to the beginning as required by storage classes
temp_file.seek(0)
export_filename = storage.save(
file_path,
File(temp_file, file_path))
temp_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
if export_id:
export = Export.objects.get(id=export_id)
else:
export = Export(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
    # don't persist exports that have a filter
if filter_query is None:
export.save()
return export
def query_mongo(username, id_string, query=None):
query = json.loads(query, object_hook=json_util.object_hook)\
if query else {}
query = MongoHelper.to_safe_dict(query)
query[USERFORM_ID] = '{0}_{1}'.format(username, id_string)
return xform_instances.find(query, max_time_ms=settings.MONGO_DB_MAX_TIME_MS)
def should_create_new_export(xform, export_type):
if (
not Export.objects.filter(xform=xform, export_type=export_type).exists()
or Export.exports_outdated(xform, export_type=export_type)
):
return True
return False
def newset_export_for(xform, export_type):
"""
Make sure you check that an export exists before calling this,
it will a DoesNotExist exception otherwise
"""
return Export.objects.filter(xform=xform, export_type=export_type)\
.latest('created_on')
def increment_index_in_filename(filename):
"""
filename should be in the form file.ext or file-2.ext - we check for the
dash and index and increment appropriately
"""
# check for an index i.e. dash then number then dot extension
regex = re.compile(r"(.+?)\-(\d+)(\..+)")
match = regex.match(filename)
if match:
basename = match.groups()[0]
index = int(match.groups()[1]) + 1
ext = match.groups()[2]
else:
index = 1
# split filename from ext
basename, ext = os.path.splitext(filename)
new_filename = "%s-%d%s" % (basename, index, ext)
return new_filename
def generate_attachments_zip_export(
export_type, extension, username, id_string, export_id=None,
filter_query=None):
xform = XForm.objects.get(user__username=username, id_string=id_string)
attachments = Attachment.objects.filter(instance__xform=xform)
basename = "%s_%s" % (id_string,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
export_filename = get_storage_class()().save(file_path, ContentFile(''))
with get_storage_class()().open(export_filename, 'wb') as destination_file:
create_attachments_zipfile(
attachments,
output_file=destination_file,
)
dir_name, basename = os.path.split(export_filename)
# get or create export object
    if export_id:
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
export.save()
return export
def generate_kml_export(
export_type, extension, username, id_string, export_id=None,
filter_query=None):
user = User.objects.get(username=username)
xform = XForm.objects.get(user__username=username, id_string=id_string)
response = render_to_response(
'survey.kml', {'data': kml_export_data(id_string, user)})
basename = "%s_%s" % (id_string,
datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + "." + extension
file_path = os.path.join(
username,
'exports',
id_string,
export_type,
filename)
storage = get_storage_class()()
temp_file = NamedTemporaryFile(suffix=extension)
temp_file.write(response.content)
temp_file.seek(0)
export_filename = storage.save(
file_path,
File(temp_file, file_path))
temp_file.close()
dir_name, basename = os.path.split(export_filename)
# get or create export object
    if export_id:
export = Export.objects.get(id=export_id)
else:
export = Export.objects.create(xform=xform, export_type=export_type)
export.filedir = dir_name
export.filename = basename
export.internal_status = Export.SUCCESSFUL
export.save()
return export
def kml_export_data(id_string, user):
# TODO resolve circular import
from onadata.apps.viewer.models.data_dictionary import DataDictionary
dd = DataDictionary.objects.get(id_string=id_string, user=user)
instances = Instance.objects.filter(
xform__user=user,
xform__id_string=id_string,
geom__isnull=False
).order_by('id')
data_for_template = []
for instance in instances:
point = instance.point
if point:
data_for_template.append({
'name': id_string,
'id': instance.uuid,
'lat': point.y,
'lng': point.x,
})
return data_for_template
|
|
from serializer import Serializer
from collections import deque
serializer = Serializer()
# general errors -ERR
wrong_arguements = 'wrong number of arguments for \'%s\' command'
not_integer_or_out_range = 'value is not an integer or out of range'
not_float_or_out_range = 'value is not a float or out of range'
# specific errors -WRONGTYPE
wrong_types = 'Operation against a key holding the wrong kind of value'
# TODO: use python dict for hash table, manage memory myself? Let's see how far we can go
pydis_table = {}
def echo(command):
if len(command) != 2:
return serializer.error(wrong_arguements % command[0])
return serializer.string(command[1])
def ping(command):
if len(command) > 2:
return serializer.error(wrong_arguements % command[0])
if len(command) == 1:
return serializer.string('PONG')
return serializer.string(command[1])
def get(command):
if len(command) != 2:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table:
return serializer.string('nil')
return serializer.string(pydis_table[command[1]])
def getset(command):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
ret = 'nil' if command[1] not in pydis_table else pydis_table[command[1]]
pydis_table[command[1]] = command[2]
return serializer.string(ret)
def getrange(command):
if len(command) != 4:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table:
return serializer.string("")
try:
start, end = int(command[2]), int(command[3])
except ValueError:
return serializer.error(not_integer_or_out_range)
if end == -1:
return serializer.string(pydis_table[command[1]][start:])
return serializer.string(pydis_table[command[1]][start:end + 1])
def set(command):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
pydis_table[command[1]] = command[2]
return serializer.string('OK')
def incr(command):
if len(command) != 2:
return serializer.error(wrong_arguements % command[0])
command.append("1")
return incrby(command)
def incrby(command):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table:
pydis_table[command[1]] = "0"
try:
val = int(pydis_table[command[1]]) + int(command[2])
pydis_table[command[1]] = str(val)
return serializer.integer(val)
except ValueError:
return serializer.error(not_integer_or_out_range)
def incrbyfloat(command):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table:
pydis_table[command[1]] = "0.0"
try:
val = float(pydis_table[command[1]]) + float(command[2])
pydis_table[command[1]] = str(val)
return serializer.bulk_string(str(val))
except ValueError:
return serializer.error(not_float_or_out_range)
def decr(command):
if len(command) != 2:
return serializer.error(wrong_arguements % command[0])
command.append("1")
return decrby(command)
def decrby(command):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table:
pydis_table[command[1]] = "0"
try:
val = int(pydis_table[command[1]]) - int(command[2])
pydis_table[command[1]] = str(val)
return serializer.integer(val)
except ValueError:
return serializer.error(not_integer_or_out_range)
def exists(command):
if len(command) < 2:
return serializer.error(wrong_arguements % command[0])
ret = 0
for e in command[1:]:
if e in pydis_table:
ret += 1
return serializer.integer(ret)
def append(command):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
val = pydis_table[command[1]] if command[1] in pydis_table else ''
val += command[2]
pydis_table[command[1]] = val
return serializer.integer(len(val))
def push_helper(command, to_left):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
val = pydis_table[command[1]] if command[1] in pydis_table else deque()
if to_left:
val.appendleft(command[2])
else:
val.append(command[2])
pydis_table[command[1]] = val
return serializer.integer(len(val))
def lpush(command):
return push_helper(command, True)
def rpush(command):
return push_helper(command, False)
def pushx_helper(command, to_left):
if len(command) != 3:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table or type(pydis_table[command[1]]) is not deque:
return serializer.integer(0)
if to_left:
pydis_table[command[1]].appendleft(command[2])
else:
pydis_table[command[1]].append(command[2])
return serializer.integer(len(pydis_table[command[1]]))
def lpushx(command):
return pushx_helper(command, True)
def rpushx(command):
return pushx_helper(command, False)
def llen(command):
if len(command) != 2:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table:
return serializer.integer(0)
if type(pydis_table[command[1]]) is not deque:
return serializer.wrong_type(wrong_types)
return serializer.integer(len(pydis_table[command[1]]))
def pop_helper(command, to_left):
if len(command) != 2:
return serializer.error(wrong_arguements % command[0])
if command[1] not in pydis_table or len(pydis_table[command[1]]) == 0:
return serializer.null_string()
    # pop from the requested end and return the element that was removed
    if to_left:
        ret = pydis_table[command[1]].popleft()
    else:
        ret = pydis_table[command[1]].pop()
    return serializer.bulk_string(ret)
def lpop(command):
return pop_helper(command, True)
def rpop(command):
return pop_helper(command, False)
# TODO: list all commands and handlers here
lookup_table = {
'ECHO': echo,
'PING': ping,
'GET': get,
'GETSET': getset,
'GETRANGE': getrange,
'SET': set,
'INCR': incr,
'INCRBY': incrby,
'INCRBYFLOAT': incrbyfloat,
'DECR': decr,
'DECRBY': decrby,
'EXISTS': exists,
'APPEND': append,
'LPUSH': lpush,
'LPUSHX': lpushx,
'RPUSH': rpush,
'RPUSHX': rpushx,
'LLEN': llen,
'LPOP': lpop,
'RPOP': rpop,
}
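# Illustrative dispatch sketch; the command shape (a list of strings with the
# command name first) and the reply bytes depend on the external Serializer
# and the protocol parser, which live outside this module.
def _example_dispatch(command=None):
    command = command or ['SET', 'greeting', 'hello']
    handler = lookup_table.get(command[0].upper())
    if handler is None:
        return serializer.error("unknown command '%s'" % command[0])
    return handler(command)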
|
|
RESPONSE_PEER_GROUPS = [
"Marketing",
"usa",
"101",
"Program Manager",
"Channel Administrator",
"Chief Marketing Officer",
"",
"Chief Strategy Officer",
"CN=Andrew",
"BitLockerUsersComputers"
]
RESPONSE_USER_LABELS = [
"privileged_user",
"service_account"
]
RESPONSE_WATCHLISTS = [
{
"category": "UserLabels",
"title": "Executive Users",
"watchlistId": "1234"
},
{
"category": "UserLabels",
"title": "Service Accounts",
"watchlistId": "1111"
},
{
"category": "Users",
"title": "user watchlist",
"watchlistId": "2222"
},
{
"category": "PeerGroups",
"title": "VP Operations",
"watchlistId": "3333"
}
]
RESPONSE_ASSET_DATA = {
"asset": {
"assetType": "Windows",
"compromisedTime": 0,
"firstSeen": 1530627660000,
"hostName": "name",
"ipAddress": "1.2.3.4",
"lastSeen": 1538324597000
}
}
RESPONSE_SESSION_INFO = { 'sessionInfo': {
"numOfAssets": 29,
"riskScore": 0,
"numOfAccounts": 1,
"accounts": [],
"zones": [],
"endTime": "1591071360000",
"numOfZones": 5,
"startTime": "1591021860000",
"loginHost": "lt-dummy-888",
"sessionId": "dummy-20200601143100",
"numOfReasons": 0,
"label": "",
"username": "dummy",
"numOfSecurityEvents": 0,
"numOfEvents": 62,
"initialRiskScore": 0
}
}
RESPONSE_MODEL_DATA = {
"agingWindow": 32,
"alpha": 0.8,
"binWidth": None,
"category": "Other",
"convergenceFilter": "confidence_factor>=0.8",
"cutOff": 5,
"description": "Models which security groups users are being added to in the organization",
"disabled": "FALSE",
"feature": "group_name",
"featureName": "group_name",
"featureType": "group_name",
"histogramEventTypes": "member-added",
"iconName": None,
"maxNumberOfBins": 1000000,
"modelTemplate": "Account management, groups which users are being added to",
"modelType": "CATEGORICAL",
"name": "dummy",
"scopeType": "ORG",
"scopeValue": "org",
"trainIf": "TRUE"
}
RESPONSE_NOTABLE_ASSET_DATA = {
'assets': [{
'asset': {
'hostName': 'host',
'ipAddress': '1.1.1.1',
'assetType': 'test',
'firstSeen': 1591022160000,
'lastSeen': 1593820320000
},
'highestRiskScore': 150,
'highestRiskSequence': {
'id': '1111',
'entityName': 'asset',
'entityValue': 'test',
'day': 1593648000000,
'triggeredRuleCountOpt': 15,
'riskScoreOpt': 150.0
},
'latestAssetComment': {
'commentId': 'test1111',
'commentType': 'asset',
'commentObjectId': 'test',
'text': 'test',
'exaUser': 'test',
'exaUserFullname': '',
'createTime': 1612275291188,
'updateTime': 1612275291188,
'edited': False
}
}]
}
RESPONSE_NOTABLE_SESSION_DETAILS = {
'totalCount': 2, 'sessions': [
{'sessionId': 'session1', 'username': 'username1', 'startTime': 1593704040000,
'endTime': 1593727380000, 'initialRiskScore': 0, 'riskScore': 110, 'numOfReasons': 9,
'loginHost': 'host1', 'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
'zones': ['zone1', 'zone2'], 'numOfZones': 2, 'numOfAssets': 7, 'numOfEvents': 6,
'numOfSecurityEvents': 0},
{'sessionId': 'session2', 'username': 'username2', 'startTime': 1593682380000,
'endTime': 1593727260000, 'initialRiskScore': 26, 'riskScore': 313, 'numOfReasons': 39, 'loginHost': 'host2',
'label': '', 'accounts': ['account1', 'account2'], 'numOfAccounts': 2,
'zones': ['zone1', 'zone2', 'zone3', 'zone4'], 'numOfZones': 4,
'numOfAssets': 17, 'numOfEvents': 30, 'numOfSecurityEvents': 1, 'riskTransferScore': 126.0}],
'users': {
'username2': {'username': 'username2', 'riskScore': 313.18, 'averageRiskScore': 171.41,
'pastScores': [287.19, 218.36, 0.0, 0.0, 0.0, 0.0, 0.0], 'lastSessionId': 'session2',
'firstSeen': 1591021500000, 'lastSeen': 1593820320000, 'lastActivityType': 'Account is active',
'lastActivityTime': 1593818940000,
'info': {'location': 'us',
'photo': '',
'phoneCell': '1234567890',
'email': 'test@.com',
'employeeType': 'employee', 'fullName': 'user username2',
'departmentNumber': '000',
'dn': 'test',
'country': 'usa', 'division': 'division',
'department': 'department',
'manager': 'test',
'phoneOffice': '1234567890',
'employeeNumber': '1234',
'title': 'title',
'group': 'test'},
'labels': [],
'pendingRiskTransfers': []},
'mburgess': {'username': 'username1', 'riskScore': 109.73, 'averageRiskScore': 52.25,
'pastScores': [109.7382543963077], 'lastSessionId': 'session1',
'firstSeen': 1591025220000, 'lastSeen': 1593727380000, 'lastActivityType': 'Account is active',
'lastActivityTime': 1593704040000,
'info': {'location': 'us',
'photo': '',
'phoneCell': '1234567890',
'email': 'test@.com',
'employeeType': 'employee',
'fullName': 'user username1', 'departmentNumber': '000',
'dn': 'test',
'country': 'usa', 'division': 'division',
'department': 'department',
'manager': 'test',
'phoneOffice': '1234567890',
'employeeNumber': '1234',
'title': 'title',
'group': 'test'}, 'labels': [],
'pendingRiskTransfers': []}},
'executiveUserFlags': {'username1': False, 'username2': False}
}
RESPONSE_NOTABLE_SEQUENCE_DETAILS = [{
'sequenceId': 'ID',
'isWhitelisted': False,
'areAllTriggeredRulesWhiteListed': False,
'sequenceInfo': {
'startTime': 1593648000000,
'endTime': 1593734399999,
'riskScore': 150,
'numOfReasons': 8,
'numOfEvents': 18,
'numOfUsers': 4,
'numOfSecurityEvents': 0,
'numOfZones': 3,
'numOfAssets': 8,
'sequenceId': 'ID',
'assetId': 'ID'},
'hasBeenPartiallyWhiteListed': False
}]
RESPONSE_NOTABLE_SEQUENCE_EVENTS = [{
'eventType': 'type1',
'displayName': 'dn1',
'count': 1},
{'eventType': 'type2',
'displayName': 'dn2',
'count': 1},
{'eventType': 'type3',
'displayName': 'dn3',
'count': 1},
{'eventType': 'type4',
'displayName': 'dn4',
'count': 1},
{'eventType': 'type5',
'displayName': 'dn5',
'count': 2},
{'eventType': 'type6',
'displayName': 'dn6',
'count': 2},
{'eventType': 'type7',
'displayName': 'dn7',
'count': 8},
{'eventType': 'type8',
'displayName': 'dn8',
'count': 1},
{'eventType': 'type9',
'displayName': 'dn9',
'count': 1}
]
DELETE_RECORD_RESPONSE = {'sessionId': '56a5b19a-4193-4616-9978-0bbabb1e2d60',
'recordChanges': [{
'changeType': 'removed',
'changeId': '4aad5392-20e7-4423-abcb-a9680c566215',
'record': {'key': '', 'id': 'test_key'}
}],
'metadata': {'createdSize': 0, 'updatedSize': 0, 'removedSize': 1, 'duplicates': []}}
|